diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java index 15b468a92b..a439e58fff 100644 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java +++ b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java @@ -145,7 +145,11 @@ public BigQueryWriteStub getStub() { // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates a write stream to the given table. + * Creates a write stream to the given table. Additionally, every table has a special COMMITTED + * stream named '_default' to which data can be written. This stream doesn't need to be created + * using CreateWriteStream. It is a stream that can be used simultaneously by any number of + * clients. Data written to this stream is considered committed as soon as an acknowledgement is + * received. * *

Sample code: * @@ -174,7 +178,11 @@ public final Stream.WriteStream createWriteStream( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates a write stream to the given table. + * Creates a write stream to the given table. Additionally, every table has a special COMMITTED + * stream named '_default' to which data can be written. This stream doesn't need to be created + * using CreateWriteStream. It is a stream that can be used simultaneously by any number of + * clients. Data written to this stream is considered committed as soon as an acknowledgement is + * received. * *

Sample code: * @@ -202,7 +210,11 @@ public final Stream.WriteStream createWriteStream(String parent, Stream.WriteStr // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates a write stream to the given table. + * Creates a write stream to the given table. Additionally, every table has a special COMMITTED + * stream named '_default' to which data can be written. This stream doesn't need to be created + * using CreateWriteStream. It is a stream that can be used simultaneously by any number of + * clients. Data written to this stream is considered committed as soon as an acknowledgement is + * received. * *

Sample code: * @@ -225,7 +237,11 @@ public final Stream.WriteStream createWriteStream(Storage.CreateWriteStreamReque // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Creates a write stream to the given table. + * Creates a write stream to the given table. Additionally, every table has a special COMMITTED + * stream named '_default' to which data can be written. This stream doesn't need to be created + * using CreateWriteStream. It is a stream that can be used simultaneously by any number of + * clients. Data written to this stream is considered committed as soon as an acknowledgement is + * received. * *

Sample code: * @@ -390,7 +406,8 @@ public final Stream.WriteStream getWriteStream(Storage.GetWriteStreamRequest req // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Finalize a write stream so that no new data can be appended to the stream. + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. * *

Sample code: * @@ -415,7 +432,8 @@ public final Storage.FinalizeWriteStreamResponse finalizeWriteStream(WriteStream // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Finalize a write stream so that no new data can be appended to the stream. + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. * *

Sample code: * @@ -438,7 +456,8 @@ public final Storage.FinalizeWriteStreamResponse finalizeWriteStream(String name // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Finalize a write stream so that no new data can be appended to the stream. + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. * *

Sample code: * @@ -464,7 +483,8 @@ public final Storage.FinalizeWriteStreamResponse finalizeWriteStream( // AUTO-GENERATED DOCUMENTATION AND METHOD. /** - * Finalize a write stream so that no new data can be appended to the stream. + * Finalize a write stream so that no new data can be appended to the stream. Finalize is not + * supported on the '_default' stream. * *

Sample code: * @@ -603,7 +623,7 @@ public final Storage.BatchCommitWriteStreamsResponse batchCommitWriteStreams( * Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush * operation is required in order for the rows to become available for reading. A Flush operation * flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in - * the request. + * the request. Flush is not supported on the _default stream, since it is not BUFFERED. * *

Sample code: * @@ -631,7 +651,7 @@ public final Storage.FlushRowsResponse flushRows(WriteStreamName writeStream) { * Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush * operation is required in order for the rows to become available for reading. A Flush operation * flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in - * the request. + * the request. Flush is not supported on the _default stream, since it is not BUFFERED. * *

Sample code: * @@ -657,7 +677,7 @@ public final Storage.FlushRowsResponse flushRows(String writeStream) { * Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush * operation is required in order for the rows to become available for reading. A Flush operation * flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in - * the request. + * the request. Flush is not supported on the _default stream, since it is not BUFFERED. * *

Sample code: * @@ -685,7 +705,7 @@ public final Storage.FlushRowsResponse flushRows(Storage.FlushRowsRequest reques * Flushes rows to a BUFFERED stream. If users are appending rows to BUFFERED stream, flush * operation is required in order for the rows to become available for reading. A Flush operation * flushes up to any previously flushed offset in a BUFFERED stream, to the offset specified in - * the request. + * the request. Flush is not supported on the _default stream, since it is not BUFFERED. * *

Sample code: * diff --git a/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java b/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java index 65bbb090a3..e075df92de 100644 --- a/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java +++ b/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java @@ -28,6 +28,7 @@ @javax.annotation.Generated( value = "by gRPC proto compiler", comments = "Source: google/cloud/bigquery/storage/v1alpha2/storage.proto") +@java.lang.Deprecated public final class BigQueryWriteGrpc { private BigQueryWriteGrpc() {} @@ -378,6 +379,7 @@ public BigQueryWriteFutureStub newStub( * The Write API can be used to write data to BigQuery. * */ + @java.lang.Deprecated public abstract static class BigQueryWriteImplBase implements io.grpc.BindableService { /** @@ -385,6 +387,11 @@ public abstract static class BigQueryWriteImplBase implements io.grpc.BindableSe * *

      * Creates a write stream to the given table.
+     * Additionally, every table has a special COMMITTED stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
      * 
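The `_default` stream described above is addressed purely by name; no CreateWriteStream call is involved. Below is a minimal sketch of building an append request that targets it (not part of this diff; the project, dataset, and table IDs are placeholders, and the rows/writer_schema fields are omitted for brevity):

```java
import com.google.cloud.bigquery.storage.v1alpha2.Storage;

public class DefaultStreamSketch {
  static Storage.AppendRowsRequest defaultStreamRequest() {
    // The special COMMITTED '_default' stream never has to be created.
    String defaultStream =
        "projects/my-project/datasets/my_dataset/tables/my_table/_default";
    return Storage.AppendRowsRequest.newBuilder()
        .setWriteStream(defaultStream)
        // No setOffset(...): offsets are not allowed on the '_default' stream.
        .build();
  }
}
```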
*/ public void createWriteStream( @@ -447,7 +454,7 @@ public void getWriteStream( * *
      * Finalize a write stream so that no new data can be appended to the
-     * stream.
+     * stream. Finalize is not supported on the '_default' stream.
      * 
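By contrast, finalize applies only to user-created streams. A hedged sketch using the wrapping client from this change (the stream path is a placeholder, and the row-count read assumes the response's `row_count` field):

```java
import com.google.cloud.bigquery.storage.v1alpha2.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1alpha2.Storage;

class FinalizeSketch {
  static long finalizeStream() throws Exception {
    try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
      // Per the note above, passing a '.../_default' name here is unsupported.
      Storage.FinalizeWriteStreamResponse response =
          client.finalizeWriteStream(
              "projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream");
      return response.getRowCount(); // Rows accepted before finalization.
    }
  }
}
```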
*/ public void finalizeWriteStream( @@ -488,6 +495,7 @@ public void batchCommitWriteStreams( * required in order for the rows to become available for reading. A * Flush operation flushes up to any previously flushed offset in a BUFFERED * stream, to the offset specified in the request. + * Flush is not supported on the _default stream, since it is not BUFFERED. * */ public void flushRows( @@ -557,6 +565,7 @@ public final io.grpc.ServerServiceDefinition bindService() { * The Write API can be used to write data to BigQuery. * */ + @java.lang.Deprecated public static final class BigQueryWriteStub extends io.grpc.stub.AbstractAsyncStub { private BigQueryWriteStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { @@ -573,6 +582,11 @@ protected BigQueryWriteStub build(io.grpc.Channel channel, io.grpc.CallOptions c * *
      * Creates a write stream to the given table.
+     * Additionally, every table has a special COMMITTED stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
      * 
*/ public void createWriteStream( @@ -639,7 +653,7 @@ public void getWriteStream( * *
      * Finalize a write stream so that no new data can be appended to the
-     * stream.
+     * stream. Finalize is not supported on the '_default' stream.
      * 
*/ public void finalizeWriteStream( @@ -684,6 +698,7 @@ public void batchCommitWriteStreams( * required in order for the rows to become available for reading. A * Flush operation flushes up to any previously flushed offset in a BUFFERED * stream, to the offset specified in the request. + * Flush is not supported on the _default stream, since it is not BUFFERED. * */ public void flushRows( @@ -704,6 +719,7 @@ public void flushRows( * The Write API can be used to write data to BigQuery. * */ + @java.lang.Deprecated public static final class BigQueryWriteBlockingStub extends io.grpc.stub.AbstractBlockingStub { private BigQueryWriteBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { @@ -721,6 +737,11 @@ protected BigQueryWriteBlockingStub build( * *
      * Creates a write stream to the given table.
+     * Additionally, every table has a special COMMITTED stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
      * 
*/ public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream createWriteStream( @@ -747,7 +768,7 @@ public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream getWriteStr * *
      * Finalize a write stream so that no new data can be appended to the
-     * stream.
+     * stream. Finalize is not supported on the '_default' stream.
      * 
*/ public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse @@ -785,6 +806,7 @@ public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream getWriteStr * required in order for the rows to become available for reading. A * Flush operation flushes up to any previously flushed offset in a BUFFERED * stream, to the offset specified in the request. + * Flush is not supported on the _default stream, since it is not BUFFERED. * */ public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse flushRows( @@ -802,6 +824,7 @@ public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse flus * The Write API can be used to write data to BigQuery. * */ + @java.lang.Deprecated public static final class BigQueryWriteFutureStub extends io.grpc.stub.AbstractFutureStub { private BigQueryWriteFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { @@ -819,6 +842,11 @@ protected BigQueryWriteFutureStub build( * *
      * Creates a write stream to the given table.
+     * Additionally, every table has a special COMMITTED stream named '_default'
+     * to which data can be written. This stream doesn't need to be created using
+     * CreateWriteStream. It is a stream that can be used simultaneously by any
+     * number of clients. Data written to this stream is considered committed as
+     * soon as an acknowledgement is received.
      * 
*/ public com.google.common.util.concurrent.ListenableFuture< @@ -849,7 +877,7 @@ protected BigQueryWriteFutureStub build( * *
      * Finalize a write stream so that no new data can be appended to the
-     * stream.
+     * stream. Finalize is not supported on the '_default' stream.
      * 
*/ public com.google.common.util.concurrent.ListenableFuture< @@ -889,6 +917,7 @@ protected BigQueryWriteFutureStub build( * required in order for the rows to become available for reading. A * Flush operation flushes up to any previously flushed offset in a BUFFERED * stream, to the offset specified in the request. + * Flush is not supported on the _default stream, since it is not BUFFERED. * */ public com.google.common.util.concurrent.ListenableFuture< diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java index 559510088f..e478e9aac2 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java @@ -1096,6 +1096,8 @@ public interface AppendRowsRequestOrBuilder * Required. The stream that is the target of the append operation. This value must be * specified for the initial request. If subsequent requests specify the * stream name, it must equal to the value provided in the first request. + * To write to the _default stream, populate this field with a string in the + * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`. * * * @@ -1112,6 +1114,8 @@ public interface AppendRowsRequestOrBuilder * Required. The stream that is the target of the append operation. This value must be * specified for the initial request. If subsequent requests specify the * stream name, it must equal to the value provided in the first request. + * To write to the _default stream, populate this field with a string in the + * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`. * * * @@ -1128,7 +1132,8 @@ public interface AppendRowsRequestOrBuilder *
      * Optional. If present, the write is only performed if the next append offset is same
      * as the provided value. If not present, the write is performed at the
-     * current end of stream.
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
      * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -1143,7 +1148,8 @@ public interface AppendRowsRequestOrBuilder *
      * Optional. If present, the write is only performed if the next append offset is same
      * as the provided value. If not present, the write is performed at the
-     * current end of stream.
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
      * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -1158,7 +1164,8 @@ public interface AppendRowsRequestOrBuilder *
      * Optional. If present, the write is only performed if the next append offset is same
      * as the provided value. If not present, the write is performed at the
-     * current end of stream.
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
      * 
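For streams other than '_default', the optional offset turns an append into a conditional, idempotent write. A sketch, with `streamName` and `nextOffset` as assumed placeholders:

```java
import com.google.cloud.bigquery.storage.v1alpha2.Storage;
import com.google.protobuf.Int64Value;

class OffsetSketch {
  static Storage.AppendRowsRequest appendAt(String streamName, long nextOffset) {
    // The append succeeds only if nextOffset is the current end of the
    // stream; on the '_default' stream this field must be left unset.
    return Storage.AppendRowsRequest.newBuilder()
        .setWriteStream(streamName)
        .setOffset(Int64Value.of(nextOffset))
        .build();
  }
}
```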
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -2535,6 +2542,8 @@ public RowsCase getRowsCase() { * Required. The stream that is the target of the append operation. This value must be * specified for the initial request. If subsequent requests specify the * stream name, it must equal to the value provided in the first request. + * To write to the _default stream, populate this field with a string in the + * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`. * * * @@ -2562,6 +2571,8 @@ public java.lang.String getWriteStream() { * Required. The stream that is the target of the append operation. This value must be * specified for the initial request. If subsequent requests specify the * stream name, it must equal to the value provided in the first request. + * To write to the _default stream, populate this field with a string in the + * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`. * * * @@ -2591,7 +2602,8 @@ public com.google.protobuf.ByteString getWriteStreamBytes() { *
      * Optional. If present, the write is only performed if the next append offset is same
      * as the provided value. If not present, the write is performed at the
-     * current end of stream.
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
      * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -2609,7 +2621,8 @@ public boolean hasOffset() { *
      * Optional. If present, the write is only performed if the next append offset is same
      * as the provided value. If not present, the write is performed at the
-     * current end of stream.
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
      * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -2627,7 +2640,8 @@ public com.google.protobuf.Int64Value getOffset() { *
      * Optional. If present, the write is only performed if the next append offset is same
      * as the provided value. If not present, the write is performed at the
-     * current end of stream.
+     * current end of stream. Specifying a value for this field is not allowed
+     * when calling AppendRows for the '_default' stream.
      * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -3157,6 +3171,8 @@ public Builder clearRows() { * Required. The stream that is the target of the append operation. This value must be * specified for the initial request. If subsequent requests specify the * stream name, it must equal to the value provided in the first request. + * To write to the _default stream, populate this field with a string in the + * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`. * * * @@ -3183,6 +3199,8 @@ public java.lang.String getWriteStream() { * Required. The stream that is the target of the append operation. This value must be * specified for the initial request. If subsequent requests specify the * stream name, it must equal to the value provided in the first request. + * To write to the _default stream, populate this field with a string in the + * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`. * * * @@ -3209,6 +3227,8 @@ public com.google.protobuf.ByteString getWriteStreamBytes() { * Required. The stream that is the target of the append operation. This value must be * specified for the initial request. If subsequent requests specify the * stream name, it must equal to the value provided in the first request. + * To write to the _default stream, populate this field with a string in the + * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`. * * * @@ -3234,6 +3254,8 @@ public Builder setWriteStream(java.lang.String value) { * Required. The stream that is the target of the append operation. This value must be * specified for the initial request. If subsequent requests specify the * stream name, it must equal to the value provided in the first request. + * To write to the _default stream, populate this field with a string in the + * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`. * * * @@ -3255,6 +3277,8 @@ public Builder clearWriteStream() { * Required. The stream that is the target of the append operation. This value must be * specified for the initial request. If subsequent requests specify the * stream name, it must equal to the value provided in the first request. + * To write to the _default stream, populate this field with a string in the + * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`. * * * @@ -3287,7 +3311,8 @@ public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) { *
        * Optional. If present, the write is only performed if the next append offset is same
        * as the provided value. If not present, the write is performed at the
-       * current end of stream.
+       * current end of stream. Specifying a value for this field is not allowed
+       * when calling AppendRows for the '_default' stream.
        * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -3304,7 +3329,8 @@ public boolean hasOffset() { *
        * Optional. If present, the write is only performed if the next append offset is same
        * as the provided value. If not present, the write is performed at the
-       * current end of stream.
+       * current end of stream. Specifying a value for this field is not allowed
+       * when calling AppendRows for the '_default' stream.
        * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -3325,7 +3351,8 @@ public com.google.protobuf.Int64Value getOffset() { *
        * Optional. If present, the write is only performed if the next append offset is same
        * as the provided value. If not present, the write is performed at the
-       * current end of stream.
+       * current end of stream. Specifying a value for this field is not allowed
+       * when calling AppendRows for the '_default' stream.
        * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -3350,7 +3377,8 @@ public Builder setOffset(com.google.protobuf.Int64Value value) { *
        * Optional. If present, the write is only performed if the next append offset is same
        * as the provided value. If not present, the write is performed at the
-       * current end of stream.
+       * current end of stream. Specifying a value for this field is not allowed
+       * when calling AppendRows for the '_default' stream.
        * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -3372,7 +3400,8 @@ public Builder setOffset(com.google.protobuf.Int64Value.Builder builderForValue) *
        * Optional. If present, the write is only performed if the next append offset is same
        * as the provided value. If not present, the write is performed at the
-       * current end of stream.
+       * current end of stream. Specifying a value for this field is not allowed
+       * when calling AppendRows for the '_default' stream.
        * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -3399,7 +3428,8 @@ public Builder mergeOffset(com.google.protobuf.Int64Value value) { *
        * Optional. If present, the write is only performed if the next append offset is same
        * as the provided value. If not present, the write is performed at the
-       * current end of stream.
+       * current end of stream. Specifying a value for this field is not allowed
+       * when calling AppendRows for the '_default' stream.
        * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -3422,7 +3452,8 @@ public Builder clearOffset() { *
        * Optional. If present, the write is only performed if the next append offset is same
        * as the provided value. If not present, the write is performed at the
-       * current end of stream.
+       * current end of stream. Specifying a value for this field is not allowed
+       * when calling AppendRows for the '_default' stream.
        * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -3439,7 +3470,8 @@ public com.google.protobuf.Int64Value.Builder getOffsetBuilder() { *
        * Optional. If present, the write is only performed if the next append offset is same
        * as the provided value. If not present, the write is performed at the
-       * current end of stream.
+       * current end of stream. Specifying a value for this field is not allowed
+       * when calling AppendRows for the '_default' stream.
        * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -3458,7 +3490,8 @@ public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { *
        * Optional. If present, the write is only performed if the next append offset is same
        * as the provided value. If not present, the write is performed at the
-       * current end of stream.
+       * current end of stream. Specifying a value for this field is not allowed
+       * when calling AppendRows for the '_default' stream.
        * 
* * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; @@ -3818,7 +3851,18 @@ public interface AppendRowsResponseOrBuilder * *
      * Error in case of append failure. If set, it means rows are not accepted
-     * into the system. Users can retry within the same connection.
+     * into the system. Users can retry or continue with other requests within
+     * the same connection.
+     * ALREADY_EXISTS: happens when an offset is specified; it means the row
+     *   was already appended, so it is safe to ignore this error.
+     * OUT_OF_RANGE: happens when an offset is specified; it means the
+     *   specified offset is beyond the end of the stream.
+     * INVALID_ARGUMENT: error caused by a malformed request or data.
+     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+     *   when appending without an offset.
+     * ABORTED: request processing is aborted because of prior failures; the
+     *   request can be retried once the previous failure is fixed.
+     * INTERNAL: server-side errors that can be retried.
      * 
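The per-code guidance above maps onto canonical gRPC status codes. One way a caller might act on it, as a sketch (the retry policy shown is illustrative, not prescribed by the API):

```java
import com.google.cloud.bigquery.storage.v1alpha2.Storage;
import io.grpc.Status;

class AppendErrorSketch {
  static boolean isRetryable(Storage.AppendRowsResponse response) {
    if (!response.hasError()) {
      return false; // The append was accepted; nothing to retry.
    }
    Status.Code code =
        Status.fromCodeValue(response.getError().getCode()).getCode();
    switch (code) {
      case ALREADY_EXISTS: // Row already appended at this offset; ignore.
        return false;
      case RESOURCE_EXHAUSTED: // Throttled; only seen on offset-less appends.
      case ABORTED: // Retry after the prior failure is fixed.
      case INTERNAL: // Retryable server-side error.
        return true;
      default: // OUT_OF_RANGE, INVALID_ARGUMENT, ...: fix the request first.
        return false;
    }
  }
}
```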
* * .google.rpc.Status error = 2; @@ -3831,7 +3875,18 @@ public interface AppendRowsResponseOrBuilder * *
      * Error in case of append failure. If set, it means rows are not accepted
-     * into the system. Users can retry within the same connection.
+     * into the system. Users can retry or continue with other requests within
+     * the same connection.
+     * ALREADY_EXISTS: happens when an offset is specified; it means the row
+     *   was already appended, so it is safe to ignore this error.
+     * OUT_OF_RANGE: happens when an offset is specified; it means the
+     *   specified offset is beyond the end of the stream.
+     * INVALID_ARGUMENT: error caused by a malformed request or data.
+     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+     *   when appending without an offset.
+     * ABORTED: request processing is aborted because of prior failures; the
+     *   request can be retried once the previous failure is fixed.
+     * INTERNAL: server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -3844,7 +3899,18 @@ public interface AppendRowsResponseOrBuilder * *
      * Error in case of append failure. If set, it means rows are not accepted
-     * into the system. Users can retry within the same connection.
+     * into the system. Users can retry or continue with other requests within
+     * the same connection.
+     * ALREADY_EXISTS: happens when an offset is specified; it means the row
+     *   was already appended, so it is safe to ignore this error.
+     * OUT_OF_RANGE: happens when an offset is specified; it means the
+     *   specified offset is beyond the end of the stream.
+     * INVALID_ARGUMENT: error caused by a malformed request or data.
+     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+     *   when appending without an offset.
+     * ABORTED: request processing is aborted because of prior failures; the
+     *   request can be retried once the previous failure is fixed.
+     * INTERNAL: server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -4106,7 +4172,18 @@ public long getOffset() { * *
      * Error in case of append failure. If set, it means rows are not accepted
-     * into the system. Users can retry within the same connection.
+     * into the system. Users can retry or continue with other requests within
+     * the same connection.
+     * ALREADY_EXISTS: happens when an offset is specified; it means the row
+     *   was already appended, so it is safe to ignore this error.
+     * OUT_OF_RANGE: happens when an offset is specified; it means the
+     *   specified offset is beyond the end of the stream.
+     * INVALID_ARGUMENT: error caused by a malformed request or data.
+     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+     *   when appending without an offset.
+     * ABORTED: request processing is aborted because of prior failures; the
+     *   request can be retried once the previous failure is fixed.
+     * INTERNAL: server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -4122,7 +4199,18 @@ public boolean hasError() { * *
      * Error in case of append failure. If set, it means rows are not accepted
-     * into the system. Users can retry within the same connection.
+     * into the system. Users can retry or continue with other requests within
+     * the same connection.
+     * ALREADY_EXISTS: happens when an offset is specified; it means the row
+     *   was already appended, so it is safe to ignore this error.
+     * OUT_OF_RANGE: happens when an offset is specified; it means the
+     *   specified offset is beyond the end of the stream.
+     * INVALID_ARGUMENT: error caused by a malformed request or data.
+     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+     *   when appending without an offset.
+     * ABORTED: request processing is aborted because of prior failures; the
+     *   request can be retried once the previous failure is fixed.
+     * INTERNAL: server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -4141,7 +4229,18 @@ public com.google.rpc.Status getError() { * *
      * Error in case of append failure. If set, it means rows are not accepted
-     * into the system. Users can retry within the same connection.
+     * into the system. Users can retry or continue with other requests within
+     * the same connection.
+     * ALREADY_EXISTS: happens when an offset is specified; it means the row
+     *   was already appended, so it is safe to ignore this error.
+     * OUT_OF_RANGE: happens when an offset is specified; it means the
+     *   specified offset is beyond the end of the stream.
+     * INVALID_ARGUMENT: error caused by a malformed request or data.
+     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+     *   when appending without an offset.
+     * ABORTED: request processing is aborted because of prior failures; the
+     *   request can be retried once the previous failure is fixed.
+     * INTERNAL: server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -4714,7 +4813,18 @@ public Builder clearOffset() { * *
        * Error in case of append failure. If set, it means rows are not accepted
-       * into the system. Users can retry within the same connection.
+       * into the system. Users can retry or continue with other requests within
+       * the same connection.
+       * ALREADY_EXISTS: happens when an offset is specified; it means the row
+       *   was already appended, so it is safe to ignore this error.
+       * OUT_OF_RANGE: happens when an offset is specified; it means the
+       *   specified offset is beyond the end of the stream.
+       * INVALID_ARGUMENT: error caused by a malformed request or data.
+       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+       *   when appending without an offset.
+       * ABORTED: request processing is aborted because of prior failures; the
+       *   request can be retried once the previous failure is fixed.
+       * INTERNAL: server-side errors that can be retried.
        * 
* * .google.rpc.Status error = 2; @@ -4730,7 +4840,18 @@ public boolean hasError() { * *
        * Error in case of append failure. If set, it means rows are not accepted
-       * into the system. Users can retry within the same connection.
+       * into the system. Users can retry or continue with other requests within
+       * the same connection.
+       * ALREADY_EXISTS: happens when an offset is specified; it means the row
+       *   was already appended, so it is safe to ignore this error.
+       * OUT_OF_RANGE: happens when an offset is specified; it means the
+       *   specified offset is beyond the end of the stream.
+       * INVALID_ARGUMENT: error caused by a malformed request or data.
+       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+       *   when appending without an offset.
+       * ABORTED: request processing is aborted because of prior failures; the
+       *   request can be retried once the previous failure is fixed.
+       * INTERNAL: server-side errors that can be retried.
        * 
* * .google.rpc.Status error = 2; @@ -4756,7 +4877,18 @@ public com.google.rpc.Status getError() { * *
        * Error in case of append failure. If set, it means rows are not accepted
-       * into the system. Users can retry within the same connection.
+       * into the system. Users can retry or continue with other requests within
+       * the same connection.
+       * ALREADY_EXISTS: happens when an offset is specified; it means the row
+       *   was already appended, so it is safe to ignore this error.
+       * OUT_OF_RANGE: happens when an offset is specified; it means the
+       *   specified offset is beyond the end of the stream.
+       * INVALID_ARGUMENT: error caused by a malformed request or data.
+       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+       *   when appending without an offset.
+       * ABORTED: request processing is aborted because of prior failures; the
+       *   request can be retried once the previous failure is fixed.
+       * INTERNAL: server-side errors that can be retried.
        * 
* * .google.rpc.Status error = 2; @@ -4779,7 +4911,18 @@ public Builder setError(com.google.rpc.Status value) { * *
        * Error in case of append failure. If set, it means rows are not accepted
-       * into the system. Users can retry within the same connection.
+       * into the system. Users can retry or continue with other requests within
+       * the same connection.
+       * ALREADY_EXISTS: happens when an offset is specified; it means the row
+       *   was already appended, so it is safe to ignore this error.
+       * OUT_OF_RANGE: happens when an offset is specified; it means the
+       *   specified offset is beyond the end of the stream.
+       * INVALID_ARGUMENT: error caused by a malformed request or data.
+       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+       *   when appending without an offset.
+       * ABORTED: request processing is aborted because of prior failures; the
+       *   request can be retried once the previous failure is fixed.
+       * INTERNAL: server-side errors that can be retried.
        * 
* * .google.rpc.Status error = 2; @@ -4799,7 +4942,18 @@ public Builder setError(com.google.rpc.Status.Builder builderForValue) { * *
        * Error in case of append failure. If set, it means rows are not accepted
-       * into the system. Users can retry within the same connection.
+       * into the system. Users can retry or continue with other requests within
+       * the same connection.
+       * ALREADY_EXISTS: happens when an offset is specified; it means the row
+       *   was already appended, so it is safe to ignore this error.
+       * OUT_OF_RANGE: happens when an offset is specified; it means the
+       *   specified offset is beyond the end of the stream.
+       * INVALID_ARGUMENT: error caused by a malformed request or data.
+       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+       *   when appending without an offset.
+       * ABORTED: request processing is aborted because of prior failures; the
+       *   request can be retried once the previous failure is fixed.
+       * INTERNAL: server-side errors that can be retried.
        * 
* * .google.rpc.Status error = 2; @@ -4829,7 +4983,18 @@ public Builder mergeError(com.google.rpc.Status value) { * *
        * Error in case of append failure. If set, it means rows are not accepted
-       * into the system. Users can retry within the same connection.
+       * into the system. Users can retry or continue with other requests within
+       * the same connection.
+       * ALREADY_EXISTS: happens when an offset is specified; it means the row
+       *   was already appended, so it is safe to ignore this error.
+       * OUT_OF_RANGE: happens when an offset is specified; it means the
+       *   specified offset is beyond the end of the stream.
+       * INVALID_ARGUMENT: error caused by a malformed request or data.
+       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+       *   when appending without an offset.
+       * ABORTED: request processing is aborted because of prior failures; the
+       *   request can be retried once the previous failure is fixed.
+       * INTERNAL: server-side errors that can be retried.
        * 
* * .google.rpc.Status error = 2; @@ -4855,7 +5020,18 @@ public Builder clearError() { * *
        * Error in case of append failure. If set, it means rows are not accepted
-       * into the system. Users can retry within the same connection.
+       * into the system. Users can retry or continue with other requests within
+       * the same connection.
+       * ALREADY_EXISTS: happens when an offset is specified; it means the row
+       *   was already appended, so it is safe to ignore this error.
+       * OUT_OF_RANGE: happens when an offset is specified; it means the
+       *   specified offset is beyond the end of the stream.
+       * INVALID_ARGUMENT: error caused by a malformed request or data.
+       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+       *   when appending without an offset.
+       * ABORTED: request processing is aborted because of prior failures; the
+       *   request can be retried once the previous failure is fixed.
+       * INTERNAL: server-side errors that can be retried.
        * 
* * .google.rpc.Status error = 2; @@ -4868,7 +5044,18 @@ public com.google.rpc.Status.Builder getErrorBuilder() { * *
        * Error in case of append failure. If set, it means rows are not accepted
-       * into the system. Users can retry within the same connection.
+       * into the system. Users can retry or continue with other requests within
+       * the same connection.
+       * ALREADY_EXISTS: happens when an offset is specified; it means the row
+       *   was already appended, so it is safe to ignore this error.
+       * OUT_OF_RANGE: happens when an offset is specified; it means the
+       *   specified offset is beyond the end of the stream.
+       * INVALID_ARGUMENT: error caused by a malformed request or data.
+       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+       *   when appending without an offset.
+       * ABORTED: request processing is aborted because of prior failures; the
+       *   request can be retried once the previous failure is fixed.
+       * INTERNAL: server-side errors that can be retried.
        * 
* * .google.rpc.Status error = 2; @@ -4889,7 +5076,18 @@ public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { * *
        * Error in case of append failure. If set, it means rows are not accepted
-       * into the system. Users can retry within the same connection.
+       * into the system. Users can retry or continue with other requests within
+       * the same connection.
+       * ALREADY_EXISTS: happens when an offset is specified; it means the row
+       *   was already appended, so it is safe to ignore this error.
+       * OUT_OF_RANGE: happens when an offset is specified; it means the
+       *   specified offset is beyond the end of the stream.
+       * INVALID_ARGUMENT: error caused by a malformed request or data.
+       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
+       *   when appending without an offset.
+       * ABORTED: request processing is aborted because of prior failures; the
+       *   request can be retried once the previous failure is fixed.
+       * INTERNAL: server-side errors that can be retried.
        * 
* * .google.rpc.Status error = 2; @@ -10468,7 +10666,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "st\022H\n\014write_stream\030\001 \001(\tB2\340A\002\372A,\n*bigque" + "rystorage.googleapis.com/WriteStream\022\016\n\006" + "offset\030\002 \001(\003\"#\n\021FlushRowsResponse\022\016\n\006off" - + "set\030\001 \001(\0032\250\014\n\rBigQueryWrite\022\351\001\n\021CreateWr" + + "set\030\001 \001(\0032\253\014\n\rBigQueryWrite\022\351\001\n\021CreateWr" + "iteStream\022@.google.cloud.bigquery.storag" + "e.v1alpha2.CreateWriteStreamRequest\0323.go" + "ogle.cloud.bigquery.storage.v1alpha2.Wri" @@ -10503,15 +10701,15 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "y.storage.v1alpha2.FlushRowsResponse\"[\202\323" + "\344\223\002F\"A/v1alpha2/{write_stream=projects/*" + "/datasets/*/tables/*/streams/*}:\001*\332A\014wri" - + "te_stream\032\260\001\312A\036bigquerystorage.googleapi" - + "s.com\322A\213\001https://www.googleapis.com/auth" - + "/bigquery,https://www.googleapis.com/aut" - + "h/bigquery.insertdata,https://www.google" - + "apis.com/auth/cloud-platformB{\n*com.goog" - + "le.cloud.bigquery.storage.v1alpha2ZMgoog" - + "le.golang.org/genproto/googleapis/cloud/" - + "bigquery/storage/v1alpha2;storageb\006proto" - + "3" + + "te_stream\032\263\001\210\002\001\312A\036bigquerystorage.google" + + "apis.com\322A\213\001https://www.googleapis.com/a" + + "uth/bigquery,https://www.googleapis.com/" + + "auth/bigquery.insertdata,https://www.goo" + + "gleapis.com/auth/cloud-platformB{\n*com.g" + + "oogle.cloud.bigquery.storage.v1alpha2ZMg" + + "oogle.golang.org/genproto/googleapis/clo" + + "ud/bigquery/storage/v1alpha2;storageb\006pr" + + "oto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java index 5e8e6f78c7..a072fab6ad 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java @@ -80,7 +80,8 @@ public interface WriteStreamOrBuilder * * *
-     * Output only. Create time of the stream.
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
      * 
* * @@ -94,7 +95,8 @@ public interface WriteStreamOrBuilder * * *
-     * Output only. Create time of the stream.
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
      * 
* * @@ -108,7 +110,8 @@ public interface WriteStreamOrBuilder * * *
-     * Output only. Create time of the stream.
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
      * 
* * @@ -661,7 +664,8 @@ public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type getTyp * * *
-     * Output only. Create time of the stream.
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
      * 
* * @@ -678,7 +682,8 @@ public boolean hasCreateTime() { * * *
-     * Output only. Create time of the stream.
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
      * 
* * @@ -695,7 +700,8 @@ public com.google.protobuf.Timestamp getCreateTime() { * * *
-     * Output only. Create time of the stream.
+     * Output only. Create time of the stream. For the _default stream, this is the
+     * creation_time of the table.
      * 
* * @@ -1522,7 +1528,8 @@ public Builder clearType() { * * *
-       * Output only. Create time of the stream.
+       * Output only. Create time of the stream. For the _default stream, this is the
+       * creation_time of the table.
        * 
* * @@ -1538,7 +1545,8 @@ public boolean hasCreateTime() { * * *
-       * Output only. Create time of the stream.
+       * Output only. Create time of the stream. For the _default stream, this is the
+       * creation_time of the table.
        * 
* * @@ -1560,7 +1568,8 @@ public com.google.protobuf.Timestamp getCreateTime() { * * *
-       * Output only. Create time of the stream.
+       * Output only. Create time of the stream. For the _default stream, this is the
+       * creation_time of the table.
        * 
* * @@ -1584,7 +1593,8 @@ public Builder setCreateTime(com.google.protobuf.Timestamp value) { * * *
-       * Output only. Create time of the stream.
+       * Output only. Create time of the stream. For the _default stream, this is the
+       * creation_time of the table.
        * 
* * @@ -1605,7 +1615,8 @@ public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForVal * * *
-       * Output only. Create time of the stream.
+       * Output only. Create time of the stream. For the _default stream, this is the
+       * creation_time of the table.
        * 
* * @@ -1633,7 +1644,8 @@ public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { * * *
-       * Output only. Create time of the stream.
+       * Output only. Create time of the stream. For the _default stream, this is the
+       * creation_time of the table.
        * 
* * @@ -1655,7 +1667,8 @@ public Builder clearCreateTime() { * * *
-       * Output only. Create time of the stream.
+       * Output only. Create time of the stream. For the _default stream, this is the
+       * creation_time of the table.
        * 
* * @@ -1671,7 +1684,8 @@ public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { * * *
-       * Output only. Create time of the stream.
+       * Output only. Create time of the stream. For the _default stream, this is the
+       * creation_time of the table.
        * 
* * @@ -1691,7 +1705,8 @@ public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { * * *
-       * Output only. Create time of the stream.
+       * Output only. Create time of the stream. For the _default stream, this is the
+       * creation_time of the table.
        * 
* * diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java index 0e33bf8ccb..6b6e6d4bbc 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java @@ -1496,7 +1496,7 @@ public enum Type implements com.google.protobuf.ProtocolMessageEnum { * * *
-       * Geography object (go/googlesql_geography)
+       * Geography object
        * 
* * GEOGRAPHY = 11; @@ -1506,7 +1506,7 @@ public enum Type implements com.google.protobuf.ProtocolMessageEnum { * * *
-       * Numeric value (go/googlesql_numeric)
+       * Numeric value
        * 
* * NUMERIC = 12; @@ -1629,7 +1629,7 @@ public enum Type implements com.google.protobuf.ProtocolMessageEnum { * * *
-       * Geography object (go/googlesql_geography)
+       * Geography object
        * 
* * GEOGRAPHY = 11; @@ -1639,7 +1639,7 @@ public enum Type implements com.google.protobuf.ProtocolMessageEnum { * * *
-       * Numeric value (go/googlesql_numeric)
+       * Numeric value
        * 
* * NUMERIC = 12; diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto index 882adf5b42..63bce1fc9a 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto index 431f925b39..03c846313d 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -50,6 +50,7 @@ message CreateWriteStreamRequest { // // The Write API can be used to write data to BigQuery. service BigQueryWrite { + option deprecated = true; option (google.api.default_host) = "bigquerystorage.googleapis.com"; option (google.api.oauth_scopes) = "https://www.googleapis.com/auth/bigquery," @@ -57,6 +58,11 @@ service BigQueryWrite { "https://www.googleapis.com/auth/cloud-platform"; // Creates a write stream to the given table. + // Additionally, every table has a special COMMITTED stream named '_default' + // to which data can be written. This stream doesn't need to be created using + // CreateWriteStream. It is a stream that can be used simultaneously by any + // number of clients. Data written to this stream is considered committed as + // soon as an acknowledgement is received. rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) { option (google.api.http) = { post: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}" @@ -102,7 +108,7 @@ service BigQueryWrite { } // Finalize a write stream so that no new data can be appended to the - // stream. + // stream. Finalize is not supported on the '_default' stream. rpc FinalizeWriteStream(FinalizeWriteStreamRequest) returns (FinalizeWriteStreamResponse) { option (google.api.http) = { post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}" @@ -128,6 +134,7 @@ service BigQueryWrite { // required in order for the rows to become available for reading. A // Flush operation flushes up to any previously flushed offset in a BUFFERED // stream, to the offset specified in the request. + // Flush is not supported on the _default stream, since it is not BUFFERED. rpc FlushRows(FlushRowsRequest) returns (FlushRowsResponse) { option (google.api.http) = { post: "/v1alpha2/{write_stream=projects/*/datasets/*/tables/*/streams/*}" @@ -150,6 +157,8 @@ message AppendRowsRequest { // Required. The stream that is the target of the append operation. This value must be // specified for the initial request. 
If subsequent requests specify the // stream name, it must equal to the value provided in the first request. + // To write to the _default stream, populate this field with a string in the + // format `projects/{project}/datasets/{dataset}/tables/{table}/_default`. string write_stream = 1 [ (google.api.field_behavior) = REQUIRED, (google.api.resource_reference) = { @@ -159,7 +168,8 @@ message AppendRowsRequest { // Optional. If present, the write is only performed if the next append offset is same // as the provided value. If not present, the write is performed at the - // current end of stream. + // current end of stream. Specifying a value for this field is not allowed + // when calling AppendRows for the '_default' stream. google.protobuf.Int64Value offset = 2 [(google.api.field_behavior) = OPTIONAL]; // Input rows. The `writer_schema` field must be specified at the initial @@ -183,7 +193,18 @@ message AppendRowsResponse { int64 offset = 1; // Error in case of append failure. If set, it means rows are not accepted - // into the system. Users can retry within the same connection. + // into the system. Users can retry or continue with other requests within + // the same connection. + // ALREADY_EXISTS: happens when offset is specified, it means the row is + // already appended, it is safe to ignore this error. + // OUT_OF_RANGE: happens when offset is specified, it means the specified + // offset is beyond the end of the stream. + // INVALID_ARGUMENT: error caused by malformed request or data. + // RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when + // append without offset. + // ABORTED: request processing is aborted because of prior failures, request + // can be retried if previous failure is fixed. + // INTERNAL: server side errors that can be retried. google.rpc.Status error = 2; } diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto index 0ec0ef81a2..2483e9315b 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -56,7 +56,8 @@ message WriteStream { Type type = 2 [(google.api.field_behavior) = IMMUTABLE]; - // Output only. Create time of the stream. + // Output only. Create time of the stream. For the _default stream, this is the + // creation_time of the table. google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; // Output only. Commit time of the stream. 
diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto index d4bc017165..a3e7ad48a0 100644 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto +++ b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto @@ -1,4 +1,4 @@ -// Copyright 2020 Google LLC +// Copyright 2021 Google LLC // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. @@ -63,10 +63,10 @@ message TableFieldSchema { // Combination of civil date and civil time DATETIME = 10; - // Geography object (go/googlesql_geography) + // Geography object GEOGRAPHY = 11; - // Numeric value (go/googlesql_numeric) + // Numeric value NUMERIC = 12; } diff --git a/proto-google-cloud-bigquerystorage-v1beta2/clirr-ignored-differences.xml b/proto-google-cloud-bigquerystorage-v1beta2/clirr-ignored-differences.xml index 78fef92e9f..6e03637b76 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/clirr-ignored-differences.xml +++ b/proto-google-cloud-bigquerystorage-v1beta2/clirr-ignored-differences.xml @@ -4,17 +4,17 @@ 7012 - com/google/cloud/bigquery/storage/v1/*OrBuilder + com/google/cloud/bigquery/storage/v1beta2/*OrBuilder * get*(*) 7012 - com/google/cloud/bigquery/storage/v1/*OrBuilder + com/google/cloud/bigquery/storage/v1beta2/*OrBuilder boolean contains*(*) 7012 - com/google/cloud/bigquery/storage/v1/*OrBuilder + com/google/cloud/bigquery/storage/v1beta2/*OrBuilder boolean has*(*) \ No newline at end of file diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequest.java index d42a64264c..602f184958 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequest.java @@ -1312,9 +1312,9 @@ public RowsCase getRowsCase() { * * *
-   * Required. The stream that is the target of the append operation. This value
-   * must be specified for the initial request. If subsequent requests specify
-   * the stream name, it must equal to the value provided in the first request.
+   * Required. The stream that is the target of the append operation. This value must be
+   * specified for the initial request. If subsequent requests specify the
+   * stream name, it must equal the value provided in the first request.
    * To write to the _default stream, populate this field with a string in the
    * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
    * 
@@ -1341,9 +1341,9 @@ public java.lang.String getWriteStream() { * * *
-   * Required. The stream that is the target of the append operation. This value
-   * must be specified for the initial request. If subsequent requests specify
-   * the stream name, it must equal to the value provided in the first request.
+   * Required. The stream that is the target of the append operation. This value must be
+   * specified for the initial request. If subsequent requests specify the
+   * stream name, it must equal the value provided in the first request.
    * To write to the _default stream, populate this field with a string in the
    * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
    * 
@@ -1966,9 +1966,9 @@ public Builder clearRows() { * * *
-     * Required. The stream that is the target of the append operation. This value
-     * must be specified for the initial request. If subsequent requests specify
-     * the stream name, it must equal to the value provided in the first request.
+     * Required. The stream that is the target of the append operation. This value must be
+     * specified for the initial request. If subsequent requests specify the
+     * stream name, it must equal the value provided in the first request.
      * To write to the _default stream, populate this field with a string in the
      * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
      * 
@@ -1994,9 +1994,9 @@ public java.lang.String getWriteStream() { * * *
-     * Required. The stream that is the target of the append operation. This value
-     * must be specified for the initial request. If subsequent requests specify
-     * the stream name, it must equal to the value provided in the first request.
+     * Required. The stream that is the target of the append operation. This value must be
+     * specified for the initial request. If subsequent requests specify the
+     * stream name, it must equal the value provided in the first request.
      * To write to the _default stream, populate this field with a string in the
      * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
      * 
@@ -2022,9 +2022,9 @@ public com.google.protobuf.ByteString getWriteStreamBytes() { * * *
-     * Required. The stream that is the target of the append operation. This value
-     * must be specified for the initial request. If subsequent requests specify
-     * the stream name, it must equal to the value provided in the first request.
+     * Required. The stream that is the target of the append operation. This value must be
+     * specified for the initial request. If subsequent requests specify the
+     * stream name, it must equal the value provided in the first request.
      * To write to the _default stream, populate this field with a string in the
      * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
      * 
@@ -2049,9 +2049,9 @@ public Builder setWriteStream(java.lang.String value) { * * *
-     * Required. The stream that is the target of the append operation. This value
-     * must be specified for the initial request. If subsequent requests specify
-     * the stream name, it must equal to the value provided in the first request.
+     * Required. The stream that is the target of the append operation. This value must be
+     * specified for the initial request. If subsequent requests specify the
+     * stream name, it must equal the value provided in the first request.
      * To write to the _default stream, populate this field with a string in the
      * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
      * 
@@ -2072,9 +2072,9 @@ public Builder clearWriteStream() { * * *
-     * Required. The stream that is the target of the append operation. This value
-     * must be specified for the initial request. If subsequent requests specify
-     * the stream name, it must equal to the value provided in the first request.
+     * Required. The stream that is the target of the append operation. This value must be
+     * specified for the initial request. If subsequent requests specify the
+     * stream name, it must equal the value provided in the first request.
      * To write to the _default stream, populate this field with a string in the
      * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
      * 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequestOrBuilder.java index fb9bb565f1..8b721dcaf2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsRequestOrBuilder.java @@ -27,9 +27,9 @@ public interface AppendRowsRequestOrBuilder * * *
-   * Required. The stream that is the target of the append operation. This value
-   * must be specified for the initial request. If subsequent requests specify
-   * the stream name, it must equal to the value provided in the first request.
+   * Required. The stream that is the target of the append operation. This value must be
+   * specified for the initial request. If subsequent requests specify the
+   * stream name, it must equal the value provided in the first request.
    * To write to the _default stream, populate this field with a string in the
    * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
    * 
@@ -45,9 +45,9 @@ public interface AppendRowsRequestOrBuilder * * *
-   * Required. The stream that is the target of the append operation. This value
-   * must be specified for the initial request. If subsequent requests specify
-   * the stream name, it must equal to the value provided in the first request.
+   * Required. The stream that is the target of the append operation. This value must be
+   * specified for the initial request. If subsequent requests specify the
+   * stream name, it must equal the value provided in the first request.
    * To write to the _default stream, populate this field with a string in the
    * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
    * 
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponse.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponse.java index 9edad09966..d710849ff2 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponse.java @@ -204,7 +204,7 @@ public interface AppendResultOrBuilder * * *
-   * A success append result.
+   * AppendResult is returned for successful append requests.
    * 
* * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult} @@ -526,7 +526,7 @@ protected Builder newBuilderForType( * * *
-     * A success append result.
+     * AppendResult is returned for successful append requests.
      * 
* * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.AppendRowsResponse.AppendResult} @@ -1057,19 +1057,20 @@ public boolean hasAppendResult() { * * *
-   * Error in case of request failed. If set, it means rows are not accepted
-   * into the system. Users can retry or continue with other requests within
-   * the same connection.
-   * ALREADY_EXISTS: happens when offset is specified, it means the entire
-   *   request is already appended, it is safe to ignore this error.
-   * OUT_OF_RANGE: happens when offset is specified, it means the specified
-   *   offset is beyond the end of the stream.
-   * INVALID_ARGUMENT: error caused by malformed request or data.
-   * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-   *   append without offset.
-   * ABORTED: request processing is aborted because of prior failures, request
-   *   can be retried if previous failure is fixed.
-   * INTERNAL: server side errors that can be retried.
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   * Additional information about error signalling:
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend already has received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   * INTERNAL: Indicates server-side errors that can be retried.
    * 
* * .google.rpc.Status error = 2; @@ -1084,19 +1085,20 @@ public boolean hasError() { * * *
-   * Error in case of request failed. If set, it means rows are not accepted
-   * into the system. Users can retry or continue with other requests within
-   * the same connection.
-   * ALREADY_EXISTS: happens when offset is specified, it means the entire
-   *   request is already appended, it is safe to ignore this error.
-   * OUT_OF_RANGE: happens when offset is specified, it means the specified
-   *   offset is beyond the end of the stream.
-   * INVALID_ARGUMENT: error caused by malformed request or data.
-   * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-   *   append without offset.
-   * ABORTED: request processing is aborted because of prior failures, request
-   *   can be retried if previous failure is fixed.
-   * INTERNAL: server side errors that can be retried.
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   * Additional information about error signalling:
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend already has received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   * INTERNAL: Indicates server-side errors that can be retried.
    * 
* * .google.rpc.Status error = 2; @@ -1114,19 +1116,20 @@ public com.google.rpc.Status getError() { * * *
-   * Error in case of request failed. If set, it means rows are not accepted
-   * into the system. Users can retry or continue with other requests within
-   * the same connection.
-   * ALREADY_EXISTS: happens when offset is specified, it means the entire
-   *   request is already appended, it is safe to ignore this error.
-   * OUT_OF_RANGE: happens when offset is specified, it means the specified
-   *   offset is beyond the end of the stream.
-   * INVALID_ARGUMENT: error caused by malformed request or data.
-   * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-   *   append without offset.
-   * ABORTED: request processing is aborted because of prior failures, request
-   *   can be retried if previous failure is fixed.
-   * INTERNAL: server side errors that can be retried.
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   * Additional information about error signalling:
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend already has received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   * INTERNAL: Indicates server-side errors that can be retried.
    * 
* * .google.rpc.Status error = 2; @@ -1146,8 +1149,8 @@ public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { * *
    * If backend detects a schema update, pass it to user so that user can
-   * use it to input new type of message. It will be empty when there is no
-   * schema updates.
+   * use it to input new type of message. It will be empty when no schema
+   * updates have occurred.
    * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -1163,8 +1166,8 @@ public boolean hasUpdatedSchema() { * *
    * If backend detects a schema update, pass it to user so that user can
-   * use it to input new type of message. It will be empty when there is no
-   * schema updates.
+   * use it to input new type of message. It will be empty when no schema
+   * updates have occurred.
    * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -1182,8 +1185,8 @@ public com.google.cloud.bigquery.storage.v1beta2.TableSchema getUpdatedSchema() * *
    * If backend detects a schema update, pass it to user so that user can
-   * use it to input new type of message. It will be empty when there is no
-   * schema updates.
+   * use it to input new type of message. It will be empty when no schema
+   * updates have occurred.
    * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -1867,19 +1870,20 @@ public Builder clearAppendResult() { * * *
-     * Error in case of request failed. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when offset is specified, it means the entire
-     *   request is already appended, it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when offset is specified, it means the specified
-     *   offset is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   append without offset.
-     * ABORTED: request processing is aborted because of prior failures, request
-     *   can be retried if previous failure is fixed.
-     * INTERNAL: server side errors that can be retried.
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend already has received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -1894,19 +1898,20 @@ public boolean hasError() { * * *
-     * Error in case of request failed. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when offset is specified, it means the entire
-     *   request is already appended, it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when offset is specified, it means the specified
-     *   offset is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   append without offset.
-     * ABORTED: request processing is aborted because of prior failures, request
-     *   can be retried if previous failure is fixed.
-     * INTERNAL: server side errors that can be retried.
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend already has received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -1931,19 +1936,20 @@ public com.google.rpc.Status getError() { * * *
-     * Error in case of request failed. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when offset is specified, it means the entire
-     *   request is already appended, it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when offset is specified, it means the specified
-     *   offset is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   append without offset.
-     * ABORTED: request processing is aborted because of prior failures, request
-     *   can be retried if previous failure is fixed.
-     * INTERNAL: server side errors that can be retried.
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend already has received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -1965,19 +1971,20 @@ public Builder setError(com.google.rpc.Status value) { * * *
-     * Error in case of request failed. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when offset is specified, it means the entire
-     *   request is already appended, it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when offset is specified, it means the specified
-     *   offset is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   append without offset.
-     * ABORTED: request processing is aborted because of prior failures, request
-     *   can be retried if previous failure is fixed.
-     * INTERNAL: server side errors that can be retried.
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend already has received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -1996,19 +2003,20 @@ public Builder setError(com.google.rpc.Status.Builder builderForValue) { * * *
-     * Error in case of request failed. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when offset is specified, it means the entire
-     *   request is already appended, it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when offset is specified, it means the specified
-     *   offset is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   append without offset.
-     * ABORTED: request processing is aborted because of prior failures, request
-     *   can be retried if previous failure is fixed.
-     * INTERNAL: server side errors that can be retried.
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend already has received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -2037,19 +2045,20 @@ public Builder mergeError(com.google.rpc.Status value) { * * *
-     * Error in case of request failed. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when offset is specified, it means the entire
-     *   request is already appended, it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when offset is specified, it means the specified
-     *   offset is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   append without offset.
-     * ABORTED: request processing is aborted because of prior failures, request
-     *   can be retried if previous failure is fixed.
-     * INTERNAL: server side errors that can be retried.
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend already has received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -2074,19 +2083,20 @@ public Builder clearError() { * * *
-     * Error in case of request failed. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when offset is specified, it means the entire
-     *   request is already appended, it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when offset is specified, it means the specified
-     *   offset is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   append without offset.
-     * ABORTED: request processing is aborted because of prior failures, request
-     *   can be retried if previous failure is fixed.
-     * INTERNAL: server side errors that can be retried.
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend already has received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -2098,19 +2108,20 @@ public com.google.rpc.Status.Builder getErrorBuilder() { * * *
-     * Error in case of request failed. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when offset is specified, it means the entire
-     *   request is already appended, it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when offset is specified, it means the specified
-     *   offset is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   append without offset.
-     * ABORTED: request processing is aborted because of prior failures, request
-     *   can be retried if previous failure is fixed.
-     * INTERNAL: server side errors that can be retried.
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend already has received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -2130,19 +2141,20 @@ public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { * * *
-     * Error in case of request failed. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when offset is specified, it means the entire
-     *   request is already appended, it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when offset is specified, it means the specified
-     *   offset is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   append without offset.
-     * ABORTED: request processing is aborted because of prior failures, request
-     *   can be retried if previous failure is fixed.
-     * INTERNAL: server side errors that can be retried.
+     * Error returned when problems were encountered.  If present,
+     * it indicates rows were not accepted into the system.
+     * Users can retry or continue with other append requests within the
+     * same connection.
+     * Additional information about error signalling:
+     * ALREADY_EXISTS: Happens when an append specified an offset, and the
+     * backend already has received data at this offset.  Typically encountered
+     * in retry scenarios, and can be ignored.
+     * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+     * the current end of the stream.
+     * INVALID_ARGUMENT: Indicates a malformed request or data.
+     * ABORTED: Request processing is aborted because of prior failures.  The
+     * request can be retried if the previous failure is addressed.
+     * INTERNAL: Indicates server-side errors that can be retried.
      * 
* * .google.rpc.Status error = 2; @@ -2179,8 +2191,8 @@ public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { * *
      * If backend detects a schema update, pass it to user so that user can
-     * use it to input new type of message. It will be empty when there is no
-     * schema updates.
+     * use it to input new type of message. It will be empty when no schema
+     * updates have occurred.
      * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -2195,8 +2207,8 @@ public boolean hasUpdatedSchema() { * *
      * If backend detects a schema update, pass it to user so that user can
-     * use it to input new type of message. It will be empty when there is no
-     * schema updates.
+     * use it to input new type of message. It will be empty when no schema
+     * updates have occurred.
      * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -2217,8 +2229,8 @@ public com.google.cloud.bigquery.storage.v1beta2.TableSchema getUpdatedSchema() * *
      * If backend detects a schema update, pass it to user so that user can
-     * use it to input new type of message. It will be empty when there is no
-     * schema updates.
+     * use it to input new type of message. It will be empty when no schema
+     * updates have occurred.
      * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -2241,8 +2253,8 @@ public Builder setUpdatedSchema(com.google.cloud.bigquery.storage.v1beta2.TableS * *
      * If backend detects a schema update, pass it to user so that user can
-     * use it to input new type of message. It will be empty when there is no
-     * schema updates.
+     * use it to input new type of message. It will be empty when no schema
+     * updates have occurred.
      * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -2263,8 +2275,8 @@ public Builder setUpdatedSchema( * *
      * If backend detects a schema update, pass it to user so that user can
-     * use it to input new type of message. It will be empty when there is no
-     * schema updates.
+     * use it to input new type of message. It will be empty when no schema
+     * updates have occurred.
      * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -2291,8 +2303,8 @@ public Builder mergeUpdatedSchema(com.google.cloud.bigquery.storage.v1beta2.Tabl * *
      * If backend detects a schema update, pass it to user so that user can
-     * use it to input new type of message. It will be empty when there is no
-     * schema updates.
+     * use it to input new type of message. It will be empty when no schema
+     * updates have occurred.
      * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -2313,8 +2325,8 @@ public Builder clearUpdatedSchema() { * *
      * If backend detects a schema update, pass it to user so that user can
-     * use it to input new type of message. It will be empty when there is no
-     * schema updates.
+     * use it to input new type of message. It will be empty when no schema
+     * updates have occurred.
      * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -2329,8 +2341,8 @@ public com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder getUpdatedS * *
      * If backend detects a schema update, pass it to user so that user can
-     * use it to input new type of message. It will be empty when there is no
-     * schema updates.
+     * use it to input new type of message. It will be empty when no schema
+     * updates have occurred.
      * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -2350,8 +2362,8 @@ public com.google.cloud.bigquery.storage.v1beta2.TableSchema.Builder getUpdatedS * *
      * If backend detects a schema update, pass it to user so that user can
-     * use it to input new type of message. It will be empty when there is no
-     * schema updates.
+     * use it to input new type of message. It will be empty when no schema
+     * updates have occurred.
      * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponseOrBuilder.java index fe320fb6c5..5d920cd579 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/AppendRowsResponseOrBuilder.java @@ -66,19 +66,20 @@ public interface AppendRowsResponseOrBuilder * * *
-   * Error in case of request failed. If set, it means rows are not accepted
-   * into the system. Users can retry or continue with other requests within
-   * the same connection.
-   * ALREADY_EXISTS: happens when offset is specified, it means the entire
-   *   request is already appended, it is safe to ignore this error.
-   * OUT_OF_RANGE: happens when offset is specified, it means the specified
-   *   offset is beyond the end of the stream.
-   * INVALID_ARGUMENT: error caused by malformed request or data.
-   * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-   *   append without offset.
-   * ABORTED: request processing is aborted because of prior failures, request
-   *   can be retried if previous failure is fixed.
-   * INTERNAL: server side errors that can be retried.
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   * Additional information about error signalling:
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend already has received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   * INTERNAL: Indicates server-side errors that can be retried.
    * 
* * .google.rpc.Status error = 2; @@ -90,19 +91,20 @@ public interface AppendRowsResponseOrBuilder * * *
-   * Error in case of request failed. If set, it means rows are not accepted
-   * into the system. Users can retry or continue with other requests within
-   * the same connection.
-   * ALREADY_EXISTS: happens when offset is specified, it means the entire
-   *   request is already appended, it is safe to ignore this error.
-   * OUT_OF_RANGE: happens when offset is specified, it means the specified
-   *   offset is beyond the end of the stream.
-   * INVALID_ARGUMENT: error caused by malformed request or data.
-   * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-   *   append without offset.
-   * ABORTED: request processing is aborted because of prior failures, request
-   *   can be retried if previous failure is fixed.
-   * INTERNAL: server side errors that can be retried.
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   * Additional information about error signalling:
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend already has received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   * INTERNAL: Indicates server-side errors that can be retried.
    * 
* * .google.rpc.Status error = 2; @@ -114,19 +116,20 @@ public interface AppendRowsResponseOrBuilder * * *
-   * Error in case of request failed. If set, it means rows are not accepted
-   * into the system. Users can retry or continue with other requests within
-   * the same connection.
-   * ALREADY_EXISTS: happens when offset is specified, it means the entire
-   *   request is already appended, it is safe to ignore this error.
-   * OUT_OF_RANGE: happens when offset is specified, it means the specified
-   *   offset is beyond the end of the stream.
-   * INVALID_ARGUMENT: error caused by malformed request or data.
-   * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-   *   append without offset.
-   * ABORTED: request processing is aborted because of prior failures, request
-   *   can be retried if previous failure is fixed.
-   * INTERNAL: server side errors that can be retried.
+   * Error returned when problems were encountered.  If present,
+   * it indicates rows were not accepted into the system.
+   * Users can retry or continue with other append requests within the
+   * same connection.
+   * Additional information about error signalling:
+   * ALREADY_EXISTS: Happens when an append specified an offset, and the
+   * backend already has received data at this offset.  Typically encountered
+   * in retry scenarios, and can be ignored.
+   * OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+   * the current end of the stream.
+   * INVALID_ARGUMENT: Indicates a malformed request or data.
+   * ABORTED: Request processing is aborted because of prior failures.  The
+   * request can be retried if the previous failure is addressed.
+   * INTERNAL: Indicates server-side errors that can be retried.
    * 
* * .google.rpc.Status error = 2; @@ -138,8 +141,8 @@ public interface AppendRowsResponseOrBuilder * *
    * If backend detects a schema update, pass it to user so that user can
-   * use it to input new type of message. It will be empty when there is no
-   * schema updates.
+   * use it to input new type of message. It will be empty when no schema
+   * updates have occurred.
    * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -152,8 +155,8 @@ public interface AppendRowsResponseOrBuilder * *
    * If backend detects a schema update, pass it to user so that user can
-   * use it to input new type of message. It will be empty when there is no
-   * schema updates.
+   * use it to input new type of message. It will be empty when no schema
+   * updates have occurred.
    * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; @@ -166,8 +169,8 @@ public interface AppendRowsResponseOrBuilder * *
    * If backend detects a schema update, pass it to user so that user can
-   * use it to input new type of message. It will be empty when there is no
-   * schema updates.
+   * use it to input new type of message. It will be empty when no schema
+   * updates have occurred.
    * 
* * .google.cloud.bigquery.storage.v1beta2.TableSchema updated_schema = 3; diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequest.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequest.java index c271b1d3cb..fe6826a739 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequest.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequest.java @@ -133,8 +133,8 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * * *
-   * Required. Parent table that all the streams should belong to, in the form
-   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * Required. Parent table that all the streams should belong to, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}`.
    * 
* * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -157,8 +157,8 @@ public java.lang.String getParent() { * * *
-   * Required. Parent table that all the streams should belong to, in the form
-   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * Required. Parent table that all the streams should belong to, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}`.
    * 
* * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -611,8 +611,8 @@ public Builder mergeFrom( * * *
-     * Required. Parent table that all the streams should belong to, in the form
-     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}`.
      * 
* * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -634,8 +634,8 @@ public java.lang.String getParent() { * * *
-     * Required. Parent table that all the streams should belong to, in the form
-     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}`.
      * 
* * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -657,8 +657,8 @@ public com.google.protobuf.ByteString getParentBytes() { * * *
-     * Required. Parent table that all the streams should belong to, in the form
-     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}`.
      * 
* * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -679,8 +679,8 @@ public Builder setParent(java.lang.String value) { * * *
-     * Required. Parent table that all the streams should belong to, in the form
-     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}`.
      * 
* * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -697,8 +697,8 @@ public Builder clearParent() { * * *
-     * Required. Parent table that all the streams should belong to, in the form
-     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+     * Required. Parent table that all the streams should belong to, in the form of
+     * `projects/{project}/datasets/{dataset}/tables/{table}`.
      * 
* * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequestOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequestOrBuilder.java index f51b28293a..d06e88a517 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequestOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsRequestOrBuilder.java @@ -27,8 +27,8 @@ public interface BatchCommitWriteStreamsRequestOrBuilder * * *
-   * Required. Parent table that all the streams should belong to, in the form
-   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * Required. Parent table that all the streams should belong to, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}`.
    * 
* * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; @@ -40,8 +40,8 @@ public interface BatchCommitWriteStreamsRequestOrBuilder * * *
-   * Required. Parent table that all the streams should belong to, in the form
-   * of `projects/{project}/datasets/{dataset}/tables/{table}`.
+   * Required. Parent table that all the streams should belong to, in the form of
+   * `projects/{project}/datasets/{dataset}/tables/{table}`.
    * 
* * string parent = 1 [(.google.api.field_behavior) = REQUIRED]; diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponse.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponse.java index 0fa6c5e7c5..3f30a674f0 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponse.java @@ -146,7 +146,8 @@ public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { * *
    * The time at which streams were committed in microseconds granularity.
-   * This field will only exist when there is no stream errors.
+   * This field will only exist when there are no stream errors.
+   * **Note:** If this field is not set, the commit was not successful.
    * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -162,7 +163,8 @@ public boolean hasCommitTime() { * *
    * The time at which streams were committed in microseconds granularity.
-   * This field will only exist when there is no stream errors.
+   * This field will only exist when there are no stream errors.
+   * **Note:** If this field is not set, the commit was not successful.
    * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -178,7 +180,8 @@ public com.google.protobuf.Timestamp getCommitTime() { * *
    * The time at which streams were committed in microseconds granularity.
-   * This field will only exist when there is no stream errors.
+   * This field will only exist when there are no stream errors.
+   * **Note:** If this field is not set, the commit was not successful.
    * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -196,6 +199,9 @@ public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { *
    * Stream level error if commit failed. Only streams with error will be in
    * the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, certain streams have errors and no streams are committed
+   * due to the atomicity guarantee.
    * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -211,6 +217,9 @@ public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { *
    * Stream level error if commit failed. Only streams with error will be in
    * the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, certain streams have errors and no streams are committed
+   * due to the atomicity guarantee.
    * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -226,6 +235,9 @@ public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { *
    * Stream level error if commit failed. Only streams with error will be in
    * the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, certain streams have errors and no streams are committed
+   * due to the atomicity guarantee.
    * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -240,6 +252,9 @@ public int getStreamErrorsCount() { *
    * Stream level error if commit failed. Only streams with error will be in
    * the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, certain streams have errors and no streams are committed
+   * due to the atomicity guarantee.
    * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -254,6 +269,9 @@ public com.google.cloud.bigquery.storage.v1beta2.StorageError getStreamErrors(in *
    * Stream level error if commit failed. Only streams with error will be in
    * the list.
+   * If empty, there is no error and all streams are committed successfully.
+   * If non-empty, certain streams have errors and no streams are committed
+   * due to the atomicity guarantee.
    * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -678,7 +696,8 @@ public Builder mergeFrom( * *
      * The time at which streams were committed in microseconds granularity.
-     * This field will only exist when there is no stream errors.
+     * This field will only exist when there are no stream errors.
+     * **Note:** If this field is not set, the commit was not successful.
      * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -693,7 +712,8 @@ public boolean hasCommitTime() { * *
      * The time at which streams were committed in microseconds granularity.
-     * This field will only exist when there is no stream errors.
+     * This field will only exist when there are no stream errors.
+     * **Note:** If this field is not set, the commit was not successful.
      * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -714,7 +734,8 @@ public com.google.protobuf.Timestamp getCommitTime() { * *
      * The time at which streams were committed in microseconds granularity.
-     * This field will only exist when there is no stream errors.
+     * This field will only exist when there are no stream errors.
+     * **Note:** If this field is not set, the commit was not successful.
      * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -737,7 +758,8 @@ public Builder setCommitTime(com.google.protobuf.Timestamp value) { * *
      * The time at which streams were committed in microseconds granularity.
-     * This field will only exist when there is no stream errors.
+     * This field will only exist when there are no stream errors.
+     * **Note:** If this field is not set, the commit was not successful.
      * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -757,7 +779,8 @@ public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForVal * *
      * The time at which streams were committed in microseconds granularity.
-     * This field will only exist when there is no stream errors.
+     * This field will only exist when there are no stream errors.
+     * **Note:** If this field is not set, the commit was not successful.
      * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -782,7 +805,8 @@ public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { * *
      * The time at which streams were committed in microseconds granularity.
-     * This field will only exist when there is no stream errors.
+     * This field will only exist when there are no stream errors.
+     * **Note:** If this field is not set, the commit was not successful.
      * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -803,7 +827,8 @@ public Builder clearCommitTime() { * *
      * The time at which streams were committed in microseconds granularity.
-     * This field will only exist when there is no stream errors.
+     * This field will only exist when there are no stream errors.
+     * **Note:** If this field is not set, the commit was not successful.
      * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -818,7 +843,8 @@ public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { * *
      * The time at which streams were committed in microseconds granularity.
-     * This field will only exist when there is no stream errors.
+     * This field will only exist when there are no stream errors.
+     * **Note:** If this field is not set, the commit was not successful.
      * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -837,7 +863,8 @@ public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { * *
      * The time at which streams were committed in microseconds granularity.
-     * This field will only exist when there is no stream errors.
+     * This field will only exist when there are no stream errors.
+     * **Note:** If this field is not set, the commit was not successful.
      * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -883,6 +910,9 @@ private void ensureStreamErrorsIsMutable() { *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there is no error and all streams are committed successfully.
+     * If non-empty, certain streams have errors and no streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -901,6 +931,9 @@ private void ensureStreamErrorsIsMutable() { *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -918,6 +951,9 @@ public int getStreamErrorsCount() { *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -935,6 +971,9 @@ public com.google.cloud.bigquery.storage.v1beta2.StorageError getStreamErrors(in *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -959,6 +998,9 @@ public Builder setStreamErrors( *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -980,6 +1022,9 @@ public Builder setStreamErrors( *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -1003,6 +1048,9 @@ public Builder addStreamErrors(com.google.cloud.bigquery.storage.v1beta2.Storage *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -1027,6 +1075,9 @@ public Builder addStreamErrors( *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -1048,6 +1099,9 @@ public Builder addStreamErrors( *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -1069,6 +1123,9 @@ public Builder addStreamErrors( *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -1091,6 +1148,9 @@ public Builder addAllStreamErrors( *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -1111,6 +1171,9 @@ public Builder clearStreamErrors() { *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -1131,6 +1194,9 @@ public Builder removeStreamErrors(int index) { *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -1145,6 +1211,9 @@ public com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder getStreamE *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -1163,6 +1232,9 @@ public com.google.cloud.bigquery.storage.v1beta2.StorageErrorOrBuilder getStream *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -1181,6 +1253,9 @@ public com.google.cloud.bigquery.storage.v1beta2.StorageErrorOrBuilder getStream *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -1195,6 +1270,9 @@ public com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder addStreamE *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -1211,6 +1289,9 @@ public com.google.cloud.bigquery.storage.v1beta2.StorageError.Builder addStreamE *
      * Stream level error if commit failed. Only streams with error will be in
      * the list.
+     * If empty, there are no errors and all streams are committed successfully.
+     * If non-empty, certain streams have errors and ZERO streams are committed
+     * due to the atomicity guarantee.
      * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponseOrBuilder.java index 77dea40ef7..4ee30d3923 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BatchCommitWriteStreamsResponseOrBuilder.java @@ -28,7 +28,8 @@ public interface BatchCommitWriteStreamsResponseOrBuilder * *
    * The time at which streams were committed in microseconds granularity.
-   * This field will only exist when there is no stream errors.
+   * This field will only exist when there are no stream errors.
+   * **Note:** If this field is not set, the commit was not successful.
    * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -41,7 +42,8 @@ public interface BatchCommitWriteStreamsResponseOrBuilder * *
    * The time at which streams were committed in microseconds granularity.
-   * This field will only exist when there is no stream errors.
+   * This field will only exist when there are no stream errors.
+   * **Note:** If this field is not set, the commit was not successful.
    * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -54,7 +56,8 @@ public interface BatchCommitWriteStreamsResponseOrBuilder * *
    * The time at which streams were committed in microseconds granularity.
-   * This field will only exist when there is no stream errors.
+   * This field will only exist when there are no stream errors.
+   * **Note:** If this field is not set, the commit was not successful.
    * 
* * .google.protobuf.Timestamp commit_time = 1; @@ -67,6 +70,9 @@ public interface BatchCommitWriteStreamsResponseOrBuilder *
    * Stream level error if commit failed. Only streams with error will be in
    * the list.
+   * If empty, there are no errors and all streams are committed successfully.
+   * If non-empty, certain streams have errors and ZERO streams are committed
+   * due to the atomicity guarantee.
    * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -78,6 +84,9 @@ public interface BatchCommitWriteStreamsResponseOrBuilder *
    * Stream level error if commit failed. Only streams with error will be in
    * the list.
+   * If empty, there are no errors and all streams are committed successfully.
+   * If non-empty, certain streams have errors and ZERO streams are committed
+   * due to the atomicity guarantee.
    * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -89,6 +98,9 @@ public interface BatchCommitWriteStreamsResponseOrBuilder *
    * Stream level error if commit failed. Only streams with error will be in
    * the list.
+   * If empty, there are no errors and all streams are committed successfully.
+   * If non-empty, certain streams have errors and ZERO streams are committed
+   * due to the atomicity guarantee.
    * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -100,6 +112,9 @@ public interface BatchCommitWriteStreamsResponseOrBuilder *
    * Stream level error if commit failed. Only streams with error will be in
    * the list.
+   * If empty, there are no errors and all streams are committed successfully.
+   * If non-empty, certain streams have errors and ZERO streams are committed
+   * due to the atomicity guarantee.
    * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; @@ -112,6 +127,9 @@ public interface BatchCommitWriteStreamsResponseOrBuilder *
    * Stream level error if commit failed. Only streams with error will be in
    * the list.
+   * If empty, there are no errors and all streams are committed successfully.
+   * If non-empty, certain streams have errors and ZERO streams are committed
+   * due to the atomicity guarantee.
    * 
* * repeated .google.cloud.bigquery.storage.v1beta2.StorageError stream_errors = 2; diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRows.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRows.java index 911bc53151..2792f31b13 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRows.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoRows.java @@ -18,15 +18,7 @@ package com.google.cloud.bigquery.storage.v1beta2; -/** - * - * - *
- * Protobuf rows.
- * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ProtoRows} - */ +/** Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ProtoRows} */ public final class ProtoRows extends com.google.protobuf.GeneratedMessageV3 implements // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1beta2.ProtoRows) @@ -337,15 +329,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build Builder builder = new Builder(parent); return builder; } - /** - * - * - *
-   * Protobuf rows.
-   * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ProtoRows} - */ + /** Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ProtoRows} */ public static final class Builder extends com.google.protobuf.GeneratedMessageV3.Builder implements // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1beta2.ProtoRows) diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchema.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchema.java index 0dd9c16c4e..3f4c55002a 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchema.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ProtoSchema.java @@ -22,7 +22,7 @@ * * *
- * Protobuf schema is an API presentation the proto buffer schema.
+ * ProtoSchema describes the schema of the serialized protocol buffer data rows.
  * 
* * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ProtoSchema} @@ -349,7 +349,7 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * * *
-   * Protobuf schema is an API presentation the proto buffer schema.
+   * ProtoSchema describes the schema of the serialized protocol buffer data rows.
    * 
* * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.ProtoSchema} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java index a569e1c6d2..e7f68eba50 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponse.java @@ -146,6 +146,44 @@ private ReadRowsResponse( rowCount_ = input.readInt64(); break; } + case 58: + { + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder subBuilder = null; + if (schemaCase_ == 7) { + subBuilder = + ((com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_).toBuilder(); + } + schema_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_); + schema_ = subBuilder.buildPartial(); + } + schemaCase_ = 7; + break; + } + case 66: + { + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder subBuilder = null; + if (schemaCase_ == 8) { + subBuilder = + ((com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_).toBuilder(); + } + schema_ = + input.readMessage( + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.parser(), + extensionRegistry); + if (subBuilder != null) { + subBuilder.mergeFrom( + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_); + schema_ = subBuilder.buildPartial(); + } + schemaCase_ = 8; + break; + } default: { if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { @@ -227,6 +265,53 @@ public RowsCase getRowsCase() { return RowsCase.forNumber(rowsCase_); } + private int schemaCase_ = 0; + private java.lang.Object schema_; + + public enum SchemaCase + implements + com.google.protobuf.Internal.EnumLite, + com.google.protobuf.AbstractMessage.InternalOneOfEnum { + AVRO_SCHEMA(7), + ARROW_SCHEMA(8), + SCHEMA_NOT_SET(0); + private final int value; + + private SchemaCase(int value) { + this.value = value; + } + /** + * @param value The number of the enum to look for. + * @return The enum associated with the given number. + * @deprecated Use {@link #forNumber(int)} instead. + */ + @java.lang.Deprecated + public static SchemaCase valueOf(int value) { + return forNumber(value); + } + + public static SchemaCase forNumber(int value) { + switch (value) { + case 7: + return AVRO_SCHEMA; + case 8: + return ARROW_SCHEMA; + case 0: + return SCHEMA_NOT_SET; + default: + return null; + } + } + + public int getNumber() { + return this.value; + } + }; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + public static final int AVRO_ROWS_FIELD_NUMBER = 3; /** * @@ -448,6 +533,120 @@ public com.google.cloud.bigquery.storage.v1beta2.ThrottleState getThrottleState( return getThrottleState(); } + public static final int AVRO_SCHEMA_FIELD_NUMBER = 7; + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 7; + } + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema getAvroSchema() { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder getAvroSchemaOrBuilder() { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + + public static final int ARROW_SCHEMA_FIELD_NUMBER = 8; + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 8; + } + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getArrowSchema() { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder getArrowSchemaOrBuilder() { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + private byte memoizedIsInitialized = -1; @java.lang.Override @@ -477,6 +676,12 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io if (rowCount_ != 0L) { output.writeInt64(6, rowCount_); } + if (schemaCase_ == 7) { + output.writeMessage(7, (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_); + } + if (schemaCase_ == 8) { + output.writeMessage(8, (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_); + } unknownFields.writeTo(output); } @@ -505,6 +710,16 @@ public int getSerializedSize() { if (rowCount_ != 0L) { size += com.google.protobuf.CodedOutputStream.computeInt64Size(6, rowCount_); } + if (schemaCase_ == 7) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 7, (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_); + } + if (schemaCase_ == 8) { + size += + com.google.protobuf.CodedOutputStream.computeMessageSize( + 8, (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_); + } size += unknownFields.getSerializedSize(); memoizedSize = size; return size; @@ -541,6 +756,17 @@ public boolean equals(final java.lang.Object obj) { case 0: default: } + if (!getSchemaCase().equals(other.getSchemaCase())) return false; + switch (schemaCase_) { + case 7: + if (!getAvroSchema().equals(other.getAvroSchema())) return false; + break; + case 8: + if (!getArrowSchema().equals(other.getArrowSchema())) return false; + break; + case 0: + default: + } if (!unknownFields.equals(other.unknownFields)) return false; return true; } @@ -574,6 +800,18 @@ public int hashCode() { case 0: default: } + switch (schemaCase_) { + case 7: + hash = (37 * hash) + AVRO_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getAvroSchema().hashCode(); + break; + case 8: + hash = (37 * hash) + ARROW_SCHEMA_FIELD_NUMBER; + hash = (53 * hash) + getArrowSchema().hashCode(); + break; + case 0: + default: + } hash = (29 * hash) + unknownFields.hashCode(); memoizedHashCode = hash; return hash; @@ -737,6 +975,8 @@ public Builder clear() { } rowsCase_ = 0; rows_ = null; + schemaCase_ = 0; + schema_ = null; return this; } @@ -789,7 +1029,22 @@ public com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse buildPartial() } else { result.throttleState_ = throttleStateBuilder_.build(); } + if (schemaCase_ == 7) { + if (avroSchemaBuilder_ == null) { + result.schema_ = schema_; + } else { + result.schema_ = avroSchemaBuilder_.build(); + } + } + if (schemaCase_ == 8) { + if (arrowSchemaBuilder_ == null) { + result.schema_ = schema_; + } else { + result.schema_ = arrowSchemaBuilder_.build(); + } + } result.rowsCase_ = rowsCase_; + result.schemaCase_ = schemaCase_; onBuilt(); return result; } @@ -865,6 +1120,22 @@ public Builder mergeFrom(com.google.cloud.bigquery.storage.v1beta2.ReadRowsRespo break; } } + switch (other.getSchemaCase()) { + case AVRO_SCHEMA: + { + mergeAvroSchema(other.getAvroSchema()); + break; + } + case ARROW_SCHEMA: + { + mergeArrowSchema(other.getArrowSchema()); + break; + } + case SCHEMA_NOT_SET: + { + break; + } + } this.mergeUnknownFields(other.unknownFields); onChanged(); return this; 
@@ -909,6 +1180,20 @@ public Builder clearRows() { return this; } + private int schemaCase_ = 0; + private java.lang.Object schema_; + + public SchemaCase getSchemaCase() { + return SchemaCase.forNumber(schemaCase_); + } + + public Builder clearSchema() { + schemaCase_ = 0; + schema_ = null; + onChanged(); + return this; + } + private com.google.protobuf.SingleFieldBuilderV3< com.google.cloud.bigquery.storage.v1beta2.AvroRows, com.google.cloud.bigquery.storage.v1beta2.AvroRows.Builder, @@ -1769,6 +2054,463 @@ public Builder clearThrottleState() { return throttleStateBuilder_; } + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroSchema, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder> + avroSchemaBuilder_; + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + @java.lang.Override + public boolean hasAvroSchema() { + return schemaCase_ == 7; + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema getAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } else { + if (schemaCase_ == 7) { + return avroSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema(com.google.cloud.bigquery.storage.v1beta2.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + avroSchemaBuilder_.setMessage(value); + } + schemaCase_ = 7; + return this; + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setAvroSchema( + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder builderForValue) { + if (avroSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + avroSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 7; + return this; + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeAvroSchema(com.google.cloud.bigquery.storage.v1beta2.AvroSchema value) { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 7 + && schema_ + != com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 7) { + avroSchemaBuilder_.mergeFrom(value); + } + avroSchemaBuilder_.setMessage(value); + } + schemaCase_ = 7; + return this; + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearAvroSchema() { + if (avroSchemaBuilder_ == null) { + if (schemaCase_ == 7) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 7) { + schemaCase_ = 0; + schema_ = null; + } + avroSchemaBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder getAvroSchemaBuilder() { + return getAvroSchemaFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder getAvroSchemaOrBuilder() { + if ((schemaCase_ == 7) && (avroSchemaBuilder_ != null)) { + return avroSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 7) { + return (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + } + /** + * + * + *
+     * Output only. Avro schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroSchema, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder> + getAvroSchemaFieldBuilder() { + if (avroSchemaBuilder_ == null) { + if (!(schemaCase_ == 7)) { + schema_ = com.google.cloud.bigquery.storage.v1beta2.AvroSchema.getDefaultInstance(); + } + avroSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.AvroSchema, + com.google.cloud.bigquery.storage.v1beta2.AvroSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.AvroSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 7; + onChanged(); + ; + return avroSchemaBuilder_; + } + + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder> + arrowSchemaBuilder_; + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + @java.lang.Override + public boolean hasArrowSchema() { + return schemaCase_ == 8; + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } else { + if (schemaCase_ == 8) { + return arrowSchemaBuilder_.getMessage(); + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema(com.google.cloud.bigquery.storage.v1beta2.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (value == null) { + throw new NullPointerException(); + } + schema_ = value; + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(value); + } + schemaCase_ = 8; + return this; + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder setArrowSchema( + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder builderForValue) { + if (arrowSchemaBuilder_ == null) { + schema_ = builderForValue.build(); + onChanged(); + } else { + arrowSchemaBuilder_.setMessage(builderForValue.build()); + } + schemaCase_ = 8; + return this; + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder mergeArrowSchema(com.google.cloud.bigquery.storage.v1beta2.ArrowSchema value) { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 8 + && schema_ + != com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance()) { + schema_ = + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.newBuilder( + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_) + .mergeFrom(value) + .buildPartial(); + } else { + schema_ = value; + } + onChanged(); + } else { + if (schemaCase_ == 8) { + arrowSchemaBuilder_.mergeFrom(value); + } + arrowSchemaBuilder_.setMessage(value); + } + schemaCase_ = 8; + return this; + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public Builder clearArrowSchema() { + if (arrowSchemaBuilder_ == null) { + if (schemaCase_ == 8) { + schemaCase_ = 0; + schema_ = null; + onChanged(); + } + } else { + if (schemaCase_ == 8) { + schemaCase_ = 0; + schema_ = null; + } + arrowSchemaBuilder_.clear(); + } + return this; + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder getArrowSchemaBuilder() { + return getArrowSchemaFieldBuilder().getBuilder(); + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + @java.lang.Override + public com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder + getArrowSchemaOrBuilder() { + if ((schemaCase_ == 8) && (arrowSchemaBuilder_ != null)) { + return arrowSchemaBuilder_.getMessageOrBuilder(); + } else { + if (schemaCase_ == 8) { + return (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_; + } + return com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + } + /** + * + * + *
+     * Output only. Arrow schema.
+     * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + private com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder> + getArrowSchemaFieldBuilder() { + if (arrowSchemaBuilder_ == null) { + if (!(schemaCase_ == 8)) { + schema_ = com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.getDefaultInstance(); + } + arrowSchemaBuilder_ = + new com.google.protobuf.SingleFieldBuilderV3< + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema.Builder, + com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder>( + (com.google.cloud.bigquery.storage.v1beta2.ArrowSchema) schema_, + getParentForChildren(), + isClean()); + schema_ = null; + } + schemaCase_ = 8; + onChanged(); + ; + return arrowSchemaBuilder_; + } + @java.lang.Override public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) { return super.setUnknownFields(unknownFields); diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java index 6a451072e5..9dac211a11 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadRowsResponseOrBuilder.java @@ -180,5 +180,89 @@ public interface ReadRowsResponseOrBuilder */ com.google.cloud.bigquery.storage.v1beta2.ThrottleStateOrBuilder getThrottleStateOrBuilder(); + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the avroSchema field is set. + */ + boolean hasAvroSchema(); + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The avroSchema. + */ + com.google.cloud.bigquery.storage.v1beta2.AvroSchema getAvroSchema(); + /** + * + * + *
+   * Output only. Avro schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.AvroSchema avro_schema = 7 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + */ + com.google.cloud.bigquery.storage.v1beta2.AvroSchemaOrBuilder getAvroSchemaOrBuilder(); + + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return Whether the arrowSchema field is set. + */ + boolean hasArrowSchema(); + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+ * + * + * .google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY]; + * + * + * @return The arrowSchema. + */ + com.google.cloud.bigquery.storage.v1beta2.ArrowSchema getArrowSchema(); + /** + * + * + *
+   * Output only. Arrow schema.
+   * 
+   *
+   * <code>.google.cloud.bigquery.storage.v1beta2.ArrowSchema arrow_schema = 8 [(.google.api.field_behavior) = OUTPUT_ONLY];
+   * </code>
+   */
+  com.google.cloud.bigquery.storage.v1beta2.ArrowSchemaOrBuilder getArrowSchemaOrBuilder();
+
   public com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.RowsCase getRowsCase();
+
+  public com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse.SchemaCase getSchemaCase();
 }
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java
index e188ff9a50..1ca9b33ac2 100644
--- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java
+++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/ReadSession.java
@@ -1085,6 +1085,7 @@ public interface TableReadOptionsOrBuilder
    * "nullable_field is not NULL"
    * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
    * "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
    * </pre>
    *
    * <code>string row_restriction = 2;</code>
@@ -1103,6 +1104,7 @@ public interface TableReadOptionsOrBuilder
    * "nullable_field is not NULL"
    * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
    * "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
    * </pre>
    *
    * <code>string row_restriction = 2;</code>
@@ -1367,6 +1369,7 @@ public com.google.protobuf.ByteString getSelectedFieldsBytes(int index) {
    * "nullable_field is not NULL"
    * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
    * "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
    * </pre>
    *
    * <code>string row_restriction = 2;</code>
@@ -1396,6 +1399,7 @@ public java.lang.String getRowRestriction() {
    * "nullable_field is not NULL"
    * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
    * "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
    * </pre>
    *
    * <code>string row_restriction = 2;</code>
@@ -2081,6 +2085,7 @@ public Builder addSelectedFieldsBytes(com.google.protobuf.ByteString value) {
    * "nullable_field is not NULL"
    * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
    * "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
    * </pre>
    *
    * <code>string row_restriction = 2;</code>
@@ -2109,6 +2114,7 @@ public java.lang.String getRowRestriction() {
    * "nullable_field is not NULL"
    * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
    * "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
    * </pre>
    *
    * <code>string row_restriction = 2;</code>
@@ -2137,6 +2143,7 @@ public com.google.protobuf.ByteString getRowRestrictionBytes() {
    * "nullable_field is not NULL"
    * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
    * "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
    * </pre>
    *
    * <code>string row_restriction = 2;</code>
@@ -2164,6 +2171,7 @@ public Builder setRowRestriction(java.lang.String value) {
    * "nullable_field is not NULL"
    * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
    * "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
    * </pre>
    *
    * <code>string row_restriction = 2;</code>
@@ -2187,6 +2195,7 @@ public Builder clearRowRestriction() {
    * "nullable_field is not NULL"
    * "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
    * "numeric_field BETWEEN 1.0 AND 5.0"
+   * Restricted to a maximum length of 1 MB.
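The 1 MB cap documented above applies to the filter text itself. For illustration only, a row restriction is attached through TableReadOptions as in the sketch below; the column names are made up:

import com.google.cloud.bigquery.storage.v1beta2.ReadSession;

final class RowRestrictionSketch {
  static ReadSession.TableReadOptions options() {
    return ReadSession.TableReadOptions.newBuilder()
        .addSelectedFields("name") // hypothetical column
        .setRowRestriction("state = \"CA\" AND num_field BETWEEN 1.0 AND 5.0")
        .build();
  }
}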
* * * string row_restriction = 2; diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageError.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageError.java index 11e197c11f..9b99d67ed4 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageError.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageError.java @@ -23,8 +23,9 @@ * *
  * Structured custom BigQuery Storage error message. The error can be attached
- * as error details in the returned rpc Status. User can use the info to process
- * errors in a structural way, rather than having to parse error messages.
+ * as error details in the returned rpc Status. In particular, the use of error
+ * codes allows more structured error handling, and reduces the need to evaluate
+ * unstructured error text strings.
  * 
* * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.StorageError} @@ -195,13 +196,23 @@ public enum StorageErrorCode implements com.google.protobuf.ProtocolMessageEnum * *
      * Invalid Stream state.
-     * For example, you try to commit a stream that is not fianlized or is
+     * For example, you try to commit a stream that is not finalized or is
      * garbaged.
      * 
* * INVALID_STREAM_STATE = 5; */ INVALID_STREAM_STATE(5), + /** + * + * + *
+     * Stream is finalized.
+     * 
+ * + * STREAM_FINALIZED = 6; + */ + STREAM_FINALIZED(6), UNRECOGNIZED(-1), ; @@ -261,13 +272,23 @@ public enum StorageErrorCode implements com.google.protobuf.ProtocolMessageEnum * *
      * Invalid Stream state.
-     * For example, you try to commit a stream that is not fianlized or is
+     * For example, you try to commit a stream that is not finalized or is
      * garbaged.
      * 
* * INVALID_STREAM_STATE = 5; */ public static final int INVALID_STREAM_STATE_VALUE = 5; + /** + * + * + *
+     * Stream is finalized.
+     * 
+ * + * STREAM_FINALIZED = 6; + */ + public static final int STREAM_FINALIZED_VALUE = 6; public final int getNumber() { if (this == UNRECOGNIZED) { @@ -305,6 +326,8 @@ public static StorageErrorCode forNumber(int value) { return INVALID_STREAM_TYPE; case 5: return INVALID_STREAM_STATE; + case 6: + return STREAM_FINALIZED; default: return null; } @@ -687,8 +710,9 @@ protected Builder newBuilderForType(com.google.protobuf.GeneratedMessageV3.Build * *
    * Structured custom BigQuery Storage error message. The error can be attached
-   * as error details in the returned rpc Status. User can use the info to process
-   * errors in a structural way, rather than having to parse error messages.
+   * as error details in the returned rpc Status. In particular, the use of error
+   * codes allows more structured error handling, and reduces the need to evaluate
+   * unstructured error text strings.
    * 
* * Protobuf type {@code google.cloud.bigquery.storage.v1beta2.StorageError} diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java index 1ed9b034de..001235ba5a 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/StorageProto.java @@ -146,7 +146,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "tats\022M\n\010progress\030\002 \001(\0132;.google.cloud.bi" + "gquery.storage.v1beta2.StreamStats.Progr" + "ess\032>\n\010Progress\022\031\n\021at_response_start\030\001 \001" - + "(\001\022\027\n\017at_response_end\030\002 \001(\001\"\333\002\n\020ReadRows" + + "(\001\022\027\n\017at_response_end\030\002 \001(\001\"\205\004\n\020ReadRows" + "Response\022D\n\tavro_rows\030\003 \001(\0132/.google.clo" + "ud.bigquery.storage.v1beta2.AvroRowsH\000\022U" + "\n\022arrow_record_batch\030\004 \001(\01327.google.clou" @@ -155,125 +155,130 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "2.google.cloud.bigquery.storage.v1beta2." + "StreamStats\022L\n\016throttle_state\030\005 \001(\01324.go" + "ogle.cloud.bigquery.storage.v1beta2.Thro" - + "ttleStateB\006\n\004rows\"k\n\026SplitReadStreamRequ" - + "est\022?\n\004name\030\001 \001(\tB1\340A\002\372A+\n)bigquerystora" - + "ge.googleapis.com/ReadStream\022\020\n\010fraction" - + "\030\002 \001(\001\"\261\001\n\027SplitReadStreamResponse\022I\n\016pr" - + "imary_stream\030\001 \001(\01321.google.cloud.bigque" - + "ry.storage.v1beta2.ReadStream\022K\n\020remaind" - + "er_stream\030\002 \001(\01321.google.cloud.bigquery." - + "storage.v1beta2.ReadStream\"\240\001\n\030CreateWri" - + "teStreamRequest\0225\n\006parent\030\001 \001(\tB%\340A\002\372A\037\n" - + "\035bigquery.googleapis.com/Table\022M\n\014write_" - + "stream\030\002 \001(\01322.google.cloud.bigquery.sto" - + "rage.v1beta2.WriteStreamB\003\340A\002\"\227\003\n\021Append" - + "RowsRequest\022H\n\014write_stream\030\001 \001(\tB2\340A\002\372A" - + ",\n*bigquerystorage.googleapis.com/WriteS" - + "tream\022+\n\006offset\030\002 \001(\0132\033.google.protobuf." - + "Int64Value\022X\n\nproto_rows\030\004 \001(\0132B.google." 
- + "cloud.bigquery.storage.v1beta2.AppendRow" - + "sRequest.ProtoDataH\000\022\020\n\010trace_id\030\006 \001(\t\032\226" - + "\001\n\tProtoData\022I\n\rwriter_schema\030\001 \001(\01322.go" - + "ogle.cloud.bigquery.storage.v1beta2.Prot" - + "oSchema\022>\n\004rows\030\002 \001(\01320.google.cloud.big" - + "query.storage.v1beta2.ProtoRowsB\006\n\004rows\"" - + "\257\002\n\022AppendRowsResponse\022_\n\rappend_result\030" - + "\001 \001(\0132F.google.cloud.bigquery.storage.v1" - + "beta2.AppendRowsResponse.AppendResultH\000\022" - + "#\n\005error\030\002 \001(\0132\022.google.rpc.StatusH\000\022J\n\016" - + "updated_schema\030\003 \001(\01322.google.cloud.bigq" - + "uery.storage.v1beta2.TableSchema\032;\n\014Appe" - + "ndResult\022+\n\006offset\030\001 \001(\0132\033.google.protob" - + "uf.Int64ValueB\n\n\010response\"Y\n\025GetWriteStr" - + "eamRequest\022@\n\004name\030\001 \001(\tB2\340A\002\372A,\n*bigque" - + "rystorage.googleapis.com/WriteStream\"Q\n\036" - + "BatchCommitWriteStreamsRequest\022\023\n\006parent" - + "\030\001 \001(\tB\003\340A\002\022\032\n\rwrite_streams\030\002 \003(\tB\003\340A\002\"" - + "\236\001\n\037BatchCommitWriteStreamsResponse\022/\n\013c" - + "ommit_time\030\001 \001(\0132\032.google.protobuf.Times" - + "tamp\022J\n\rstream_errors\030\002 \003(\01323.google.clo" - + "ud.bigquery.storage.v1beta2.StorageError" - + "\"^\n\032FinalizeWriteStreamRequest\022@\n\004name\030\001" - + " \001(\tB2\340A\002\372A,\n*bigquerystorage.googleapis" - + ".com/WriteStream\"0\n\033FinalizeWriteStreamR" - + "esponse\022\021\n\trow_count\030\001 \001(\003\"\211\001\n\020FlushRows" - + "Request\022H\n\014write_stream\030\001 \001(\tB2\340A\002\372A,\n*b" - + "igquerystorage.googleapis.com/WriteStrea" - + "m\022+\n\006offset\030\002 \001(\0132\033.google.protobuf.Int6" - + "4Value\"#\n\021FlushRowsResponse\022\016\n\006offset\030\001 " - + "\001(\003\"\276\002\n\014StorageError\022R\n\004code\030\001 \001(\0162D.goo" - + "gle.cloud.bigquery.storage.v1beta2.Stora" - + "geError.StorageErrorCode\022\016\n\006entity\030\002 \001(\t" - + "\022\025\n\rerror_message\030\003 \001(\t\"\262\001\n\020StorageError" - + "Code\022\"\n\036STORAGE_ERROR_CODE_UNSPECIFIED\020\000" - + "\022\023\n\017TABLE_NOT_FOUND\020\001\022\034\n\030STREAM_ALREADY_" - + "COMMITTED\020\002\022\024\n\020STREAM_NOT_FOUND\020\003\022\027\n\023INV" - + "ALID_STREAM_TYPE\020\004\022\030\n\024INVALID_STREAM_STA" - + "TE\020\0052\363\006\n\014BigQueryRead\022\370\001\n\021CreateReadSess" - + "ion\022?.google.cloud.bigquery.storage.v1be" - + "ta2.CreateReadSessionRequest\0322.google.cl" - + "oud.bigquery.storage.v1beta2.ReadSession" - + "\"n\202\323\344\223\002A\".google.cloud.bigq" - + "uery.storage.v1beta2.SplitReadStreamResp" - + "onse\"C\202\323\344\223\002=\022;/v1beta2/{name=projects/*/" - + "locations/*/sessions/*/streams/*}\032\256\001\312A\036b" - + "igquerystorage.googleapis.com\322A\211\001https:/" - + "/www.googleapis.com/auth/bigquery,https:" - + "//www.googleapis.com/auth/bigquery.reado" - + "nly,https://www.googleapis.com/auth/clou" - + "d-platform2\226\014\n\rBigQueryWrite\022\346\001\n\021CreateW" - + "riteStream\022?.google.cloud.bigquery.stora" - + "ge.v1beta2.CreateWriteStreamRequest\0322.go" - + "ogle.cloud.bigquery.storage.v1beta2.Writ" - + "eStream\"\\\202\323\344\223\002@\"0/v1beta2/{parent=projec" - + "ts/*/datasets/*/tables/*}:\014write_stream\332" - + "A\023parent,write_stream\022\341\001\n\nAppendRows\0228.g" - + "oogle.cloud.bigquery.storage.v1beta2.App" - + 
"endRowsRequest\0329.google.cloud.bigquery.s" - + "torage.v1beta2.AppendRowsResponse\"Z\202\323\344\223\002" - + "E\"@/v1beta2/{write_stream=projects/*/dat" - + "asets/*/tables/*/streams/*}:\001*\332A\014write_s" - + "tream(\0010\001\022\316\001\n\016GetWriteStream\022<.google.cl" - + "oud.bigquery.storage.v1beta2.GetWriteStr" - + "eamRequest\0322.google.cloud.bigquery.stora" - + "ge.v1beta2.WriteStream\"J\202\323\344\223\002=\"8/v1beta2" - + "/{name=projects/*/datasets/*/tables/*/st" - + "reams/*}:\001*\332A\004name\022\350\001\n\023FinalizeWriteStre" - + "am\022A.google.cloud.bigquery.storage.v1bet" - + "a2.FinalizeWriteStreamRequest\032B.google.c" - + "loud.bigquery.storage.v1beta2.FinalizeWr" - + "iteStreamResponse\"J\202\323\344\223\002=\"8/v1beta2/{nam" - + "e=projects/*/datasets/*/tables/*/streams" - + "/*}:\001*\332A\004name\022\353\001\n\027BatchCommitWriteStream" - + "s\022E.google.cloud.bigquery.storage.v1beta" - + "2.BatchCommitWriteStreamsRequest\032F.googl" - + "e.cloud.bigquery.storage.v1beta2.BatchCo" - + "mmitWriteStreamsResponse\"A\202\323\344\223\0022\0220/v1bet" - + "a2/{parent=projects/*/datasets/*/tables/" - + "*}\332A\006parent\022\332\001\n\tFlushRows\0227.google.cloud" - + ".bigquery.storage.v1beta2.FlushRowsReque" - + "st\0328.google.cloud.bigquery.storage.v1bet" - + "a2.FlushRowsResponse\"Z\202\323\344\223\002E\"@/v1beta2/{" - + "write_stream=projects/*/datasets/*/table" - + "s/*/streams/*}:\001*\332A\014write_stream\032\260\001\312A\036bi" - + "gquerystorage.googleapis.com\322A\213\001https://" - + "www.googleapis.com/auth/bigquery,https:/" - + "/www.googleapis.com/auth/bigquery.insert" - + "data,https://www.googleapis.com/auth/clo" - + "ud-platformB\211\001\n)com.google.cloud.bigquer" - + "y.storage.v1beta2B\014StorageProtoP\001ZLgoogl" - + "e.golang.org/genproto/googleapis/cloud/b" - + "igquery/storage/v1beta2;storageb\006proto3" + + "ttleState\022M\n\013avro_schema\030\007 \001(\01321.google." + + "cloud.bigquery.storage.v1beta2.AvroSchem" + + "aB\003\340A\003H\001\022O\n\014arrow_schema\030\010 \001(\01322.google." + + "cloud.bigquery.storage.v1beta2.ArrowSche" + + "maB\003\340A\003H\001B\006\n\004rowsB\010\n\006schema\"k\n\026SplitRead" + + "StreamRequest\022?\n\004name\030\001 \001(\tB1\340A\002\372A+\n)big" + + "querystorage.googleapis.com/ReadStream\022\020" + + "\n\010fraction\030\002 \001(\001\"\261\001\n\027SplitReadStreamResp" + + "onse\022I\n\016primary_stream\030\001 \001(\01321.google.cl" + + "oud.bigquery.storage.v1beta2.ReadStream\022" + + "K\n\020remainder_stream\030\002 \001(\01321.google.cloud" + + ".bigquery.storage.v1beta2.ReadStream\"\240\001\n" + + "\030CreateWriteStreamRequest\0225\n\006parent\030\001 \001(" + + "\tB%\340A\002\372A\037\n\035bigquery.googleapis.com/Table" + + "\022M\n\014write_stream\030\002 \001(\01322.google.cloud.bi" + + "gquery.storage.v1beta2.WriteStreamB\003\340A\002\"" + + "\227\003\n\021AppendRowsRequest\022H\n\014write_stream\030\001 " + + "\001(\tB2\340A\002\372A,\n*bigquerystorage.googleapis." 
+ + "com/WriteStream\022+\n\006offset\030\002 \001(\0132\033.google" + + ".protobuf.Int64Value\022X\n\nproto_rows\030\004 \001(\013" + + "2B.google.cloud.bigquery.storage.v1beta2" + + ".AppendRowsRequest.ProtoDataH\000\022\020\n\010trace_" + + "id\030\006 \001(\t\032\226\001\n\tProtoData\022I\n\rwriter_schema\030" + + "\001 \001(\01322.google.cloud.bigquery.storage.v1" + + "beta2.ProtoSchema\022>\n\004rows\030\002 \001(\01320.google" + + ".cloud.bigquery.storage.v1beta2.ProtoRow" + + "sB\006\n\004rows\"\257\002\n\022AppendRowsResponse\022_\n\rappe" + + "nd_result\030\001 \001(\0132F.google.cloud.bigquery." + + "storage.v1beta2.AppendRowsResponse.Appen" + + "dResultH\000\022#\n\005error\030\002 \001(\0132\022.google.rpc.St" + + "atusH\000\022J\n\016updated_schema\030\003 \001(\01322.google." + + "cloud.bigquery.storage.v1beta2.TableSche" + + "ma\032;\n\014AppendResult\022+\n\006offset\030\001 \001(\0132\033.goo" + + "gle.protobuf.Int64ValueB\n\n\010response\"Y\n\025G" + + "etWriteStreamRequest\022@\n\004name\030\001 \001(\tB2\340A\002\372" + + "A,\n*bigquerystorage.googleapis.com/Write" + + "Stream\"Q\n\036BatchCommitWriteStreamsRequest" + + "\022\023\n\006parent\030\001 \001(\tB\003\340A\002\022\032\n\rwrite_streams\030\002" + + " \003(\tB\003\340A\002\"\236\001\n\037BatchCommitWriteStreamsRes" + + "ponse\022/\n\013commit_time\030\001 \001(\0132\032.google.prot" + + "obuf.Timestamp\022J\n\rstream_errors\030\002 \003(\01323." + + "google.cloud.bigquery.storage.v1beta2.St" + + "orageError\"^\n\032FinalizeWriteStreamRequest" + + "\022@\n\004name\030\001 \001(\tB2\340A\002\372A,\n*bigquerystorage." + + "googleapis.com/WriteStream\"0\n\033FinalizeWr" + + "iteStreamResponse\022\021\n\trow_count\030\001 \001(\003\"\211\001\n" + + "\020FlushRowsRequest\022H\n\014write_stream\030\001 \001(\tB" + + "2\340A\002\372A,\n*bigquerystorage.googleapis.com/" + + "WriteStream\022+\n\006offset\030\002 \001(\0132\033.google.pro" + + "tobuf.Int64Value\"#\n\021FlushRowsResponse\022\016\n" + + "\006offset\030\001 \001(\003\"\324\002\n\014StorageError\022R\n\004code\030\001" + + " \001(\0162D.google.cloud.bigquery.storage.v1b" + + "eta2.StorageError.StorageErrorCode\022\016\n\006en" + + "tity\030\002 \001(\t\022\025\n\rerror_message\030\003 \001(\t\"\310\001\n\020St" + + "orageErrorCode\022\"\n\036STORAGE_ERROR_CODE_UNS" + + "PECIFIED\020\000\022\023\n\017TABLE_NOT_FOUND\020\001\022\034\n\030STREA" + + "M_ALREADY_COMMITTED\020\002\022\024\n\020STREAM_NOT_FOUN" + + "D\020\003\022\027\n\023INVALID_STREAM_TYPE\020\004\022\030\n\024INVALID_" + + "STREAM_STATE\020\005\022\024\n\020STREAM_FINALIZED\020\0062\363\006\n" + + "\014BigQueryRead\022\370\001\n\021CreateReadSession\022?.go" + + "ogle.cloud.bigquery.storage.v1beta2.Crea" + + "teReadSessionRequest\0322.google.cloud.bigq" + + "uery.storage.v1beta2.ReadSession\"n\202\323\344\223\002A" + + "\".google.cloud.bigquery.sto" + + "rage.v1beta2.SplitReadStreamResponse\"C\202\323" + + "\344\223\002=\022;/v1beta2/{name=projects/*/location" + + "s/*/sessions/*/streams/*}\032\256\001\312A\036bigquerys" + + "torage.googleapis.com\322A\211\001https://www.goo" + + "gleapis.com/auth/bigquery,https://www.go" + + "ogleapis.com/auth/bigquery.readonly,http" + + "s://www.googleapis.com/auth/cloud-platfo" + + "rm2\226\014\n\rBigQueryWrite\022\346\001\n\021CreateWriteStre" + + "am\022?.google.cloud.bigquery.storage.v1bet" + + "a2.CreateWriteStreamRequest\0322.google.clo" + + "ud.bigquery.storage.v1beta2.WriteStream\"" + + "\\\202\323\344\223\002@\"0/v1beta2/{parent=projects/*/dat" + + 
"asets/*/tables/*}:\014write_stream\332A\023parent" + + ",write_stream\022\341\001\n\nAppendRows\0228.google.cl" + + "oud.bigquery.storage.v1beta2.AppendRowsR" + + "equest\0329.google.cloud.bigquery.storage.v" + + "1beta2.AppendRowsResponse\"Z\202\323\344\223\002E\"@/v1be" + + "ta2/{write_stream=projects/*/datasets/*/" + + "tables/*/streams/*}:\001*\332A\014write_stream(\0010" + + "\001\022\316\001\n\016GetWriteStream\022<.google.cloud.bigq" + + "uery.storage.v1beta2.GetWriteStreamReque" + + "st\0322.google.cloud.bigquery.storage.v1bet" + + "a2.WriteStream\"J\202\323\344\223\002=\"8/v1beta2/{name=p" + + "rojects/*/datasets/*/tables/*/streams/*}" + + ":\001*\332A\004name\022\350\001\n\023FinalizeWriteStream\022A.goo" + + "gle.cloud.bigquery.storage.v1beta2.Final" + + "izeWriteStreamRequest\032B.google.cloud.big" + + "query.storage.v1beta2.FinalizeWriteStrea" + + "mResponse\"J\202\323\344\223\002=\"8/v1beta2/{name=projec" + + "ts/*/datasets/*/tables/*/streams/*}:\001*\332A" + + "\004name\022\353\001\n\027BatchCommitWriteStreams\022E.goog" + + "le.cloud.bigquery.storage.v1beta2.BatchC" + + "ommitWriteStreamsRequest\032F.google.cloud." + + "bigquery.storage.v1beta2.BatchCommitWrit" + + "eStreamsResponse\"A\202\323\344\223\0022\0220/v1beta2/{pare" + + "nt=projects/*/datasets/*/tables/*}\332A\006par" + + "ent\022\332\001\n\tFlushRows\0227.google.cloud.bigquer" + + "y.storage.v1beta2.FlushRowsRequest\0328.goo" + + "gle.cloud.bigquery.storage.v1beta2.Flush" + + "RowsResponse\"Z\202\323\344\223\002E\"@/v1beta2/{write_st" + + "ream=projects/*/datasets/*/tables/*/stre" + + "ams/*}:\001*\332A\014write_stream\032\260\001\312A\036bigqueryst" + + "orage.googleapis.com\322A\213\001https://www.goog" + + "leapis.com/auth/bigquery,https://www.goo" + + "gleapis.com/auth/bigquery.insertdata,htt" + + "ps://www.googleapis.com/auth/cloud-platf" + + "ormB\211\001\n)com.google.cloud.bigquery.storag" + + "e.v1beta2B\014StorageProtoP\001ZLgoogle.golang" + + ".org/genproto/googleapis/cloud/bigquery/" + + "storage/v1beta2;storageb\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( @@ -340,7 +345,15 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( internal_static_google_cloud_bigquery_storage_v1beta2_ReadRowsResponse_descriptor, new java.lang.String[] { - "AvroRows", "ArrowRecordBatch", "RowCount", "Stats", "ThrottleState", "Rows", + "AvroRows", + "ArrowRecordBatch", + "RowCount", + "Stats", + "ThrottleState", + "AvroSchema", + "ArrowSchema", + "Rows", + "Schema", }); internal_static_google_cloud_bigquery_storage_v1beta2_SplitReadStreamRequest_descriptor = getDescriptor().getMessageTypes().get(5); diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchema.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchema.java index d3d8bc15f1..369879d537 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchema.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableFieldSchema.java @@ -286,6 +286,26 @@ public enum Type implements com.google.protobuf.ProtocolMessageEnum { * NUMERIC = 12; */ NUMERIC(12), + /** + * + * + *
+     * BigNumeric value
+     * 
+ * + * BIGNUMERIC = 13; + */ + BIGNUMERIC(13), + /** + * + * + *
+     * Interval
+     * 
+ * + * INTERVAL = 14; + */ + INTERVAL(14), UNRECOGNIZED(-1), ; @@ -419,6 +439,26 @@ public enum Type implements com.google.protobuf.ProtocolMessageEnum { * NUMERIC = 12; */ public static final int NUMERIC_VALUE = 12; + /** + * + * + *
+     * BigNumeric value
+     * 
+ * + * BIGNUMERIC = 13; + */ + public static final int BIGNUMERIC_VALUE = 13; + /** + * + * + *
+     * Interval
+     * 
+ * + * INTERVAL = 14; + */ + public static final int INTERVAL_VALUE = 14; public final int getNumber() { if (this == UNRECOGNIZED) { @@ -470,6 +510,10 @@ public static Type forNumber(int value) { return GEOGRAPHY; case 12: return NUMERIC; + case 13: + return BIGNUMERIC; + case 14: + return INTERVAL; default: return null; } diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableProto.java b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableProto.java index 9e02cb788b..faf88d105b 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableProto.java +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/TableProto.java @@ -49,24 +49,25 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { + "ge.v1beta2\032\037google/api/field_behavior.pr" + "oto\"V\n\013TableSchema\022G\n\006fields\030\001 \003(\01327.goo" + "gle.cloud.bigquery.storage.v1beta2.Table" - + "FieldSchema\"\247\004\n\020TableFieldSchema\022\021\n\004name" + + "FieldSchema\"\305\004\n\020TableFieldSchema\022\021\n\004name" + "\030\001 \001(\tB\003\340A\002\022O\n\004type\030\002 \001(\0162<.google.cloud" + ".bigquery.storage.v1beta2.TableFieldSche" + "ma.TypeB\003\340A\002\022O\n\004mode\030\003 \001(\0162<.google.clou" + "d.bigquery.storage.v1beta2.TableFieldSch" + "ema.ModeB\003\340A\001\022L\n\006fields\030\004 \003(\01327.google.c" + "loud.bigquery.storage.v1beta2.TableField" - + "SchemaB\003\340A\001\022\030\n\013description\030\006 \001(\tB\003\340A\001\"\255\001" + + "SchemaB\003\340A\001\022\030\n\013description\030\006 \001(\tB\003\340A\001\"\313\001" + "\n\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\n\n\006STRING\020\001" + "\022\t\n\005INT64\020\002\022\n\n\006DOUBLE\020\003\022\n\n\006STRUCT\020\004\022\t\n\005B" + "YTES\020\005\022\010\n\004BOOL\020\006\022\r\n\tTIMESTAMP\020\007\022\010\n\004DATE\020" + "\010\022\010\n\004TIME\020\t\022\014\n\010DATETIME\020\n\022\r\n\tGEOGRAPHY\020\013" - + "\022\013\n\007NUMERIC\020\014\"F\n\004Mode\022\024\n\020MODE_UNSPECIFIE" - + "D\020\000\022\014\n\010NULLABLE\020\001\022\014\n\010REQUIRED\020\002\022\014\n\010REPEA" - + "TED\020\003B\207\001\n)com.google.cloud.bigquery.stor" - + "age.v1beta2B\nTableProtoP\001ZLgoogle.golang" - + ".org/genproto/googleapis/cloud/bigquery/" - + "storage/v1beta2;storageb\006proto3" + + "\022\013\n\007NUMERIC\020\014\022\016\n\nBIGNUMERIC\020\r\022\014\n\010INTERVA" + + "L\020\016\"F\n\004Mode\022\024\n\020MODE_UNSPECIFIED\020\000\022\014\n\010NUL" + + "LABLE\020\001\022\014\n\010REQUIRED\020\002\022\014\n\010REPEATED\020\003B\207\001\n)" + + "com.google.cloud.bigquery.storage.v1beta" + + "2B\nTableProtoP\001ZLgoogle.golang.org/genpr" + + "oto/googleapis/cloud/bigquery/storage/v1" + + "beta2;storageb\006proto3" }; descriptor = com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/arrow.proto b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/arrow.proto index bc2e4eb126..74fe927b7e 100644 --- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/arrow.proto +++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/arrow.proto @@ 
-1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/avro.proto b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/avro.proto
index 109ec86a2c..495132ec14 100644
--- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/avro.proto
+++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/avro.proto
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/protobuf.proto b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/protobuf.proto
index 741e7d114d..11e851be44 100644
--- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/protobuf.proto
+++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/protobuf.proto
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -23,7 +23,7 @@ option java_multiple_files = true;
 option java_outer_classname = "ProtoBufProto";
 option java_package = "com.google.cloud.bigquery.storage.v1beta2";
 
-// Protobuf schema is an API presentation the proto buffer schema.
+// ProtoSchema describes the schema of the serialized protocol buffer data rows.
 message ProtoSchema {
   // Descriptor for input message. The descriptor has to be self contained,
   // including all the nested types, excepted for proto buffer well known types
@@ -31,7 +31,6 @@ message ProtoSchema {
   google.protobuf.DescriptorProto proto_descriptor = 1;
 }
 
-// Protobuf rows.
 message ProtoRows {
   // A sequence of rows serialized as a Protocol Buffer.
   //
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/storage.proto b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/storage.proto
index 5538e29f28..8c25b84612 100644
--- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/storage.proto
+++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/storage.proto
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -71,8 +71,7 @@ service BigQueryRead {
       post: "/v1beta2/{read_session.table=projects/*/datasets/*/tables/*}"
       body: "*"
     };
-    option (google.api.method_signature) =
-        "parent,read_session,max_stream_count";
+    option (google.api.method_signature) = "parent,read_session,max_stream_count";
   }
 
   // Reads rows from the stream in the format prescribed by the ReadSession.
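The clarified ProtoSchema comment above is worth illustrating: an AppendRows payload carries a self-contained DescriptorProto plus rows the client has already serialized. A minimal sketch using the generated v1beta2 classes; the class and method names here are illustrative, and it assumes a message whose descriptor needs no inlining (nested or imported types, other than well-known types, would have to be folded into the DescriptorProto first, per the comment above):

import com.google.cloud.bigquery.storage.v1beta2.ProtoRows;
import com.google.cloud.bigquery.storage.v1beta2.ProtoSchema;
import com.google.protobuf.ByteString;
import com.google.protobuf.Descriptors.Descriptor;

public class ProtoPayloadSketch {
  // Wraps a compiled message's descriptor, e.g. FooRecord.getDescriptor()
  // for a hypothetical generated FooRecord class. The descriptor must be
  // self-contained for the backend to interpret it.
  static ProtoSchema schemaOf(Descriptor descriptor) {
    return ProtoSchema.newBuilder()
        .setProtoDescriptor(descriptor.toProto())
        .build();
  }

  // Packs rows already serialized by the client (message.toByteString())
  // into the ProtoRows wrapper carried by AppendRowsRequest.ProtoData.
  static ProtoRows rowsOf(Iterable<ByteString> serializedRows) {
    return ProtoRows.newBuilder().addAllSerializedRows(serializedRows).build();
  }
}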
@@ -101,8 +100,7 @@ service BigQueryRead {
   // original, primary, and residual, that original[0-j] = primary[0-j] and
   // original[j-n] = residual[0-m] once the streams have been read to
   // completion.
-  rpc SplitReadStream(SplitReadStreamRequest)
-      returns (SplitReadStreamResponse) {
+  rpc SplitReadStream(SplitReadStreamRequest) returns (SplitReadStreamResponse) {
     option (google.api.http) = {
       get: "/v1beta2/{name=projects/*/locations/*/sessions/*/streams/*}"
     };
@@ -171,8 +169,7 @@ service BigQueryWrite {
 
   // Finalize a write stream so that no new data can be appended to the
   // stream. Finalize is not supported on the '_default' stream.
-  rpc FinalizeWriteStream(FinalizeWriteStreamRequest)
-      returns (FinalizeWriteStreamResponse) {
+  rpc FinalizeWriteStream(FinalizeWriteStreamRequest) returns (FinalizeWriteStreamResponse) {
     option (google.api.http) = {
       post: "/v1beta2/{name=projects/*/datasets/*/tables/*/streams/*}"
       body: "*"
@@ -185,8 +182,7 @@ service BigQueryWrite {
   // Streams must be finalized before commit and cannot be committed multiple
   // times. Once a stream is committed, data in the stream becomes available
   // for read operations.
-  rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest)
-      returns (BatchCommitWriteStreamsResponse) {
+  rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest) returns (BatchCommitWriteStreamsResponse) {
     option (google.api.http) = {
       get: "/v1beta2/{parent=projects/*/datasets/*/tables/*}"
     };
@@ -303,6 +299,19 @@ message ReadRowsResponse {
   // Throttling state. If unset, the latest response still describes
   // the current throttling status.
   ThrottleState throttle_state = 5;
+
+  // The schema for the read. If read_options.selected_fields is set, the
+  // schema may be different from the table schema as it will only contain
+  // the selected fields. This schema is equivalent to the one returned by
+  // CreateReadSession. This field is only populated in the first
+  // ReadRowsResponse RPC.
+  oneof schema {
+    // Output only. Avro schema.
+    AvroSchema avro_schema = 7 [(google.api.field_behavior) = OUTPUT_ONLY];
+
+    // Output only. Arrow schema.
+    ArrowSchema arrow_schema = 8 [(google.api.field_behavior) = OUTPUT_ONLY];
+  }
 }
 
 // Request message for `SplitReadStream`.
@@ -342,7 +351,9 @@ message CreateWriteStreamRequest {
   // of `projects/{project}/datasets/{dataset}/tables/{table}`.
   string parent = 1 [
     (google.api.field_behavior) = REQUIRED,
-    (google.api.resource_reference) = { type: "bigquery.googleapis.com/Table" }
+    (google.api.resource_reference) = {
+      type: "bigquery.googleapis.com/Table"
+    }
   ];
 
   // Required. Stream to be created.
@@ -360,9 +371,9 @@ message AppendRowsRequest {
     ProtoRows rows = 2;
   }
 
-  // Required. The stream that is the target of the append operation. This value
-  // must be specified for the initial request. If subsequent requests specify
-  // the stream name, it must equal to the value provided in the first request.
+  // Required. The stream that is the target of the append operation. This value must be
+  // specified for the initial request. If subsequent requests specify the
+  // stream name, it must equal the value provided in the first request.
   // To write to the _default stream, populate this field with a string in the
   // format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
   string write_stream = 1 [
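The new schema oneof above means the first ReadRowsResponse on a stream carries the (possibly field-pruned) schema, so a reader only needs to capture it once. A minimal sketch against the generated v1beta2 accessors, assuming an Arrow-format session; the class and method names are illustrative:

import com.google.cloud.bigquery.storage.v1beta2.ArrowSchema;
import com.google.cloud.bigquery.storage.v1beta2.ReadRowsResponse;

public class ReadSchemaSketch {
  private ArrowSchema arrowSchema; // Captured from the first response only.

  // Handles one streamed response. Per the comment above, the schema oneof
  // is only populated in the first ReadRowsResponse of the stream.
  void onResponse(ReadRowsResponse response) {
    if (arrowSchema == null && response.hasArrowSchema()) {
      arrowSchema = response.getArrowSchema();
    }
    // Decode response.getArrowRecordBatch() against arrowSchema here.
  }
}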
@@ -394,7 +405,7 @@
 
 // Response message for `AppendRows`.
 message AppendRowsResponse {
-  // A success append result.
+  // AppendResult is returned for successful append requests.
   message AppendResult {
     // The row offset at which the last append occurred. The offset will not be
     // set if appending using default streams.
@@ -405,25 +416,32 @@
     // Result if the append is successful.
     AppendResult append_result = 1;
 
-    // Error in case of request failed. If set, it means rows are not accepted
-    // into the system. Users can retry or continue with other requests within
-    // the same connection.
-    // ALREADY_EXISTS: happens when offset is specified, it means the entire
-    // request is already appended, it is safe to ignore this error.
-    // OUT_OF_RANGE: happens when offset is specified, it means the specified
-    // offset is beyond the end of the stream.
-    // INVALID_ARGUMENT: error caused by malformed request or data.
-    // RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-    // append without offset.
-    // ABORTED: request processing is aborted because of prior failures, request
-    // can be retried if previous failure is fixed.
-    // INTERNAL: server side errors that can be retried.
+    // Error returned when problems were encountered. If present,
+    // it indicates rows were not accepted into the system.
+    // Users can retry or continue with other append requests within the
+    // same connection.
+    //
+    // Additional information about error signalling:
+    //
+    // ALREADY_EXISTS: Happens when an append specified an offset, and the
+    // backend has already received data at this offset. Typically encountered
+    // in retry scenarios, and can be ignored.
+    //
+    // OUT_OF_RANGE: Returned when the specified offset in the stream is beyond
+    // the current end of the stream.
+    //
+    // INVALID_ARGUMENT: Indicates a malformed request or data.
+    //
+    // ABORTED: Request processing is aborted because of prior failures. The
+    // request can be retried if the previous failure is addressed.
+    //
+    // INTERNAL: Indicates server side error(s) that can be retried.
     google.rpc.Status error = 2;
   }
 
   // If backend detects a schema update, pass it to user so that user can
-  // use it to input new type of message. It will be empty when there is no
-  // schema updates.
+  // use it to input new type of message. It will be empty when no schema
+  // updates have occurred.
   TableSchema updated_schema = 3;
 }
@@ -441,9 +459,11 @@ message GetWriteStreamRequest {
 
 // Request message for `BatchCommitWriteStreams`.
 message BatchCommitWriteStreamsRequest {
-  // Required. Parent table that all the streams should belong to, in the form
-  // of `projects/{project}/datasets/{dataset}/tables/{table}`.
-  string parent = 1 [(google.api.field_behavior) = REQUIRED];
+  // Required. Parent table that all the streams should belong to, in the form of
+  // `projects/{project}/datasets/{dataset}/tables/{table}`.
+  string parent = 1 [
+    (google.api.field_behavior) = REQUIRED
+  ];
 
   // Required. The group of streams that will be committed atomically.
   repeated string write_streams = 2 [(google.api.field_behavior) = REQUIRED];
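The strengthened commit contract spelled out in the response hunk just below (an unset commit_time means the commit did not succeed, and any stream error means zero streams were committed) lends itself to a simple verification step. A minimal sketch, assuming the v1beta2 BigQueryWriteClient from this library; the helper name and the parent/stream values are hypothetical:

import com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsRequest;
import com.google.cloud.bigquery.storage.v1beta2.BatchCommitWriteStreamsResponse;
import com.google.cloud.bigquery.storage.v1beta2.BigQueryWriteClient;

public class BatchCommitSketch {
  static void commitOrThrow(BigQueryWriteClient client, String parent, String stream) {
    // parent is e.g. projects/{project}/datasets/{dataset}/tables/{table}.
    BatchCommitWriteStreamsResponse response =
        client.batchCommitWriteStreams(
            BatchCommitWriteStreamsRequest.newBuilder()
                .setParent(parent)
                .addWriteStreams(stream)
                .build());
    // Per the comments below: a missing commit_time means the commit failed,
    // and stream_errors lists the offending streams (atomically, none were
    // committed).
    if (!response.hasCommitTime()) {
      throw new IllegalStateException(
          "Commit failed: " + response.getStreamErrorsList());
    }
  }
}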
@@ -452,11 +472,15 @@ message BatchCommitWriteStreamsRequest {
 
 // Response message for `BatchCommitWriteStreams`.
 message BatchCommitWriteStreamsResponse {
   // The time at which streams were committed in microseconds granularity.
-  // This field will only exist when there is no stream errors.
+  // This field will only exist when there are no stream errors.
+  // **Note** if this field is not set, it means the commit was not successful.
   google.protobuf.Timestamp commit_time = 1;
 
   // Stream level error if commit failed. Only streams with error will be in
   // the list.
+  // If empty, there is no error and all streams are committed successfully.
+  // If non-empty, certain streams have errors and zero streams are committed
+  // due to the atomicity guarantee.
   repeated StorageError stream_errors = 2;
 }
@@ -500,8 +524,9 @@ message FlushRowsResponse {
 }
 
 // Structured custom BigQuery Storage error message. The error can be attached
-// as error details in the returned rpc Status. User can use the info to process
-// errors in a structural way, rather than having to parse error messages.
+// as error details in the returned rpc Status. In particular, the use of error
+// codes allows more structured error handling, and reduces the need to evaluate
+// unstructured error text strings.
 message StorageError {
   // Error code for `StorageError`.
   enum StorageErrorCode {
@@ -522,9 +547,12 @@ message StorageError {
     INVALID_STREAM_TYPE = 4;
 
     // Invalid Stream state.
-    // For example, you try to commit a stream that is not fianlized or is
+    // For example, you try to commit a stream that is not finalized or is
     // garbaged.
     INVALID_STREAM_STATE = 5;
+
+    // Stream is finalized.
+    STREAM_FINALIZED = 6;
   }
 
   // BigQuery Storage specific error code.
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/stream.proto b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/stream.proto
index 2b0a58c95a..d166e98754 100644
--- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/stream.proto
+++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/stream.proto
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
@@ -74,6 +74,8 @@ message ReadSession {
     //   "nullable_field is not NULL"
     //   "st_equals(geo_field, st_geofromtext("POINT(2, 2)"))"
     //   "numeric_field BETWEEN 1.0 AND 5.0"
+    //
+    // Restricted to a maximum length of 1 MB.
     string row_restriction = 2;
 
     // Optional. Options specific to the Apache Arrow output format.
diff --git a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/table.proto b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/table.proto
index fd8a0a75a5..670a4a64a8 100644
--- a/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/table.proto
+++ b/proto-google-cloud-bigquerystorage-v1beta2/src/main/proto/google/cloud/bigquery/storage/v1beta2/table.proto
@@ -1,4 +1,4 @@
-// Copyright 2020 Google LLC
+// Copyright 2021 Google LLC
 //
 // Licensed under the Apache License, Version 2.0 (the "License");
 // you may not use this file except in compliance with the License.
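Since StorageError now travels as structured detail rather than free-form message text, a caller can unpack it from the google.rpc.Status attached to a failed request instead of string-matching. A minimal sketch, assuming only the generated v1beta2 StorageError class and standard protobuf Any handling; the helper name is illustrative:

import com.google.cloud.bigquery.storage.v1beta2.StorageError;
import com.google.protobuf.Any;
import com.google.protobuf.InvalidProtocolBufferException;
import com.google.rpc.Status;

public class StorageErrorSketch {
  // Returns the first structured StorageError packed into the status
  // details, or null if none is attached.
  static StorageError storageErrorOf(Status status)
      throws InvalidProtocolBufferException {
    for (Any detail : status.getDetailsList()) {
      if (detail.is(StorageError.class)) {
        return detail.unpack(StorageError.class);
      }
    }
    return null;
  }
}

A caller switching on StorageError.getCode() can then treat the newly added STREAM_FINALIZED code as terminal for that stream rather than retrying.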
@@ -70,6 +70,12 @@ message TableFieldSchema {
 
     // Numeric value
     NUMERIC = 12;
+
+    // BigNumeric value
+    BIGNUMERIC = 13;
+
+    // Interval
+    INTERVAL = 14;
   }
 
   enum Mode {
diff --git a/synth.metadata b/synth.metadata
index c13a54721d..6c1a2e2c95 100644
--- a/synth.metadata
+++ b/synth.metadata
@@ -11,39 +11,39 @@
       "git": {
         "name": ".",
         "remote": "https://github.com/googleapis/java-bigquerystorage.git",
-        "sha": "9bc1a4b36fa53d2ba8beea0ce38dec788631d458"
+        "sha": "c7da34252ee8c243be3ce737d03e1e12f10a5eba"
       }
     },
     {
       "git": {
         "name": "googleapis",
         "remote": "https://github.com/googleapis/googleapis.git",
-        "sha": "c539b9b08b3366ee00c0ec1950f4df711552a269",
-        "internalRef": "365759522"
+        "sha": "1a7d7641a161bec4d5ab13b7ff5590ed8ba26d96",
+        "internalRef": "366823379"
       }
     },
     {
       "git": {
         "name": "googleapis",
         "remote": "https://github.com/googleapis/googleapis.git",
-        "sha": "c539b9b08b3366ee00c0ec1950f4df711552a269",
-        "internalRef": "365759522"
+        "sha": "1a7d7641a161bec4d5ab13b7ff5590ed8ba26d96",
+        "internalRef": "366823379"
       }
     },
     {
       "git": {
         "name": "googleapis",
         "remote": "https://github.com/googleapis/googleapis.git",
-        "sha": "c539b9b08b3366ee00c0ec1950f4df711552a269",
-        "internalRef": "365759522"
+        "sha": "1a7d7641a161bec4d5ab13b7ff5590ed8ba26d96",
+        "internalRef": "366823379"
      }
     },
     {
       "git": {
         "name": "googleapis",
         "remote": "https://github.com/googleapis/googleapis.git",
-        "sha": "c539b9b08b3366ee00c0ec1950f4df711552a269",
-        "internalRef": "365759522"
+        "sha": "1a7d7641a161bec4d5ab13b7ff5590ed8ba26d96",
+        "internalRef": "366823379"
       }
     },
     {