From 98cd924e335441f633ad87a0aed118de6951260c Mon Sep 17 00:00:00 2001 From: Yiru Tang Date: Thu, 15 Apr 2021 09:36:13 -0700 Subject: [PATCH] refactor!: remove v1alpha2 library (#1003) * refactor: remove v1alpha2 library * clirr fix * more clirr fix * . * other files * bom --- google-cloud-bigquerystorage-bom/pom.xml | 10 - .../clirr-ignored-differences.xml | 29 + google-cloud-bigquerystorage/pom.xml | 9 - .../BQTableSchemaToProtoDescriptor.java | 155 - .../storage/v1alpha2/BigQueryWriteClient.java | 764 -- .../v1alpha2/BigQueryWriteSettings.java | 252 - .../storage/v1alpha2/DirectWriter.java | 166 - .../storage/v1alpha2/JsonStreamWriter.java | 406 - .../storage/v1alpha2/JsonToProtoMessage.java | 326 - .../storage/v1alpha2/JsonWriterCache.java | 146 - .../v1alpha2/OnSchemaUpdateRunnable.java | 57 - .../v1alpha2/ProtoSchemaConverter.java | 121 - .../storage/v1alpha2/SchemaCompatibility.java | 546 - .../storage/v1alpha2/StreamWriter.java | 1045 -- .../bigquery/storage/v1alpha2/Waiter.java | 182 - .../storage/v1alpha2/WriterCache.java | 193 - .../storage/v1alpha2/gapic_metadata.json | 36 - .../storage/v1alpha2/package-info.java | 39 - .../v1alpha2/stub/BigQueryWriteStub.java | 71 - .../stub/BigQueryWriteStubSettings.java | 452 - .../GrpcBigQueryWriteCallableFactory.java | 118 - .../v1alpha2/stub/GrpcBigQueryWriteStub.java | 372 - .../BQTableSchemaToProtoDescriptorTest.java | 403 - .../v1alpha2/BigQueryWriteClientTest.java | 511 - .../storage/v1alpha2/DirectWriterTest.java | 431 - .../storage/v1alpha2/FakeBigQueryWrite.java | 86 - .../v1alpha2/FakeBigQueryWriteImpl.java | 215 - .../bigquery/storage/v1alpha2/FakeClock.java | 42 - .../FakeScheduledExecutorService.java | 347 - .../v1alpha2/JsonStreamWriterTest.java | 960 -- .../v1alpha2/JsonToProtoMessageTest.java | 750 -- .../storage/v1alpha2/JsonWriterCacheTest.java | 202 - .../storage/v1alpha2/MockBigQueryWrite.java | 59 - .../v1alpha2/MockBigQueryWriteImpl.java | 206 - .../v1alpha2/ProtoSchemaConverterTest.java | 197 - .../v1alpha2/SchemaCompatibilityTest.java | 1015 -- .../storage/v1alpha2/StreamWriterTest.java | 837 -- .../storage/v1alpha2/WriterCacheTest.java | 243 - .../it/ITBigQueryWriteManualClientTest.java | 729 -- .../pom.xml | 50 - .../storage/v1alpha2/BigQueryWriteGrpc.java | 1076 -- pom.xml | 12 - .../pom.xml | 42 - .../storage/v1alpha2/ProtoBufProto.java | 1691 --- .../bigquery/storage/v1alpha2/Storage.java | 10844 ---------------- .../bigquery/storage/v1alpha2/Stream.java | 2430 ---- .../bigquery/storage/v1alpha2/Table.java | 3559 ----- .../bigquery/storage/v1alpha2/TableName.java | 217 - .../storage/v1alpha2/WriteStreamName.java | 257 - .../bigquery/storage/v1alpha2/protobuf.proto | 42 - .../bigquery/storage/v1alpha2/storage.proto | 287 - .../bigquery/storage/v1alpha2/stream.proto | 77 - .../bigquery/storage/v1alpha2/table.proto | 100 - synth.metadata | 31 - synth.py | 2 +- versions.txt | 2 - 56 files changed, 30 insertions(+), 33417 deletions(-) create mode 100644 google-cloud-bigquerystorage/clirr-ignored-differences.xml delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BQTableSchemaToProtoDescriptor.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java delete mode 100644 
google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/DirectWriter.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/JsonStreamWriter.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/JsonToProtoMessage.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/JsonWriterCache.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/OnSchemaUpdateRunnable.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoSchemaConverter.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/SchemaCompatibility.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/StreamWriter.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Waiter.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/WriterCache.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/gapic_metadata.json delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/package-info.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteCallableFactory.java delete mode 100644 google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BQTableSchemaToProtoDescriptorTest.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/DirectWriterTest.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeBigQueryWrite.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeBigQueryWriteImpl.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeClock.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeScheduledExecutorService.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/JsonStreamWriterTest.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/JsonToProtoMessageTest.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/JsonWriterCacheTest.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWrite.java delete mode 100644 
google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoSchemaConverterTest.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/SchemaCompatibilityTest.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/StreamWriterTest.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/WriterCacheTest.java delete mode 100644 google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/it/ITBigQueryWriteManualClientTest.java delete mode 100644 grpc-google-cloud-bigquerystorage-v1alpha2/pom.xml delete mode 100644 grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java delete mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/pom.xml delete mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoBufProto.java delete mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java delete mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java delete mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java delete mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/TableName.java delete mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/WriteStreamName.java delete mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto delete mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto delete mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto delete mode 100644 proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto diff --git a/google-cloud-bigquerystorage-bom/pom.xml b/google-cloud-bigquerystorage-bom/pom.xml index f2e9860b30..752e4ddc7a 100644 --- a/google-cloud-bigquerystorage-bom/pom.xml +++ b/google-cloud-bigquerystorage-bom/pom.xml @@ -60,11 +60,6 @@ - - com.google.api.grpc - proto-google-cloud-bigquerystorage-v1alpha2 - 0.118.2-SNAPSHOT - com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 @@ -81,11 +76,6 @@ 1.18.2-SNAPSHOT - - com.google.api.grpc - grpc-google-cloud-bigquerystorage-v1alpha2 - 0.118.2-SNAPSHOT - com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 diff --git a/google-cloud-bigquerystorage/clirr-ignored-differences.xml b/google-cloud-bigquerystorage/clirr-ignored-differences.xml new file mode 100644 index 0000000000..ea19962285 --- /dev/null +++ b/google-cloud-bigquerystorage/clirr-ignored-differences.xml @@ -0,0 +1,29 @@ + + + + + + 8001 + com/google/cloud/bigquery/storage/v1alpha2/* + + + 8001 + com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub + + + 8001 + com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings + + + 8001 + 
com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings$Builder + + + 8001 + com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteCallableFactory + + + 8001 + com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub + + \ No newline at end of file diff --git a/google-cloud-bigquerystorage/pom.xml b/google-cloud-bigquerystorage/pom.xml index edf7f55dee..c43e0e4e0d 100644 --- a/google-cloud-bigquerystorage/pom.xml +++ b/google-cloud-bigquerystorage/pom.xml @@ -72,10 +72,6 @@ proto-google-common-protos - - com.google.api.grpc - proto-google-cloud-bigquerystorage-v1alpha2 - com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 @@ -162,11 +158,6 @@ protobuf-java - - com.google.api.grpc - grpc-google-cloud-bigquerystorage-v1alpha2 - test - com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BQTableSchemaToProtoDescriptor.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BQTableSchemaToProtoDescriptor.java deleted file mode 100644 index a219b00694..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BQTableSchemaToProtoDescriptor.java +++ /dev/null @@ -1,155 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.protobuf.DescriptorProtos.DescriptorProto; -import com.google.protobuf.DescriptorProtos.FieldDescriptorProto; -import com.google.protobuf.DescriptorProtos.FileDescriptorProto; -import com.google.protobuf.Descriptors; -import com.google.protobuf.Descriptors.Descriptor; -import com.google.protobuf.Descriptors.FileDescriptor; -import java.util.ArrayList; -import java.util.HashMap; -import java.util.List; - -/** - * Converts a BQ table schema to protobuf descriptor. All field names will be converted to lowercase - * when constructing the protobuf descriptor. The mapping between field types and field modes are - * shown in the ImmutableMaps below. 
- * - * @deprecated Use {@link com.google.cloud.bigquery.storage.v1beta2.BQTableSchemaToProtoDescriptor} - */ -@Deprecated -public class BQTableSchemaToProtoDescriptor { - private static ImmutableMap - BQTableSchemaModeMap = - ImmutableMap.of( - Table.TableFieldSchema.Mode.NULLABLE, FieldDescriptorProto.Label.LABEL_OPTIONAL, - Table.TableFieldSchema.Mode.REPEATED, FieldDescriptorProto.Label.LABEL_REPEATED, - Table.TableFieldSchema.Mode.REQUIRED, FieldDescriptorProto.Label.LABEL_REQUIRED); - - private static ImmutableMap - BQTableSchemaTypeMap = - new ImmutableMap.Builder() - .put(Table.TableFieldSchema.Type.BOOL, FieldDescriptorProto.Type.TYPE_BOOL) - .put(Table.TableFieldSchema.Type.BYTES, FieldDescriptorProto.Type.TYPE_BYTES) - .put(Table.TableFieldSchema.Type.DATE, FieldDescriptorProto.Type.TYPE_INT32) - .put(Table.TableFieldSchema.Type.DATETIME, FieldDescriptorProto.Type.TYPE_STRING) - .put(Table.TableFieldSchema.Type.DOUBLE, FieldDescriptorProto.Type.TYPE_DOUBLE) - .put(Table.TableFieldSchema.Type.GEOGRAPHY, FieldDescriptorProto.Type.TYPE_STRING) - .put(Table.TableFieldSchema.Type.INT64, FieldDescriptorProto.Type.TYPE_INT64) - .put(Table.TableFieldSchema.Type.NUMERIC, FieldDescriptorProto.Type.TYPE_BYTES) - .put(Table.TableFieldSchema.Type.STRING, FieldDescriptorProto.Type.TYPE_STRING) - .put(Table.TableFieldSchema.Type.STRUCT, FieldDescriptorProto.Type.TYPE_MESSAGE) - .put(Table.TableFieldSchema.Type.TIME, FieldDescriptorProto.Type.TYPE_STRING) - .put(Table.TableFieldSchema.Type.TIMESTAMP, FieldDescriptorProto.Type.TYPE_INT64) - .build(); - - /** - * Converts Table.TableSchema to a Descriptors.Descriptor object. - * - * @param BQTableSchema - * @throws Descriptors.DescriptorValidationException - */ - public static Descriptor convertBQTableSchemaToProtoDescriptor(Table.TableSchema BQTableSchema) - throws Descriptors.DescriptorValidationException { - Preconditions.checkNotNull(BQTableSchema, "BQTableSchema is null."); - return convertBQTableSchemaToProtoDescriptorImpl( - BQTableSchema, "root", new HashMap, Descriptor>()); - } - - /** - * Converts a Table.TableSchema to a Descriptors.Descriptor object. - * - * @param BQTableSchema - * @param scope Keeps track of current scope to prevent repeated naming while constructing - * descriptor. 
- * @param dependencyMap Stores already constructed descriptors to prevent reconstruction - * @throws Descriptors.DescriptorValidationException - */ - private static Descriptor convertBQTableSchemaToProtoDescriptorImpl( - Table.TableSchema BQTableSchema, - String scope, - HashMap, Descriptor> dependencyMap) - throws Descriptors.DescriptorValidationException { - List dependenciesList = new ArrayList(); - List fields = new ArrayList(); - int index = 1; - for (Table.TableFieldSchema BQTableField : BQTableSchema.getFieldsList()) { - String currentScope = scope + "__" + BQTableField.getName(); - if (BQTableField.getType() == Table.TableFieldSchema.Type.STRUCT) { - ImmutableList fieldList = - ImmutableList.copyOf(BQTableField.getFieldsList()); - if (dependencyMap.containsKey(fieldList)) { - Descriptor descriptor = dependencyMap.get(fieldList); - dependenciesList.add(descriptor.getFile()); - fields.add(convertBQTableFieldToProtoField(BQTableField, index++, descriptor.getName())); - } else { - Descriptor descriptor = - convertBQTableSchemaToProtoDescriptorImpl( - Table.TableSchema.newBuilder().addAllFields(fieldList).build(), - currentScope, - dependencyMap); - dependenciesList.add(descriptor.getFile()); - dependencyMap.put(fieldList, descriptor); - fields.add(convertBQTableFieldToProtoField(BQTableField, index++, currentScope)); - } - } else { - fields.add(convertBQTableFieldToProtoField(BQTableField, index++, currentScope)); - } - } - FileDescriptor[] dependenciesArray = new FileDescriptor[dependenciesList.size()]; - dependenciesArray = dependenciesList.toArray(dependenciesArray); - DescriptorProto descriptorProto = - DescriptorProto.newBuilder().setName(scope).addAllField(fields).build(); - FileDescriptorProto fileDescriptorProto = - FileDescriptorProto.newBuilder().addMessageType(descriptorProto).build(); - FileDescriptor fileDescriptor = - FileDescriptor.buildFrom(fileDescriptorProto, dependenciesArray); - Descriptor descriptor = fileDescriptor.findMessageTypeByName(scope); - return descriptor; - } - - /** - * Converts a BQTableField to ProtoField - * - * @param BQTableField BQ Field used to construct a FieldDescriptorProto - * @param index Index for protobuf fields. 
- * @param scope used to name descriptors - */ - private static FieldDescriptorProto convertBQTableFieldToProtoField( - Table.TableFieldSchema BQTableField, int index, String scope) { - Table.TableFieldSchema.Mode mode = BQTableField.getMode(); - String fieldName = BQTableField.getName().toLowerCase(); - if (BQTableField.getType() == Table.TableFieldSchema.Type.STRUCT) { - return FieldDescriptorProto.newBuilder() - .setName(fieldName) - .setTypeName(scope) - .setLabel((FieldDescriptorProto.Label) BQTableSchemaModeMap.get(mode)) - .setNumber(index) - .build(); - } - return FieldDescriptorProto.newBuilder() - .setName(fieldName) - .setType((FieldDescriptorProto.Type) BQTableSchemaTypeMap.get(BQTableField.getType())) - .setLabel((FieldDescriptorProto.Label) BQTableSchemaModeMap.get(mode)) - .setNumber(index) - .build(); - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java deleted file mode 100644 index d7915de9fd..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java +++ /dev/null @@ -1,764 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.core.BetaApi; -import com.google.api.gax.core.BackgroundResource; -import com.google.api.gax.rpc.BidiStreamingCallable; -import com.google.api.gax.rpc.UnaryCallable; -import com.google.cloud.bigquery.storage.v1alpha2.stub.BigQueryWriteStub; -import com.google.cloud.bigquery.storage.v1alpha2.stub.BigQueryWriteStubSettings; -import java.io.IOException; -import java.util.concurrent.TimeUnit; -import javax.annotation.Generated; - -// AUTO-GENERATED DOCUMENTATION AND CLASS. -/** - * Service Description: BigQuery Write API. - * - *

The Write API can be used to write data to BigQuery. - * - *

This class provides the ability to make remote calls to the backing service through method - * calls that map to API methods. Sample code to get started: - * - *

{@code
- * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
- *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
- *   Stream.WriteStream writeStream = Stream.WriteStream.newBuilder().build();
- *   Stream.WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
- * }
- * }
- * - *

Note: close() needs to be called on the BigQueryWriteClient object to clean up resources such - * as threads. In the example above, try-with-resources is used, which automatically calls close(). - * - *

The surface of this class includes several types of Java methods for each of the API's - * methods: - * - *

    - *
  1. A "flattened" method. With this type of method, the fields of the request type have been - * converted into function parameters. It may be the case that not all fields are available as - * parameters, and not every API method will have a flattened method entry point. - *
  2. A "request object" method. This type of method only takes one parameter, a request object, - * which must be constructed before the call. Not every API method will have a request object - * method. - *
  3. A "callable" method. This type of method takes no parameters and returns an immutable API - * callable object, which can be used to initiate calls to the service. - *
- * - *

See the individual methods for example code. - * - *
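For orientation, the same RPC can be called in all three shapes. A condensed sketch, assembled from the getWriteStream samples further down (it assumes a client already created with BigQueryWriteClient.create()):

    // 1. Flattened method: identifiers passed directly as parameters.
    WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
    Stream.WriteStream fromFlattened = bigQueryWriteClient.getWriteStream(name);

    // 2. Request-object method: a single, fully built request.
    Storage.GetWriteStreamRequest request =
        Storage.GetWriteStreamRequest.newBuilder().setName(name.toString()).build();
    Stream.WriteStream fromRequest = bigQueryWriteClient.getWriteStream(request);

    // 3. Callable method: returns an immutable callable that yields a future.
    ApiFuture<Stream.WriteStream> future =
        bigQueryWriteClient.getWriteStreamCallable().futureCall(request);
    Stream.WriteStream fromCallable = future.get(); // blocks; may throw checked exceptions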

Many parameters require resource names to be formatted in a particular way. To assist with - * these names, this class includes a format method for each type of name, and additionally a parse - * method to extract the individual identifiers contained within names that are returned. - * - *
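A minimal sketch of those helpers for the table resource, assuming the standard generated members on TableName (of, parse, toString, and per-segment getters):

    // Format a table resource name from its individual identifiers.
    TableName table = TableName.of("my-project", "my_dataset", "my_table");
    String formatted = table.toString();
    // formatted is "projects/my-project/datasets/my_dataset/tables/my_table"

    // Parse a name returned by the service back into its identifiers.
    TableName parsed = TableName.parse(formatted);
    String dataset = parsed.getDataset();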

This class can be customized by passing in a custom instance of BigQueryWriteSettings to - * create(). For example: - * - *

To customize credentials: - * - *

{@code
- * BigQueryWriteSettings bigQueryWriteSettings =
- *     BigQueryWriteSettings.newBuilder()
- *         .setCredentialsProvider(FixedCredentialsProvider.create(myCredentials))
- *         .build();
- * BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create(bigQueryWriteSettings);
- * }
- * - *

To customize the endpoint: - * - *

{@code
- * BigQueryWriteSettings bigQueryWriteSettings =
- *     BigQueryWriteSettings.newBuilder().setEndpoint(myEndpoint).build();
- * BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create(bigQueryWriteSettings);
- * }
- * - *

Please refer to the GitHub repository's samples for more quickstart code snippets. - * - * @deprecated This class is deprecated and will be removed in the next major version update. - */ -@BetaApi -@Deprecated -@Generated("by gapic-generator-java") -public class BigQueryWriteClient implements BackgroundResource { - private final BigQueryWriteSettings settings; - private final BigQueryWriteStub stub; - - /** Constructs an instance of BigQueryWriteClient with default settings. */ - public static final BigQueryWriteClient create() throws IOException { - return create(BigQueryWriteSettings.newBuilder().build()); - } - - /** - * Constructs an instance of BigQueryWriteClient, using the given settings. The channels are - * created based on the settings passed in, or defaults for any settings that are not set. - */ - public static final BigQueryWriteClient create(BigQueryWriteSettings settings) - throws IOException { - return new BigQueryWriteClient(settings); - } - - /** - * Constructs an instance of BigQueryWriteClient, using the given stub for making calls. This is - * for advanced usage - prefer using create(BigQueryWriteSettings). - */ - @BetaApi("A restructuring of stub classes is planned, so this may break in the future") - public static final BigQueryWriteClient create(BigQueryWriteStub stub) { - return new BigQueryWriteClient(stub); - } - - /** - * Constructs an instance of BigQueryWriteClient, using the given settings. This is protected so - * that it is easy to make a subclass, but otherwise, the static factory methods should be - * preferred. - */ - protected BigQueryWriteClient(BigQueryWriteSettings settings) throws IOException { - this.settings = settings; - this.stub = ((BigQueryWriteStubSettings) settings.getStubSettings()).createStub(); - } - - @BetaApi("A restructuring of stub classes is planned, so this may break in the future") - protected BigQueryWriteClient(BigQueryWriteStub stub) { - this.settings = null; - this.stub = stub; - } - - public final BigQueryWriteSettings getSettings() { - return settings; - } - - @BetaApi("A restructuring of stub classes is planned, so this may break in the future") - public BigQueryWriteStub getStub() { - return stub; - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Creates a write stream to the given table. Additionally, every table has a special COMMITTED - * stream named '_default' to which data can be written. This stream doesn't need to be created - * using CreateWriteStream. It is a stream that can be used simultaneously by any number of - * clients. Data written to this stream is considered committed as soon as an acknowledgement is - * received. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
-   *   Stream.WriteStream writeStream = Stream.WriteStream.newBuilder().build();
-   *   Stream.WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
-   * }
-   * }
- * - * @param parent Required. Reference to the table to which the stream belongs, in the format of - * `projects/{project}/datasets/{dataset}/tables/{table}`. - * @param writeStream Required. Stream to be created. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Stream.WriteStream createWriteStream( - TableName parent, Stream.WriteStream writeStream) { - Storage.CreateWriteStreamRequest request = - Storage.CreateWriteStreamRequest.newBuilder() - .setParent(parent == null ? null : parent.toString()) - .setWriteStream(writeStream) - .build(); - return createWriteStream(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Creates a write stream to the given table. Additionally, every table has a special COMMITTED - * stream named '_default' to which data can be written. This stream doesn't need to be created - * using CreateWriteStream. It is a stream that can be used simultaneously by any number of - * clients. Data written to this stream is considered committed as soon as an acknowledgement is - * received. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   String parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString();
-   *   Stream.WriteStream writeStream = Stream.WriteStream.newBuilder().build();
-   *   Stream.WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
-   * }
-   * }
- * - * @param parent Required. Reference to the table to which the stream belongs, in the format of - * `projects/{project}/datasets/{dataset}/tables/{table}`. - * @param writeStream Required. Stream to be created. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Stream.WriteStream createWriteStream(String parent, Stream.WriteStream writeStream) { - Storage.CreateWriteStreamRequest request = - Storage.CreateWriteStreamRequest.newBuilder() - .setParent(parent) - .setWriteStream(writeStream) - .build(); - return createWriteStream(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Creates a write stream to the given table. Additionally, every table has a special COMMITTED - * stream named '_default' to which data can be written. This stream doesn't need to be created - * using CreateWriteStream. It is a stream that can be used simultaneously by any number of - * clients. Data written to this stream is considered committed as soon as an acknowledgement is - * received. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   Storage.CreateWriteStreamRequest request =
-   *       Storage.CreateWriteStreamRequest.newBuilder()
-   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
-   *           .build();
-   *   Stream.WriteStream response = bigQueryWriteClient.createWriteStream(request);
-   * }
-   * }
- * - * @param request The request object containing all of the parameters for the API call. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Stream.WriteStream createWriteStream(Storage.CreateWriteStreamRequest request) { - return createWriteStreamCallable().call(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Creates a write stream to the given table. Additionally, every table has a special COMMITTED - * stream named '_default' to which data can be written. This stream doesn't need to be created - * using CreateWriteStream. It is a stream that can be used simultaneously by any number of - * clients. Data written to this stream is considered committed as soon as an acknowledgement is - * received. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   Storage.CreateWriteStreamRequest request =
-   *       Storage.CreateWriteStreamRequest.newBuilder()
-   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
-   *           .build();
-   *   ApiFuture<Stream.WriteStream> future =
-   *       bigQueryWriteClient.createWriteStreamCallable().futureCall(request);
-   *   // Do something.
-   *   Stream.WriteStream response = future.get();
-   * }
-   * }
- */ - public final UnaryCallable - createWriteStreamCallable() { - return stub.createWriteStreamCallable(); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Appends data to the given stream. - * - *

If `offset` is specified, the `offset` is checked against the end of the stream. The server - * returns `OUT_OF_RANGE` in `AppendRowsResponse` if an attempt is made to append to an offset - * beyond the current end of the stream, or `ALREADY_EXISTS` if the user provides an `offset` that - * has already been written to. The user can retry with an adjusted offset within the same RPC - * stream. If `offset` is not specified, the append happens at the end of the stream. - * - *

The response contains the offset at which the append happened. Responses are received in the - * same order in which requests are sent. There is one response for each successful request. - * If the `offset` is not set in the response, the append did not happen due to an error. If - * one request fails, all subsequent requests will also fail until a successful request is made - * again (see the error-handling sketch after the sample below). - * - *

If the stream is of `PENDING` type, data will only be available for read operations after - * the stream is committed. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   BidiStream<Storage.AppendRowsRequest, Storage.AppendRowsResponse> bidiStream =
-   *       bigQueryWriteClient.appendRowsCallable().call();
-   *   Storage.AppendRowsRequest request =
-   *       Storage.AppendRowsRequest.newBuilder()
-   *           .setWriteStream(
-   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
-   *           .setOffset(Int64Value.newBuilder().build())
-   *           .setIgnoreUnknownFields(true)
-   *           .build();
-   *   bidiStream.send(request);
-   *   for (Storage.AppendRowsResponse response : bidiStream) {
-   *     // Do something when a response is received.
-   *   }
-   * }
-   * }
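Building on the sample above, a hedged sketch of the offset/error semantics described earlier; it assumes AppendRowsResponse exposes its response oneof through the usual generated hasError()/getError()/getOffset() accessors:

    for (Storage.AppendRowsResponse response : bidiStream) {
      if (response.hasError()) {
        // This append did not happen; subsequent requests on the same RPC
        // stream will also fail until a successful request is made again.
        throw new RuntimeException("Append failed: " + response.getError().getMessage());
      }
      // Responses arrive in request order; the offset is where the append landed.
      long appendedAtOffset = response.getOffset();
    }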
- */ - public final BidiStreamingCallable - appendRowsCallable() { - return stub.appendRowsCallable(); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Gets a write stream. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
-   *   Stream.WriteStream response = bigQueryWriteClient.getWriteStream(name);
-   * }
-   * }
- * - * @param name Required. Name of the stream to get, in the form of - * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Stream.WriteStream getWriteStream(WriteStreamName name) { - Storage.GetWriteStreamRequest request = - Storage.GetWriteStreamRequest.newBuilder() - .setName(name == null ? null : name.toString()) - .build(); - return getWriteStream(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Gets a write stream. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   String name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString();
-   *   Stream.WriteStream response = bigQueryWriteClient.getWriteStream(name);
-   * }
-   * }
- * - * @param name Required. Name of the stream to get, in the form of - * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Stream.WriteStream getWriteStream(String name) { - Storage.GetWriteStreamRequest request = - Storage.GetWriteStreamRequest.newBuilder().setName(name).build(); - return getWriteStream(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Gets a write stream. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   Storage.GetWriteStreamRequest request =
-   *       Storage.GetWriteStreamRequest.newBuilder()
-   *           .setName(
-   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
-   *           .build();
-   *   Stream.WriteStream response = bigQueryWriteClient.getWriteStream(request);
-   * }
-   * }
- * - * @param request The request object containing all of the parameters for the API call. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Stream.WriteStream getWriteStream(Storage.GetWriteStreamRequest request) { - return getWriteStreamCallable().call(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Gets a write stream. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   Storage.GetWriteStreamRequest request =
-   *       Storage.GetWriteStreamRequest.newBuilder()
-   *           .setName(
-   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
-   *           .build();
-   *   ApiFuture<Stream.WriteStream> future =
-   *       bigQueryWriteClient.getWriteStreamCallable().futureCall(request);
-   *   // Do something.
-   *   Stream.WriteStream response = future.get();
-   * }
-   * }
- */ - public final UnaryCallable - getWriteStreamCallable() { - return stub.getWriteStreamCallable(); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Finalize a write stream so that no new data can be appended to the stream. Finalize is not - * supported on the '_default' stream. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
-   *   Storage.FinalizeWriteStreamResponse response = bigQueryWriteClient.finalizeWriteStream(name);
-   * }
-   * }
- * - * @param name Required. Name of the stream to finalize, in the form of - * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Storage.FinalizeWriteStreamResponse finalizeWriteStream(WriteStreamName name) { - Storage.FinalizeWriteStreamRequest request = - Storage.FinalizeWriteStreamRequest.newBuilder() - .setName(name == null ? null : name.toString()) - .build(); - return finalizeWriteStream(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Finalize a write stream so that no new data can be appended to the stream. Finalize is not - * supported on the '_default' stream. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   String name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString();
-   *   Storage.FinalizeWriteStreamResponse response = bigQueryWriteClient.finalizeWriteStream(name);
-   * }
-   * }
- * - * @param name Required. Name of the stream to finalize, in the form of - * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Storage.FinalizeWriteStreamResponse finalizeWriteStream(String name) { - Storage.FinalizeWriteStreamRequest request = - Storage.FinalizeWriteStreamRequest.newBuilder().setName(name).build(); - return finalizeWriteStream(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Finalize a write stream so that no new data can be appended to the stream. Finalize is not - * supported on the '_default' stream. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   Storage.FinalizeWriteStreamRequest request =
-   *       Storage.FinalizeWriteStreamRequest.newBuilder()
-   *           .setName(
-   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
-   *           .build();
-   *   Storage.FinalizeWriteStreamResponse response =
-   *       bigQueryWriteClient.finalizeWriteStream(request);
-   * }
-   * }
- * - * @param request The request object containing all of the parameters for the API call. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Storage.FinalizeWriteStreamResponse finalizeWriteStream( - Storage.FinalizeWriteStreamRequest request) { - return finalizeWriteStreamCallable().call(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Finalize a write stream so that no new data can be appended to the stream. Finalize is not - * supported on the '_default' stream. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   Storage.FinalizeWriteStreamRequest request =
-   *       Storage.FinalizeWriteStreamRequest.newBuilder()
-   *           .setName(
-   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
-   *           .build();
-   *   ApiFuture<Storage.FinalizeWriteStreamResponse> future =
-   *       bigQueryWriteClient.finalizeWriteStreamCallable().futureCall(request);
-   *   // Do something.
-   *   Storage.FinalizeWriteStreamResponse response = future.get();
-   * }
-   * }
- */ - public final UnaryCallable< - Storage.FinalizeWriteStreamRequest, Storage.FinalizeWriteStreamResponse> - finalizeWriteStreamCallable() { - return stub.finalizeWriteStreamCallable(); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams - * must be finalized before commit and cannot be committed multiple times. Once a stream is - * committed, data in the stream becomes available for read operations. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
-   *   Storage.BatchCommitWriteStreamsResponse response =
-   *       bigQueryWriteClient.batchCommitWriteStreams(parent);
-   * }
-   * }
- * - * @param parent Required. Parent table that all the streams should belong to, in the form of - * `projects/{project}/datasets/{dataset}/tables/{table}`. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Storage.BatchCommitWriteStreamsResponse batchCommitWriteStreams(TableName parent) { - Storage.BatchCommitWriteStreamsRequest request = - Storage.BatchCommitWriteStreamsRequest.newBuilder() - .setParent(parent == null ? null : parent.toString()) - .build(); - return batchCommitWriteStreams(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams - * must be finalized before commit and cannot be committed multiple times. Once a stream is - * committed, data in the stream becomes available for read operations. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   String parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString();
-   *   Storage.BatchCommitWriteStreamsResponse response =
-   *       bigQueryWriteClient.batchCommitWriteStreams(parent);
-   * }
-   * }
- * - * @param parent Required. Parent table that all the streams should belong to, in the form of - * `projects/{project}/datasets/{dataset}/tables/{table}`. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Storage.BatchCommitWriteStreamsResponse batchCommitWriteStreams(String parent) { - Storage.BatchCommitWriteStreamsRequest request = - Storage.BatchCommitWriteStreamsRequest.newBuilder().setParent(parent).build(); - return batchCommitWriteStreams(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams - * must be finalized before commit and cannot be committed multiple times. Once a stream is - * committed, data in the stream becomes available for read operations. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   Storage.BatchCommitWriteStreamsRequest request =
-   *       Storage.BatchCommitWriteStreamsRequest.newBuilder()
-   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
-   *           .addAllWriteStreams(new ArrayList<String>())
-   *           .build();
-   *   Storage.BatchCommitWriteStreamsResponse response =
-   *       bigQueryWriteClient.batchCommitWriteStreams(request);
-   * }
-   * }
- * - * @param request The request object containing all of the parameters for the API call. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Storage.BatchCommitWriteStreamsResponse batchCommitWriteStreams( - Storage.BatchCommitWriteStreamsRequest request) { - return batchCommitWriteStreamsCallable().call(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Atomically commits a group of `PENDING` streams that belong to the same `parent` table. Streams - * must be finalized before commit and cannot be committed multiple times. Once a stream is - * committed, data in the stream becomes available for read operations. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   Storage.BatchCommitWriteStreamsRequest request =
-   *       Storage.BatchCommitWriteStreamsRequest.newBuilder()
-   *           .setParent(TableName.of("[PROJECT]", "[DATASET]", "[TABLE]").toString())
-   *           .addAllWriteStreams(new ArrayList<String>())
-   *           .build();
-   *   ApiFuture<Storage.BatchCommitWriteStreamsResponse> future =
-   *       bigQueryWriteClient.batchCommitWriteStreamsCallable().futureCall(request);
-   *   // Do something.
-   *   Storage.BatchCommitWriteStreamsResponse response = future.get();
-   * }
-   * }
- */ - public final UnaryCallable< - Storage.BatchCommitWriteStreamsRequest, Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreamsCallable() { - return stub.batchCommitWriteStreamsCallable(); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Flushes rows to a BUFFERED stream. If users are appending rows to a BUFFERED stream, a flush - * operation is required in order for the rows to become available for reading. A flush operation - * advances the flushed offset of a BUFFERED stream up to the offset specified in the request. - * Flush is not supported on the _default stream, since it is not BUFFERED. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   WriteStreamName writeStream =
-   *       WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]");
-   *   Storage.FlushRowsResponse response = bigQueryWriteClient.flushRows(writeStream);
-   * }
-   * }
- * - * @param writeStream Required. The stream that is the target of the flush operation. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Storage.FlushRowsResponse flushRows(WriteStreamName writeStream) { - Storage.FlushRowsRequest request = - Storage.FlushRowsRequest.newBuilder() - .setWriteStream(writeStream == null ? null : writeStream.toString()) - .build(); - return flushRows(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Flushes rows to a BUFFERED stream. If users are appending rows to a BUFFERED stream, a flush - * operation is required in order for the rows to become available for reading. A flush operation - * advances the flushed offset of a BUFFERED stream up to the offset specified in the request. - * Flush is not supported on the _default stream, since it is not BUFFERED. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   String writeStream =
-   *       WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString();
-   *   Storage.FlushRowsResponse response = bigQueryWriteClient.flushRows(writeStream);
-   * }
-   * }
- * - * @param writeStream Required. The stream that is the target of the flush operation. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Storage.FlushRowsResponse flushRows(String writeStream) { - Storage.FlushRowsRequest request = - Storage.FlushRowsRequest.newBuilder().setWriteStream(writeStream).build(); - return flushRows(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Flushes rows to a BUFFERED stream. If users are appending rows to a BUFFERED stream, a flush - * operation is required in order for the rows to become available for reading. A flush operation - * advances the flushed offset of a BUFFERED stream up to the offset specified in the request. - * Flush is not supported on the _default stream, since it is not BUFFERED. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   Storage.FlushRowsRequest request =
-   *       Storage.FlushRowsRequest.newBuilder()
-   *           .setWriteStream(
-   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
-   *           .setOffset(-1019779949)
-   *           .build();
-   *   Storage.FlushRowsResponse response = bigQueryWriteClient.flushRows(request);
-   * }
-   * }
- * - * @param request The request object containing all of the parameters for the API call. - * @throws com.google.api.gax.rpc.ApiException if the remote call fails - */ - public final Storage.FlushRowsResponse flushRows(Storage.FlushRowsRequest request) { - return flushRowsCallable().call(request); - } - - // AUTO-GENERATED DOCUMENTATION AND METHOD. - /** - * Flushes rows to a BUFFERED stream. If users are appending rows to a BUFFERED stream, a flush - * operation is required in order for the rows to become available for reading. A flush operation - * advances the flushed offset of a BUFFERED stream up to the offset specified in the request. - * Flush is not supported on the _default stream, since it is not BUFFERED. - * - *

Sample code: - * - *

{@code
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *   Storage.FlushRowsRequest request =
-   *       Storage.FlushRowsRequest.newBuilder()
-   *           .setWriteStream(
-   *               WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString())
-   *           .setOffset(-1019779949)
-   *           .build();
-   *   ApiFuture<Storage.FlushRowsResponse> future =
-   *       bigQueryWriteClient.flushRowsCallable().futureCall(request);
-   *   // Do something.
-   *   Storage.FlushRowsResponse response = future.get();
-   * }
-   * }
- */ - public final UnaryCallable - flushRowsCallable() { - return stub.flushRowsCallable(); - } - - @Override - public final void close() { - stub.close(); - } - - @Override - public void shutdown() { - stub.shutdown(); - } - - @Override - public boolean isShutdown() { - return stub.isShutdown(); - } - - @Override - public boolean isTerminated() { - return stub.isTerminated(); - } - - @Override - public void shutdownNow() { - stub.shutdownNow(); - } - - @Override - public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { - return stub.awaitTermination(duration, unit); - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java deleted file mode 100644 index 148b3dc47d..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java +++ /dev/null @@ -1,252 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.core.ApiFunction; -import com.google.api.core.BetaApi; -import com.google.api.gax.core.GoogleCredentialsProvider; -import com.google.api.gax.core.InstantiatingExecutorProvider; -import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; -import com.google.api.gax.rpc.ApiClientHeaderProvider; -import com.google.api.gax.rpc.ClientContext; -import com.google.api.gax.rpc.ClientSettings; -import com.google.api.gax.rpc.StreamingCallSettings; -import com.google.api.gax.rpc.TransportChannelProvider; -import com.google.api.gax.rpc.UnaryCallSettings; -import com.google.cloud.bigquery.storage.v1alpha2.stub.BigQueryWriteStubSettings; -import java.io.IOException; -import java.util.List; -import javax.annotation.Generated; - -// AUTO-GENERATED DOCUMENTATION AND CLASS. -/** - * Settings class to configure an instance of {@link BigQueryWriteClient}. - * - *

The default instance has everything set to sensible defaults: - * - *

    - *
  • The default service address (bigquerystorage.googleapis.com) and default port (443) are - * used. - *
  • Credentials are acquired automatically through Application Default Credentials. - *
  • Retries are configured for idempotent methods but not for non-idempotent methods. - *
- * - *

The builder of this class is recursive, so contained classes are themselves builders. When - * build() is called, the tree of builders is called to create the complete settings object. - * - *

For example, to set the total timeout of createWriteStream to 30 seconds: - * - *

{@code
- * BigQueryWriteSettings.Builder bigQueryWriteSettingsBuilder = BigQueryWriteSettings.newBuilder();
- * bigQueryWriteSettingsBuilder
- *     .createWriteStreamSettings()
- *     .setRetrySettings(
- *         bigQueryWriteSettingsBuilder
- *             .createWriteStreamSettings()
- *             .getRetrySettings()
- *             .toBuilder()
- *             .setTotalTimeout(Duration.ofSeconds(30))
- *             .build());
- * BigQueryWriteSettings bigQueryWriteSettings = bigQueryWriteSettingsBuilder.build();
- * }
- * - * @deprecated This class is deprecated and will be removed in the next major version update. - */ -@BetaApi -@Deprecated -@Generated("by gapic-generator-java") -public class BigQueryWriteSettings extends ClientSettings { - - /** Returns the object with the settings used for calls to createWriteStream. */ - public UnaryCallSettings - createWriteStreamSettings() { - return ((BigQueryWriteStubSettings) getStubSettings()).createWriteStreamSettings(); - } - - /** Returns the object with the settings used for calls to appendRows. */ - public StreamingCallSettings - appendRowsSettings() { - return ((BigQueryWriteStubSettings) getStubSettings()).appendRowsSettings(); - } - - /** Returns the object with the settings used for calls to getWriteStream. */ - public UnaryCallSettings - getWriteStreamSettings() { - return ((BigQueryWriteStubSettings) getStubSettings()).getWriteStreamSettings(); - } - - /** Returns the object with the settings used for calls to finalizeWriteStream. */ - public UnaryCallSettings - finalizeWriteStreamSettings() { - return ((BigQueryWriteStubSettings) getStubSettings()).finalizeWriteStreamSettings(); - } - - /** Returns the object with the settings used for calls to batchCommitWriteStreams. */ - public UnaryCallSettings< - Storage.BatchCommitWriteStreamsRequest, Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreamsSettings() { - return ((BigQueryWriteStubSettings) getStubSettings()).batchCommitWriteStreamsSettings(); - } - - /** Returns the object with the settings used for calls to flushRows. */ - public UnaryCallSettings - flushRowsSettings() { - return ((BigQueryWriteStubSettings) getStubSettings()).flushRowsSettings(); - } - - public static final BigQueryWriteSettings create(BigQueryWriteStubSettings stub) - throws IOException { - return new BigQueryWriteSettings.Builder(stub.toBuilder()).build(); - } - - /** Returns a builder for the default ExecutorProvider for this service. */ - public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { - return BigQueryWriteStubSettings.defaultExecutorProviderBuilder(); - } - - /** Returns the default service endpoint. */ - public static String getDefaultEndpoint() { - return BigQueryWriteStubSettings.getDefaultEndpoint(); - } - - /** Returns the default service scopes. */ - public static List getDefaultServiceScopes() { - return BigQueryWriteStubSettings.getDefaultServiceScopes(); - } - - /** Returns a builder for the default credentials for this service. */ - public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { - return BigQueryWriteStubSettings.defaultCredentialsProviderBuilder(); - } - - /** Returns a builder for the default ChannelProvider for this service. */ - public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { - return BigQueryWriteStubSettings.defaultGrpcTransportProviderBuilder(); - } - - public static TransportChannelProvider defaultTransportChannelProvider() { - return BigQueryWriteStubSettings.defaultTransportChannelProvider(); - } - - @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") - public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { - return BigQueryWriteStubSettings.defaultApiClientHeaderProviderBuilder(); - } - - /** Returns a new builder for this class. */ - public static Builder newBuilder() { - return Builder.createDefault(); - } - - /** Returns a new builder for this class. 
*/ - public static Builder newBuilder(ClientContext clientContext) { - return new Builder(clientContext); - } - - /** Returns a builder containing all the values of this settings class. */ - public Builder toBuilder() { - return new Builder(this); - } - - protected BigQueryWriteSettings(Builder settingsBuilder) throws IOException { - super(settingsBuilder); - } - - /** Builder for BigQueryWriteSettings. */ - public static class Builder extends ClientSettings.Builder { - - protected Builder() throws IOException { - this(((ClientContext) null)); - } - - protected Builder(ClientContext clientContext) { - super(BigQueryWriteStubSettings.newBuilder(clientContext)); - } - - protected Builder(BigQueryWriteSettings settings) { - super(settings.getStubSettings().toBuilder()); - } - - protected Builder(BigQueryWriteStubSettings.Builder stubSettings) { - super(stubSettings); - } - - private static Builder createDefault() { - return new Builder(BigQueryWriteStubSettings.newBuilder()); - } - - public BigQueryWriteStubSettings.Builder getStubSettingsBuilder() { - return ((BigQueryWriteStubSettings.Builder) getStubSettings()); - } - - // NEXT_MAJOR_VER: remove 'throws Exception'. - /** - * Applies the given settings updater function to all of the unary API methods in this service. - * - *

Note: This method does not support applying settings to streaming methods. - */ - public Builder applyToAllUnaryMethods( - ApiFunction, Void> settingsUpdater) throws Exception { - super.applyToAllUnaryMethods( - getStubSettingsBuilder().unaryMethodSettingsBuilders(), settingsUpdater); - return this; - } - - /** Returns the builder for the settings used for calls to createWriteStream. */ - public UnaryCallSettings.Builder - createWriteStreamSettings() { - return getStubSettingsBuilder().createWriteStreamSettings(); - } - - /** Returns the builder for the settings used for calls to appendRows. */ - public StreamingCallSettings.Builder - appendRowsSettings() { - return getStubSettingsBuilder().appendRowsSettings(); - } - - /** Returns the builder for the settings used for calls to getWriteStream. */ - public UnaryCallSettings.Builder - getWriteStreamSettings() { - return getStubSettingsBuilder().getWriteStreamSettings(); - } - - /** Returns the builder for the settings used for calls to finalizeWriteStream. */ - public UnaryCallSettings.Builder< - Storage.FinalizeWriteStreamRequest, Storage.FinalizeWriteStreamResponse> - finalizeWriteStreamSettings() { - return getStubSettingsBuilder().finalizeWriteStreamSettings(); - } - - /** Returns the builder for the settings used for calls to batchCommitWriteStreams. */ - public UnaryCallSettings.Builder< - Storage.BatchCommitWriteStreamsRequest, Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreamsSettings() { - return getStubSettingsBuilder().batchCommitWriteStreamsSettings(); - } - - /** Returns the builder for the settings used for calls to flushRows. */ - public UnaryCallSettings.Builder - flushRowsSettings() { - return getStubSettingsBuilder().flushRowsSettings(); - } - - @Override - public BigQueryWriteSettings build() throws IOException { - return new BigQueryWriteSettings(this); - } - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/DirectWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/DirectWriter.java deleted file mode 100644 index 85dc02ee3f..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/DirectWriter.java +++ /dev/null @@ -1,166 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-package com.google.cloud.bigquery.storage.v1alpha2;
-
-import com.google.api.core.ApiFunction;
-import com.google.api.core.ApiFuture;
-import com.google.api.core.ApiFutures;
-import com.google.api.gax.grpc.GrpcStatusCode;
-import com.google.api.gax.rpc.InvalidArgumentException;
-import com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows;
-import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest;
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.util.concurrent.MoreExecutors;
-import com.google.protobuf.Descriptors;
-import com.google.protobuf.Message;
-import io.grpc.Status;
-import java.io.IOException;
-import java.util.List;
-import java.util.concurrent.locks.Lock;
-import java.util.concurrent.locks.ReentrantLock;
-import java.util.logging.Logger;
-import org.json.JSONArray;
-
-/**
- * Writer that can help users write data to BigQuery. This is a simplified version of the Write
- * API. For users who write with a COMMITTED stream and do not care about row deduplication, this
- * Writer is recommended. The DirectWriter can be used to write both JSON and protobuf data.
- *

- * <pre>{@code
- * DataProto data;
- * ApiFuture<Long> response =
- *     DirectWriter.append("projects/pid/datasets/did/tables/tid", Arrays.asList(data));
- * }</pre>
- *
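- * <p>A JSON append follows the same shape; a minimal sketch, assuming a table whose schema
- * matches the JSONObject keys (the table path and field name are illustrative):
- *
- * <pre>{@code
- * JSONObject row = new JSONObject().put("name", "alice");
- * ApiFuture<Long> jsonResponse =
- *     DirectWriter.append("projects/pid/datasets/did/tables/tid", new JSONArray().put(row));
- * }</pre>
- *
- * <p>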
{@link DirectWriter} will use the credentials set on the channel, which uses application - * default credentials through {@link GoogleCredentials#getApplicationDefault} by default. - * - * @deprecated no longer supported - */ -@Deprecated -public class DirectWriter { - private static final Logger LOG = Logger.getLogger(DirectWriter.class.getName()); - private static WriterCache cache = null; - private static JsonWriterCache jsonCache = null; - private static Lock cacheLock = new ReentrantLock(); - private static Lock jsonCacheLock = new ReentrantLock(); - - /** - * Append rows to the given table. - * - * @param tableName table name in the form of "projects/{pName}/datasets/{dName}/tables/{tName}" - * @param protoRows rows in proto buffer format. - * @return A future that contains the offset at which the append happened. Only when the future - * returns with valid offset, then the append actually happened. - * @throws IOException, InterruptedException, InvalidArgumentException - */ - public static ApiFuture append(String tableName, List protoRows) - throws IOException, InterruptedException, InvalidArgumentException { - Preconditions.checkNotNull(tableName, "TableName is null."); - Preconditions.checkNotNull(protoRows, "ProtoRows is null."); - - if (protoRows.isEmpty()) { - throw new InvalidArgumentException( - new Exception("Empty rows are not allowed"), - GrpcStatusCode.of(Status.Code.INVALID_ARGUMENT), - false); - } - try { - cacheLock.lock(); - if (cache == null) { - cache = WriterCache.getInstance(); - } - } finally { - cacheLock.unlock(); - } - - StreamWriter writer = cache.getTableWriter(tableName, protoRows.get(0).getDescriptorForType()); - ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); - Descriptors.Descriptor descriptor = null; - for (Message protoRow : protoRows) { - rowsBuilder.addSerializedRows(protoRow.toByteString()); - } - - AppendRowsRequest.ProtoData.Builder data = AppendRowsRequest.ProtoData.newBuilder(); - data.setWriterSchema(ProtoSchemaConverter.convert(protoRows.get(0).getDescriptorForType())); - data.setRows(rowsBuilder.build()); - - return ApiFutures.transform( - writer.append(AppendRowsRequest.newBuilder().setProtoRows(data.build()).build()), - new ApiFunction() { - @Override - public Long apply(Storage.AppendRowsResponse appendRowsResponse) { - return Long.valueOf(appendRowsResponse.getOffset()); - } - }, - MoreExecutors.directExecutor()); - } - - /** - * Append rows to the given table. - * - * @param tableName table name in the form of "projects/{pName}/datasets/{dName}/tables/{tName}" - * @param json A JSONArray - * @return A future that contains the offset at which the append happened. Only when the future - * returns with valid offset, then the append actually happened. 
- * @throws IOException, InterruptedException, InvalidArgumentException, - * Descriptors.DescriptorValidationException - */ - public static ApiFuture append(String tableName, JSONArray json) - throws IOException, InterruptedException, InvalidArgumentException, - Descriptors.DescriptorValidationException { - Preconditions.checkNotNull(tableName, "TableName is null."); - Preconditions.checkNotNull(json, "JSONArray is null."); - - if (json.length() == 0) { - throw new InvalidArgumentException( - new Exception("Empty JSONArrays are not allowed"), - GrpcStatusCode.of(Status.Code.INVALID_ARGUMENT), - false); - } - try { - jsonCacheLock.lock(); - if (jsonCache == null) { - jsonCache = JsonWriterCache.getInstance(); - } - } finally { - jsonCacheLock.unlock(); - } - JsonStreamWriter writer = jsonCache.getTableWriter(tableName); - return ApiFutures.transform( - writer.append(json, /* offset = */ -1, /*allowUnknownFields = */ false), - new ApiFunction() { - @Override - public Long apply(Storage.AppendRowsResponse appendRowsResponse) { - return Long.valueOf(appendRowsResponse.getOffset()); - } - }, - MoreExecutors.directExecutor()); - } - - @VisibleForTesting - public static void testSetStub( - BigQueryWriteClient stub, int maxTableEntry, SchemaCompatibility schemaCheck) { - cache = WriterCache.getTestInstance(stub, maxTableEntry, schemaCheck); - jsonCache = JsonWriterCache.getTestInstance(stub, maxTableEntry); - } - - /** Clears the underlying cache and all the transport connections. */ - public static void clearCache() { - cache.clear(); - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/JsonStreamWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/JsonStreamWriter.java deleted file mode 100644 index 214b8b5246..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/JsonStreamWriter.java +++ /dev/null @@ -1,406 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-package com.google.cloud.bigquery.storage.v1alpha2;
-
-import com.google.api.core.ApiFuture;
-import com.google.api.gax.batching.BatchingSettings;
-import com.google.api.gax.core.CredentialsProvider;
-import com.google.api.gax.core.ExecutorProvider;
-import com.google.api.gax.retrying.RetrySettings;
-import com.google.api.gax.rpc.TransportChannelProvider;
-import com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows;
-import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest;
-import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse;
-import com.google.common.base.Preconditions;
-import com.google.protobuf.Descriptors;
-import com.google.protobuf.Descriptors.Descriptor;
-import com.google.protobuf.Int64Value;
-import com.google.protobuf.Message;
-import java.io.IOException;
-import java.util.logging.Logger;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-import javax.annotation.Nullable;
-import org.json.JSONArray;
-import org.json.JSONObject;
-
-/**
- * A StreamWriter that can write JSON data (JSONObjects) to BigQuery tables. The JsonStreamWriter is
- * built on top of a StreamWriter, and it simply converts all JSON data to protobuf messages then
- * calls StreamWriter's append() method to write to BigQuery tables. It maintains all StreamWriter
- * functions, but also provides an additional feature: schema update support, where if the BigQuery
- * table schema is updated, users will be able to ingest data on the new schema after some time (in
- * order of minutes).
- *
- * @deprecated Use {@link com.google.cloud.bigquery.storage.v1beta2.JsonStreamWriter}
- */
-@Deprecated
-public class JsonStreamWriter implements AutoCloseable {
-  private static String streamPatternString =
-      "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+";
-  private static Pattern streamPattern = Pattern.compile(streamPatternString);
-  private static final Logger LOG = Logger.getLogger(JsonStreamWriter.class.getName());
-
-  private BigQueryWriteClient client;
-  private String streamName;
-  private StreamWriter streamWriter;
-  private Descriptor descriptor;
-  private Table.TableSchema tableSchema;
-
-  /**
-   * Constructs the JsonStreamWriter.
-   *
-   * @param builder The Builder object for the JsonStreamWriter
-   */
-  private JsonStreamWriter(Builder builder)
-      throws Descriptors.DescriptorValidationException, IllegalArgumentException, IOException,
-          InterruptedException {
-    Matcher matcher = streamPattern.matcher(builder.streamName);
-    if (!matcher.matches()) {
-      throw new IllegalArgumentException("Invalid stream name: " + builder.streamName);
-    }
-
-    this.streamName = builder.streamName;
-    this.client = builder.client;
-    this.descriptor =
-        BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(builder.tableSchema);
-
-    StreamWriter.Builder streamWriterBuilder;
-    if (this.client == null) {
-      streamWriterBuilder = StreamWriter.newBuilder(builder.streamName);
-    } else {
-      streamWriterBuilder = StreamWriter.newBuilder(builder.streamName, builder.client);
-    }
-    setStreamWriterSettings(
-        streamWriterBuilder,
-        builder.channelProvider,
-        builder.credentialsProvider,
-        builder.batchingSettings,
-        builder.retrySettings,
-        builder.executorProvider,
-        builder.endpoint);
-    this.streamWriter = streamWriterBuilder.build();
-  }
-  /**
-   * Writes a JSONArray that contains JSONObjects to the BigQuery table by first converting the JSON
-   * data to protobuf messages, then using StreamWriter's append() to write the data.
If there is a - * schema update, the OnSchemaUpdateRunnable will be used to determine what actions to perform. - * - * @param jsonArr The JSON array that contains JSONObjects to be written - * @param allowUnknownFields if true, json data can have fields unknown to the BigQuery table. - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture - */ - public ApiFuture append(JSONArray jsonArr, boolean allowUnknownFields) { - return append(jsonArr, -1, allowUnknownFields); - } - - /** - * Writes a JSONArray that contains JSONObjects to the BigQuery table by first converting the JSON - * data to protobuf messages, then using StreamWriter's append() to write the data. If there is a - * schema update, the OnSchemaUpdateRunnable will be used to determine what actions to perform. - * - * @param jsonArr The JSON array that contains JSONObjects to be written - * @param offset Offset for deduplication - * @param allowUnknownFields if true, json data can have fields unknown to the BigQuery table. - * @return ApiFuture returns an AppendRowsResponse message wrapped in an - * ApiFuture - */ - public ApiFuture append( - JSONArray jsonArr, long offset, boolean allowUnknownFields) { - ProtoRows.Builder rowsBuilder = ProtoRows.newBuilder(); - // Any error in convertJsonToProtoMessage will throw an - // IllegalArgumentException/IllegalStateException/NullPointerException and will halt processing - // of JSON data. - for (int i = 0; i < jsonArr.length(); i++) { - JSONObject json = jsonArr.getJSONObject(i); - Message protoMessage = - JsonToProtoMessage.convertJsonToProtoMessage(this.descriptor, json, allowUnknownFields); - rowsBuilder.addSerializedRows(protoMessage.toByteString()); - } - AppendRowsRequest.ProtoData.Builder data = AppendRowsRequest.ProtoData.newBuilder(); - // Need to make sure refreshAppendAndSetDescriptor finish first before this can run - synchronized (this) { - data.setWriterSchema(ProtoSchemaConverter.convert(this.descriptor)); - data.setRows(rowsBuilder.build()); - final ApiFuture appendResponseFuture = - this.streamWriter.append( - AppendRowsRequest.newBuilder() - .setProtoRows(data.build()) - .setOffset(Int64Value.of(offset)) - .build()); - return appendResponseFuture; - } - } - - /** - * Refreshes connection for a JsonStreamWriter by first flushing all remaining rows, then calling - * refreshAppend(), and finally setting the descriptor. All of these actions need to be performed - * atomically to avoid having synchronization issues with append(). Flushing all rows first is - * necessary since if there are rows remaining when the connection refreshes, it will send out the - * old writer schema instead of the new one. - */ - void refreshConnection() - throws IOException, InterruptedException, Descriptors.DescriptorValidationException { - synchronized (this) { - this.streamWriter.writeAllOutstanding(); - this.streamWriter.refreshAppend(); - this.descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(this.tableSchema); - } - } - - /** - * Gets streamName - * - * @return String - */ - public String getStreamName() { - return this.streamName; - } - - /** - * Gets current descriptor - * - * @return Descriptor - */ - public Descriptor getDescriptor() { - return this.descriptor; - } - - /** Sets all StreamWriter settings. 
*/ - private void setStreamWriterSettings( - StreamWriter.Builder builder, - @Nullable TransportChannelProvider channelProvider, - @Nullable CredentialsProvider credentialsProvider, - @Nullable BatchingSettings batchingSettings, - @Nullable RetrySettings retrySettings, - @Nullable ExecutorProvider executorProvider, - @Nullable String endpoint) { - if (channelProvider != null) { - builder.setChannelProvider(channelProvider); - } - if (credentialsProvider != null) { - builder.setCredentialsProvider(credentialsProvider); - } - if (batchingSettings != null) { - builder.setBatchingSettings(batchingSettings); - } - if (retrySettings != null) { - builder.setRetrySettings(retrySettings); - } - if (executorProvider != null) { - builder.setExecutorProvider(executorProvider); - } - if (endpoint != null) { - builder.setEndpoint(endpoint); - } - JsonStreamWriterOnSchemaUpdateRunnable jsonStreamWriterOnSchemaUpdateRunnable = - new JsonStreamWriterOnSchemaUpdateRunnable(); - jsonStreamWriterOnSchemaUpdateRunnable.setJsonStreamWriter(this); - builder.setOnSchemaUpdateRunnable(jsonStreamWriterOnSchemaUpdateRunnable); - } - - /** - * Setter for table schema. Used for schema updates. - * - * @param tableSchema - */ - void setTableSchema(Table.TableSchema tableSchema) { - this.tableSchema = tableSchema; - } - - /** - * newBuilder that constructs a JsonStreamWriter builder with BigQuery client being initialized by - * StreamWriter by default. - * - * @param streamName name of the stream that must follow - * "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+" - * @param tableSchema The schema of the table when the stream was created, which is passed back - * through {@code WriteStream} - * @return Builder - */ - public static Builder newBuilder(String streamName, Table.TableSchema tableSchema) { - Preconditions.checkNotNull(streamName, "StreamName is null."); - Preconditions.checkNotNull(tableSchema, "TableSchema is null."); - return new Builder(streamName, tableSchema, null); - } - - /** - * newBuilder that constructs a JsonStreamWriter builder. - * - * @param streamName name of the stream that must follow - * "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+" - * @param tableSchema The schema of the table when the stream was created, which is passed back - * through {@code WriteStream} - * @param client - * @return Builder - */ - public static Builder newBuilder( - String streamName, Table.TableSchema tableSchema, BigQueryWriteClient client) { - Preconditions.checkNotNull(streamName, "StreamName is null."); - Preconditions.checkNotNull(tableSchema, "TableSchema is null."); - Preconditions.checkNotNull(client, "BigQuery client is null."); - return new Builder(streamName, tableSchema, client); - } - - /** Closes the underlying StreamWriter. 
*/ - @Override - public void close() { - this.streamWriter.close(); - } - - private class JsonStreamWriterOnSchemaUpdateRunnable extends OnSchemaUpdateRunnable { - private JsonStreamWriter jsonStreamWriter; - /** - * Setter for the jsonStreamWriter - * - * @param jsonStreamWriter - */ - public void setJsonStreamWriter(JsonStreamWriter jsonStreamWriter) { - this.jsonStreamWriter = jsonStreamWriter; - } - - /** Getter for the jsonStreamWriter */ - public JsonStreamWriter getJsonStreamWriter() { - return this.jsonStreamWriter; - } - - @Override - public void run() { - this.getJsonStreamWriter().setTableSchema(this.getUpdatedSchema()); - try { - this.getJsonStreamWriter().refreshConnection(); - } catch (InterruptedException | IOException e) { - LOG.severe("StreamWriter failed to refresh upon schema update." + e); - return; - } catch (Descriptors.DescriptorValidationException e) { - LOG.severe( - "Schema update fail: updated schema could not be converted to a valid descriptor."); - return; - } - LOG.info("Successfully updated schema: " + this.getUpdatedSchema()); - } - } - - public static final class Builder { - private String streamName; - private BigQueryWriteClient client; - private Table.TableSchema tableSchema; - - private TransportChannelProvider channelProvider; - private CredentialsProvider credentialsProvider; - private BatchingSettings batchingSettings; - private RetrySettings retrySettings; - private ExecutorProvider executorProvider; - private String endpoint; - - /** - * Constructor for JsonStreamWriter's Builder - * - * @param streamName name of the stream that must follow - * "projects/[^/]+/datasets/[^/]+/tables/[^/]+/streams/[^/]+" - * @param tableSchema schema used to convert Json to proto messages. - * @param client - */ - private Builder(String streamName, Table.TableSchema tableSchema, BigQueryWriteClient client) { - this.streamName = streamName; - this.tableSchema = tableSchema; - this.client = client; - } - - /** - * Setter for the underlying StreamWriter's TransportChannelProvider. - * - * @param channelProvider - * @return Builder - */ - public Builder setChannelProvider(TransportChannelProvider channelProvider) { - this.channelProvider = - Preconditions.checkNotNull(channelProvider, "ChannelProvider is null."); - return this; - } - - /** - * Setter for the underlying StreamWriter's CredentialsProvider. - * - * @param credentialsProvider - * @return Builder - */ - public Builder setCredentialsProvider(CredentialsProvider credentialsProvider) { - this.credentialsProvider = - Preconditions.checkNotNull(credentialsProvider, "CredentialsProvider is null."); - return this; - } - - /** - * Setter for the underlying StreamWriter's BatchingSettings. - * - * @param batchingSettings - * @return Builder - */ - public Builder setBatchingSettings(BatchingSettings batchingSettings) { - this.batchingSettings = - Preconditions.checkNotNull(batchingSettings, "BatchingSettings is null."); - return this; - } - - /** - * Setter for the underlying StreamWriter's RetrySettings. - * - * @param retrySettings - * @return Builder - */ - public Builder setRetrySettings(RetrySettings retrySettings) { - this.retrySettings = Preconditions.checkNotNull(retrySettings, "RetrySettings is null."); - return this; - } - - /** - * Setter for the underlying StreamWriter's ExecutorProvider. 
- * - * @param executorProvider - * @return Builder - */ - public Builder setExecutorProvider(ExecutorProvider executorProvider) { - this.executorProvider = - Preconditions.checkNotNull(executorProvider, "ExecutorProvider is null."); - return this; - } - - /** - * Setter for the underlying StreamWriter's Endpoint. - * - * @param endpoint - * @return Builder - */ - public Builder setEndpoint(String endpoint) { - this.endpoint = Preconditions.checkNotNull(endpoint, "Endpoint is null."); - return this; - } - - /** - * Builds JsonStreamWriter - * - * @return JsonStreamWriter - */ - public JsonStreamWriter build() - throws Descriptors.DescriptorValidationException, IllegalArgumentException, IOException, - InterruptedException { - return new JsonStreamWriter(this); - } - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/JsonToProtoMessage.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/JsonToProtoMessage.java deleted file mode 100644 index 34feb24766..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/JsonToProtoMessage.java +++ /dev/null @@ -1,326 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import com.google.protobuf.Descriptors.Descriptor; -import com.google.protobuf.Descriptors.FieldDescriptor; -import com.google.protobuf.DynamicMessage; -import com.google.protobuf.Message; -import com.google.protobuf.UninitializedMessageException; -import org.json.JSONArray; -import org.json.JSONException; -import org.json.JSONObject; - -/** - * Converts Json data to protocol buffer messages given the protocol buffer descriptor. The protobuf - * descriptor must have all fields lowercased. - * - * @deprecated Use {@link com.google.cloud.bigquery.storage.v1beta2.JsonToProtoMessage} - */ -@Deprecated -public class JsonToProtoMessage { - private static ImmutableMap FieldTypeToDebugMessage = - new ImmutableMap.Builder() - .put(FieldDescriptor.Type.BOOL, "boolean") - .put(FieldDescriptor.Type.BYTES, "string") - .put(FieldDescriptor.Type.INT32, "int32") - .put(FieldDescriptor.Type.DOUBLE, "double") - .put(FieldDescriptor.Type.INT64, "int64") - .put(FieldDescriptor.Type.STRING, "string") - .put(FieldDescriptor.Type.MESSAGE, "object") - .build(); - - /** - * Converts Json data to protocol buffer messages given the protocol buffer descriptor. - * - * @param protoSchema - * @param json - * @param allowUnknownFields Ignores unknown JSON fields. - * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. 
- */
-  public static DynamicMessage convertJsonToProtoMessage(
-      Descriptor protoSchema, JSONObject json, boolean allowUnknownFields)
-      throws IllegalArgumentException {
-    Preconditions.checkNotNull(json, "JSONObject is null.");
-    Preconditions.checkNotNull(protoSchema, "Protobuf descriptor is null.");
-    Preconditions.checkState(json.length() != 0, "JSONObject is empty.");
-
-    return convertJsonToProtoMessageImpl(
-        protoSchema, json, "root", /*topLevel=*/ true, allowUnknownFields);
-  }
-
-  /**
-   * Converts Json data to protocol buffer messages given the protocol buffer descriptor.
-   *
-   * @param protoSchema
-   * @param json
-   * @param jsonScope Debugging purposes
-   * @param allowUnknownFields Ignores unknown JSON fields.
-   * @param topLevel checks if root level has any matching fields.
-   * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor.
-   */
-  private static DynamicMessage convertJsonToProtoMessageImpl(
-      Descriptor protoSchema,
-      JSONObject json,
-      String jsonScope,
-      boolean topLevel,
-      boolean allowUnknownFields)
-      throws IllegalArgumentException {
-
-    DynamicMessage.Builder protoMsg = DynamicMessage.newBuilder(protoSchema);
-    String[] jsonNames = JSONObject.getNames(json);
-    if (jsonNames == null) {
-      return protoMsg.build();
-    }
-    int matchedFields = 0;
-    for (int i = 0; i < jsonNames.length; i++) {
-      String jsonName = jsonNames[i];
-      // We want lowercase here to support case-insensitive data writes.
-      // The protobuf descriptor that is used is assumed to have all lowercased fields
-      String jsonLowercaseName = jsonName.toLowerCase();
-      String currentScope = jsonScope + "." + jsonName;
-      FieldDescriptor field = protoSchema.findFieldByName(jsonLowercaseName);
-      if (field == null) {
-        if (!allowUnknownFields) {
-          throw new IllegalArgumentException(
-              String.format(
-                  "JSONObject has fields unknown to BigQuery: %s. Set allowUnknownFields to True to allow unknown fields.",
-                  currentScope));
-        } else {
-          continue;
-        }
-      }
-      matchedFields++;
-      if (!field.isRepeated()) {
-        fillField(protoMsg, field, json, jsonName, currentScope, allowUnknownFields);
-      } else {
-        fillRepeatedField(protoMsg, field, json, jsonName, currentScope, allowUnknownFields);
-      }
-    }
-
-    if (matchedFields == 0 && topLevel) {
-      throw new IllegalArgumentException(
-          "There are no matching fields found for the JSONObject and the protocol buffer descriptor.");
-    }
-    DynamicMessage msg;
-    try {
-      msg = protoMsg.build();
-    } catch (UninitializedMessageException e) {
-      String errorMsg = e.getMessage();
-      int idxOfColon = errorMsg.indexOf(":");
-      String missingFieldName = errorMsg.substring(idxOfColon + 2);
-      throw new IllegalArgumentException(
-          String.format(
-              "JSONObject does not have the required field %s.%s.", jsonScope, missingFieldName));
-    }
-    if (topLevel && msg.getSerializedSize() == 0) {
-      throw new IllegalArgumentException("The created protobuf message is empty.");
-    }
-    return msg;
-  }
-
-  /**
-   * Fills a non-repeated protoField with the json data.
-   *
-   * @param protoMsg The protocol buffer message being constructed
-   * @param fieldDescriptor
-   * @param json
-   * @param exactJsonKeyName Exact key name in JSONObject instead of lowercased version
-   * @param currentScope Debugging purposes
-   * @param allowUnknownFields Ignores unknown JSON fields.
-   * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor.
- */ - private static void fillField( - DynamicMessage.Builder protoMsg, - FieldDescriptor fieldDescriptor, - JSONObject json, - String exactJsonKeyName, - String currentScope, - boolean allowUnknownFields) - throws IllegalArgumentException { - - java.lang.Object val = json.get(exactJsonKeyName); - switch (fieldDescriptor.getType()) { - case BOOL: - if (val instanceof Boolean) { - protoMsg.setField(fieldDescriptor, (Boolean) val); - return; - } - break; - case BYTES: - if (val instanceof String) { - protoMsg.setField(fieldDescriptor, ((String) val).getBytes()); - return; - } - break; - case INT64: - if (val instanceof Integer) { - protoMsg.setField(fieldDescriptor, new Long((Integer) val)); - return; - } else if (val instanceof Long) { - protoMsg.setField(fieldDescriptor, (Long) val); - return; - } - break; - case INT32: - if (val instanceof Integer) { - protoMsg.setField(fieldDescriptor, (Integer) val); - return; - } - break; - case STRING: - if (val instanceof String) { - protoMsg.setField(fieldDescriptor, (String) val); - return; - } - break; - case DOUBLE: - if (val instanceof Double) { - protoMsg.setField(fieldDescriptor, (Double) val); - return; - } else if (val instanceof Float) { - protoMsg.setField(fieldDescriptor, new Double((Float) val)); - return; - } - break; - case MESSAGE: - if (val instanceof JSONObject) { - Message.Builder message = protoMsg.newBuilderForField(fieldDescriptor); - protoMsg.setField( - fieldDescriptor, - convertJsonToProtoMessageImpl( - fieldDescriptor.getMessageType(), - json.getJSONObject(exactJsonKeyName), - currentScope, - /*topLevel =*/ false, - allowUnknownFields)); - return; - } - break; - } - throw new IllegalArgumentException( - String.format( - "JSONObject does not have a %s field at %s.", - FieldTypeToDebugMessage.get(fieldDescriptor.getType()), currentScope)); - } - - /** - * Fills a repeated protoField with the json data. - * - * @param protoMsg The protocol buffer message being constructed - * @param fieldDescriptor - * @param json If root level has no matching fields, throws exception. - * @param exactJsonKeyName Exact key name in JSONObject instead of lowercased version - * @param currentScope Debugging purposes - * @param allowUnknownFields Ignores unknown JSON fields. - * @throws IllegalArgumentException when JSON data is not compatible with proto descriptor. 
- */
-  private static void fillRepeatedField(
-      DynamicMessage.Builder protoMsg,
-      FieldDescriptor fieldDescriptor,
-      JSONObject json,
-      String exactJsonKeyName,
-      String currentScope,
-      boolean allowUnknownFields)
-      throws IllegalArgumentException {
-
-    JSONArray jsonArray;
-    try {
-      jsonArray = json.getJSONArray(exactJsonKeyName);
-    } catch (JSONException e) {
-      throw new IllegalArgumentException(
-          "JSONObject does not have an array field at " + currentScope + ".");
-    }
-    java.lang.Object val;
-    int index;
-    boolean fail = false;
-    for (int i = 0; i < jsonArray.length(); i++) {
-      val = jsonArray.get(i);
-      index = i;
-      switch (fieldDescriptor.getType()) {
-        case BOOL:
-          if (val instanceof Boolean) {
-            protoMsg.addRepeatedField(fieldDescriptor, (Boolean) val);
-          } else {
-            fail = true;
-          }
-          break;
-        case BYTES:
-          if (val instanceof String) {
-            protoMsg.addRepeatedField(fieldDescriptor, ((String) val).getBytes());
-          } else {
-            fail = true;
-          }
-          break;
-        case INT64:
-          if (val instanceof Integer) {
-            protoMsg.addRepeatedField(fieldDescriptor, new Long((Integer) val));
-          } else if (val instanceof Long) {
-            protoMsg.addRepeatedField(fieldDescriptor, (Long) val);
-          } else {
-            fail = true;
-          }
-          break;
-        case INT32:
-          if (val instanceof Integer) {
-            protoMsg.addRepeatedField(fieldDescriptor, (Integer) val);
-          } else {
-            fail = true;
-          }
-          break;
-        case STRING:
-          if (val instanceof String) {
-            protoMsg.addRepeatedField(fieldDescriptor, (String) val);
-          } else {
-            fail = true;
-          }
-          break;
-        case DOUBLE:
-          if (val instanceof Double) {
-            protoMsg.addRepeatedField(fieldDescriptor, (Double) val);
-          } else if (val instanceof Float) {
-            protoMsg.addRepeatedField(fieldDescriptor, new Double((float) val));
-          } else {
-            fail = true;
-          }
-          break;
-        case MESSAGE:
-          if (val instanceof JSONObject) {
-            Message.Builder message = protoMsg.newBuilderForField(fieldDescriptor);
-            protoMsg.addRepeatedField(
-                fieldDescriptor,
-                convertJsonToProtoMessageImpl(
-                    fieldDescriptor.getMessageType(),
-                    jsonArray.getJSONObject(i),
-                    currentScope,
-                    /*topLevel =*/ false,
-                    allowUnknownFields));
-          } else {
-            fail = true;
-          }
-          break;
-      }
-      if (fail) {
-        throw new IllegalArgumentException(
-            String.format(
-                "JSONObject does not have a %s field at %s[%d].",
-                FieldTypeToDebugMessage.get(fieldDescriptor.getType()), currentScope, index));
-      }
-    }
-  }
-}
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/JsonWriterCache.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/JsonWriterCache.java
deleted file mode 100644
index d72403a0c2..0000000000
--- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/JsonWriterCache.java
+++ /dev/null
@@ -1,146 +0,0 @@
-/*
- * Copyright 2020 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.google.cloud.bigquery.storage.v1alpha2;
-
-import com.google.common.annotations.VisibleForTesting;
-import com.google.common.base.Preconditions;
-import com.google.common.cache.Cache;
-import com.google.common.cache.CacheBuilder;
-import com.google.protobuf.Descriptors;
-import java.io.IOException;
-import java.util.concurrent.ConcurrentMap;
-import java.util.logging.Logger;
-import java.util.regex.Matcher;
-import java.util.regex.Pattern;
-
-/**
- * A cache of JsonStreamWriters that can be looked up by Table Name. The entries will expire after 5
- * minutes if not used. Code sample: JsonWriterCache cache = JsonWriterCache.getInstance();
- * JsonStreamWriter writer = cache.getTableWriter(tableName); // Use the writer...
- *
- * @deprecated No longer supported
- */
-@Deprecated
-public class JsonWriterCache {
-  private static final Logger LOG = Logger.getLogger(JsonWriterCache.class.getName());
-
-  private static String tablePatternString = "(projects/[^/]+/datasets/[^/]+/tables/[^/]+)";
-  private static Pattern tablePattern = Pattern.compile(tablePatternString);
-
-  private static JsonWriterCache instance;
-  private Cache<String, JsonStreamWriter> jsonWriterCache;
-
-  // Maximum number of tables to hold in the cache; once the maximum is exceeded, entries are
-  // evicted on a least-recently-used basis.
-  private static final int MAX_TABLE_ENTRY = 100;
-  private static final int MAX_WRITERS_PER_TABLE = 1;
-
-  private final BigQueryWriteClient stub;
-
-  private JsonWriterCache(BigQueryWriteClient stub, int maxTableEntry) {
-    this.stub = stub;
-    jsonWriterCache =
-        CacheBuilder.newBuilder().maximumSize(maxTableEntry).<String, JsonStreamWriter>build();
-  }
-
-  public static JsonWriterCache getInstance() throws IOException {
-    if (instance == null) {
-      BigQueryWriteSettings stubSettings = BigQueryWriteSettings.newBuilder().build();
-      BigQueryWriteClient stub = BigQueryWriteClient.create(stubSettings);
-      instance = new JsonWriterCache(stub, MAX_TABLE_ENTRY);
-    }
-    return instance;
-  }
-
-  /** Returns a cache with custom stub used by test.
*/ - @VisibleForTesting - public static JsonWriterCache getTestInstance(BigQueryWriteClient stub, int maxTableEntry) { - Preconditions.checkNotNull(stub, "Stub is null."); - return new JsonWriterCache(stub, maxTableEntry); - } - - private Stream.WriteStream CreateNewWriteStream(String tableName) { - Stream.WriteStream stream = - Stream.WriteStream.newBuilder().setType(Stream.WriteStream.Type.COMMITTED).build(); - stream = - stub.createWriteStream( - Storage.CreateWriteStreamRequest.newBuilder() - .setParent(tableName) - .setWriteStream(stream) - .build()); - LOG.info("Created write stream:" + stream.getName()); - return stream; - } - - JsonStreamWriter CreateNewWriter(Stream.WriteStream writeStream) - throws IllegalArgumentException, IOException, InterruptedException, - Descriptors.DescriptorValidationException { - return JsonStreamWriter.newBuilder(writeStream.getName(), writeStream.getTableSchema()) - .setChannelProvider(stub.getSettings().getTransportChannelProvider()) - .setCredentialsProvider(stub.getSettings().getCredentialsProvider()) - .setExecutorProvider(stub.getSettings().getExecutorProvider()) - .build(); - } - /** - * Gets a writer for a given table with the given tableName - * - * @param tableName - * @return - * @throws Exception - */ - public JsonStreamWriter getTableWriter(String tableName) - throws IllegalArgumentException, IOException, InterruptedException, - Descriptors.DescriptorValidationException { - Preconditions.checkNotNull(tableName, "TableName is null."); - Matcher matcher = tablePattern.matcher(tableName); - if (!matcher.matches()) { - throw new IllegalArgumentException("Invalid table name: " + tableName); - } - - Stream.WriteStream writeStream = null; - JsonStreamWriter writer = null; - - synchronized (this) { - writer = jsonWriterCache.getIfPresent(tableName); - if (writer != null) { - return writer; - } - writeStream = CreateNewWriteStream(tableName); - writer = CreateNewWriter(writeStream); - jsonWriterCache.put(tableName, writer); - } - return writer; - } - - /** Clear the cache and close all the writers in the cache. */ - public void clear() { - synchronized (this) { - ConcurrentMap map = jsonWriterCache.asMap(); - for (String key : map.keySet()) { - JsonStreamWriter entry = jsonWriterCache.getIfPresent(key); - entry.close(); - } - jsonWriterCache.cleanUp(); - } - } - - @VisibleForTesting - public long cachedTableCount() { - synchronized (jsonWriterCache) { - return jsonWriterCache.size(); - } - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/OnSchemaUpdateRunnable.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/OnSchemaUpdateRunnable.java deleted file mode 100644 index 5194e6cb36..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/OnSchemaUpdateRunnable.java +++ /dev/null @@ -1,57 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */
-package com.google.cloud.bigquery.storage.v1alpha2;
-
-/**
- * An abstract class that implements the Runnable interface and provides access to the current
- * StreamWriter and updatedSchema. This runnable will only be called when an updated schema has been
- * passed back through the AppendRowsResponse. Users should only implement the run() function.
- *
- * @deprecated Use {@link com.google.cloud.bigquery.storage.v1beta2.OnSchemaUpdateRunnable}
- */
-@Deprecated
-public abstract class OnSchemaUpdateRunnable implements Runnable {
-  private StreamWriter streamWriter;
-  private Table.TableSchema updatedSchema;
-
-  /**
-   * Setter for the updatedSchema
-   *
-   * @param updatedSchema
-   */
-  void setUpdatedSchema(Table.TableSchema updatedSchema) {
-    this.updatedSchema = updatedSchema;
-  }
-
-  /**
-   * Setter for the streamWriter
-   *
-   * @param streamWriter
-   */
-  void setStreamWriter(StreamWriter streamWriter) {
-    this.streamWriter = streamWriter;
-  }
-
-  /** Getter for the updatedSchema */
-  Table.TableSchema getUpdatedSchema() {
-    return this.updatedSchema;
-  }
-
-  /** Getter for the streamWriter */
-  StreamWriter getStreamWriter() {
-    return this.streamWriter;
-  }
-}
diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoSchemaConverter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoSchemaConverter.java
deleted file mode 100644
index 4367c4ba21..0000000000
--- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoSchemaConverter.java
+++ /dev/null
@@ -1,121 +0,0 @@
-/*
- * Copyright 2020 Google LLC
- *
- * Licensed under the Apache License, Version 2.0 (the "License");
- * you may not use this file except in compliance with the License.
- * You may obtain a copy of the License at
- *
- * http://www.apache.org/licenses/LICENSE-2.0
- *
- * Unless required by applicable law or agreed to in writing, software
- * distributed under the License is distributed on an "AS IS" BASIS,
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
- * See the License for the specific language governing permissions and
- * limitations under the License.
- */
-package com.google.cloud.bigquery.storage.v1alpha2;
-
-import com.google.api.gax.grpc.GrpcStatusCode;
-import com.google.api.gax.rpc.InvalidArgumentException;
-import com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema;
-import com.google.protobuf.DescriptorProtos.DescriptorProto;
-import com.google.protobuf.DescriptorProtos.EnumDescriptorProto;
-import com.google.protobuf.DescriptorProtos.FieldDescriptorProto;
-import com.google.protobuf.Descriptors.Descriptor;
-import com.google.protobuf.Descriptors.FieldDescriptor;
-import io.grpc.Status;
-import java.util.HashSet;
-import java.util.Set;
-
-// A Converter class that turns a native protobuf::DescriptorProto to a self-contained
-// protobuf::DescriptorProto that can be reconstructed by the backend.
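-// A minimal usage sketch (FooProto here is an illustrative generated message class):
-//
-//   ProtoSchema schema = ProtoSchemaConverter.convert(FooProto.getDescriptor());
-//   AppendRowsRequest.ProtoData data =
-//       AppendRowsRequest.ProtoData.newBuilder().setWriterSchema(schema).build();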
-// @deprecated Use {@link com.google.cloud.bigquery.storage.v1beta2.ProtoSchemaConverter}
-@Deprecated
-public class ProtoSchemaConverter {
-  private static String getNameFromFullName(String fullName) {
-    return fullName.replace('.', '_');
-  }
-
-  private static ProtoSchema convertInternal(
-      Descriptor input,
-      Set<String> visitedTypes,
-      Set<String> enumTypes,
-      Set<String> structTypes,
-      DescriptorProto.Builder rootProtoSchema) {
-    DescriptorProto.Builder resultProto = DescriptorProto.newBuilder();
-    if (rootProtoSchema == null) {
-      rootProtoSchema = resultProto;
-    }
-    String protoFullName = input.getFullName();
-    String protoName = getNameFromFullName(protoFullName);
-    resultProto.setName(protoName);
-    Set<String> localEnumTypes = new HashSet<String>();
-    visitedTypes.add(input.getFullName());
-    for (int i = 0; i < input.getFields().size(); i++) {
-      FieldDescriptor inputField = input.getFields().get(i);
-      FieldDescriptorProto.Builder resultField = inputField.toProto().toBuilder();
-      if (inputField.getType() == FieldDescriptor.Type.GROUP
-          || inputField.getType() == FieldDescriptor.Type.MESSAGE) {
-        String msgFullName = inputField.getMessageType().getFullName();
-        String msgName = getNameFromFullName(msgFullName);
-        if (structTypes.contains(msgFullName)) {
-          resultField.setTypeName(msgName);
-        } else {
-          if (visitedTypes.contains(msgFullName)) {
-            throw new InvalidArgumentException(
-                "Recursive type is not supported:" + inputField.getMessageType().getFullName(),
-                null,
-                GrpcStatusCode.of(Status.Code.INVALID_ARGUMENT),
-                false);
-          }
-          visitedTypes.add(msgFullName);
-          rootProtoSchema.addNestedType(
-              convertInternal(
-                      inputField.getMessageType(),
-                      visitedTypes,
-                      enumTypes,
-                      structTypes,
-                      rootProtoSchema)
-                  .getProtoDescriptor());
-          visitedTypes.remove(msgFullName);
-          resultField.setTypeName(
-              rootProtoSchema.getNestedType(rootProtoSchema.getNestedTypeCount() - 1).getName());
-        }
-      }
-
-      if (inputField.getType() == FieldDescriptor.Type.ENUM) {
-        // For enums, in order to avoid value conflicts, we will always define
-        // an enclosing struct called enum_full_name_E that includes the actual
-        // enum.
-        String enumFullName = inputField.getEnumType().getFullName();
-        String enclosingTypeName = getNameFromFullName(enumFullName) + "_E";
-        String enumName = inputField.getEnumType().getName();
-        String actualEnumFullName = enclosingTypeName + "."
+ enumName; - if (enumTypes.contains(enumFullName)) { - resultField.setTypeName(actualEnumFullName); - } else { - EnumDescriptorProto enumType = inputField.getEnumType().toProto(); - resultProto.addNestedType( - DescriptorProto.newBuilder() - .setName(enclosingTypeName) - .addEnumType(enumType.toBuilder().setName(enumName)) - .build()); - resultField.setTypeName(actualEnumFullName); - enumTypes.add(enumFullName); - } - } - resultProto.addField(resultField); - } - structTypes.add(protoFullName); - - return ProtoSchema.newBuilder().setProtoDescriptor(resultProto.build()).build(); - } - - public static ProtoSchema convert(Descriptor descriptor) { - Set visitedTypes = new HashSet(); - Set enumTypes = new HashSet(); - Set structTypes = new HashSet(); - return convertInternal(descriptor, visitedTypes, enumTypes, structTypes, null); - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/SchemaCompatibility.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/SchemaCompatibility.java deleted file mode 100644 index c79cf934d8..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/SchemaCompatibility.java +++ /dev/null @@ -1,546 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.cloud.bigquery.BigQuery; -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.LegacySQLTypeName; -import com.google.cloud.bigquery.Schema; -import com.google.cloud.bigquery.Table; -import com.google.cloud.bigquery.TableId; -import com.google.cloud.bigquery.testing.RemoteBigQueryHelper; -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.protobuf.Descriptors; -import java.util.Arrays; -import java.util.Collections; -import java.util.HashMap; -import java.util.HashSet; -import java.util.List; -import java.util.Set; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * A class that checks the schema compatibility between Proto schema in proto descriptor and - * Bigquery table schema. If this check is passed, then user can write to BigQuery table using the - * user schema, otherwise the write will fail. - * - *
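- * <p>A minimal usage sketch ({@code FooProto} is an illustrative generated message class, and
- * the {@code check} call shape is assumed from the javadoc later in this class):
- *
- * <pre>{@code
- * SchemaCompatibility compat = SchemaCompatibility.getInstance();
- * compat.check("projects/p/datasets/d/tables/t", FooProto.getDescriptor(), false);
- * }</pre>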

- * <p>The implementation as of now is not complete, which means that even if this check passes,
- * there is still a possibility that the write will fail.
- *
- * @deprecated Use {@link com.google.cloud.bigquery.storage.v1beta2.SchemaCompatibility}
- */
-@Deprecated
-public class SchemaCompatibility {
-  private BigQuery bigquery;
-  private static SchemaCompatibility compat;
-  private static String tablePatternString = "projects/([^/]+)/datasets/([^/]+)/tables/([^/]+)";
-  private static Pattern tablePattern = Pattern.compile(tablePatternString);
-  private static final int NestingLimit = 15;
-  // private static Set SupportedTypesHashSet =
-
-  private static Set<Descriptors.FieldDescriptor.Type> SupportedTypes =
-      Collections.unmodifiableSet(
-          new HashSet<>(
-              Arrays.asList(
-                  Descriptors.FieldDescriptor.Type.INT32,
-                  Descriptors.FieldDescriptor.Type.INT64,
-                  Descriptors.FieldDescriptor.Type.UINT32,
-                  Descriptors.FieldDescriptor.Type.UINT64,
-                  Descriptors.FieldDescriptor.Type.FIXED32,
-                  Descriptors.FieldDescriptor.Type.FIXED64,
-                  Descriptors.FieldDescriptor.Type.SFIXED32,
-                  Descriptors.FieldDescriptor.Type.SFIXED64,
-                  Descriptors.FieldDescriptor.Type.FLOAT,
-                  Descriptors.FieldDescriptor.Type.DOUBLE,
-                  Descriptors.FieldDescriptor.Type.BOOL,
-                  Descriptors.FieldDescriptor.Type.BYTES,
-                  Descriptors.FieldDescriptor.Type.STRING,
-                  Descriptors.FieldDescriptor.Type.MESSAGE,
-                  Descriptors.FieldDescriptor.Type.GROUP,
-                  Descriptors.FieldDescriptor.Type.ENUM)));
-
-  private SchemaCompatibility(BigQuery bigquery) {
-    // TODO: Add functionality that allows SchemaCompatibility to build schemas.
-    this.bigquery = bigquery;
-  }
-
-  /**
-   * Gets a singleton {@code SchemaCompatibility} object.
-   *
-   * @return
-   */
-  public static SchemaCompatibility getInstance() {
-    if (compat == null) {
-      RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create();
-      compat = new SchemaCompatibility(bigqueryHelper.getOptions().getService());
-    }
-    return compat;
-  }
-
-  /**
-   * Gets a {@code SchemaCompatibility} object with custom BigQuery stub.
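-   *
-   * <p>For example, tests might pass a mocked client (a sketch; the Mockito mock is
-   * illustrative and not part of this class):
-   *
-   * <pre>{@code
-   * BigQuery mockBigquery = Mockito.mock(BigQuery.class);
-   * SchemaCompatibility compat = SchemaCompatibility.getInstance(mockBigquery);
-   * }</pre>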
- * - * @param bigquery - * @return - */ - @VisibleForTesting - public static SchemaCompatibility getInstance(BigQuery bigquery) { - Preconditions.checkNotNull(bigquery, "BigQuery is null."); - return new SchemaCompatibility(bigquery); - } - - private TableId getTableId(String tableName) { - Matcher matcher = tablePattern.matcher(tableName); - if (!matcher.matches() || matcher.groupCount() != 3) { - throw new IllegalArgumentException("Invalid table name: " + tableName); - } - return TableId.of(matcher.group(1), matcher.group(2), matcher.group(3)); - } - - /** - * @param field - * @return True if fieldtype is supported by BQ Schema - */ - public static boolean isSupportedType(Descriptors.FieldDescriptor field) { - Preconditions.checkNotNull(field, "Field is null."); - Descriptors.FieldDescriptor.Type fieldType = field.getType(); - if (!SupportedTypes.contains(fieldType)) { - return false; - } - return true; - } - - private static boolean isCompatibleWithBQBool(Descriptors.FieldDescriptor.Type field) { - if (field == Descriptors.FieldDescriptor.Type.BOOL - || field == Descriptors.FieldDescriptor.Type.INT32 - || field == Descriptors.FieldDescriptor.Type.INT64 - || field == Descriptors.FieldDescriptor.Type.UINT32 - || field == Descriptors.FieldDescriptor.Type.UINT64 - || field == Descriptors.FieldDescriptor.Type.FIXED32 - || field == Descriptors.FieldDescriptor.Type.FIXED64 - || field == Descriptors.FieldDescriptor.Type.SFIXED32 - || field == Descriptors.FieldDescriptor.Type.SFIXED64) { - return true; - } - return false; - } - - private static boolean isCompatibleWithBQBytes(Descriptors.FieldDescriptor.Type field) { - if (field == Descriptors.FieldDescriptor.Type.BYTES) { - return true; - } - return false; - } - - private static boolean isCompatibleWithBQDate(Descriptors.FieldDescriptor.Type field) { - if (field == Descriptors.FieldDescriptor.Type.INT32 - || field == Descriptors.FieldDescriptor.Type.INT64 - || field == Descriptors.FieldDescriptor.Type.SFIXED32 - || field == Descriptors.FieldDescriptor.Type.SFIXED64) { - - return true; - } - return false; - } - - private static boolean isCompatibleWithBQDatetime(Descriptors.FieldDescriptor.Type field) { - if (field == Descriptors.FieldDescriptor.Type.STRING - || field == Descriptors.FieldDescriptor.Type.INT64) { - return true; - } - return false; - } - - private static boolean isCompatibleWithBQFloat(Descriptors.FieldDescriptor.Type field) { - if (field == Descriptors.FieldDescriptor.Type.FLOAT) { - return true; - } - if (field == Descriptors.FieldDescriptor.Type.DOUBLE) { - return true; - } - return false; - } - - private static boolean isCompatibleWithBQGeography(Descriptors.FieldDescriptor.Type field) { - if (field == Descriptors.FieldDescriptor.Type.STRING) { - return true; - } - return false; - } - - private static boolean isCompatibleWithBQInteger(Descriptors.FieldDescriptor.Type field) { - if (field == Descriptors.FieldDescriptor.Type.INT64 - || field == Descriptors.FieldDescriptor.Type.SFIXED64 - || field == Descriptors.FieldDescriptor.Type.INT32 - || field == Descriptors.FieldDescriptor.Type.UINT32 - || field == Descriptors.FieldDescriptor.Type.FIXED32 - || field == Descriptors.FieldDescriptor.Type.SFIXED32 - || field == Descriptors.FieldDescriptor.Type.ENUM) { - return true; - } - return false; - } - - private static boolean isCompatibleWithBQNumeric(Descriptors.FieldDescriptor.Type field) { - if (field == Descriptors.FieldDescriptor.Type.INT32 - || field == Descriptors.FieldDescriptor.Type.INT64 - || field == 
Descriptors.FieldDescriptor.Type.UINT32 - || field == Descriptors.FieldDescriptor.Type.UINT64 - || field == Descriptors.FieldDescriptor.Type.FIXED32 - || field == Descriptors.FieldDescriptor.Type.FIXED64 - || field == Descriptors.FieldDescriptor.Type.SFIXED32 - || field == Descriptors.FieldDescriptor.Type.SFIXED64 - || field == Descriptors.FieldDescriptor.Type.STRING - || field == Descriptors.FieldDescriptor.Type.BYTES - || field == Descriptors.FieldDescriptor.Type.FLOAT - || field == Descriptors.FieldDescriptor.Type.DOUBLE) { - return true; - } - - return false; - } - - private static boolean isCompatibleWithBQRecord(Descriptors.FieldDescriptor.Type field) { - if (field == Descriptors.FieldDescriptor.Type.MESSAGE - || field == Descriptors.FieldDescriptor.Type.GROUP) { - return true; - } - return false; - } - - private static boolean isCompatibleWithBQString(Descriptors.FieldDescriptor.Type field) { - if (field == Descriptors.FieldDescriptor.Type.STRING - || field == Descriptors.FieldDescriptor.Type.ENUM) { - return true; - } - return false; - } - - private static boolean isCompatibleWithBQTime(Descriptors.FieldDescriptor.Type field) { - if (field == Descriptors.FieldDescriptor.Type.INT64 - || field == Descriptors.FieldDescriptor.Type.STRING) { - - return true; - } - return false; - } - - private static boolean isCompatibleWithBQTimestamp(Descriptors.FieldDescriptor.Type field) { - if (isCompatibleWithBQInteger(field)) { - return true; - } - return false; - } - - /** - * Checks if proto field option is compatible with BQ field mode. - * - * @param protoField - * @param BQField - * @param protoScope Debugging purposes to show error if messages are nested. - * @param BQScope Debugging purposes to show error if messages are nested. - * @throws IllegalArgumentException if proto field type is incompatible with BQ field type. - */ - private void protoFieldModeIsCompatibleWithBQFieldMode( - Descriptors.FieldDescriptor protoField, Field BQField, String protoScope, String BQScope) - throws IllegalArgumentException { - if (BQField.getMode() == null) { - throw new IllegalArgumentException( - "Big query schema contains invalid field option for " + BQScope + "."); - } - switch (BQField.getMode()) { - case REPEATED: - if (!protoField.isRepeated()) { - throw new IllegalArgumentException( - "Given proto field " - + protoScope - + " is not repeated but Big Query field " - + BQScope - + " is."); - } - break; - case REQUIRED: - if (!protoField.isRequired()) { - throw new IllegalArgumentException( - "Given proto field " - + protoScope - + " is not required but Big Query field " - + BQScope - + " is."); - } - break; - case NULLABLE: - if (protoField.isRepeated()) { - throw new IllegalArgumentException( - "Given proto field " - + protoScope - + " is repeated but Big Query field " - + BQScope - + " is optional."); - } - break; - } - } - /** - * Checks if proto field type is compatible with BQ field type. - * - * @param protoField - * @param BQField - * @param allowUnknownFields - * @param protoScope Debugging purposes to show error if messages are nested. - * @param BQScope Debugging purposes to show error if messages are nested. - * @param allMessageTypes Keeps track of all current protos to avoid recursively nested protos. - * @param rootProtoName Debugging purposes for nested level > 15. - * @throws IllegalArgumentException if proto field type is incompatible with BQ field type. 
- */ - private void protoFieldTypeIsCompatibleWithBQFieldType( - Descriptors.FieldDescriptor protoField, - Field BQField, - boolean allowUnknownFields, - String protoScope, - String BQScope, - HashSet allMessageTypes, - String rootProtoName) - throws IllegalArgumentException { - - LegacySQLTypeName BQType = BQField.getType(); - Descriptors.FieldDescriptor.Type protoType = protoField.getType(); - boolean match = false; - switch (BQType.toString()) { - case "BOOLEAN": - match = isCompatibleWithBQBool(protoType); - break; - case "BYTES": - match = isCompatibleWithBQBytes(protoType); - break; - case "DATE": - match = isCompatibleWithBQDate(protoType); - break; - case "DATETIME": - match = isCompatibleWithBQDatetime(protoType); - break; - case "FLOAT": - match = isCompatibleWithBQFloat(protoType); - break; - case "GEOGRAPHY": - match = isCompatibleWithBQGeography(protoType); - break; - case "INTEGER": - match = isCompatibleWithBQInteger(protoType); - break; - case "NUMERIC": - match = isCompatibleWithBQNumeric(protoType); - break; - case "RECORD": - if (allMessageTypes.size() > NestingLimit) { - throw new IllegalArgumentException( - "Proto schema " - + rootProtoName - + " is not supported: contains nested messages of more than 15 levels."); - } - match = isCompatibleWithBQRecord(protoType); - if (!match) { - break; - } - Descriptors.Descriptor message = protoField.getMessageType(); - if (allMessageTypes.contains(message)) { - throw new IllegalArgumentException( - "Proto schema " + protoScope + " is not supported: is a recursively nested message."); - } - allMessageTypes.add(message); - isProtoCompatibleWithBQ( - protoField.getMessageType(), - Schema.of(BQField.getSubFields()), - allowUnknownFields, - protoScope, - BQScope, - false, - allMessageTypes, - rootProtoName); - allMessageTypes.remove(message); - break; - case "STRING": - match = isCompatibleWithBQString(protoType); - break; - case "TIME": - match = isCompatibleWithBQTime(protoType); - break; - case "TIMESTAMP": - match = isCompatibleWithBQTimestamp(protoType); - break; - } - if (!match) { - throw new IllegalArgumentException( - "The proto field " - + protoScope - + " does not have a matching type with the big query field " - + BQScope - + "."); - } - } - - /** - * Checks if proto schema is compatible with BQ schema. - * - * @param protoSchema - * @param BQSchema - * @param allowUnknownFields - * @param protoScope Debugging purposes to show error if messages are nested. - * @param BQScope Debugging purposes to show error if messages are nested. - * @param topLevel True if this is the root level of proto (in terms of nested messages) - * @param allMessageTypes Keeps track of all current protos to avoid recursively nested protos. - * @param rootProtoName Debugging purposes for nested level > 15. - * @throws IllegalArgumentException if proto field type is incompatible with BQ field type. 
- */ - private void isProtoCompatibleWithBQ( - Descriptors.Descriptor protoSchema, - Schema BQSchema, - boolean allowUnknownFields, - String protoScope, - String BQScope, - boolean topLevel, - HashSet allMessageTypes, - String rootProtoName) - throws IllegalArgumentException { - - int matchedFields = 0; - HashMap protoFieldMap = new HashMap<>(); - List protoFields = protoSchema.getFields(); - List BQFields = BQSchema.getFields(); - - if (protoFields.size() > BQFields.size()) { - if (!allowUnknownFields) { - throw new IllegalArgumentException( - "Proto schema " - + protoScope - + " has " - + protoFields.size() - + " fields, while BQ schema " - + BQScope - + " has " - + BQFields.size() - + " fields."); - } - } - // Use hashmap to map from lowercased name to appropriate field to account for casing difference - for (Descriptors.FieldDescriptor field : protoFields) { - protoFieldMap.put(field.getName().toLowerCase(), field); - } - - for (Field BQField : BQFields) { - String fieldName = BQField.getName().toLowerCase(); - Descriptors.FieldDescriptor protoField = null; - if (protoFieldMap.containsKey(fieldName)) { - protoField = protoFieldMap.get(fieldName); - } - - String currentBQScope = BQScope + "." + BQField.getName(); - if (protoField == null && BQField.getMode() == Field.Mode.REQUIRED) { - throw new IllegalArgumentException( - "The required Big Query field " - + currentBQScope - + " is missing in the proto schema " - + protoScope - + "."); - } - if (protoField == null) { - continue; - } - String currentProtoScope = protoScope + "." + protoField.getName(); - if (!isSupportedType(protoField)) { - throw new IllegalArgumentException( - "Proto schema " - + currentProtoScope - + " is not supported: contains " - + protoField.getType() - + " field type."); - } - if (protoField.isMapField()) { - throw new IllegalArgumentException( - "Proto schema " + currentProtoScope + " is not supported: is a map field."); - } - protoFieldModeIsCompatibleWithBQFieldMode( - protoField, BQField, currentProtoScope, currentBQScope); - protoFieldTypeIsCompatibleWithBQFieldType( - protoField, - BQField, - allowUnknownFields, - currentProtoScope, - currentBQScope, - allMessageTypes, - rootProtoName); - matchedFields++; - } - - if (matchedFields == 0 && topLevel) { - throw new IllegalArgumentException( - "There is no matching fields found for the proto schema " - + protoScope - + " and the BQ table schema " - + BQScope - + "."); - } - } - - /** - * Checks if proto schema is compatible with BQ schema after retrieving BQ schema by BQTableName. - * - * @param BQTableName Must include project_id, dataset_id, and table_id in the form that matches - * the regex "projects/([^/]+)/datasets/([^/]+)/tables/([^/]+)" - * @param protoSchema - * @param allowUnknownFields Flag indicating proto can have unknown fields. - * @throws IllegalArgumentException if proto field type is incompatible with BQ field type. 
- */ - public void check( - String BQTableName, Descriptors.Descriptor protoSchema, boolean allowUnknownFields) - throws IllegalArgumentException { - Preconditions.checkNotNull(BQTableName, "TableName is null."); - Preconditions.checkNotNull(protoSchema, "Protobuf descriptor is null."); - - TableId tableId = getTableId(BQTableName); - Table table = bigquery.getTable(tableId); - Schema BQSchema = table.getDefinition().getSchema(); - String protoSchemaName = protoSchema.getName(); - HashSet allMessageTypes = new HashSet<>(); - allMessageTypes.add(protoSchema); - isProtoCompatibleWithBQ( - protoSchema, - BQSchema, - allowUnknownFields, - protoSchemaName, - tableId.getTable(), - true, - allMessageTypes, - protoSchemaName); - } - - /** - * Checks if proto schema is compatible with BQ schema after retrieving BQ schema by BQTableName. - * Assumes allowUnknownFields is false. - * - * @param BQTableName Must include project_id, dataset_id, and table_id in the form that matches - * the regex "projects/([^/]+)/datasets/([^/]+)/tables/([^/]+)" - * @param protoSchema - * @throws IllegalArgumentException if proto field type is incompatible with BQ field type. - */ - public void check(String BQTableName, Descriptors.Descriptor protoSchema) - throws IllegalArgumentException { - - check(BQTableName, protoSchema, false); - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/StreamWriter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/StreamWriter.java deleted file mode 100644 index d94dba51b4..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/StreamWriter.java +++ /dev/null @@ -1,1045 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.core.ApiFuture; -import com.google.api.core.SettableApiFuture; -import com.google.api.gax.batching.BatchingSettings; -import com.google.api.gax.batching.FlowControlSettings; -import com.google.api.gax.batching.FlowController; -import com.google.api.gax.core.BackgroundResource; -import com.google.api.gax.core.BackgroundResourceAggregation; -import com.google.api.gax.core.CredentialsProvider; -import com.google.api.gax.core.ExecutorAsBackgroundResource; -import com.google.api.gax.core.ExecutorProvider; -import com.google.api.gax.core.InstantiatingExecutorProvider; -import com.google.api.gax.grpc.GrpcStatusCode; -import com.google.api.gax.retrying.RetrySettings; -import com.google.api.gax.rpc.AbortedException; -import com.google.api.gax.rpc.BidiStreamingCallable; -import com.google.api.gax.rpc.ClientStream; -import com.google.api.gax.rpc.ResponseObserver; -import com.google.api.gax.rpc.StreamController; -import com.google.api.gax.rpc.TransportChannelProvider; -import com.google.auth.oauth2.GoogleCredentials; -import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest; -import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse; -import com.google.common.base.Preconditions; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import java.io.IOException; -import java.util.ArrayList; -import java.util.LinkedList; -import java.util.List; -import java.util.Queue; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.logging.Level; -import java.util.logging.Logger; -import java.util.regex.Matcher; -import java.util.regex.Pattern; -import org.threeten.bp.Duration; - -/** - * A BigQuery Stream Writer that can be used to write data into BigQuery Table. - * - *

This is to be used for managed streaming writes when you are working with PENDING streams or - * want to explicitly manage offsets. In the most common case, writing to a COMMITTED stream - * without offsets, please use the simpler writer {@code DirectWriter}. - * - *
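For contrast, the COMMITTED-stream path mentioned above can be this simple. A minimal sketch, assuming a static {@code DirectWriter.append} entry point; the exact signature is not shown in this diff, so the call shape below is an assumption:

    // Hypothetical sketch: DirectWriter manages the COMMITTED stream and offsets internally.
    // The (tableName, messages) call shape is an assumption, not confirmed API.
    String table = "projects/my_project/datasets/my_dataset/tables/my_table";
    ApiFuture<Long> offsetFuture = DirectWriter.append(table, messages);
    Long committedOffset = offsetFuture.get(); // blocks until the rows are acknowledged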

A {@link StreamWriter} provides built-in capabilities to: batch messages; - * control memory utilization (through flow control); and automatically re-establish connections and - * clean up requests (only the first request in the stream keeps the write schema). - * - *

With customizable options that control: - * - *

- * <ul>
- *   <li>Message batching: such as the number of messages, the maximum batch byte size, and the batching deadline
- *   <li>Inflight message control: such as the number of outstanding messages or the maximum outstanding request bytes
- * </ul>

{@link StreamWriter} will use the credentials set on the channel, which uses application - * default credentials through {@link GoogleCredentials#getApplicationDefault} by default. - * - * @deprecated Use {@link com.google.cloud.bigquery.storage.v1alpha2.JsonStreamWriterV2} - */ -@Deprecated -public class StreamWriter implements AutoCloseable { - private static final Logger LOG = Logger.getLogger(StreamWriter.class.getName()); - - private static String streamPatternString = - "(projects/[^/]+/datasets/[^/]+/tables/[^/]+)/streams/[^/]+"; - - private static Pattern streamPattern = Pattern.compile(streamPatternString); - - private final String streamName; - private final String tableName; - - private final BatchingSettings batchingSettings; - private final RetrySettings retrySettings; - private BigQueryWriteSettings stubSettings; - - private final Lock messagesBatchLock; - private final Lock appendAndRefreshAppendLock; - private final MessagesBatch messagesBatch; - - // Indicates if a stream has some non recoverable exception happened. - private final Lock exceptionLock; - private Throwable streamException; - - private BackgroundResource backgroundResources; - private List backgroundResourceList; - - private BigQueryWriteClient stub; - BidiStreamingCallable bidiStreamingCallable; - ClientStream clientStream; - private final AppendResponseObserver responseObserver; - - private final ScheduledExecutorService executor; - - private final AtomicBoolean shutdown; - private final Waiter messagesWaiter; - private final AtomicBoolean activeAlarm; - private ScheduledFuture currentAlarmFuture; - - private Integer currentRetries = 0; - - // Used for schema updates - private OnSchemaUpdateRunnable onSchemaUpdateRunnable; - - /** The maximum size of one request. Defined by the API. */ - public static long getApiMaxRequestBytes() { - return 10L * 1000L * 1000L; // 10 megabytes (https://en.wikipedia.org/wiki/Megabyte) - } - - /** The maximum size of in flight requests. Defined by the API. 
*/ - public static long getApiMaxInflightRequests() { - return 5000L; - } - - private StreamWriter(Builder builder) - throws IllegalArgumentException, IOException, InterruptedException { - Matcher matcher = streamPattern.matcher(builder.streamName); - if (!matcher.matches()) { - throw new IllegalArgumentException("Invalid stream name: " + builder.streamName); - } - streamName = builder.streamName; - tableName = matcher.group(1); - - this.batchingSettings = builder.batchingSettings; - this.retrySettings = builder.retrySettings; - this.messagesBatch = new MessagesBatch(batchingSettings, this.streamName, this); - messagesBatchLock = new ReentrantLock(); - appendAndRefreshAppendLock = new ReentrantLock(); - activeAlarm = new AtomicBoolean(false); - this.exceptionLock = new ReentrantLock(); - this.streamException = null; - - executor = builder.executorProvider.getExecutor(); - backgroundResourceList = new ArrayList<>(); - if (builder.executorProvider.shouldAutoClose()) { - backgroundResourceList.add(new ExecutorAsBackgroundResource(executor)); - } - messagesWaiter = new Waiter(this.batchingSettings.getFlowControlSettings()); - responseObserver = new AppendResponseObserver(this); - - if (builder.client == null) { - stubSettings = - BigQueryWriteSettings.newBuilder() - .setCredentialsProvider(builder.credentialsProvider) - .setTransportChannelProvider(builder.channelProvider) - .setEndpoint(builder.endpoint) - .build(); - stub = BigQueryWriteClient.create(stubSettings); - backgroundResourceList.add(stub); - } else { - stub = builder.client; - } - backgroundResources = new BackgroundResourceAggregation(backgroundResourceList); - shutdown = new AtomicBoolean(false); - if (builder.onSchemaUpdateRunnable != null) { - this.onSchemaUpdateRunnable = builder.onSchemaUpdateRunnable; - this.onSchemaUpdateRunnable.setStreamWriter(this); - } - - refreshAppend(); - } - - /** Stream name we are writing to. */ - public String getStreamNameString() { - return streamName; - } - - /** Table name we are writing to. */ - public String getTableNameString() { - return tableName; - } - - /** OnSchemaUpdateRunnable for this streamWriter. */ - OnSchemaUpdateRunnable getOnSchemaUpdateRunnable() { - return this.onSchemaUpdateRunnable; - } - - private void setException(Throwable t) { - exceptionLock.lock(); - if (this.streamException == null) { - this.streamException = t; - } - exceptionLock.unlock(); - } - - /** - * Schedules the writing of a message. The write of the message may occur immediately or be - * delayed based on the writer batching options. - * - *

Example of writing a message. - * - *

{@code
-   * AppendRowsRequest message;
-   * ApiFuture<AppendRowsResponse> messageIdFuture = writer.append(message);
-   * ApiFutures.addCallback(messageIdFuture, new ApiFutureCallback<AppendRowsResponse>() {
-   *   public void onSuccess(AppendRowsResponse response) {
-   *     if (response.hasOffset()) {
-   *       System.out.println("written with offset: " + response.getOffset());
-   *     } else {
-   *       System.out.println("received an in-stream error: " + response.getError().toString());
-   *     }
-   *   }
-   *
-   *   public void onFailure(Throwable t) {
-   *     System.out.println("failed to write: " + t);
-   *   }
-   * }, MoreExecutors.directExecutor());
-   * }
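A blocking variant of the callback sample above, as a minimal sketch; it relies only on types already used in this file ({@code ApiFuture} extends {@code java.util.concurrent.Future}, so {@code get()} and {@code ExecutionException} come from the JDK):

    // Append one request and wait synchronously for the server acknowledgment.
    try {
      AppendRowsResponse response = writer.append(message).get();
      System.out.println("appended at offset: " + response.getOffset());
    } catch (InterruptedException | ExecutionException e) {
      System.out.println("append failed: " + e);
    }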
- * - * @param message the message in serialized format to write to BigQuery. - * @return the message ID wrapped in a future. - */ - public ApiFuture append(AppendRowsRequest message) { - appendAndRefreshAppendLock.lock(); - Preconditions.checkState(!shutdown.get(), "Cannot append on a shut-down writer."); - Preconditions.checkNotNull(message, "Message is null."); - final AppendRequestAndFutureResponse outstandingAppend = - new AppendRequestAndFutureResponse(message); - List batchesToSend; - messagesBatchLock.lock(); - try { - batchesToSend = messagesBatch.add(outstandingAppend); - // Setup the next duration based delivery alarm if there are messages batched. - setupAlarm(); - if (!batchesToSend.isEmpty()) { - for (final InflightBatch batch : batchesToSend) { - LOG.fine("Scheduling a batch for immediate sending."); - writeBatch(batch); - } - } - } finally { - messagesBatchLock.unlock(); - appendAndRefreshAppendLock.unlock(); - } - - return outstandingAppend.appendResult; - } - - /** - * This is the general flush method for asynchronise append operation. When you have outstanding - * append requests, calling flush will make sure all outstanding append requests completed and - * successful. Otherwise there will be an exception thrown. - * - * @throws Exception - */ - public void flushAll(long timeoutMillis) throws Exception { - appendAndRefreshAppendLock.lock(); - try { - writeAllOutstanding(); - synchronized (messagesWaiter) { - messagesWaiter.waitComplete(timeoutMillis); - } - } finally { - appendAndRefreshAppendLock.unlock(); - } - exceptionLock.lock(); - try { - if (streamException != null) { - throw new Exception(streamException); - } - } finally { - exceptionLock.unlock(); - } - } - - /** - * Flush the rows on a BUFFERED stream, up to the specified offset. After flush, rows will be - * available for read. If no exception is thrown, it means the flush happened. - * - *

NOTE: Currently the implementation is void, BUFFERED steam acts like COMMITTED stream. It is - * just for Dataflow team to mock the usage. - * - * @param offset Offset to which the rows will be committed to the system. It must fall within the - * row counts on the stream. - * @throws IllegalArgumentException if offset is invalid - */ - public void flush(long offset) { - if (offset < 0) { - throw new IllegalArgumentException("Invalid offset: " + offset); - } - // TODO: Once we persisted stream type, we should check the call can only be issued on BUFFERED - // stream here. - Storage.FlushRowsRequest request = - Storage.FlushRowsRequest.newBuilder().setWriteStream(streamName).setOffset(offset).build(); - stub.flushRows(request); - // TODO: We will verify if the returned offset is equal to requested offset. - } - - /** - * Re-establishes a stream connection. - * - * @throws IOException - */ - public void refreshAppend() throws IOException, InterruptedException { - appendAndRefreshAppendLock.lock(); - if (shutdown.get()) { - LOG.warning("Cannot refresh on a already shutdown writer."); - appendAndRefreshAppendLock.unlock(); - return; - } - // There could be a moment, stub is not yet initialized. - if (clientStream != null) { - LOG.info("Closing the stream " + streamName); - clientStream.closeSend(); - } - messagesBatch.resetAttachSchema(); - bidiStreamingCallable = stub.appendRowsCallable(); - clientStream = bidiStreamingCallable.splitCall(responseObserver); - try { - while (!clientStream.isSendReady()) { - Thread.sleep(10); - } - } catch (InterruptedException expected) { - } - Thread.sleep(this.retrySettings.getInitialRetryDelay().toMillis()); - // Can only unlock here since need to sleep the full 7 seconds before stream can allow appends. - appendAndRefreshAppendLock.unlock(); - LOG.info("Write Stream " + streamName + " connection established"); - } - - private void setupAlarm() { - if (!messagesBatch.isEmpty()) { - if (!activeAlarm.getAndSet(true)) { - long delayThresholdMs = getBatchingSettings().getDelayThreshold().toMillis(); - LOG.log(Level.FINE, "Setting up alarm for the next {0} ms.", delayThresholdMs); - currentAlarmFuture = - executor.schedule( - new Runnable() { - @Override - public void run() { - LOG.fine("Sending messages based on schedule"); - activeAlarm.getAndSet(false); - messagesBatchLock.lock(); - try { - writeBatch(messagesBatch.popBatch()); - } finally { - messagesBatchLock.unlock(); - } - } - }, - delayThresholdMs, - TimeUnit.MILLISECONDS); - } - } else if (currentAlarmFuture != null) { - LOG.log(Level.FINER, "Cancelling alarm, no more messages"); - if (activeAlarm.getAndSet(false)) { - currentAlarmFuture.cancel(false); - } - } - } - - /** - * Write any outstanding batches if non-empty. This method sends buffered messages, but does not - * wait for the send operations to complete. To wait for messages to send, call {@code get} on the - * futures returned from {@code append}. 
- */ - public void writeAllOutstanding() { - InflightBatch unorderedOutstandingBatch = null; - messagesBatchLock.lock(); - try { - if (!messagesBatch.isEmpty()) { - writeBatch(messagesBatch.popBatch()); - } - messagesBatch.reset(); - } finally { - messagesBatchLock.unlock(); - } - } - - private void writeBatch(final InflightBatch inflightBatch) { - if (inflightBatch != null) { - AppendRowsRequest request = inflightBatch.getMergedRequest(); - try { - messagesWaiter.acquire(inflightBatch.getByteSize()); - responseObserver.addInflightBatch(inflightBatch); - clientStream.send(request); - } catch (FlowController.FlowControlException ex) { - inflightBatch.onFailure(ex); - } - } - } - - /** Close the stream writer. Shut down all resources. */ - @Override - public void close() { - LOG.info("Closing stream writer:" + streamName); - shutdown(); - try { - awaitTermination(1, TimeUnit.MINUTES); - } catch (InterruptedException ignored) { - } - } - - // The batch of messages that is being sent/processed. - private static final class InflightBatch { - // List of requests that is going to be batched. - final List inflightRequests; - // A list tracks expected offset for each AppendRequest. Used to reconstruct the Response - // future. - private final ArrayList offsetList; - private final long creationTime; - private int attempt; - private long batchSizeBytes; - private long expectedOffset; - private Boolean attachSchema; - private String streamName; - private final AtomicBoolean failed; - private final StreamWriter streamWriter; - - InflightBatch( - List inflightRequests, - long batchSizeBytes, - String streamName, - Boolean attachSchema, - StreamWriter streamWriter) { - this.inflightRequests = inflightRequests; - this.offsetList = new ArrayList(inflightRequests.size()); - for (AppendRequestAndFutureResponse request : inflightRequests) { - if (request.message.getOffset().getValue() > 0) { - offsetList.add(new Long(request.message.getOffset().getValue())); - } else { - offsetList.add(new Long(-1)); - } - } - this.expectedOffset = offsetList.get(0).longValue(); - attempt = 1; - creationTime = System.currentTimeMillis(); - this.batchSizeBytes = batchSizeBytes; - this.attachSchema = attachSchema; - this.streamName = streamName; - this.failed = new AtomicBoolean(false); - this.streamWriter = streamWriter; - } - - int count() { - return inflightRequests.size(); - } - - long getByteSize() { - return this.batchSizeBytes; - } - - long getExpectedOffset() { - return expectedOffset; - } - - private AppendRowsRequest getMergedRequest() throws IllegalStateException { - if (inflightRequests.size() == 0) { - throw new IllegalStateException("Unexpected empty message batch"); - } - ProtoBufProto.ProtoRows.Builder rowsBuilder = - inflightRequests.get(0).message.getProtoRows().getRows().toBuilder(); - for (int i = 1; i < inflightRequests.size(); i++) { - rowsBuilder.addAllSerializedRows( - inflightRequests.get(i).message.getProtoRows().getRows().getSerializedRowsList()); - } - AppendRowsRequest.ProtoData.Builder data = - inflightRequests.get(0).message.getProtoRows().toBuilder().setRows(rowsBuilder.build()); - AppendRowsRequest.Builder requestBuilder = inflightRequests.get(0).message.toBuilder(); - if (!attachSchema) { - data.clearWriterSchema(); - requestBuilder.clearWriteStream(); - } else { - if (!data.hasWriterSchema()) { - throw new IllegalStateException( - "The first message on the connection must have writer schema set"); - } - requestBuilder.setWriteStream(streamName); - } - return 
requestBuilder.setProtoRows(data.build()).build(); - } - - private void onFailure(Throwable t) { - if (failed.getAndSet(true)) { - // Error has been set already. - LOG.warning("Ignore " + t.toString() + " since error has already been set"); - return; - } else { - LOG.info("Setting " + t.toString() + " on response"); - this.streamWriter.setException(t); - } - - for (AppendRequestAndFutureResponse request : inflightRequests) { - request.appendResult.setException(t); - } - } - - // Disassemble the batched response and sets the furture on individual request. - private void onSuccess(AppendRowsResponse response) { - for (int i = 0; i < inflightRequests.size(); i++) { - AppendRowsResponse.Builder singleResponse = response.toBuilder(); - if (offsetList.get(i) > 0) { - singleResponse.setOffset(offsetList.get(i)); - } else { - long actualOffset = response.getOffset(); - for (int j = 0; j < i; j++) { - actualOffset += - inflightRequests.get(j).message.getProtoRows().getRows().getSerializedRowsCount(); - } - singleResponse.setOffset(actualOffset); - } - inflightRequests.get(i).appendResult.set(singleResponse.build()); - } - } - } - - // Class that wraps AppendRowsRequest and its cooresponding Response future. - private static final class AppendRequestAndFutureResponse { - final SettableApiFuture appendResult; - final AppendRowsRequest message; - final int messageSize; - - AppendRequestAndFutureResponse(AppendRowsRequest message) { - this.appendResult = SettableApiFuture.create(); - this.message = message; - this.messageSize = message.getProtoRows().getSerializedSize(); - if (this.messageSize > getApiMaxRequestBytes()) { - throw new StatusRuntimeException( - Status.fromCode(Status.Code.FAILED_PRECONDITION) - .withDescription("Message exceeded max size limit: " + getApiMaxRequestBytes())); - } - } - } - - /** The batching settings configured on this {@code StreamWriter}. */ - public BatchingSettings getBatchingSettings() { - return batchingSettings; - } - - /** The retry settings configured on this {@code StreamWriter}. */ - public RetrySettings getRetrySettings() { - return retrySettings; - } - - /** - * Schedules immediate flush of any outstanding messages and waits until all are processed. - * - *

Sends remaining outstanding messages and prevents further calls to append. This method - * should be invoked prior to deleting the {@link StreamWriter} object in order to ensure that no - * pending messages are lost. - */ - protected void shutdown() { - if (shutdown.getAndSet(true)) { - LOG.fine("Already shutdown."); - return; - } - LOG.fine("Shutdown called on writer"); - if (currentAlarmFuture != null && activeAlarm.getAndSet(false)) { - currentAlarmFuture.cancel(false); - } - writeAllOutstanding(); - try { - synchronized (messagesWaiter) { - messagesWaiter.waitComplete(0); - } - } catch (InterruptedException e) { - LOG.warning("Failed to wait for messages to return " + e.toString()); - } - if (clientStream.isSendReady()) { - clientStream.closeSend(); - } - backgroundResources.shutdown(); - } - - /** - * Waits until all work has completed execution after a {@link #shutdown()} request, or until the timeout - * occurs, or the current thread is interrupted. - * - *

Call this method to make sure all resources are freed properly. - */ - protected boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { - return backgroundResources.awaitTermination(duration, unit); - } - - /** - * Constructs a new {@link Builder} using the given stream. - * - *

Example of creating a {@code StreamWriter} on a newly created write stream.

{@code
-   * String table = "projects/my_project/datasets/my_dataset/tables/my_table";
-   * String stream;
-   * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
-   *     CreateWriteStreamRequest request = CreateWriteStreamRequest.newBuilder().setParent(table).build();
-   *     WriteStream response = bigQueryWriteClient.createWriteStream(request);
-   *     stream = response.getName();
-   * }
-   * try (StreamWriter writer = StreamWriter.newBuilder(stream).build()) {
-   *   //...
-   * }
-   * }
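The builder also accepts custom batching, flow-control, and retry settings through the setters defined below. A sketch wiring them together with the gax types this file already imports; the threshold values are illustrative assumptions, not recommendations:

    // Inflight message control: cap outstanding messages and bytes, blocking when full.
    FlowControlSettings flowControl =
        FlowControlSettings.newBuilder()
            .setLimitExceededBehavior(FlowController.LimitExceededBehavior.Block)
            .setMaxOutstandingElementCount(1000L)
            .setMaxOutstandingRequestBytes(10L * 1024L * 1024L)
            .build();

    // Message batching: flush on element count, byte size, or delay, whichever trips first.
    BatchingSettings batching =
        BatchingSettings.newBuilder()
            .setElementCountThreshold(100L)
            .setRequestByteThreshold(64L * 1024L)
            .setDelayThreshold(Duration.ofMillis(10))
            .setFlowControlSettings(flowControl)
            .build();

    StreamWriter writer =
        StreamWriter.newBuilder(stream)
            .setBatchingSettings(batching)
            .setRetrySettings(
                RetrySettings.newBuilder()
                    .setInitialRetryDelay(Duration.ofMillis(100))
                    .setMaxRetryDelay(Duration.ofSeconds(60))
                    .setMaxAttempts(3)
                    .build())
            .build(); // throws IOException/InterruptedException, as declared on build()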
- */ - public static Builder newBuilder(String streamName) { - Preconditions.checkNotNull(streamName, "StreamName is null."); - return new Builder(streamName, null); - } - - /** - * Constructs a new {@link Builder} using the given stream and an existing BigQueryWriteClient. - */ - public static Builder newBuilder(String streamName, BigQueryWriteClient client) { - Preconditions.checkNotNull(streamName, "StreamName is null."); - Preconditions.checkNotNull(client, "Client is null."); - return new Builder(streamName, client); - } - - /** A builder of {@link StreamWriter}s. */ - public static final class Builder { - static final Duration MIN_TOTAL_TIMEOUT = Duration.ofSeconds(10); - static final Duration MIN_RPC_TIMEOUT = Duration.ofMillis(10); - - // Meaningful defaults. - static final FlowControlSettings DEFAULT_FLOW_CONTROL_SETTINGS = - FlowControlSettings.newBuilder() - .setLimitExceededBehavior(FlowController.LimitExceededBehavior.Block) - .setMaxOutstandingElementCount(1000L) - .setMaxOutstandingRequestBytes(100 * 1024 * 1024L) // 100 Mb - .build(); - public static final BatchingSettings DEFAULT_BATCHING_SETTINGS = - BatchingSettings.newBuilder() - .setDelayThreshold(Duration.ofMillis(10)) - .setRequestByteThreshold(100 * 1024L) // 100 kb - .setElementCountThreshold(100L) - .setFlowControlSettings(DEFAULT_FLOW_CONTROL_SETTINGS) - .build(); - public static final RetrySettings DEFAULT_RETRY_SETTINGS = - RetrySettings.newBuilder() - .setMaxRetryDelay(Duration.ofSeconds(60)) - .setInitialRetryDelay(Duration.ofMillis(100)) - .setMaxAttempts(3) - .build(); - static final boolean DEFAULT_ENABLE_MESSAGE_ORDERING = false; - private static final int THREADS_PER_CPU = 5; - static final ExecutorProvider DEFAULT_EXECUTOR_PROVIDER = - InstantiatingExecutorProvider.newBuilder() - .setExecutorThreadCount(THREADS_PER_CPU * Runtime.getRuntime().availableProcessors()) - .build(); - - private String streamName; - private String endpoint = BigQueryWriteSettings.getDefaultEndpoint(); - - private BigQueryWriteClient client = null; - - // Batching options - BatchingSettings batchingSettings = DEFAULT_BATCHING_SETTINGS; - - RetrySettings retrySettings = DEFAULT_RETRY_SETTINGS; - - private boolean enableMessageOrdering = DEFAULT_ENABLE_MESSAGE_ORDERING; - - private TransportChannelProvider channelProvider = - BigQueryWriteSettings.defaultGrpcTransportProviderBuilder().setChannelsPerCpu(1).build(); - - ExecutorProvider executorProvider = DEFAULT_EXECUTOR_PROVIDER; - private CredentialsProvider credentialsProvider = - BigQueryWriteSettings.defaultCredentialsProviderBuilder().build(); - - private OnSchemaUpdateRunnable onSchemaUpdateRunnable; - - private Builder(String stream, BigQueryWriteClient client) { - this.streamName = Preconditions.checkNotNull(stream); - this.client = client; - } - - /** - * {@code ChannelProvider} to use to create Channels, which must point at Cloud BigQuery Storage - * API endpoint. - * - *

For performance, this client benefits from having multiple underlying connections. See - * {@link com.google.api.gax.grpc.InstantiatingGrpcChannelProvider.Builder#setPoolSize(int)}. - */ - public Builder setChannelProvider(TransportChannelProvider channelProvider) { - this.channelProvider = - Preconditions.checkNotNull(channelProvider, "ChannelProvider is null."); - return this; - } - - /** {@code CredentialsProvider} to use to create Credentials to authenticate calls. */ - public Builder setCredentialsProvider(CredentialsProvider credentialsProvider) { - this.credentialsProvider = - Preconditions.checkNotNull(credentialsProvider, "CredentialsProvider is null."); - return this; - } - - /** - * Sets the {@code BatchSettings} on the writer. - * - * @param batchingSettings - * @return - */ - public Builder setBatchingSettings(BatchingSettings batchingSettings) { - Preconditions.checkNotNull(batchingSettings, "BatchingSettings is null."); - - BatchingSettings.Builder builder = batchingSettings.toBuilder(); - Preconditions.checkNotNull(batchingSettings.getElementCountThreshold()); - Preconditions.checkArgument(batchingSettings.getElementCountThreshold() > 0); - Preconditions.checkNotNull(batchingSettings.getRequestByteThreshold()); - Preconditions.checkArgument(batchingSettings.getRequestByteThreshold() > 0); - if (batchingSettings.getRequestByteThreshold() > getApiMaxRequestBytes()) { - builder.setRequestByteThreshold(getApiMaxRequestBytes()); - } - Preconditions.checkNotNull(batchingSettings.getDelayThreshold()); - Preconditions.checkArgument(batchingSettings.getDelayThreshold().toMillis() > 0); - if (batchingSettings.getFlowControlSettings() == null) { - builder.setFlowControlSettings(DEFAULT_FLOW_CONTROL_SETTINGS); - } else { - - if (batchingSettings.getFlowControlSettings().getMaxOutstandingElementCount() == null) { - builder.setFlowControlSettings( - batchingSettings - .getFlowControlSettings() - .toBuilder() - .setMaxOutstandingElementCount( - DEFAULT_FLOW_CONTROL_SETTINGS.getMaxOutstandingElementCount()) - .build()); - } else { - Preconditions.checkArgument( - batchingSettings.getFlowControlSettings().getMaxOutstandingElementCount() > 0); - if (batchingSettings.getFlowControlSettings().getMaxOutstandingElementCount() - > getApiMaxInflightRequests()) { - builder.setFlowControlSettings( - batchingSettings - .getFlowControlSettings() - .toBuilder() - .setMaxOutstandingElementCount(getApiMaxInflightRequests()) - .build()); - } - } - if (batchingSettings.getFlowControlSettings().getMaxOutstandingRequestBytes() == null) { - builder.setFlowControlSettings( - batchingSettings - .getFlowControlSettings() - .toBuilder() - .setMaxOutstandingRequestBytes( - DEFAULT_FLOW_CONTROL_SETTINGS.getMaxOutstandingRequestBytes()) - .build()); - } else { - Preconditions.checkArgument( - batchingSettings.getFlowControlSettings().getMaxOutstandingRequestBytes() > 0); - } - if (batchingSettings.getFlowControlSettings().getLimitExceededBehavior() == null) { - builder.setFlowControlSettings( - batchingSettings - .getFlowControlSettings() - .toBuilder() - .setLimitExceededBehavior( - DEFAULT_FLOW_CONTROL_SETTINGS.getLimitExceededBehavior()) - .build()); - } else { - Preconditions.checkArgument( - batchingSettings.getFlowControlSettings().getLimitExceededBehavior() - != FlowController.LimitExceededBehavior.Ignore); - } - } - this.batchingSettings = builder.build(); - return this; - } - - /** - * Sets the {@code RetrySettings} on the writer. 
- * - * @param retrySettings - * @return - */ - public Builder setRetrySettings(RetrySettings retrySettings) { - this.retrySettings = Preconditions.checkNotNull(retrySettings, "RetrySettings is null."); - return this; - } - - /** Gives the ability to set a custom executor to be used by the library. */ - public Builder setExecutorProvider(ExecutorProvider executorProvider) { - this.executorProvider = - Preconditions.checkNotNull(executorProvider, "ExecutorProvider is null."); - return this; - } - - /** Gives the ability to override the gRPC endpoint. */ - public Builder setEndpoint(String endpoint) { - this.endpoint = Preconditions.checkNotNull(endpoint, "Endpoint is null."); - return this; - } - - /** Gives the ability to set action on schema update. */ - public Builder setOnSchemaUpdateRunnable(OnSchemaUpdateRunnable onSchemaUpdateRunnable) { - this.onSchemaUpdateRunnable = - Preconditions.checkNotNull(onSchemaUpdateRunnable, "onSchemaUpdateRunnable is null."); - return this; - } - - /** Builds the {@code StreamWriter}. */ - public StreamWriter build() throws IllegalArgumentException, IOException, InterruptedException { - return new StreamWriter(this); - } - } - - private static final class AppendResponseObserver - implements ResponseObserver { - private Queue inflightBatches = new LinkedList(); - private StreamWriter streamWriter; - - public void addInflightBatch(InflightBatch batch) { - synchronized (this.inflightBatches) { - this.inflightBatches.add(batch); - } - } - - public AppendResponseObserver(StreamWriter streamWriter) { - this.streamWriter = streamWriter; - } - - private boolean isRecoverableError(Throwable t) { - Status status = Status.fromThrowable(t); - return status.getCode() == Status.Code.UNAVAILABLE; - } - - @Override - public void onStart(StreamController controller) { - // no-op - } - - private void abortInflightRequests(Throwable t) { - synchronized (this.inflightBatches) { - while (!this.inflightBatches.isEmpty()) { - InflightBatch inflightBatch = this.inflightBatches.poll(); - inflightBatch.onFailure( - new AbortedException( - "Request aborted due to previous failures", - t, - GrpcStatusCode.of(Status.Code.ABORTED), - true)); - streamWriter.messagesWaiter.release(inflightBatch.getByteSize()); - } - } - } - - @Override - public void onResponse(AppendRowsResponse response) { - InflightBatch inflightBatch = null; - synchronized (this.inflightBatches) { - inflightBatch = this.inflightBatches.poll(); - } - try { - streamWriter.currentRetries = 0; - if (response == null) { - inflightBatch.onFailure(new IllegalStateException("Response is null")); - } - if (response.hasUpdatedSchema()) { - if (streamWriter.getOnSchemaUpdateRunnable() != null) { - streamWriter.getOnSchemaUpdateRunnable().setUpdatedSchema(response.getUpdatedSchema()); - streamWriter.executor.schedule( - streamWriter.getOnSchemaUpdateRunnable(), 0L, TimeUnit.MILLISECONDS); - } - } - // Currently there is nothing retryable. If the error is already exists, then ignore it. 
- if (response.hasError()) { - if (response.getError().getCode() != 6 /* ALREADY_EXISTS */) { - StatusRuntimeException exception = - new StatusRuntimeException( - Status.fromCodeValue(response.getError().getCode()) - .withDescription(response.getError().getMessage())); - inflightBatch.onFailure(exception); - } - } - if (inflightBatch.getExpectedOffset() > 0 - && response.getOffset() != inflightBatch.getExpectedOffset()) { - IllegalStateException exception = - new IllegalStateException( - String.format( - "The append result offset %s does not match " + "the expected offset %s.", - response.getOffset(), inflightBatch.getExpectedOffset())); - inflightBatch.onFailure(exception); - abortInflightRequests(exception); - } else { - inflightBatch.onSuccess(response); - } - } finally { - streamWriter.messagesWaiter.release(inflightBatch.getByteSize()); - } - } - - @Override - public void onComplete() { - LOG.info("OnComplete called"); - } - - @Override - public void onError(Throwable t) { - LOG.fine("OnError called"); - if (streamWriter.shutdown.get()) { - return; - } - InflightBatch inflightBatch = null; - synchronized (this.inflightBatches) { - if (inflightBatches.isEmpty()) { - // The batches could have been aborted. - return; - } - inflightBatch = this.inflightBatches.poll(); - } - try { - if (isRecoverableError(t)) { - try { - if (streamWriter.currentRetries < streamWriter.getRetrySettings().getMaxAttempts() - && !streamWriter.shutdown.get()) { - streamWriter.refreshAppend(); - LOG.info("Resending requests on transient error:" + streamWriter.currentRetries); - streamWriter.writeBatch(inflightBatch); - synchronized (streamWriter.currentRetries) { - streamWriter.currentRetries++; - } - } else { - inflightBatch.onFailure(t); - abortInflightRequests(t); - synchronized (streamWriter.currentRetries) { - streamWriter.currentRetries = 0; - } - } - } catch (IOException | InterruptedException e) { - LOG.info("Got exception while retrying."); - inflightBatch.onFailure(e); - abortInflightRequests(e); - synchronized (streamWriter.currentRetries) { - streamWriter.currentRetries = 0; - } - } - } else { - inflightBatch.onFailure(t); - abortInflightRequests(t); - synchronized (streamWriter.currentRetries) { - streamWriter.currentRetries = 0; - } - } - } finally { - streamWriter.messagesWaiter.release(inflightBatch.getByteSize()); - } - } - }; - - // This class controls how many messages are going to be sent out in a batch. - private static class MessagesBatch { - private List messages; - private long batchedBytes; - private final BatchingSettings batchingSettings; - private Boolean attachSchema = true; - private final String streamName; - private final StreamWriter streamWriter; - - private MessagesBatch( - BatchingSettings batchingSettings, String streamName, StreamWriter streamWriter) { - this.batchingSettings = batchingSettings; - this.streamName = streamName; - this.streamWriter = streamWriter; - reset(); - } - - // Get all the messages out in a batch. 
- private InflightBatch popBatch() { - InflightBatch batch = - new InflightBatch( - messages, batchedBytes, this.streamName, this.attachSchema, this.streamWriter); - this.attachSchema = false; - reset(); - return batch; - } - - private void reset() { - messages = new LinkedList<>(); - batchedBytes = 0; - } - - private void resetAttachSchema() { - attachSchema = true; - } - - private boolean isEmpty() { - return messages.isEmpty(); - } - - private long getBatchedBytes() { - return batchedBytes; - } - - private int getMessagesCount() { - return messages.size(); - } - - private boolean hasBatchingBytes() { - return getMaxBatchBytes() > 0; - } - - private long getMaxBatchBytes() { - return batchingSettings.getRequestByteThreshold(); - } - - // The message batch returned could contain the previous batch of messages plus the current - // message. - // if the message is too large. - private List add(AppendRequestAndFutureResponse outstandingAppend) { - List batchesToSend = new ArrayList<>(); - // Check if the next message makes the current batch exceed the max batch byte size. - if (!isEmpty() - && hasBatchingBytes() - && getBatchedBytes() + outstandingAppend.messageSize >= getMaxBatchBytes()) { - batchesToSend.add(popBatch()); - } - - messages.add(outstandingAppend); - batchedBytes += outstandingAppend.messageSize; - - // Border case: If the message to send is greater or equals to the max batch size then send it - // immediately. - // Alternatively if after adding the message we have reached the batch max messages then we - // have a batch to send. - if ((hasBatchingBytes() && outstandingAppend.messageSize >= getMaxBatchBytes()) - || getMessagesCount() == batchingSettings.getElementCountThreshold()) { - batchesToSend.add(popBatch()); - } - - return batchesToSend; - } - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Waiter.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Waiter.java deleted file mode 100644 index 29d389f494..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Waiter.java +++ /dev/null @@ -1,182 +0,0 @@ -/* - * Copyright 2016 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.core.InternalApi; -import com.google.api.gax.batching.FlowControlSettings; -import com.google.api.gax.batching.FlowController; -import java.util.LinkedList; -import java.util.concurrent.CountDownLatch; -import java.util.concurrent.locks.Lock; -import java.util.concurrent.locks.ReentrantLock; -import java.util.logging.Logger; - -/** - * A barrier kind of object that helps keep track of pending actions and synchronously wait until - * all have completed. 
- * - * @deprecated No longer supported - */ -@Deprecated -class Waiter { - private static final Logger LOG = Logger.getLogger(Waiter.class.getName()); - - private long pendingCount; - private long pendingSize; - private long countLimit; - private long sizeLimit; - private FlowController.LimitExceededBehavior behavior; - private LinkedList awaitingMessageAcquires; - private LinkedList awaitingBytesAcquires; - private final Lock lock; - - Waiter(FlowControlSettings flowControlSettings) { - pendingCount = 0; - pendingSize = 0; - this.awaitingMessageAcquires = new LinkedList(); - this.awaitingBytesAcquires = new LinkedList(); - this.countLimit = flowControlSettings.getMaxOutstandingElementCount(); - this.sizeLimit = flowControlSettings.getMaxOutstandingRequestBytes(); - this.behavior = flowControlSettings.getLimitExceededBehavior(); - this.lock = new ReentrantLock(); - } - - private void notifyNextAcquires() { - if (!awaitingMessageAcquires.isEmpty()) { - CountDownLatch awaitingAcquire = awaitingMessageAcquires.getFirst(); - awaitingAcquire.countDown(); - } - if (!awaitingBytesAcquires.isEmpty()) { - CountDownLatch awaitingAcquire = awaitingBytesAcquires.getFirst(); - awaitingAcquire.countDown(); - } - } - - public synchronized void release(long messageSize) { - lock.lock(); - --pendingCount; - pendingSize -= messageSize; - notifyNextAcquires(); - lock.unlock(); - notifyAll(); - } - - public void acquire(long messageSize) throws FlowController.FlowControlException { - lock.lock(); - try { - if (pendingCount >= countLimit - && behavior == FlowController.LimitExceededBehavior.ThrowException) { - throw new FlowController.MaxOutstandingElementCountReachedException(countLimit); - } - if (pendingSize + messageSize >= sizeLimit - && behavior == FlowController.LimitExceededBehavior.ThrowException) { - throw new FlowController.MaxOutstandingRequestBytesReachedException(sizeLimit); - } - - CountDownLatch messageWaiter = null; - while (pendingCount >= countLimit) { - if (messageWaiter == null) { - messageWaiter = new CountDownLatch(1); - awaitingMessageAcquires.addLast(messageWaiter); - } else { - // This message already in line stays at the head of the line. - messageWaiter = new CountDownLatch(1); - awaitingMessageAcquires.set(0, messageWaiter); - } - lock.unlock(); - try { - messageWaiter.await(); - } catch (InterruptedException e) { - LOG.warning("Interrupted while waiting to acquire flow control tokens"); - } - lock.lock(); - } - ++pendingCount; - if (messageWaiter != null) { - awaitingMessageAcquires.removeFirst(); - } - - if (!awaitingMessageAcquires.isEmpty() && pendingCount < countLimit) { - awaitingMessageAcquires.getFirst().countDown(); - } - - // Now acquire space for bytes. - CountDownLatch bytesWaiter = null; - Long bytesRemaining = messageSize; - while (pendingSize + messageSize >= sizeLimit) { - if (bytesWaiter == null) { - // This message gets added to the back of the line. - bytesWaiter = new CountDownLatch(1); - awaitingBytesAcquires.addLast(bytesWaiter); - } else { - // This message already in line stays at the head of the line. 
- bytesWaiter = new CountDownLatch(1); - awaitingBytesAcquires.set(0, bytesWaiter); - } - lock.unlock(); - try { - bytesWaiter.await(); - } catch (InterruptedException e) { - LOG.warning("Interrupted while waiting to acquire flow control tokens"); - } - lock.lock(); - } - - pendingSize += messageSize; - if (bytesWaiter != null) { - awaitingBytesAcquires.removeFirst(); - } - // There may be some surplus bytes left; let the next message waiting for bytes have some. - if (!awaitingBytesAcquires.isEmpty() && pendingSize < sizeLimit) { - awaitingBytesAcquires.getFirst().countDown(); - } - } finally { - lock.unlock(); - } - } - - public synchronized void waitComplete(long timeoutMillis) throws InterruptedException { - long end = System.currentTimeMillis() + timeoutMillis; - lock.lock(); - try { - while (pendingCount > 0 && (timeoutMillis == 0 || end > System.currentTimeMillis())) { - lock.unlock(); - try { - wait(timeoutMillis == 0 ? 0 : end - System.currentTimeMillis()); - } catch (InterruptedException e) { - throw e; - } - lock.lock(); - } - if (pendingCount > 0) { - throw new InterruptedException("Wait timeout"); - } - } finally { - lock.unlock(); - } - } - - @InternalApi - public long pendingCount() { - return pendingCount; - } - - @InternalApi - public long pendingSize() { - return pendingSize; - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/WriterCache.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/WriterCache.java deleted file mode 100644 index 4aa4074f96..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/WriterCache.java +++ /dev/null @@ -1,193 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.common.annotations.VisibleForTesting; -import com.google.common.base.Preconditions; -import com.google.common.cache.Cache; -import com.google.common.cache.CacheBuilder; -import com.google.common.cache.RemovalListener; -import com.google.common.cache.RemovalNotification; -import com.google.protobuf.Descriptors.Descriptor; -import java.io.IOException; -import java.util.concurrent.ConcurrentMap; -import java.util.logging.Logger; -import java.util.regex.Matcher; -import java.util.regex.Pattern; - -/** - * A cache of StreamWriters that can be looked up by Table Name. The entries will expire after 5 - * minutes if not used. Code sample: WriterCache cache = WriterCache.getInstance(); StreamWriter - * writer = cache.getWriter(); // Use... 
cache.returnWriter(writer); - * - * @deprecated No longer supported - */ -@Deprecated -public class WriterCache { - private static final Logger LOG = Logger.getLogger(WriterCache.class.getName()); - - private static String tablePatternString = "(projects/[^/]+/datasets/[^/]+/tables/[^/]+)"; - private static Pattern tablePattern = Pattern.compile(tablePatternString); - - private static WriterCache instance; - private Cache> writerCache; - - // Maximum number of tables to hold in the cache, once the maxium exceeded, the cache will be - // evicted based on least recent used. - private static final int MAX_TABLE_ENTRY = 100; - private static final int MAX_WRITERS_PER_TABLE = 2; - - private final BigQueryWriteClient stub; - private final SchemaCompatibility compat; - - private WriterCache(BigQueryWriteClient stub, int maxTableEntry, SchemaCompatibility compat) { - this.stub = stub; - this.compat = compat; - writerCache = - CacheBuilder.newBuilder() - .maximumSize(maxTableEntry) - .removalListener( - new RemovalListener>() { - @Override - public void onRemoval( - RemovalNotification> - removalNotification) { - removalNotification.getValue().invalidateAll(); - } - }) - .>build(); - } - - public static WriterCache getInstance() throws IOException { - if (instance == null) { - BigQueryWriteSettings stubSettings = BigQueryWriteSettings.newBuilder().build(); - BigQueryWriteClient stub = BigQueryWriteClient.create(stubSettings); - instance = new WriterCache(stub, MAX_TABLE_ENTRY, SchemaCompatibility.getInstance()); - } - return instance; - } - - /** Returns a cache with custom stub used by test. */ - @VisibleForTesting - public static WriterCache getTestInstance( - BigQueryWriteClient stub, int maxTableEntry, SchemaCompatibility compat) { - Preconditions.checkNotNull(stub, "Stub is null."); - Preconditions.checkNotNull(stub, "Compat is null."); - return new WriterCache(stub, maxTableEntry, compat); - } - - /** Returns an entry with {@code StreamWriter} and expiration time in millis. */ - private String CreateNewStream(String tableName) { - Stream.WriteStream stream = - Stream.WriteStream.newBuilder().setType(Stream.WriteStream.Type.COMMITTED).build(); - stream = - stub.createWriteStream( - Storage.CreateWriteStreamRequest.newBuilder() - .setParent(tableName) - .setWriteStream(stream) - .build()); - LOG.info("Created write stream:" + stream.getName()); - return stream.getName(); - } - - StreamWriter CreateNewWriter(String streamName) - throws IllegalArgumentException, IOException, InterruptedException { - return StreamWriter.newBuilder(streamName) - .setChannelProvider(stub.getSettings().getTransportChannelProvider()) - .setCredentialsProvider(stub.getSettings().getCredentialsProvider()) - .setExecutorProvider(stub.getSettings().getExecutorProvider()) - .build(); - } - /** - * Gets a writer for a given table with a given user schema from global cache. 
- * - * @param tableName - * @param userSchema - * @return - * @throws Exception - */ - public StreamWriter getTableWriter(String tableName, Descriptor userSchema) - throws IllegalArgumentException, IOException, InterruptedException { - Preconditions.checkNotNull(tableName, "TableName is null."); - Preconditions.checkNotNull(tableName, "UserSchema is null."); - Matcher matcher = tablePattern.matcher(tableName); - if (!matcher.matches()) { - throw new IllegalArgumentException("Invalid table name: " + tableName); - } - - String streamName = null; - Boolean streamExpired = false; - StreamWriter writer = null; - Cache tableEntry = null; - - synchronized (this) { - tableEntry = writerCache.getIfPresent(tableName); - if (tableEntry != null) { - writer = tableEntry.getIfPresent(userSchema); - if (writer != null) { - return writer; - } - compat.check(tableName, userSchema); - streamName = CreateNewStream(tableName); - writer = CreateNewWriter(streamName); - tableEntry.put(userSchema, writer); - } else { - compat.check(tableName, userSchema); - streamName = CreateNewStream(tableName); - tableEntry = - CacheBuilder.newBuilder() - .maximumSize(MAX_WRITERS_PER_TABLE) - .removalListener( - new RemovalListener() { - @Override - public void onRemoval( - RemovalNotification removalNotification) { - removalNotification.getValue().close(); - } - }) - .build(); - writer = CreateNewWriter(streamName); - tableEntry.put(userSchema, writer); - writerCache.put(tableName, tableEntry); - } - } - - return writer; - } - - /** Clear the cache and close all the writers in the cache. */ - public void clear() { - synchronized (this) { - ConcurrentMap> map = writerCache.asMap(); - for (String key : map.keySet()) { - Cache entry = writerCache.getIfPresent(key); - ConcurrentMap entryMap = entry.asMap(); - for (Descriptor descriptor : entryMap.keySet()) { - StreamWriter writer = entry.getIfPresent(descriptor); - writer.close(); - } - } - writerCache.cleanUp(); - } - } - - @VisibleForTesting - public long cachedTableCount() { - synchronized (writerCache) { - return writerCache.size(); - } - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/gapic_metadata.json b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/gapic_metadata.json deleted file mode 100644 index 8c90d24263..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/gapic_metadata.json +++ /dev/null @@ -1,36 +0,0 @@ -{ - "schema": "1.0", - "comment": "This file maps proto services/RPCs to the corresponding library clients/methods", - "language": "java", - "protoPackage": "google.cloud.bigquery.storage.v1alpha2", - "libraryPackage": "com.google.cloud.bigquery.storage.v1alpha2", - "services": { - "BigQueryWrite": { - "clients": { - "grpc": { - "libraryClient": "BigQueryWriteClient", - "rpcs": { - "AppendRows": { - "methods": ["appendRowsCallable"] - }, - "BatchCommitWriteStreams": { - "methods": ["batchCommitWriteStreams", "batchCommitWriteStreams", "batchCommitWriteStreams", "batchCommitWriteStreamsCallable"] - }, - "CreateWriteStream": { - "methods": ["createWriteStream", "createWriteStream", "createWriteStream", "createWriteStreamCallable"] - }, - "FinalizeWriteStream": { - "methods": ["finalizeWriteStream", "finalizeWriteStream", "finalizeWriteStream", "finalizeWriteStreamCallable"] - }, - "FlushRows": { - "methods": ["flushRows", "flushRows", "flushRows", "flushRowsCallable"] - }, - "GetWriteStream": { - "methods": 
["getWriteStream", "getWriteStream", "getWriteStream", "getWriteStreamCallable"] - } - } - } - } - } - } -} \ No newline at end of file diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/package-info.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/package-info.java deleted file mode 100644 index 0f7114bb96..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/package-info.java +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -/** - * The interfaces provided are listed below, along with usage samples. - * - *

<p>======================= BigQueryWriteClient =======================
- *
- * <p>Service Description: BigQuery Write API.
- *
- * <p>The Write API can be used to write data to BigQuery.
- *
- * <p>Sample for BigQueryWriteClient:
- *
- * <pre>{@code
- * try (BigQueryWriteClient bigQueryWriteClient = BigQueryWriteClient.create()) {
- *   TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]");
- *   Stream.WriteStream writeStream = Stream.WriteStream.newBuilder().build();
- *   Stream.WriteStream response = bigQueryWriteClient.createWriteStream(parent, writeStream);
- * }
- * }</pre>
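The generated sample above stops at stream creation. A hedged companion sketch of the rest of the write lifecycle for PENDING streams follows; Type.PENDING, setName, setParent, and addWriteStreams are assumptions inferred from the request parameter extractors in GrpcBigQueryWriteStub later in this patch, not taken from the generated sample.

import com.google.cloud.bigquery.storage.v1alpha2.BigQueryWriteClient;
import com.google.cloud.bigquery.storage.v1alpha2.Storage;
import com.google.cloud.bigquery.storage.v1alpha2.Stream;

public final class PendingStreamCommitSketch {
  public static void main(String[] args) throws Exception {
    String table = "projects/[PROJECT]/datasets/[DATASET]/tables/[TABLE]";
    try (BigQueryWriteClient client = BigQueryWriteClient.create()) {
      // PENDING streams buffer appended rows until finalized and committed.
      Stream.WriteStream stream =
          client.createWriteStream(
              table,
              Stream.WriteStream.newBuilder().setType(Stream.WriteStream.Type.PENDING).build());
      // ... append rows via client.appendRowsCallable(), then seal and commit:
      client.finalizeWriteStream(
          Storage.FinalizeWriteStreamRequest.newBuilder().setName(stream.getName()).build());
      client.batchCommitWriteStreams(
          Storage.BatchCommitWriteStreamsRequest.newBuilder()
              .setParent(table)
              .addWriteStreams(stream.getName())
              .build());
    }
  }
}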
- */ -@Generated("by gapic-generator-java") -package com.google.cloud.bigquery.storage.v1alpha2; - -import javax.annotation.Generated; diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java deleted file mode 100644 index eef778c3ff..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java +++ /dev/null @@ -1,71 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1alpha2.stub; - -import com.google.api.core.BetaApi; -import com.google.api.gax.core.BackgroundResource; -import com.google.api.gax.rpc.BidiStreamingCallable; -import com.google.api.gax.rpc.UnaryCallable; -import com.google.cloud.bigquery.storage.v1alpha2.Storage; -import com.google.cloud.bigquery.storage.v1alpha2.Stream; -import javax.annotation.Generated; - -// AUTO-GENERATED DOCUMENTATION AND CLASS. -/** - * Base stub class for the BigQueryWrite service API. - * - *
<p>
This class is for advanced usage and reflects the underlying API directly. - * - * @deprecated This class is deprecated and will be removed in the next major version update. - */ -@BetaApi -@Deprecated -@Generated("by gapic-generator-java") -public abstract class BigQueryWriteStub implements BackgroundResource { - - public UnaryCallable - createWriteStreamCallable() { - throw new UnsupportedOperationException("Not implemented: createWriteStreamCallable()"); - } - - public BidiStreamingCallable - appendRowsCallable() { - throw new UnsupportedOperationException("Not implemented: appendRowsCallable()"); - } - - public UnaryCallable getWriteStreamCallable() { - throw new UnsupportedOperationException("Not implemented: getWriteStreamCallable()"); - } - - public UnaryCallable - finalizeWriteStreamCallable() { - throw new UnsupportedOperationException("Not implemented: finalizeWriteStreamCallable()"); - } - - public UnaryCallable< - Storage.BatchCommitWriteStreamsRequest, Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreamsCallable() { - throw new UnsupportedOperationException("Not implemented: batchCommitWriteStreamsCallable()"); - } - - public UnaryCallable flushRowsCallable() { - throw new UnsupportedOperationException("Not implemented: flushRowsCallable()"); - } - - @Override - public abstract void close(); -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java deleted file mode 100644 index 5b15f6cde2..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java +++ /dev/null @@ -1,452 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.google.cloud.bigquery.storage.v1alpha2.stub; - -import com.google.api.core.ApiFunction; -import com.google.api.core.BetaApi; -import com.google.api.gax.core.GaxProperties; -import com.google.api.gax.core.GoogleCredentialsProvider; -import com.google.api.gax.core.InstantiatingExecutorProvider; -import com.google.api.gax.grpc.GaxGrpcProperties; -import com.google.api.gax.grpc.GrpcTransportChannel; -import com.google.api.gax.grpc.InstantiatingGrpcChannelProvider; -import com.google.api.gax.retrying.RetrySettings; -import com.google.api.gax.rpc.ApiClientHeaderProvider; -import com.google.api.gax.rpc.ClientContext; -import com.google.api.gax.rpc.StatusCode; -import com.google.api.gax.rpc.StreamingCallSettings; -import com.google.api.gax.rpc.StubSettings; -import com.google.api.gax.rpc.TransportChannelProvider; -import com.google.api.gax.rpc.UnaryCallSettings; -import com.google.cloud.bigquery.storage.v1alpha2.Storage; -import com.google.cloud.bigquery.storage.v1alpha2.Stream; -import com.google.common.collect.ImmutableList; -import com.google.common.collect.ImmutableMap; -import com.google.common.collect.ImmutableSet; -import com.google.common.collect.Lists; -import java.io.IOException; -import java.util.List; -import javax.annotation.Generated; -import org.threeten.bp.Duration; - -// AUTO-GENERATED DOCUMENTATION AND CLASS. -/** - * Settings class to configure an instance of {@link BigQueryWriteStub}. - * - *

<p>The default instance has everything set to sensible defaults:
- *
- * <ul>
- *   <li>The default service address (bigquerystorage.googleapis.com) and default port (443) are
- *       used.
- *   <li>Credentials are acquired automatically through Application Default Credentials.
- *   <li>Retries are configured for idempotent methods but not for non-idempotent methods.
- * </ul>
- *
- * <p>The builder of this class is recursive, so contained classes are themselves builders. When
- * build() is called, the tree of builders is called to create the complete settings object.
- *
- * <p>For example, to set the total timeout of createWriteStream to 30 seconds:
- *
- * <pre>{@code
- * BigQueryWriteStubSettings.Builder bigQueryWriteSettingsBuilder =
- *     BigQueryWriteStubSettings.newBuilder();
- * bigQueryWriteSettingsBuilder
- *     .createWriteStreamSettings()
- *     .setRetrySettings(
- *         bigQueryWriteSettingsBuilder
- *             .createWriteStreamSettings()
- *             .getRetrySettings()
- *             .toBuilder()
- *             .setTotalTimeout(Duration.ofSeconds(30))
- *             .build());
- * BigQueryWriteStubSettings bigQueryWriteSettings = bigQueryWriteSettingsBuilder.build();
- * }</pre>
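The timeout that example overrides comes from the retry tables defined further down in this class. As a standalone sketch of what those defaults amount to, using only gax's public RetrySettings builder with the values copied from retry_policy_0_params below:

import com.google.api.gax.retrying.RetrySettings;
import org.threeten.bp.Duration;

public final class RetryPolicyZeroSketch {
  public static void main(String[] args) {
    // Exponential backoff: first retry after 100 ms, each delay 1.3x the last,
    // capped at 60 s; RPC attempts and the overall call both time out at 10 min.
    RetrySettings retryPolicy0 =
        RetrySettings.newBuilder()
            .setInitialRetryDelay(Duration.ofMillis(100L))
            .setRetryDelayMultiplier(1.3)
            .setMaxRetryDelay(Duration.ofMillis(60000L))
            .setInitialRpcTimeout(Duration.ofMillis(600000L))
            .setRpcTimeoutMultiplier(1.0)
            .setMaxRpcTimeout(Duration.ofMillis(600000L))
            .setTotalTimeout(Duration.ofMillis(600000L))
            .build();
    System.out.println("total timeout: " + retryPolicy0.getTotalTimeout());
  }
}

createWriteStream pairs these parameters with retry_policy_0_codes (DEADLINE_EXCEEDED, UNAVAILABLE, RESOURCE_EXHAUSTED), as wired up in initDefaults() below.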
- * - * @deprecated This class is deprecated and will be removed in the next major version update. - */ -@BetaApi -@Deprecated -@Generated("by gapic-generator-java") -public class BigQueryWriteStubSettings extends StubSettings { - /** The default scopes of the service. */ - private static final ImmutableList DEFAULT_SERVICE_SCOPES = - ImmutableList.builder() - .add("https://www.googleapis.com/auth/bigquery") - .add("https://www.googleapis.com/auth/bigquery.insertdata") - .add("https://www.googleapis.com/auth/cloud-platform") - .build(); - - private final UnaryCallSettings - createWriteStreamSettings; - private final StreamingCallSettings - appendRowsSettings; - private final UnaryCallSettings - getWriteStreamSettings; - private final UnaryCallSettings< - Storage.FinalizeWriteStreamRequest, Storage.FinalizeWriteStreamResponse> - finalizeWriteStreamSettings; - private final UnaryCallSettings< - Storage.BatchCommitWriteStreamsRequest, Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreamsSettings; - private final UnaryCallSettings - flushRowsSettings; - - /** Returns the object with the settings used for calls to createWriteStream. */ - public UnaryCallSettings - createWriteStreamSettings() { - return createWriteStreamSettings; - } - - /** Returns the object with the settings used for calls to appendRows. */ - public StreamingCallSettings - appendRowsSettings() { - return appendRowsSettings; - } - - /** Returns the object with the settings used for calls to getWriteStream. */ - public UnaryCallSettings - getWriteStreamSettings() { - return getWriteStreamSettings; - } - - /** Returns the object with the settings used for calls to finalizeWriteStream. */ - public UnaryCallSettings - finalizeWriteStreamSettings() { - return finalizeWriteStreamSettings; - } - - /** Returns the object with the settings used for calls to batchCommitWriteStreams. */ - public UnaryCallSettings< - Storage.BatchCommitWriteStreamsRequest, Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreamsSettings() { - return batchCommitWriteStreamsSettings; - } - - /** Returns the object with the settings used for calls to flushRows. */ - public UnaryCallSettings - flushRowsSettings() { - return flushRowsSettings; - } - - @BetaApi("A restructuring of stub classes is planned, so this may break in the future") - public BigQueryWriteStub createStub() throws IOException { - if (getTransportChannelProvider() - .getTransportName() - .equals(GrpcTransportChannel.getGrpcTransportName())) { - return GrpcBigQueryWriteStub.create(this); - } - throw new UnsupportedOperationException( - String.format( - "Transport not supported: %s", getTransportChannelProvider().getTransportName())); - } - - /** Returns a builder for the default ExecutorProvider for this service. */ - public static InstantiatingExecutorProvider.Builder defaultExecutorProviderBuilder() { - return InstantiatingExecutorProvider.newBuilder(); - } - - /** Returns the default service endpoint. */ - public static String getDefaultEndpoint() { - return "bigquerystorage.googleapis.com:443"; - } - - /** Returns the default service scopes. */ - public static List getDefaultServiceScopes() { - return DEFAULT_SERVICE_SCOPES; - } - - /** Returns a builder for the default credentials for this service. */ - public static GoogleCredentialsProvider.Builder defaultCredentialsProviderBuilder() { - return GoogleCredentialsProvider.newBuilder().setScopesToApply(DEFAULT_SERVICE_SCOPES); - } - - /** Returns a builder for the default ChannelProvider for this service. 
*/ - public static InstantiatingGrpcChannelProvider.Builder defaultGrpcTransportProviderBuilder() { - return InstantiatingGrpcChannelProvider.newBuilder() - .setMaxInboundMessageSize(Integer.MAX_VALUE); - } - - public static TransportChannelProvider defaultTransportChannelProvider() { - return defaultGrpcTransportProviderBuilder().build(); - } - - @BetaApi("The surface for customizing headers is not stable yet and may change in the future.") - public static ApiClientHeaderProvider.Builder defaultApiClientHeaderProviderBuilder() { - return ApiClientHeaderProvider.newBuilder() - .setGeneratedLibToken( - "gapic", GaxProperties.getLibraryVersion(BigQueryWriteStubSettings.class)) - .setTransportToken( - GaxGrpcProperties.getGrpcTokenName(), GaxGrpcProperties.getGrpcVersion()); - } - - /** Returns a new builder for this class. */ - public static Builder newBuilder() { - return Builder.createDefault(); - } - - /** Returns a new builder for this class. */ - public static Builder newBuilder(ClientContext clientContext) { - return new Builder(clientContext); - } - - /** Returns a builder containing all the values of this settings class. */ - public Builder toBuilder() { - return new Builder(this); - } - - protected BigQueryWriteStubSettings(Builder settingsBuilder) throws IOException { - super(settingsBuilder); - - createWriteStreamSettings = settingsBuilder.createWriteStreamSettings().build(); - appendRowsSettings = settingsBuilder.appendRowsSettings().build(); - getWriteStreamSettings = settingsBuilder.getWriteStreamSettings().build(); - finalizeWriteStreamSettings = settingsBuilder.finalizeWriteStreamSettings().build(); - batchCommitWriteStreamsSettings = settingsBuilder.batchCommitWriteStreamsSettings().build(); - flushRowsSettings = settingsBuilder.flushRowsSettings().build(); - } - - /** Builder for BigQueryWriteStubSettings. 
*/ - public static class Builder extends StubSettings.Builder { - private final ImmutableList> unaryMethodSettingsBuilders; - private final UnaryCallSettings.Builder - createWriteStreamSettings; - private final StreamingCallSettings.Builder< - Storage.AppendRowsRequest, Storage.AppendRowsResponse> - appendRowsSettings; - private final UnaryCallSettings.Builder - getWriteStreamSettings; - private final UnaryCallSettings.Builder< - Storage.FinalizeWriteStreamRequest, Storage.FinalizeWriteStreamResponse> - finalizeWriteStreamSettings; - private final UnaryCallSettings.Builder< - Storage.BatchCommitWriteStreamsRequest, Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreamsSettings; - private final UnaryCallSettings.Builder - flushRowsSettings; - private static final ImmutableMap> - RETRYABLE_CODE_DEFINITIONS; - - static { - ImmutableMap.Builder> definitions = - ImmutableMap.builder(); - definitions.put( - "retry_policy_0_codes", - ImmutableSet.copyOf( - Lists.newArrayList( - StatusCode.Code.DEADLINE_EXCEEDED, - StatusCode.Code.UNAVAILABLE, - StatusCode.Code.RESOURCE_EXHAUSTED))); - definitions.put( - "retry_policy_1_codes", - ImmutableSet.copyOf( - Lists.newArrayList( - StatusCode.Code.UNAVAILABLE, StatusCode.Code.RESOURCE_EXHAUSTED))); - definitions.put( - "retry_policy_2_codes", - ImmutableSet.copyOf( - Lists.newArrayList( - StatusCode.Code.DEADLINE_EXCEEDED, StatusCode.Code.UNAVAILABLE))); - definitions.put("no_retry_codes", ImmutableSet.copyOf(Lists.newArrayList())); - RETRYABLE_CODE_DEFINITIONS = definitions.build(); - } - - private static final ImmutableMap RETRY_PARAM_DEFINITIONS; - - static { - ImmutableMap.Builder definitions = ImmutableMap.builder(); - RetrySettings settings = null; - settings = - RetrySettings.newBuilder() - .setInitialRetryDelay(Duration.ofMillis(100L)) - .setRetryDelayMultiplier(1.3) - .setMaxRetryDelay(Duration.ofMillis(60000L)) - .setInitialRpcTimeout(Duration.ofMillis(600000L)) - .setRpcTimeoutMultiplier(1.0) - .setMaxRpcTimeout(Duration.ofMillis(600000L)) - .setTotalTimeout(Duration.ofMillis(600000L)) - .build(); - definitions.put("retry_policy_0_params", settings); - settings = - RetrySettings.newBuilder() - .setInitialRetryDelay(Duration.ofMillis(100L)) - .setRetryDelayMultiplier(1.3) - .setMaxRetryDelay(Duration.ofMillis(60000L)) - .setInitialRpcTimeout(Duration.ofMillis(86400000L)) - .setRpcTimeoutMultiplier(1.0) - .setMaxRpcTimeout(Duration.ofMillis(86400000L)) - .setTotalTimeout(Duration.ofMillis(86400000L)) - .build(); - definitions.put("retry_policy_1_params", settings); - settings = - RetrySettings.newBuilder() - .setInitialRetryDelay(Duration.ofMillis(100L)) - .setRetryDelayMultiplier(1.3) - .setMaxRetryDelay(Duration.ofMillis(60000L)) - .setInitialRpcTimeout(Duration.ofMillis(600000L)) - .setRpcTimeoutMultiplier(1.0) - .setMaxRpcTimeout(Duration.ofMillis(600000L)) - .setTotalTimeout(Duration.ofMillis(600000L)) - .build(); - definitions.put("retry_policy_2_params", settings); - settings = RetrySettings.newBuilder().setRpcTimeoutMultiplier(1.0).build(); - definitions.put("no_retry_params", settings); - RETRY_PARAM_DEFINITIONS = definitions.build(); - } - - protected Builder() { - this(((ClientContext) null)); - } - - protected Builder(ClientContext clientContext) { - super(clientContext); - - createWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); - appendRowsSettings = StreamingCallSettings.newBuilder(); - getWriteStreamSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); - finalizeWriteStreamSettings = 
UnaryCallSettings.newUnaryCallSettingsBuilder(); - batchCommitWriteStreamsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); - flushRowsSettings = UnaryCallSettings.newUnaryCallSettingsBuilder(); - - unaryMethodSettingsBuilders = - ImmutableList.>of( - createWriteStreamSettings, - getWriteStreamSettings, - finalizeWriteStreamSettings, - batchCommitWriteStreamsSettings, - flushRowsSettings); - initDefaults(this); - } - - protected Builder(BigQueryWriteStubSettings settings) { - super(settings); - - createWriteStreamSettings = settings.createWriteStreamSettings.toBuilder(); - appendRowsSettings = settings.appendRowsSettings.toBuilder(); - getWriteStreamSettings = settings.getWriteStreamSettings.toBuilder(); - finalizeWriteStreamSettings = settings.finalizeWriteStreamSettings.toBuilder(); - batchCommitWriteStreamsSettings = settings.batchCommitWriteStreamsSettings.toBuilder(); - flushRowsSettings = settings.flushRowsSettings.toBuilder(); - - unaryMethodSettingsBuilders = - ImmutableList.>of( - createWriteStreamSettings, - getWriteStreamSettings, - finalizeWriteStreamSettings, - batchCommitWriteStreamsSettings, - flushRowsSettings); - } - - private static Builder createDefault() { - Builder builder = new Builder(((ClientContext) null)); - - builder.setTransportChannelProvider(defaultTransportChannelProvider()); - builder.setCredentialsProvider(defaultCredentialsProviderBuilder().build()); - builder.setInternalHeaderProvider(defaultApiClientHeaderProviderBuilder().build()); - builder.setEndpoint(getDefaultEndpoint()); - - return initDefaults(builder); - } - - private static Builder initDefaults(Builder builder) { - builder - .createWriteStreamSettings() - .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_0_codes")) - .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_0_params")); - - builder - .getWriteStreamSettings() - .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) - .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); - - builder - .finalizeWriteStreamSettings() - .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) - .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); - - builder - .batchCommitWriteStreamsSettings() - .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("retry_policy_2_codes")) - .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("retry_policy_2_params")); - - builder - .flushRowsSettings() - .setRetryableCodes(RETRYABLE_CODE_DEFINITIONS.get("no_retry_codes")) - .setRetrySettings(RETRY_PARAM_DEFINITIONS.get("no_retry_params")); - - return builder; - } - - // NEXT_MAJOR_VER: remove 'throws Exception'. - /** - * Applies the given settings updater function to all of the unary API methods in this service. - * - *
<p>
Note: This method does not support applying settings to streaming methods. - */ - public Builder applyToAllUnaryMethods( - ApiFunction, Void> settingsUpdater) throws Exception { - super.applyToAllUnaryMethods(unaryMethodSettingsBuilders, settingsUpdater); - return this; - } - - public ImmutableList> unaryMethodSettingsBuilders() { - return unaryMethodSettingsBuilders; - } - - /** Returns the builder for the settings used for calls to createWriteStream. */ - public UnaryCallSettings.Builder - createWriteStreamSettings() { - return createWriteStreamSettings; - } - - /** Returns the builder for the settings used for calls to appendRows. */ - public StreamingCallSettings.Builder - appendRowsSettings() { - return appendRowsSettings; - } - - /** Returns the builder for the settings used for calls to getWriteStream. */ - public UnaryCallSettings.Builder - getWriteStreamSettings() { - return getWriteStreamSettings; - } - - /** Returns the builder for the settings used for calls to finalizeWriteStream. */ - public UnaryCallSettings.Builder< - Storage.FinalizeWriteStreamRequest, Storage.FinalizeWriteStreamResponse> - finalizeWriteStreamSettings() { - return finalizeWriteStreamSettings; - } - - /** Returns the builder for the settings used for calls to batchCommitWriteStreams. */ - public UnaryCallSettings.Builder< - Storage.BatchCommitWriteStreamsRequest, Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreamsSettings() { - return batchCommitWriteStreamsSettings; - } - - /** Returns the builder for the settings used for calls to flushRows. */ - public UnaryCallSettings.Builder - flushRowsSettings() { - return flushRowsSettings; - } - - @Override - public BigQueryWriteStubSettings build() throws IOException { - return new BigQueryWriteStubSettings(this); - } - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteCallableFactory.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteCallableFactory.java deleted file mode 100644 index fb96f12d4c..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteCallableFactory.java +++ /dev/null @@ -1,118 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.google.cloud.bigquery.storage.v1alpha2.stub; - -import com.google.api.core.BetaApi; -import com.google.api.gax.grpc.GrpcCallSettings; -import com.google.api.gax.grpc.GrpcCallableFactory; -import com.google.api.gax.grpc.GrpcStubCallableFactory; -import com.google.api.gax.rpc.BatchingCallSettings; -import com.google.api.gax.rpc.BidiStreamingCallable; -import com.google.api.gax.rpc.ClientContext; -import com.google.api.gax.rpc.ClientStreamingCallable; -import com.google.api.gax.rpc.OperationCallSettings; -import com.google.api.gax.rpc.OperationCallable; -import com.google.api.gax.rpc.PagedCallSettings; -import com.google.api.gax.rpc.ServerStreamingCallSettings; -import com.google.api.gax.rpc.ServerStreamingCallable; -import com.google.api.gax.rpc.StreamingCallSettings; -import com.google.api.gax.rpc.UnaryCallSettings; -import com.google.api.gax.rpc.UnaryCallable; -import com.google.longrunning.Operation; -import com.google.longrunning.stub.OperationsStub; -import javax.annotation.Generated; - -// AUTO-GENERATED DOCUMENTATION AND CLASS. -/** - * gRPC callable factory implementation for the BigQueryWrite service API. - * - *
<p>
This class is for advanced usage. - * - * @deprecated This class is deprecated and will be removed in the next major version update. - */ -@BetaApi -@Deprecated -@Generated("by gapic-generator-java") -public class GrpcBigQueryWriteCallableFactory implements GrpcStubCallableFactory { - - @Override - public UnaryCallable createUnaryCallable( - GrpcCallSettings grpcCallSettings, - UnaryCallSettings callSettings, - ClientContext clientContext) { - return GrpcCallableFactory.createUnaryCallable(grpcCallSettings, callSettings, clientContext); - } - - @Override - public - UnaryCallable createPagedCallable( - GrpcCallSettings grpcCallSettings, - PagedCallSettings callSettings, - ClientContext clientContext) { - return GrpcCallableFactory.createPagedCallable(grpcCallSettings, callSettings, clientContext); - } - - @Override - public UnaryCallable createBatchingCallable( - GrpcCallSettings grpcCallSettings, - BatchingCallSettings callSettings, - ClientContext clientContext) { - return GrpcCallableFactory.createBatchingCallable( - grpcCallSettings, callSettings, clientContext); - } - - @Override - public - OperationCallable createOperationCallable( - GrpcCallSettings grpcCallSettings, - OperationCallSettings callSettings, - ClientContext clientContext, - OperationsStub operationsStub) { - return GrpcCallableFactory.createOperationCallable( - grpcCallSettings, callSettings, clientContext, operationsStub); - } - - @Override - public - BidiStreamingCallable createBidiStreamingCallable( - GrpcCallSettings grpcCallSettings, - StreamingCallSettings callSettings, - ClientContext clientContext) { - return GrpcCallableFactory.createBidiStreamingCallable( - grpcCallSettings, callSettings, clientContext); - } - - @Override - public - ServerStreamingCallable createServerStreamingCallable( - GrpcCallSettings grpcCallSettings, - ServerStreamingCallSettings callSettings, - ClientContext clientContext) { - return GrpcCallableFactory.createServerStreamingCallable( - grpcCallSettings, callSettings, clientContext); - } - - @Override - public - ClientStreamingCallable createClientStreamingCallable( - GrpcCallSettings grpcCallSettings, - StreamingCallSettings callSettings, - ClientContext clientContext) { - return GrpcCallableFactory.createClientStreamingCallable( - grpcCallSettings, callSettings, clientContext); - } -} diff --git a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java b/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java deleted file mode 100644 index 98ae87d5d4..0000000000 --- a/google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java +++ /dev/null @@ -1,372 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.google.cloud.bigquery.storage.v1alpha2.stub; - -import com.google.api.core.BetaApi; -import com.google.api.gax.core.BackgroundResource; -import com.google.api.gax.core.BackgroundResourceAggregation; -import com.google.api.gax.grpc.GrpcCallSettings; -import com.google.api.gax.grpc.GrpcStubCallableFactory; -import com.google.api.gax.rpc.BidiStreamingCallable; -import com.google.api.gax.rpc.ClientContext; -import com.google.api.gax.rpc.RequestParamsExtractor; -import com.google.api.gax.rpc.UnaryCallable; -import com.google.cloud.bigquery.storage.v1alpha2.Storage; -import com.google.cloud.bigquery.storage.v1alpha2.Stream; -import com.google.common.collect.ImmutableMap; -import com.google.longrunning.stub.GrpcOperationsStub; -import io.grpc.MethodDescriptor; -import io.grpc.protobuf.ProtoUtils; -import java.io.IOException; -import java.util.Map; -import java.util.concurrent.TimeUnit; -import javax.annotation.Generated; - -// AUTO-GENERATED DOCUMENTATION AND CLASS. -/** - * gRPC stub implementation for the BigQueryWrite service API. - * - *
<p>
This class is for advanced usage and reflects the underlying API directly. - * - * @deprecated This class is deprecated and will be removed in the next major version update. - */ -@BetaApi -@Deprecated -@Generated("by gapic-generator-java") -public class GrpcBigQueryWriteStub extends BigQueryWriteStub { - private static final MethodDescriptor - createWriteStreamMethodDescriptor = - MethodDescriptor.newBuilder() - .setType(MethodDescriptor.MethodType.UNARY) - .setFullMethodName( - "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/CreateWriteStream") - .setRequestMarshaller( - ProtoUtils.marshaller(Storage.CreateWriteStreamRequest.getDefaultInstance())) - .setResponseMarshaller(ProtoUtils.marshaller(Stream.WriteStream.getDefaultInstance())) - .build(); - - private static final MethodDescriptor - appendRowsMethodDescriptor = - MethodDescriptor.newBuilder() - .setType(MethodDescriptor.MethodType.BIDI_STREAMING) - .setFullMethodName("google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/AppendRows") - .setRequestMarshaller( - ProtoUtils.marshaller(Storage.AppendRowsRequest.getDefaultInstance())) - .setResponseMarshaller( - ProtoUtils.marshaller(Storage.AppendRowsResponse.getDefaultInstance())) - .build(); - - private static final MethodDescriptor - getWriteStreamMethodDescriptor = - MethodDescriptor.newBuilder() - .setType(MethodDescriptor.MethodType.UNARY) - .setFullMethodName( - "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/GetWriteStream") - .setRequestMarshaller( - ProtoUtils.marshaller(Storage.GetWriteStreamRequest.getDefaultInstance())) - .setResponseMarshaller(ProtoUtils.marshaller(Stream.WriteStream.getDefaultInstance())) - .build(); - - private static final MethodDescriptor< - Storage.FinalizeWriteStreamRequest, Storage.FinalizeWriteStreamResponse> - finalizeWriteStreamMethodDescriptor = - MethodDescriptor - .newBuilder() - .setType(MethodDescriptor.MethodType.UNARY) - .setFullMethodName( - "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/FinalizeWriteStream") - .setRequestMarshaller( - ProtoUtils.marshaller(Storage.FinalizeWriteStreamRequest.getDefaultInstance())) - .setResponseMarshaller( - ProtoUtils.marshaller(Storage.FinalizeWriteStreamResponse.getDefaultInstance())) - .build(); - - private static final MethodDescriptor< - Storage.BatchCommitWriteStreamsRequest, Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreamsMethodDescriptor = - MethodDescriptor - . 
- newBuilder() - .setType(MethodDescriptor.MethodType.UNARY) - .setFullMethodName( - "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/BatchCommitWriteStreams") - .setRequestMarshaller( - ProtoUtils.marshaller( - Storage.BatchCommitWriteStreamsRequest.getDefaultInstance())) - .setResponseMarshaller( - ProtoUtils.marshaller( - Storage.BatchCommitWriteStreamsResponse.getDefaultInstance())) - .build(); - - private static final MethodDescriptor - flushRowsMethodDescriptor = - MethodDescriptor.newBuilder() - .setType(MethodDescriptor.MethodType.UNARY) - .setFullMethodName("google.cloud.bigquery.storage.v1alpha2.BigQueryWrite/FlushRows") - .setRequestMarshaller( - ProtoUtils.marshaller(Storage.FlushRowsRequest.getDefaultInstance())) - .setResponseMarshaller( - ProtoUtils.marshaller(Storage.FlushRowsResponse.getDefaultInstance())) - .build(); - - private final UnaryCallable - createWriteStreamCallable; - private final BidiStreamingCallable - appendRowsCallable; - private final UnaryCallable - getWriteStreamCallable; - private final UnaryCallable< - Storage.FinalizeWriteStreamRequest, Storage.FinalizeWriteStreamResponse> - finalizeWriteStreamCallable; - private final UnaryCallable< - Storage.BatchCommitWriteStreamsRequest, Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreamsCallable; - private final UnaryCallable - flushRowsCallable; - - private final BackgroundResource backgroundResources; - private final GrpcOperationsStub operationsStub; - private final GrpcStubCallableFactory callableFactory; - - public static final GrpcBigQueryWriteStub create(BigQueryWriteStubSettings settings) - throws IOException { - return new GrpcBigQueryWriteStub(settings, ClientContext.create(settings)); - } - - public static final GrpcBigQueryWriteStub create(ClientContext clientContext) throws IOException { - return new GrpcBigQueryWriteStub(BigQueryWriteStubSettings.newBuilder().build(), clientContext); - } - - public static final GrpcBigQueryWriteStub create( - ClientContext clientContext, GrpcStubCallableFactory callableFactory) throws IOException { - return new GrpcBigQueryWriteStub( - BigQueryWriteStubSettings.newBuilder().build(), clientContext, callableFactory); - } - - /** - * Constructs an instance of GrpcBigQueryWriteStub, using the given settings. This is protected so - * that it is easy to make a subclass, but otherwise, the static factory methods should be - * preferred. - */ - protected GrpcBigQueryWriteStub(BigQueryWriteStubSettings settings, ClientContext clientContext) - throws IOException { - this(settings, clientContext, new GrpcBigQueryWriteCallableFactory()); - } - - /** - * Constructs an instance of GrpcBigQueryWriteStub, using the given settings. This is protected so - * that it is easy to make a subclass, but otherwise, the static factory methods should be - * preferred. 
- */ - protected GrpcBigQueryWriteStub( - BigQueryWriteStubSettings settings, - ClientContext clientContext, - GrpcStubCallableFactory callableFactory) - throws IOException { - this.callableFactory = callableFactory; - this.operationsStub = GrpcOperationsStub.create(clientContext, callableFactory); - - GrpcCallSettings - createWriteStreamTransportSettings = - GrpcCallSettings.newBuilder() - .setMethodDescriptor(createWriteStreamMethodDescriptor) - .setParamsExtractor( - new RequestParamsExtractor() { - @Override - public Map extract(Storage.CreateWriteStreamRequest request) { - ImmutableMap.Builder params = ImmutableMap.builder(); - params.put("parent", String.valueOf(request.getParent())); - return params.build(); - } - }) - .build(); - GrpcCallSettings - appendRowsTransportSettings = - GrpcCallSettings.newBuilder() - .setMethodDescriptor(appendRowsMethodDescriptor) - .setParamsExtractor( - new RequestParamsExtractor() { - @Override - public Map extract(Storage.AppendRowsRequest request) { - ImmutableMap.Builder params = ImmutableMap.builder(); - params.put("write_stream", String.valueOf(request.getWriteStream())); - return params.build(); - } - }) - .build(); - GrpcCallSettings - getWriteStreamTransportSettings = - GrpcCallSettings.newBuilder() - .setMethodDescriptor(getWriteStreamMethodDescriptor) - .setParamsExtractor( - new RequestParamsExtractor() { - @Override - public Map extract(Storage.GetWriteStreamRequest request) { - ImmutableMap.Builder params = ImmutableMap.builder(); - params.put("name", String.valueOf(request.getName())); - return params.build(); - } - }) - .build(); - GrpcCallSettings - finalizeWriteStreamTransportSettings = - GrpcCallSettings - . - newBuilder() - .setMethodDescriptor(finalizeWriteStreamMethodDescriptor) - .setParamsExtractor( - new RequestParamsExtractor() { - @Override - public Map extract( - Storage.FinalizeWriteStreamRequest request) { - ImmutableMap.Builder params = ImmutableMap.builder(); - params.put("name", String.valueOf(request.getName())); - return params.build(); - } - }) - .build(); - GrpcCallSettings< - Storage.BatchCommitWriteStreamsRequest, Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreamsTransportSettings = - GrpcCallSettings - . 
- newBuilder() - .setMethodDescriptor(batchCommitWriteStreamsMethodDescriptor) - .setParamsExtractor( - new RequestParamsExtractor() { - @Override - public Map extract( - Storage.BatchCommitWriteStreamsRequest request) { - ImmutableMap.Builder params = ImmutableMap.builder(); - params.put("parent", String.valueOf(request.getParent())); - return params.build(); - } - }) - .build(); - GrpcCallSettings - flushRowsTransportSettings = - GrpcCallSettings.newBuilder() - .setMethodDescriptor(flushRowsMethodDescriptor) - .setParamsExtractor( - new RequestParamsExtractor() { - @Override - public Map extract(Storage.FlushRowsRequest request) { - ImmutableMap.Builder params = ImmutableMap.builder(); - params.put("write_stream", String.valueOf(request.getWriteStream())); - return params.build(); - } - }) - .build(); - - this.createWriteStreamCallable = - callableFactory.createUnaryCallable( - createWriteStreamTransportSettings, - settings.createWriteStreamSettings(), - clientContext); - this.appendRowsCallable = - callableFactory.createBidiStreamingCallable( - appendRowsTransportSettings, settings.appendRowsSettings(), clientContext); - this.getWriteStreamCallable = - callableFactory.createUnaryCallable( - getWriteStreamTransportSettings, settings.getWriteStreamSettings(), clientContext); - this.finalizeWriteStreamCallable = - callableFactory.createUnaryCallable( - finalizeWriteStreamTransportSettings, - settings.finalizeWriteStreamSettings(), - clientContext); - this.batchCommitWriteStreamsCallable = - callableFactory.createUnaryCallable( - batchCommitWriteStreamsTransportSettings, - settings.batchCommitWriteStreamsSettings(), - clientContext); - this.flushRowsCallable = - callableFactory.createUnaryCallable( - flushRowsTransportSettings, settings.flushRowsSettings(), clientContext); - - this.backgroundResources = - new BackgroundResourceAggregation(clientContext.getBackgroundResources()); - } - - public GrpcOperationsStub getOperationsStub() { - return operationsStub; - } - - @Override - public UnaryCallable - createWriteStreamCallable() { - return createWriteStreamCallable; - } - - @Override - public BidiStreamingCallable - appendRowsCallable() { - return appendRowsCallable; - } - - @Override - public UnaryCallable getWriteStreamCallable() { - return getWriteStreamCallable; - } - - @Override - public UnaryCallable - finalizeWriteStreamCallable() { - return finalizeWriteStreamCallable; - } - - @Override - public UnaryCallable< - Storage.BatchCommitWriteStreamsRequest, Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreamsCallable() { - return batchCommitWriteStreamsCallable; - } - - @Override - public UnaryCallable flushRowsCallable() { - return flushRowsCallable; - } - - @Override - public final void close() { - shutdown(); - } - - @Override - public void shutdown() { - backgroundResources.shutdown(); - } - - @Override - public boolean isShutdown() { - return backgroundResources.isShutdown(); - } - - @Override - public boolean isTerminated() { - return backgroundResources.isTerminated(); - } - - @Override - public void shutdownNow() { - backgroundResources.shutdownNow(); - } - - @Override - public boolean awaitTermination(long duration, TimeUnit unit) throws InterruptedException { - return backgroundResources.awaitTermination(duration, unit); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BQTableSchemaToProtoDescriptorTest.java 
b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BQTableSchemaToProtoDescriptorTest.java deleted file mode 100644 index ae5b9d802c..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BQTableSchemaToProtoDescriptorTest.java +++ /dev/null @@ -1,403 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import static org.junit.Assert.*; -import static org.mockito.Mockito.*; - -import com.google.cloud.bigquery.storage.test.JsonTest.*; -import com.google.cloud.bigquery.storage.test.SchemaTest.*; -import com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema; -import com.google.common.collect.ImmutableMap; -import com.google.protobuf.Descriptors.Descriptor; -import com.google.protobuf.Descriptors.FieldDescriptor; -import java.util.HashMap; -import java.util.Map; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -public class BQTableSchemaToProtoDescriptorTest { - // This is a map between the Table.TableFieldSchema.Type and the descriptor it is supposed to - // produce. The produced descriptor will be used to check against the entry values here. - private static ImmutableMap - BQTableTypeToCorrectProtoDescriptorTest = - new ImmutableMap.Builder() - .put(Table.TableFieldSchema.Type.BOOL, BoolType.getDescriptor()) - .put(Table.TableFieldSchema.Type.BYTES, BytesType.getDescriptor()) - .put(Table.TableFieldSchema.Type.DATE, Int32Type.getDescriptor()) - .put(Table.TableFieldSchema.Type.DATETIME, StringType.getDescriptor()) - .put(Table.TableFieldSchema.Type.DOUBLE, DoubleType.getDescriptor()) - .put(Table.TableFieldSchema.Type.GEOGRAPHY, StringType.getDescriptor()) - .put(Table.TableFieldSchema.Type.INT64, Int64Type.getDescriptor()) - .put(Table.TableFieldSchema.Type.NUMERIC, BytesType.getDescriptor()) - .put(Table.TableFieldSchema.Type.STRING, StringType.getDescriptor()) - .put(Table.TableFieldSchema.Type.TIME, StringType.getDescriptor()) - .put(Table.TableFieldSchema.Type.TIMESTAMP, Int64Type.getDescriptor()) - .build(); - - // Creates mapping from descriptor to how many times it was reused. 
- private void mapDescriptorToCount(Descriptor descriptor, HashMap map) { - for (FieldDescriptor field : descriptor.getFields()) { - if (field.getType() == FieldDescriptor.Type.MESSAGE) { - Descriptor subDescriptor = field.getMessageType(); - String messageName = subDescriptor.getName(); - if (map.containsKey(messageName)) { - map.put(messageName, map.get(messageName) + 1); - } else { - map.put(messageName, 1); - } - mapDescriptorToCount(subDescriptor, map); - } - } - } - - private void isDescriptorEqual(Descriptor convertedProto, Descriptor originalProto) { - // Check same number of fields - assertEquals(convertedProto.getFields().size(), originalProto.getFields().size()); - for (FieldDescriptor convertedField : convertedProto.getFields()) { - // Check field name - FieldDescriptor originalField = originalProto.findFieldByName(convertedField.getName()); - assertNotNull(originalField); - // Check type - FieldDescriptor.Type convertedType = convertedField.getType(); - FieldDescriptor.Type originalType = originalField.getType(); - assertEquals(convertedField.getName(), convertedType, originalType); - // Check mode - assertTrue( - (originalField.isRepeated() == convertedField.isRepeated()) - && (originalField.isRequired() == convertedField.isRequired()) - && (originalField.isOptional() == convertedField.isOptional())); - // Recursively check nested messages - if (convertedType == FieldDescriptor.Type.MESSAGE) { - isDescriptorEqual(convertedField.getMessageType(), originalField.getMessageType()); - } - } - } - - @Test - public void testSimpleTypes() throws Exception { - for (Map.Entry entry : - BQTableTypeToCorrectProtoDescriptorTest.entrySet()) { - final Table.TableFieldSchema tableFieldSchema = - Table.TableFieldSchema.newBuilder() - .setType(entry.getKey()) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_field_type") - .build(); - final Table.TableSchema tableSchema = - Table.TableSchema.newBuilder().addFields(0, tableFieldSchema).build(); - final Descriptor descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, entry.getValue()); - } - } - - @Test - public void testStructSimple() throws Exception { - final Table.TableFieldSchema StringType = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRING) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_field_type") - .build(); - final Table.TableFieldSchema tableFieldSchema = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRUCT) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_field_type") - .addFields(0, StringType) - .build(); - final Table.TableSchema tableSchema = - Table.TableSchema.newBuilder().addFields(0, tableFieldSchema).build(); - final Descriptor descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, MessageType.getDescriptor()); - } - - @Test - public void testStructComplex() throws Exception { - final Table.TableFieldSchema test_int = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.INT64) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_int") - .build(); - final Table.TableFieldSchema test_string = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRING) - .setMode(Table.TableFieldSchema.Mode.REPEATED) - .setName("test_string") - .build(); - final Table.TableFieldSchema test_bytes = - 
Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.BYTES) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .setName("test_bytes") - .build(); - final Table.TableFieldSchema test_bool = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.BOOL) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_bool") - .build(); - final Table.TableFieldSchema test_double = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.DOUBLE) - .setMode(Table.TableFieldSchema.Mode.REPEATED) - .setName("test_double") - .build(); - final Table.TableFieldSchema test_date = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.DATE) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .setName("test_date") - .build(); - final Table.TableFieldSchema ComplexLvl2 = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRUCT) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .addFields(0, test_int) - .setName("complex_lvl2") - .build(); - final Table.TableFieldSchema ComplexLvl1 = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRUCT) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .addFields(0, test_int) - .addFields(1, ComplexLvl2) - .setName("complex_lvl1") - .build(); - final Table.TableFieldSchema TEST_NUMERIC = - Table.TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.BYTES) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_numeric") - .build(); - final Table.TableFieldSchema TEST_GEO = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.GEOGRAPHY) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_geo") - .build(); - final Table.TableFieldSchema TEST_TIMESTAMP = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.TIMESTAMP) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_timestamp") - .build(); - final Table.TableFieldSchema TEST_TIME = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.INT64) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_time") - .build(); - final Table.TableFieldSchema TEST_NUMERIC_REPEATED = - Table.TableFieldSchema.newBuilder() - .setType(TableFieldSchema.Type.NUMERIC) - .setMode(Table.TableFieldSchema.Mode.REPEATED) - .setName("test_numeric_repeated") - .build(); - final Table.TableSchema tableSchema = - Table.TableSchema.newBuilder() - .addFields(0, test_int) - .addFields(1, test_string) - .addFields(2, test_bytes) - .addFields(3, test_bool) - .addFields(4, test_double) - .addFields(5, test_date) - .addFields(6, ComplexLvl1) - .addFields(7, ComplexLvl2) - .addFields(8, TEST_NUMERIC) - .addFields(9, TEST_GEO) - .addFields(10, TEST_TIMESTAMP) - .addFields(11, TEST_TIME) - .addFields(12, TEST_NUMERIC_REPEATED) - .build(); - final Descriptor descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, ComplexRoot.getDescriptor()); - } - - @Test - public void testCasingComplexStruct() throws Exception { - final Table.TableFieldSchema required = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.INT64) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .setName("tEsT_ReQuIrEd") - .build(); - final Table.TableFieldSchema repeated = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.INT64) - .setMode(Table.TableFieldSchema.Mode.REPEATED) - 
.setName("tESt_repEATed") - .build(); - final Table.TableFieldSchema optional = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.INT64) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_opTIONal") - .build(); - final Table.TableFieldSchema test_int = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.INT64) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("TEST_INT") - .build(); - final Table.TableFieldSchema test_string = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRING) - .setMode(Table.TableFieldSchema.Mode.REPEATED) - .setName("TEST_STRING") - .build(); - final Table.TableFieldSchema test_bytes = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.BYTES) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .setName("TEST_BYTES") - .build(); - final Table.TableFieldSchema test_bool = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.BOOL) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("TEST_BOOL") - .build(); - final Table.TableFieldSchema test_double = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.DOUBLE) - .setMode(Table.TableFieldSchema.Mode.REPEATED) - .setName("TEST_DOUBLE") - .build(); - final Table.TableFieldSchema test_date = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.DATE) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .setName("TEST_DATE") - .build(); - final Table.TableFieldSchema option_test = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRUCT) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .addFields(0, required) - .addFields(1, repeated) - .addFields(2, optional) - .setName("option_test") - .build(); - final Table.TableSchema tableSchema = - Table.TableSchema.newBuilder() - .addFields(0, test_int) - .addFields(1, test_string) - .addFields(2, test_bytes) - .addFields(3, test_bool) - .addFields(4, test_double) - .addFields(5, test_date) - .addFields(6, option_test) - .build(); - final Descriptor descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, CasingComplex.getDescriptor()); - } - - @Test - public void testOptions() throws Exception { - final Table.TableFieldSchema required = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.INT64) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .setName("test_required") - .build(); - final Table.TableFieldSchema repeated = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.INT64) - .setMode(Table.TableFieldSchema.Mode.REPEATED) - .setName("test_repeated") - .build(); - final Table.TableFieldSchema optional = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.INT64) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_optional") - .build(); - final Table.TableSchema tableSchema = - Table.TableSchema.newBuilder() - .addFields(0, required) - .addFields(1, repeated) - .addFields(2, optional) - .build(); - final Descriptor descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - isDescriptorEqual(descriptor, OptionTest.getDescriptor()); - } - - @Test - public void testDescriptorReuseDuringCreation() throws Exception { - final Table.TableFieldSchema test_int = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.INT64) - 
.setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_int") - .build(); - final Table.TableFieldSchema reuse_lvl2 = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRUCT) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("reuse_lvl2") - .addFields(0, test_int) - .build(); - final Table.TableFieldSchema reuse_lvl1 = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRUCT) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("reuse_lvl1") - .addFields(0, test_int) - .addFields(0, reuse_lvl2) - .build(); - final Table.TableFieldSchema reuse_lvl1_1 = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRUCT) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("reuse_lvl1_1") - .addFields(0, test_int) - .addFields(0, reuse_lvl2) - .build(); - final Table.TableFieldSchema reuse_lvl1_2 = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRUCT) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("reuse_lvl1_2") - .addFields(0, test_int) - .addFields(0, reuse_lvl2) - .build(); - final Table.TableSchema tableSchema = - Table.TableSchema.newBuilder() - .addFields(0, reuse_lvl1) - .addFields(1, reuse_lvl1_1) - .addFields(2, reuse_lvl1_2) - .build(); - final Descriptor descriptor = - BQTableSchemaToProtoDescriptor.convertBQTableSchemaToProtoDescriptor(tableSchema); - HashMap descriptorToCount = new HashMap(); - mapDescriptorToCount(descriptor, descriptorToCount); - assertEquals(descriptorToCount.size(), 2); - assertTrue(descriptorToCount.containsKey("root__reuse_lvl1")); - assertEquals(descriptorToCount.get("root__reuse_lvl1").intValue(), 3); - assertTrue(descriptorToCount.containsKey("root__reuse_lvl1__reuse_lvl2")); - assertEquals(descriptorToCount.get("root__reuse_lvl1__reuse_lvl2").intValue(), 3); - isDescriptorEqual(descriptor, ReuseRoot.getDescriptor()); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java deleted file mode 100644 index fd0fa65e13..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java +++ /dev/null @@ -1,511 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.gax.core.NoCredentialsProvider; -import com.google.api.gax.grpc.GaxGrpcProperties; -import com.google.api.gax.grpc.testing.LocalChannelProvider; -import com.google.api.gax.grpc.testing.MockGrpcService; -import com.google.api.gax.grpc.testing.MockServiceHelper; -import com.google.api.gax.grpc.testing.MockStreamObserver; -import com.google.api.gax.rpc.ApiClientHeaderProvider; -import com.google.api.gax.rpc.ApiStreamObserver; -import com.google.api.gax.rpc.BidiStreamingCallable; -import com.google.api.gax.rpc.InvalidArgumentException; -import com.google.api.gax.rpc.StatusCode; -import com.google.protobuf.AbstractMessage; -import com.google.protobuf.Int64Value; -import io.grpc.StatusRuntimeException; -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import javax.annotation.Generated; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; - -@Generated("by gapic-generator-java") -public class BigQueryWriteClientTest { - private static MockServiceHelper mockServiceHelper; - private BigQueryWriteClient client; - private LocalChannelProvider channelProvider; - private static MockBigQueryWrite mockBigQueryWrite; - - @BeforeClass - public static void startStaticServer() { - mockBigQueryWrite = new MockBigQueryWrite(); - mockServiceHelper = - new MockServiceHelper( - UUID.randomUUID().toString(), Arrays.asList(mockBigQueryWrite)); - mockServiceHelper.start(); - } - - @AfterClass - public static void stopServer() { - mockServiceHelper.stop(); - } - - @Before - public void setUp() throws IOException { - mockServiceHelper.reset(); - channelProvider = mockServiceHelper.createChannelProvider(); - BigQueryWriteSettings settings = - BigQueryWriteSettings.newBuilder() - .setTransportChannelProvider(channelProvider) - .setCredentialsProvider(NoCredentialsProvider.create()) - .build(); - client = BigQueryWriteClient.create(settings); - } - - @After - public void tearDown() throws Exception { - client.close(); - } - - @Test - public void createWriteStreamTest() throws Exception { - Stream.WriteStream expectedResponse = Stream.WriteStream.newBuilder().build(); - mockBigQueryWrite.addResponse(expectedResponse); - - TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); - Stream.WriteStream writeStream = Stream.WriteStream.newBuilder().build(); - - Stream.WriteStream actualResponse = client.createWriteStream(parent, writeStream); - Assert.assertEquals(expectedResponse, actualResponse); - - List actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(1, actualRequests.size()); - Storage.CreateWriteStreamRequest actualRequest = - ((Storage.CreateWriteStreamRequest) actualRequests.get(0)); - - Assert.assertEquals(parent.toString(), actualRequest.getParent()); - Assert.assertEquals(writeStream, actualRequest.getWriteStream()); - Assert.assertTrue( - channelProvider.isHeaderSent( - ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), - GaxGrpcProperties.getDefaultApiClientHeaderPattern())); - } - - @Test - public void createWriteStreamExceptionTest() throws Exception { - StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); - mockBigQueryWrite.addException(exception); - - try { - TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); - 
Stream.WriteStream writeStream = Stream.WriteStream.newBuilder().build(); - client.createWriteStream(parent, writeStream); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception. - } - } - - @Test - public void createWriteStreamTest2() throws Exception { - Stream.WriteStream expectedResponse = Stream.WriteStream.newBuilder().build(); - mockBigQueryWrite.addResponse(expectedResponse); - - String parent = "parent-995424086"; - Stream.WriteStream writeStream = Stream.WriteStream.newBuilder().build(); - - Stream.WriteStream actualResponse = client.createWriteStream(parent, writeStream); - Assert.assertEquals(expectedResponse, actualResponse); - - List actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(1, actualRequests.size()); - Storage.CreateWriteStreamRequest actualRequest = - ((Storage.CreateWriteStreamRequest) actualRequests.get(0)); - - Assert.assertEquals(parent, actualRequest.getParent()); - Assert.assertEquals(writeStream, actualRequest.getWriteStream()); - Assert.assertTrue( - channelProvider.isHeaderSent( - ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), - GaxGrpcProperties.getDefaultApiClientHeaderPattern())); - } - - @Test - public void createWriteStreamExceptionTest2() throws Exception { - StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); - mockBigQueryWrite.addException(exception); - - try { - String parent = "parent-995424086"; - Stream.WriteStream writeStream = Stream.WriteStream.newBuilder().build(); - client.createWriteStream(parent, writeStream); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception. - } - } - - @Test - public void appendRowsTest() throws Exception { - Storage.AppendRowsResponse expectedResponse = Storage.AppendRowsResponse.newBuilder().build(); - mockBigQueryWrite.addResponse(expectedResponse); - Storage.AppendRowsRequest request = - Storage.AppendRowsRequest.newBuilder() - .setWriteStream( - WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) - .setOffset(Int64Value.newBuilder().build()) - .setIgnoreUnknownFields(true) - .build(); - - MockStreamObserver responseObserver = new MockStreamObserver<>(); - - BidiStreamingCallable callable = - client.appendRowsCallable(); - ApiStreamObserver requestObserver = - callable.bidiStreamingCall(responseObserver); - - requestObserver.onNext(request); - requestObserver.onCompleted(); - - List actualResponses = responseObserver.future().get(); - Assert.assertEquals(1, actualResponses.size()); - Assert.assertEquals(expectedResponse, actualResponses.get(0)); - } - - @Test - public void appendRowsExceptionTest() throws Exception { - StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); - mockBigQueryWrite.addException(exception); - Storage.AppendRowsRequest request = - Storage.AppendRowsRequest.newBuilder() - .setWriteStream( - WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]").toString()) - .setOffset(Int64Value.newBuilder().build()) - .setIgnoreUnknownFields(true) - .build(); - - MockStreamObserver responseObserver = new MockStreamObserver<>(); - - BidiStreamingCallable callable = - client.appendRowsCallable(); - ApiStreamObserver requestObserver = - callable.bidiStreamingCall(responseObserver); - - requestObserver.onNext(request); - - try { - List actualResponses = responseObserver.future().get(); - Assert.fail("No exception thrown"); - } catch (ExecutionException e) 
{ - Assert.assertTrue(e.getCause() instanceof InvalidArgumentException); - InvalidArgumentException apiException = ((InvalidArgumentException) e.getCause()); - Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode()); - } - } - - @Test - public void getWriteStreamTest() throws Exception { - Stream.WriteStream expectedResponse = Stream.WriteStream.newBuilder().build(); - mockBigQueryWrite.addResponse(expectedResponse); - - WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); - - Stream.WriteStream actualResponse = client.getWriteStream(name); - Assert.assertEquals(expectedResponse, actualResponse); - - List actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(1, actualRequests.size()); - Storage.GetWriteStreamRequest actualRequest = - ((Storage.GetWriteStreamRequest) actualRequests.get(0)); - - Assert.assertEquals(name.toString(), actualRequest.getName()); - Assert.assertTrue( - channelProvider.isHeaderSent( - ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), - GaxGrpcProperties.getDefaultApiClientHeaderPattern())); - } - - @Test - public void getWriteStreamExceptionTest() throws Exception { - StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); - mockBigQueryWrite.addException(exception); - - try { - WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); - client.getWriteStream(name); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception. - } - } - - @Test - public void getWriteStreamTest2() throws Exception { - Stream.WriteStream expectedResponse = Stream.WriteStream.newBuilder().build(); - mockBigQueryWrite.addResponse(expectedResponse); - - String name = "name3373707"; - - Stream.WriteStream actualResponse = client.getWriteStream(name); - Assert.assertEquals(expectedResponse, actualResponse); - - List actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(1, actualRequests.size()); - Storage.GetWriteStreamRequest actualRequest = - ((Storage.GetWriteStreamRequest) actualRequests.get(0)); - - Assert.assertEquals(name, actualRequest.getName()); - Assert.assertTrue( - channelProvider.isHeaderSent( - ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), - GaxGrpcProperties.getDefaultApiClientHeaderPattern())); - } - - @Test - public void getWriteStreamExceptionTest2() throws Exception { - StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); - mockBigQueryWrite.addException(exception); - - try { - String name = "name3373707"; - client.getWriteStream(name); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception. 
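[Editor's note: illustrative sketch, not part of this patch. The appendRows exception test above unwraps a server error from the response future, and the same few lines recur across the streaming tests, so the pattern is worth stating once; assertInvalidArgument is a hypothetical helper name.]

    static void assertInvalidArgument(java.util.concurrent.Future<?> responseFuture) {
      try {
        responseFuture.get();
        Assert.fail("No exception thrown");
      } catch (java.util.concurrent.ExecutionException e) {
        // gRPC errors surface as an ExecutionException wrapping a gax ApiException.
        Assert.assertTrue(e.getCause() instanceof InvalidArgumentException);
        InvalidArgumentException apiException = (InvalidArgumentException) e.getCause();
        Assert.assertEquals(StatusCode.Code.INVALID_ARGUMENT, apiException.getStatusCode().getCode());
      } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
        Assert.fail("Interrupted while waiting for the stream");
      }
    }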
- } - } - - @Test - public void finalizeWriteStreamTest() throws Exception { - Storage.FinalizeWriteStreamResponse expectedResponse = - Storage.FinalizeWriteStreamResponse.newBuilder().build(); - mockBigQueryWrite.addResponse(expectedResponse); - - WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); - - Storage.FinalizeWriteStreamResponse actualResponse = client.finalizeWriteStream(name); - Assert.assertEquals(expectedResponse, actualResponse); - - List actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(1, actualRequests.size()); - Storage.FinalizeWriteStreamRequest actualRequest = - ((Storage.FinalizeWriteStreamRequest) actualRequests.get(0)); - - Assert.assertEquals(name.toString(), actualRequest.getName()); - Assert.assertTrue( - channelProvider.isHeaderSent( - ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), - GaxGrpcProperties.getDefaultApiClientHeaderPattern())); - } - - @Test - public void finalizeWriteStreamExceptionTest() throws Exception { - StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); - mockBigQueryWrite.addException(exception); - - try { - WriteStreamName name = WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); - client.finalizeWriteStream(name); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception. - } - } - - @Test - public void finalizeWriteStreamTest2() throws Exception { - Storage.FinalizeWriteStreamResponse expectedResponse = - Storage.FinalizeWriteStreamResponse.newBuilder().build(); - mockBigQueryWrite.addResponse(expectedResponse); - - String name = "name3373707"; - - Storage.FinalizeWriteStreamResponse actualResponse = client.finalizeWriteStream(name); - Assert.assertEquals(expectedResponse, actualResponse); - - List actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(1, actualRequests.size()); - Storage.FinalizeWriteStreamRequest actualRequest = - ((Storage.FinalizeWriteStreamRequest) actualRequests.get(0)); - - Assert.assertEquals(name, actualRequest.getName()); - Assert.assertTrue( - channelProvider.isHeaderSent( - ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), - GaxGrpcProperties.getDefaultApiClientHeaderPattern())); - } - - @Test - public void finalizeWriteStreamExceptionTest2() throws Exception { - StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); - mockBigQueryWrite.addException(exception); - - try { - String name = "name3373707"; - client.finalizeWriteStream(name); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception. 
- } - } - - @Test - public void batchCommitWriteStreamsTest() throws Exception { - Storage.BatchCommitWriteStreamsResponse expectedResponse = - Storage.BatchCommitWriteStreamsResponse.newBuilder().build(); - mockBigQueryWrite.addResponse(expectedResponse); - - TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); - - Storage.BatchCommitWriteStreamsResponse actualResponse = client.batchCommitWriteStreams(parent); - Assert.assertEquals(expectedResponse, actualResponse); - - List actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(1, actualRequests.size()); - Storage.BatchCommitWriteStreamsRequest actualRequest = - ((Storage.BatchCommitWriteStreamsRequest) actualRequests.get(0)); - - Assert.assertEquals(parent.toString(), actualRequest.getParent()); - Assert.assertTrue( - channelProvider.isHeaderSent( - ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), - GaxGrpcProperties.getDefaultApiClientHeaderPattern())); - } - - @Test - public void batchCommitWriteStreamsExceptionTest() throws Exception { - StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); - mockBigQueryWrite.addException(exception); - - try { - TableName parent = TableName.of("[PROJECT]", "[DATASET]", "[TABLE]"); - client.batchCommitWriteStreams(parent); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception. - } - } - - @Test - public void batchCommitWriteStreamsTest2() throws Exception { - Storage.BatchCommitWriteStreamsResponse expectedResponse = - Storage.BatchCommitWriteStreamsResponse.newBuilder().build(); - mockBigQueryWrite.addResponse(expectedResponse); - - String parent = "parent-995424086"; - - Storage.BatchCommitWriteStreamsResponse actualResponse = client.batchCommitWriteStreams(parent); - Assert.assertEquals(expectedResponse, actualResponse); - - List actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(1, actualRequests.size()); - Storage.BatchCommitWriteStreamsRequest actualRequest = - ((Storage.BatchCommitWriteStreamsRequest) actualRequests.get(0)); - - Assert.assertEquals(parent, actualRequest.getParent()); - Assert.assertTrue( - channelProvider.isHeaderSent( - ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), - GaxGrpcProperties.getDefaultApiClientHeaderPattern())); - } - - @Test - public void batchCommitWriteStreamsExceptionTest2() throws Exception { - StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); - mockBigQueryWrite.addException(exception); - - try { - String parent = "parent-995424086"; - client.batchCommitWriteStreams(parent); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception. 
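[Editor's note: sketch only, not part of this patch. The unary RPCs tested above (create, finalize, batch-commit) compose into the pending-stream commit flow; this ties them together using the v1alpha2 names exercised in these tests and the client fixture above. The PENDING type value is assumed from the v1alpha2 stream proto.]

    TableName parent = TableName.of("p", "d", "t");
    // 1. Create a PENDING stream: rows are buffered until the stream is committed.
    Stream.WriteStream stream =
        client.createWriteStream(
            parent,
            Stream.WriteStream.newBuilder().setType(Stream.WriteStream.Type.PENDING).build());
    // 2. Append rows via client.appendRowsCallable(), targeting stream.getName().
    // 3. Seal the stream so no further appends are accepted.
    client.finalizeWriteStream(stream.getName());
    // 4. Commit atomically. The parent-only overload is what the tests above exercise;
    //    production code also lists the finalized streams on the request.
    client.batchCommitWriteStreams(parent);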
- } - } - - @Test - public void flushRowsTest() throws Exception { - Storage.FlushRowsResponse expectedResponse = Storage.FlushRowsResponse.newBuilder().build(); - mockBigQueryWrite.addResponse(expectedResponse); - - WriteStreamName writeStream = - WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); - - Storage.FlushRowsResponse actualResponse = client.flushRows(writeStream); - Assert.assertEquals(expectedResponse, actualResponse); - - List actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(1, actualRequests.size()); - Storage.FlushRowsRequest actualRequest = ((Storage.FlushRowsRequest) actualRequests.get(0)); - - Assert.assertEquals(writeStream.toString(), actualRequest.getWriteStream()); - Assert.assertTrue( - channelProvider.isHeaderSent( - ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), - GaxGrpcProperties.getDefaultApiClientHeaderPattern())); - } - - @Test - public void flushRowsExceptionTest() throws Exception { - StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); - mockBigQueryWrite.addException(exception); - - try { - WriteStreamName writeStream = - WriteStreamName.of("[PROJECT]", "[DATASET]", "[TABLE]", "[STREAM]"); - client.flushRows(writeStream); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception. - } - } - - @Test - public void flushRowsTest2() throws Exception { - Storage.FlushRowsResponse expectedResponse = Storage.FlushRowsResponse.newBuilder().build(); - mockBigQueryWrite.addResponse(expectedResponse); - - String writeStream = "writeStream1412231231"; - - Storage.FlushRowsResponse actualResponse = client.flushRows(writeStream); - Assert.assertEquals(expectedResponse, actualResponse); - - List actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(1, actualRequests.size()); - Storage.FlushRowsRequest actualRequest = ((Storage.FlushRowsRequest) actualRequests.get(0)); - - Assert.assertEquals(writeStream, actualRequest.getWriteStream()); - Assert.assertTrue( - channelProvider.isHeaderSent( - ApiClientHeaderProvider.getDefaultApiClientHeaderKey(), - GaxGrpcProperties.getDefaultApiClientHeaderPattern())); - } - - @Test - public void flushRowsExceptionTest2() throws Exception { - StatusRuntimeException exception = new StatusRuntimeException(io.grpc.Status.INVALID_ARGUMENT); - mockBigQueryWrite.addException(exception); - - try { - String writeStream = "writeStream1412231231"; - client.flushRows(writeStream); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - // Expected exception. - } - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/DirectWriterTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/DirectWriterTest.java deleted file mode 100644 index e0550196ba..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/DirectWriterTest.java +++ /dev/null @@ -1,431 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import static org.junit.Assert.*; -import static org.mockito.Mockito.*; - -import com.google.api.core.ApiFuture; -import com.google.api.gax.core.NoCredentialsProvider; -import com.google.api.gax.grpc.testing.LocalChannelProvider; -import com.google.api.gax.grpc.testing.MockGrpcService; -import com.google.api.gax.grpc.testing.MockServiceHelper; -import com.google.cloud.bigquery.storage.test.Test.AllSupportedTypes; -import com.google.cloud.bigquery.storage.test.Test.FooType; -import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest; -import com.google.common.collect.Sets; -import com.google.protobuf.AbstractMessage; -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.Set; -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.logging.Logger; -import org.json.JSONArray; -import org.json.JSONObject; -import org.junit.After; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.Before; -import org.junit.BeforeClass; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(JUnit4.class) -public class DirectWriterTest { - private static final Logger LOG = Logger.getLogger(DirectWriterTest.class.getName()); - - private static final String TEST_TABLE = "projects/p/datasets/d/tables/t"; - private static final String TEST_STREAM = "projects/p/datasets/d/tables/t/streams/s"; - private static final String TEST_STREAM_2 = "projects/p/datasets/d/tables/t/streams/s2"; - - private static MockBigQueryWrite mockBigQueryWrite; - private static MockServiceHelper serviceHelper; - private BigQueryWriteClient client; - private LocalChannelProvider channelProvider; - - private final Table.TableFieldSchema FOO = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRING) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("foo") - .build(); - private final Table.TableSchema TABLE_SCHEMA = - Table.TableSchema.newBuilder().addFields(0, FOO).build(); - - @Mock private static SchemaCompatibility schemaCheck; - - @BeforeClass - public static void startStaticServer() { - mockBigQueryWrite = new MockBigQueryWrite(); - serviceHelper = - new MockServiceHelper( - UUID.randomUUID().toString(), Arrays.asList(mockBigQueryWrite)); - serviceHelper.start(); - } - - @AfterClass - public static void stopServer() { - serviceHelper.stop(); - } - - @Before - public void setUp() throws IOException { - serviceHelper.reset(); - channelProvider = serviceHelper.createChannelProvider(); - BigQueryWriteSettings settings = - BigQueryWriteSettings.newBuilder() - .setTransportChannelProvider(channelProvider) - .setCredentialsProvider(NoCredentialsProvider.create()) - .build(); - client = BigQueryWriteClient.create(settings); - MockitoAnnotations.initMocks(this); - } - - @After - public void tearDown() throws Exception { - 
client.close(); - } - - /** Response mocks for creating a new writer */ - void WriterCreationResponseMock(String testStreamName, Set<Long> responseOffsets) { - // Response from CreateWriteStream - Stream.WriteStream expectedResponse = - Stream.WriteStream.newBuilder().setName(testStreamName).build(); - mockBigQueryWrite.addResponse(expectedResponse); - - for (Long offset : responseOffsets) { - Storage.AppendRowsResponse response = - Storage.AppendRowsResponse.newBuilder().setOffset(offset).build(); - mockBigQueryWrite.addResponse(response); - } - } - - /** Response mocks for creating a new writer */ - void JsonWriterCreationResponseMock(String testStreamName, Set<Long> responseOffsets) { - // Response from CreateWriteStream - Stream.WriteStream expectedResponse = - Stream.WriteStream.newBuilder() - .setName(testStreamName) - .setTableSchema(TABLE_SCHEMA) - .build(); - mockBigQueryWrite.addResponse(expectedResponse); - - for (Long offset : responseOffsets) { - Storage.AppendRowsResponse response = - Storage.AppendRowsResponse.newBuilder().setOffset(offset).build(); - mockBigQueryWrite.addResponse(response); - } - } - - @Test - public void testJsonWriteSuccess() throws Exception { - DirectWriter.testSetStub(client, 10, schemaCheck); - FooType m1 = FooType.newBuilder().setFoo("m1").build(); - FooType m2 = FooType.newBuilder().setFoo("m2").build(); - JSONObject m1_json = new JSONObject(); - m1_json.put("foo", "m1"); - JSONObject m2_json = new JSONObject(); - m2_json.put("foo", "m2"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(m1_json); - jsonArr.put(m2_json); - - JSONArray jsonArr2 = new JSONArray(); - jsonArr2.put(m1_json); - - JsonWriterCreationResponseMock(TEST_STREAM, Sets.newHashSet(Long.valueOf(0L))); - ApiFuture<Long> ret = DirectWriter.append(TEST_TABLE, jsonArr); - assertEquals(Long.valueOf(0L), ret.get()); - List<AbstractMessage> actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(2, actualRequests.size()); - assertEquals( - TEST_TABLE, ((Storage.CreateWriteStreamRequest) actualRequests.get(0)).getParent()); - assertEquals( - m1.toByteString(), - ((AppendRowsRequest) actualRequests.get(1)).getProtoRows().getRows().getSerializedRows(0)); - assertEquals( - m2.toByteString(), - ((AppendRowsRequest) actualRequests.get(1)).getProtoRows().getRows().getSerializedRows(1)); - - Storage.AppendRowsResponse response = - Storage.AppendRowsResponse.newBuilder().setOffset(2).build(); - mockBigQueryWrite.addResponse(response); - - ret = DirectWriter.append(TEST_TABLE, jsonArr2); - assertEquals(Long.valueOf(2L), ret.get()); - assertEquals( - m1.toByteString(), - ((AppendRowsRequest) actualRequests.get(2)).getProtoRows().getRows().getSerializedRows(0)); - DirectWriter.clearCache(); - } - - @Test - public void testProtobufWriteSuccess() throws Exception { - DirectWriter.testSetStub(client, 10, schemaCheck); - FooType m1 = FooType.newBuilder().setFoo("m1").build(); - FooType m2 = FooType.newBuilder().setFoo("m2").build(); - - WriterCreationResponseMock(TEST_STREAM, Sets.newHashSet(Long.valueOf(0L))); - ApiFuture<Long> ret = DirectWriter.append(TEST_TABLE, Arrays.asList(m1, m2)); - verify(schemaCheck).check(TEST_TABLE, FooType.getDescriptor()); - assertEquals(Long.valueOf(0L), ret.get()); - List<AbstractMessage> actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(2, actualRequests.size()); - assertEquals( - TEST_TABLE, ((Storage.CreateWriteStreamRequest) actualRequests.get(0)).getParent()); - - Storage.AppendRowsRequest.ProtoData.Builder dataBuilder = - Storage.AppendRowsRequest.ProtoData.newBuilder(); -
dataBuilder.setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor())); - dataBuilder.setRows( - ProtoBufProto.ProtoRows.newBuilder() - .addSerializedRows(m1.toByteString()) - .addSerializedRows(m2.toByteString()) - .build()); - Storage.AppendRowsRequest expectRequest = - Storage.AppendRowsRequest.newBuilder() - .setWriteStream(TEST_STREAM) - .setProtoRows(dataBuilder.build()) - .build(); - assertEquals(expectRequest.toString(), actualRequests.get(1).toString()); - - Storage.AppendRowsResponse response = - Storage.AppendRowsResponse.newBuilder().setOffset(2).build(); - mockBigQueryWrite.addResponse(response); - // Append again, write stream name and schema are cleared. - ret = DirectWriter.append(TEST_TABLE, Arrays.asList(m1)); - assertEquals(Long.valueOf(2L), ret.get()); - dataBuilder = Storage.AppendRowsRequest.ProtoData.newBuilder(); - dataBuilder.setRows( - ProtoBufProto.ProtoRows.newBuilder().addSerializedRows(m1.toByteString()).build()); - expectRequest = - Storage.AppendRowsRequest.newBuilder().setProtoRows(dataBuilder.build()).build(); - assertEquals(expectRequest.toString(), actualRequests.get(2).toString()); - - // Write with a different schema. - WriterCreationResponseMock(TEST_STREAM_2, Sets.newHashSet(Long.valueOf(0L))); - AllSupportedTypes m3 = AllSupportedTypes.newBuilder().setStringValue("s").build(); - ret = DirectWriter.append(TEST_TABLE, Arrays.asList(m3)); - verify(schemaCheck).check(TEST_TABLE, AllSupportedTypes.getDescriptor()); - assertEquals(Long.valueOf(0L), ret.get()); - dataBuilder = Storage.AppendRowsRequest.ProtoData.newBuilder(); - dataBuilder.setWriterSchema(ProtoSchemaConverter.convert(AllSupportedTypes.getDescriptor())); - dataBuilder.setRows( - ProtoBufProto.ProtoRows.newBuilder().addSerializedRows(m3.toByteString()).build()); - expectRequest = - Storage.AppendRowsRequest.newBuilder() - .setWriteStream(TEST_STREAM_2) - .setProtoRows(dataBuilder.build()) - .build(); - Assert.assertEquals(5, actualRequests.size()); - assertEquals( - TEST_TABLE, ((Storage.CreateWriteStreamRequest) actualRequests.get(3)).getParent()); - assertEquals(expectRequest.toString(), actualRequests.get(4).toString()); - - DirectWriter.clearCache(); - } - - @Test - public void testWriteBadTableName() throws Exception { - DirectWriter.testSetStub(client, 10, schemaCheck); - FooType m1 = FooType.newBuilder().setFoo("m1").build(); - FooType m2 = FooType.newBuilder().setFoo("m2").build(); - - try { - ApiFuture ret = DirectWriter.append("abc", Arrays.asList(m1, m2)); - fail("should fail"); - } catch (IllegalArgumentException expected) { - assertEquals("Invalid table name: abc", expected.getMessage()); - } - - DirectWriter.clearCache(); - } - - @Test - public void testJsonWriteBadTableName() throws Exception { - DirectWriter.testSetStub(client, 10, schemaCheck); - JSONObject m1_json = new JSONObject(); - m1_json.put("foo", "m1"); - JSONObject m2_json = new JSONObject(); - m2_json.put("foo", "m2"); - final JSONArray jsonArr = new JSONArray(); - jsonArr.put(m1_json); - jsonArr.put(m2_json); - - try { - ApiFuture ret = DirectWriter.append("abc", jsonArr); - fail("should fail"); - } catch (IllegalArgumentException expected) { - assertEquals("Invalid table name: abc", expected.getMessage()); - } - - DirectWriter.clearCache(); - } - - @Test - public void testConcurrentAccess() throws Exception { - DirectWriter.testSetStub(client, 2, schemaCheck); - final FooType m1 = FooType.newBuilder().setFoo("m1").build(); - final FooType m2 = FooType.newBuilder().setFoo("m2").build(); - final Set 
expectedOffset = - Sets.newHashSet( - Long.valueOf(0L), - Long.valueOf(2L), - Long.valueOf(4L), - Long.valueOf(6L), - Long.valueOf(8L)); - // Make sure getting the same table writer in multiple thread only cause create to be called - // once. - WriterCreationResponseMock(TEST_STREAM, expectedOffset); - ExecutorService executor = Executors.newFixedThreadPool(5); - for (int i = 0; i < 5; i++) { - executor.execute( - new Runnable() { - @Override - public void run() { - try { - ApiFuture result = - DirectWriter.append(TEST_TABLE, Arrays.asList(m1, m2)); - synchronized (expectedOffset) { - assertTrue(expectedOffset.remove(result.get())); - } - } catch (Exception e) { - fail(e.toString()); - } - } - }); - } - executor.shutdown(); - try { - executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); - } catch (InterruptedException e) { - LOG.info(e.toString()); - } - DirectWriter.clearCache(); - } - - @Test - public void testJsonConcurrentAccess() throws Exception { - DirectWriter.testSetStub(client, 2, schemaCheck); - FooType m1 = FooType.newBuilder().setFoo("m1").build(); - FooType m2 = FooType.newBuilder().setFoo("m2").build(); - JSONObject m1_json = new JSONObject(); - m1_json.put("foo", "m1"); - JSONObject m2_json = new JSONObject(); - m2_json.put("foo", "m2"); - final JSONArray jsonArr = new JSONArray(); - jsonArr.put(m1_json); - jsonArr.put(m2_json); - - final Set expectedOffset = - Sets.newHashSet( - Long.valueOf(0L), - Long.valueOf(2L), - Long.valueOf(4L), - Long.valueOf(6L), - Long.valueOf(8L)); - // Make sure getting the same table writer in multiple thread only cause create to be called - // once. - JsonWriterCreationResponseMock(TEST_STREAM, expectedOffset); - ExecutorService executor = Executors.newFixedThreadPool(5); - for (int i = 0; i < 5; i++) { - executor.execute( - new Runnable() { - @Override - public void run() { - try { - ApiFuture result = DirectWriter.append(TEST_TABLE, jsonArr); - synchronized (expectedOffset) { - assertTrue(expectedOffset.remove(result.get())); - } - } catch (Exception e) { - fail(e.toString()); - } - } - }); - } - executor.shutdown(); - try { - executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); - } catch (InterruptedException e) { - LOG.info(e.toString()); - } - DirectWriter.clearCache(); - } - - @Test - public void testJsonProtobufWrite() throws Exception { - DirectWriter.testSetStub(client, 10, schemaCheck); - FooType m1 = FooType.newBuilder().setFoo("m1").build(); - FooType m2 = FooType.newBuilder().setFoo("m2").build(); - JSONObject m1_json = new JSONObject(); - m1_json.put("foo", "m1"); - JSONObject m2_json = new JSONObject(); - m2_json.put("foo", "m2"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(m1_json); - jsonArr.put(m2_json); - - JSONArray jsonArr2 = new JSONArray(); - jsonArr2.put(m1_json); - - WriterCreationResponseMock(TEST_STREAM, Sets.newHashSet(Long.valueOf(0L))); - - ApiFuture ret = DirectWriter.append(TEST_TABLE, Arrays.asList(m1, m2)); - verify(schemaCheck).check(TEST_TABLE, FooType.getDescriptor()); - assertEquals(Long.valueOf(0L), ret.get()); - List actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(2, actualRequests.size()); - assertEquals( - TEST_TABLE, ((Storage.CreateWriteStreamRequest) actualRequests.get(0)).getParent()); - - Storage.AppendRowsRequest.ProtoData.Builder dataBuilder = - Storage.AppendRowsRequest.ProtoData.newBuilder(); - dataBuilder.setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor())); - dataBuilder.setRows( - ProtoBufProto.ProtoRows.newBuilder() 
- .addSerializedRows(m1.toByteString()) - .addSerializedRows(m2.toByteString()) - .build()); - Storage.AppendRowsRequest expectRequest = - Storage.AppendRowsRequest.newBuilder() - .setWriteStream(TEST_STREAM) - .setProtoRows(dataBuilder.build()) - .build(); - assertEquals(expectRequest.toString(), actualRequests.get(1).toString()); - - JsonWriterCreationResponseMock(TEST_STREAM, Sets.newHashSet(Long.valueOf(0L))); - ret = DirectWriter.append(TEST_TABLE, jsonArr); - assertEquals(Long.valueOf(0L), ret.get()); - actualRequests = mockBigQueryWrite.getRequests(); - Assert.assertEquals(4, actualRequests.size()); - assertEquals( - m1.toByteString(), - ((AppendRowsRequest) actualRequests.get(3)).getProtoRows().getRows().getSerializedRows(0)); - assertEquals( - m2.toByteString(), - ((AppendRowsRequest) actualRequests.get(3)).getProtoRows().getRows().getSerializedRows(1)); - - DirectWriter.clearCache(); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeBigQueryWrite.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeBigQueryWrite.java deleted file mode 100644 index c743b39af7..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeBigQueryWrite.java +++ /dev/null @@ -1,86 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.gax.grpc.testing.MockGrpcService; -import com.google.cloud.bigquery.storage.v1alpha2.Storage.*; -import com.google.protobuf.AbstractMessage; -import io.grpc.ServerServiceDefinition; -import java.util.LinkedList; -import java.util.List; -import java.util.concurrent.ScheduledExecutorService; -import org.threeten.bp.Duration; - -/** - * A fake implementation of {@link MockGrpcService} that can be used to test clients of a - * StreamWriter. It forwards calls to the real implementation {@link FakeBigQueryWriteImpl}.
- */ -public class FakeBigQueryWrite implements MockGrpcService { - private final FakeBigQueryWriteImpl serviceImpl; - - public FakeBigQueryWrite() { - serviceImpl = new FakeBigQueryWriteImpl(); - } - - @Override - public List<AbstractMessage> getRequests() { - return new LinkedList<AbstractMessage>(serviceImpl.getCapturedRequests()); - } - - public List<AppendRowsRequest> getAppendRequests() { - return serviceImpl.getCapturedRequests(); - } - - public List<GetWriteStreamRequest> getWriteStreamRequests() { - return serviceImpl.getCapturedWriteRequests(); - } - - @Override - public void addResponse(AbstractMessage response) { - if (response instanceof AppendRowsResponse) { - serviceImpl.addResponse((AppendRowsResponse) response); - } else if (response instanceof Stream.WriteStream) { - serviceImpl.addWriteStreamResponse((Stream.WriteStream) response); - } else if (response instanceof FlushRowsResponse) { - serviceImpl.addFlushRowsResponse((FlushRowsResponse) response); - } else { - throw new IllegalStateException("Unsupported service"); - } - } - - @Override - public void addException(Exception exception) { - serviceImpl.addConnectionError(exception); - } - - @Override - public ServerServiceDefinition getServiceDefinition() { - return serviceImpl.bindService(); - } - - @Override - public void reset() { - serviceImpl.reset(); - } - - public void setResponseDelay(Duration delay) { - serviceImpl.setResponseDelay(delay); - } - - public void setExecutor(ScheduledExecutorService executor) { - serviceImpl.setExecutor(executor); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeBigQueryWriteImpl.java deleted file mode 100644 index 39c1e4158a..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeBigQueryWriteImpl.java +++ /dev/null @@ -1,215 +0,0 @@ -/* - * Copyright 2016 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.cloud.bigquery.storage.v1alpha2.Storage.*; -import com.google.common.base.Optional; -import io.grpc.stub.StreamObserver; -import java.util.ArrayList; -import java.util.List; -import java.util.concurrent.LinkedBlockingQueue; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicInteger; -import java.util.logging.Logger; -import org.threeten.bp.Duration; - -/** - * A fake implementation of {@link BigQueryWriteImplBase} that can act like a server in StreamWriter - * unit testing.
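[Editor's note: illustrative usage, not part of this patch. FakeBigQueryWrite above routes each queued response by its runtime type, so a single fake can serve every RPC a StreamWriter test touches; the stream name below is a placeholder.]

    FakeBigQueryWrite fake = new FakeBigQueryWrite();
    // Dispatched to addWriteStreamResponse(): answers the next GetWriteStream call.
    fake.addResponse(
        Stream.WriteStream.newBuilder().setName("projects/p/datasets/d/tables/t/streams/s").build());
    // Dispatched to addResponse(AppendRowsResponse): answers the next AppendRows message.
    fake.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(0).build());
    // Dispatched to addFlushRowsResponse(): answers the next FlushRows call.
    fake.addResponse(Storage.FlushRowsResponse.newBuilder().build());
    // Optional: delay replies on a test executor to exercise timeout and retry paths.
    fake.setResponseDelay(Duration.ofMillis(50));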
- */ -class FakeBigQueryWriteImpl extends BigQueryWriteGrpc.BigQueryWriteImplBase { - private static final Logger LOG = Logger.getLogger(FakeBigQueryWriteImpl.class.getName()); - - private final LinkedBlockingQueue requests = new LinkedBlockingQueue<>(); - private final LinkedBlockingQueue writeRequests = - new LinkedBlockingQueue<>(); - private final LinkedBlockingQueue flushRequests = new LinkedBlockingQueue<>(); - private final LinkedBlockingQueue responses = new LinkedBlockingQueue<>(); - private final LinkedBlockingQueue writeResponses = - new LinkedBlockingQueue<>(); - private final LinkedBlockingQueue flushResponses = new LinkedBlockingQueue<>(); - private final AtomicInteger nextMessageId = new AtomicInteger(1); - private boolean autoPublishResponse; - private ScheduledExecutorService executor = null; - private Duration responseDelay = Duration.ZERO; - - /** Class used to save the state of a possible response. */ - private static class Response { - Optional appendResponse; - Optional error; - - public Response(AppendRowsResponse appendResponse) { - this.appendResponse = Optional.of(appendResponse); - this.error = Optional.absent(); - } - - public Response(Throwable exception) { - this.appendResponse = Optional.absent(); - this.error = Optional.of(exception); - } - - public AppendRowsResponse getResponse() { - return appendResponse.get(); - } - - public Throwable getError() { - return error.get(); - } - - boolean isError() { - return error.isPresent(); - } - - @Override - public String toString() { - if (isError()) { - return error.get().toString(); - } - return appendResponse.get().toString(); - } - } - - @Override - public void getWriteStream( - GetWriteStreamRequest request, StreamObserver responseObserver) { - Object response = writeResponses.remove(); - if (response instanceof Stream.WriteStream) { - writeRequests.add(request); - responseObserver.onNext((Stream.WriteStream) response); - responseObserver.onCompleted(); - } else if (response instanceof Exception) { - responseObserver.onError((Exception) response); - } else { - responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); - } - } - - @Override - public void flushRows( - FlushRowsRequest request, StreamObserver responseObserver) { - Object response = writeResponses.remove(); - if (response instanceof FlushRowsResponse) { - flushRequests.add(request); - responseObserver.onNext((FlushRowsResponse) response); - responseObserver.onCompleted(); - } else if (response instanceof Exception) { - responseObserver.onError((Exception) response); - } else { - responseObserver.onError(new IllegalArgumentException("Unrecognized response type")); - } - } - - @Override - public StreamObserver appendRows( - final StreamObserver responseObserver) { - StreamObserver requestObserver = - new StreamObserver() { - @Override - public void onNext(AppendRowsRequest value) { - LOG.info("Get request:" + value.toString()); - final Response response = responses.remove(); - requests.add(value); - if (responseDelay == Duration.ZERO) { - sendResponse(response, responseObserver); - } else { - final Response responseToSend = response; - LOG.info("Schedule a response to be sent at delay"); - executor.schedule( - new Runnable() { - @Override - public void run() { - sendResponse(responseToSend, responseObserver); - } - }, - responseDelay.toMillis(), - TimeUnit.MILLISECONDS); - } - } - - @Override - public void onError(Throwable t) { - responseObserver.onError(t); - } - - @Override - public void onCompleted() { - 
responseObserver.onCompleted(); - } - }; - return requestObserver; - } - - private void sendResponse( - Response response, StreamObserver responseObserver) { - LOG.info("Sending response: " + response.toString()); - if (response.isError()) { - responseObserver.onError(response.getError()); - } else { - responseObserver.onNext(response.getResponse()); - } - } - - /** Set an executor to use to delay publish responses. */ - public FakeBigQueryWriteImpl setExecutor(ScheduledExecutorService executor) { - this.executor = executor; - return this; - } - - /** Set an amount of time by which to delay publish responses. */ - public FakeBigQueryWriteImpl setResponseDelay(Duration responseDelay) { - this.responseDelay = responseDelay; - return this; - } - - public FakeBigQueryWriteImpl addResponse(AppendRowsResponse appendRowsResponse) { - responses.add(new Response(appendRowsResponse)); - return this; - } - - public FakeBigQueryWriteImpl addResponse(AppendRowsResponse.Builder appendResponseBuilder) { - return addResponse(appendResponseBuilder.build()); - } - - public FakeBigQueryWriteImpl addWriteStreamResponse(Stream.WriteStream response) { - writeResponses.add(response); - return this; - } - - public FakeBigQueryWriteImpl addFlushRowsResponse(FlushRowsResponse response) { - flushResponses.add(response); - return this; - } - - public FakeBigQueryWriteImpl addConnectionError(Throwable error) { - responses.add(new Response(error)); - return this; - } - - public List getCapturedRequests() { - return new ArrayList(requests); - } - - public List getCapturedWriteRequests() { - return new ArrayList(writeRequests); - } - - public void reset() { - requests.clear(); - responses.clear(); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeClock.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeClock.java deleted file mode 100644 index ee8ee3221b..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeClock.java +++ /dev/null @@ -1,42 +0,0 @@ -/* - * Copyright 2016 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.core.ApiClock; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.atomic.AtomicLong; - -/** A Clock to help with testing time-based logic. */ -public class FakeClock implements ApiClock { - - private final AtomicLong millis = new AtomicLong(); - - // Advances the clock value by {@code time} in {@code timeUnit}. 
- public void advance(long time, TimeUnit timeUnit) { - millis.addAndGet(timeUnit.toMillis(time)); - } - - @Override - public long nanoTime() { - return millisTime() * 1000_000L; - } - - @Override - public long millisTime() { - return millis.get(); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeScheduledExecutorService.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeScheduledExecutorService.java deleted file mode 100644 index 8ee37cc0ba..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/FakeScheduledExecutorService.java +++ /dev/null @@ -1,347 +0,0 @@ -/* - * Copyright 2016 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.core.ApiClock; -import com.google.common.primitives.Ints; -import com.google.common.util.concurrent.SettableFuture; -import java.util.ArrayList; -import java.util.Deque; -import java.util.LinkedList; -import java.util.List; -import java.util.PriorityQueue; -import java.util.concurrent.AbstractExecutorService; -import java.util.concurrent.Callable; -import java.util.concurrent.Delayed; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.ScheduledExecutorService; -import java.util.concurrent.ScheduledFuture; -import java.util.concurrent.TimeUnit; -import java.util.concurrent.TimeoutException; -import java.util.concurrent.atomic.AtomicBoolean; -import java.util.logging.Logger; -import org.threeten.bp.Duration; -import org.threeten.bp.Instant; - -/** - * Fake implementation of {@link ScheduledExecutorService} that allows tests to control the reference - * time of the executor and decide when to execute any outstanding task.
- */ -public class FakeScheduledExecutorService extends AbstractExecutorService - implements ScheduledExecutorService { - private static final Logger LOG = Logger.getLogger(FakeScheduledExecutorService.class.getName()); - - private final AtomicBoolean shutdown = new AtomicBoolean(false); - private final PriorityQueue<PendingCallable<?>> pendingCallables = new PriorityQueue<>(); - private final FakeClock clock = new FakeClock(); - private final Deque<Duration> expectedWorkQueue = new LinkedList<>(); - - public ApiClock getClock() { - return clock; - } - - @Override - public ScheduledFuture<?> schedule(Runnable command, long delay, TimeUnit unit) { - return schedulePendingCallable( - new PendingCallable<>( - Duration.ofMillis(unit.toMillis(delay)), command, PendingCallableType.NORMAL)); - } - - @Override - public <V> ScheduledFuture<V> schedule(Callable<V> callable, long delay, TimeUnit unit) { - return schedulePendingCallable( - new PendingCallable<>( - Duration.ofMillis(unit.toMillis(delay)), callable, PendingCallableType.NORMAL)); - } - - @Override - public ScheduledFuture<?> scheduleAtFixedRate( - Runnable command, long initialDelay, long period, TimeUnit unit) { - return schedulePendingCallable( - new PendingCallable<>( - Duration.ofMillis(unit.toMillis(initialDelay)), - command, - PendingCallableType.FIXED_RATE)); - } - - @Override - public ScheduledFuture<?> scheduleWithFixedDelay( - Runnable command, long initialDelay, long delay, TimeUnit unit) { - return schedulePendingCallable( - new PendingCallable<>( - Duration.ofMillis(unit.toMillis(initialDelay)), - command, - PendingCallableType.FIXED_DELAY)); - } - - /** - * This will advance the reference time of the executor and execute (in the same thread) any - * outstanding callable whose execution time has passed. - */ - public void advanceTime(Duration toAdvance) { - LOG.info( - "Advance time to: " - + Instant.ofEpochMilli(clock.millisTime() + toAdvance.toMillis()).toString()); - clock.advance(toAdvance.toMillis(), TimeUnit.MILLISECONDS); - work(); - } - - private void work() { - for (; ; ) { - PendingCallable<?> callable = null; - Instant cmpTime = Instant.ofEpochMilli(clock.millisTime()); - if (!pendingCallables.isEmpty()) { - LOG.info( - "Going to call: Current time: " - + cmpTime.toString() - + " Scheduled time: " - + pendingCallables.peek().getScheduledTime().toString() - + " Creation time:" - + pendingCallables.peek().getCreationTime().toString()); - } - synchronized (pendingCallables) { - if (pendingCallables.isEmpty() - || pendingCallables.peek().getScheduledTime().isAfter(cmpTime)) { - break; - } - callable = pendingCallables.poll(); - } - if (callable != null) { - try { - callable.call(); - } catch (Exception e) { - // We ignore any callable exception, which is set on the future and not relevant to - // advanceTime.
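[Editor's note: usage sketch, not part of this patch. The point of this executor is that nothing runs until a test advances the fake clock; scheduled work then executes inline and deterministically, with no real sleeping.]

    FakeScheduledExecutorService executor = new FakeScheduledExecutorService();
    final AtomicBoolean fired = new AtomicBoolean(false);
    executor.schedule(
        new Runnable() {
          @Override
          public void run() {
            fired.set(true);
          }
        },
        10,
        TimeUnit.SECONDS);
    // Advancing the clock runs the pending callable on this thread.
    executor.advanceTime(Duration.ofSeconds(10));
    assert fired.get();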
- } - } - - synchronized (pendingCallables) { - if (shutdown.get() && pendingCallables.isEmpty()) { - pendingCallables.notifyAll(); - } - } - } - - @Override - public void shutdown() { - if (shutdown.getAndSet(true)) { - throw new IllegalStateException("This executor has been shutdown already"); - } - } - - @Override - public List<Runnable> shutdownNow() { - if (shutdown.getAndSet(true)) { - throw new IllegalStateException("This executor has been shutdown already"); - } - List<Runnable> pending = new ArrayList<>(); - for (final PendingCallable<?> pendingCallable : pendingCallables) { - pending.add( - new Runnable() { - @Override - public void run() { - pendingCallable.call(); - } - }); - } - synchronized (pendingCallables) { - pendingCallables.notifyAll(); - pendingCallables.clear(); - } - return pending; - } - - @Override - public boolean isShutdown() { - return shutdown.get(); - } - - @Override - public boolean isTerminated() { - return pendingCallables.isEmpty(); - } - - @Override - public boolean awaitTermination(long timeout, TimeUnit unit) throws InterruptedException { - synchronized (pendingCallables) { - if (pendingCallables.isEmpty()) { - return true; - } - LOG.info("Waiting on pending callables: " + pendingCallables.size()); - pendingCallables.wait(unit.toMillis(timeout)); - return pendingCallables.isEmpty(); - } - } - - @Override - public void execute(Runnable command) { - if (shutdown.get()) { - throw new IllegalStateException("This executor has been shutdown"); - } - command.run(); - } - - <V> ScheduledFuture<V> schedulePendingCallable(PendingCallable<V> callable) { - LOG.info( - "Schedule pending callable called " + callable.delay + " " + callable.getScheduledTime()); - if (shutdown.get()) { - throw new IllegalStateException("This executor has been shutdown"); - } - synchronized (pendingCallables) { - pendingCallables.add(callable); - } - work(); - synchronized (expectedWorkQueue) { - // We compare by the callable delay in order to decide when to remove expectations from the - // expected work queue, i.e. only the expected work that matches the delay of the scheduled - // callable is removed from the queue. - if (!expectedWorkQueue.isEmpty() && expectedWorkQueue.peek().equals(callable.delay)) { - expectedWorkQueue.poll(); - } - expectedWorkQueue.notifyAll(); - } - - return callable.getScheduledFuture(); - } - - enum PendingCallableType { - NORMAL, - FIXED_RATE, - FIXED_DELAY - } - - /** Class that saves the state of a scheduled pending callable.
*/ - class PendingCallable implements Comparable> { - Instant creationTime = Instant.ofEpochMilli(clock.millisTime()); - Duration delay; - Callable pendingCallable; - SettableFuture future = SettableFuture.create(); - AtomicBoolean cancelled = new AtomicBoolean(false); - AtomicBoolean done = new AtomicBoolean(false); - PendingCallableType type; - - PendingCallable(Duration delay, final Runnable runnable, PendingCallableType type) { - pendingCallable = - new Callable() { - @Override - public T call() { - runnable.run(); - return null; - } - }; - this.type = type; - this.delay = delay; - } - - PendingCallable(Duration delay, Callable callable, PendingCallableType type) { - pendingCallable = callable; - this.type = type; - this.delay = delay; - } - - private Instant getScheduledTime() { - return creationTime.plus(delay); - } - - private Instant getCreationTime() { - return creationTime; - } - - ScheduledFuture getScheduledFuture() { - return new ScheduledFuture() { - @Override - public long getDelay(TimeUnit unit) { - return unit.convert( - getScheduledTime().toEpochMilli() - clock.millisTime(), TimeUnit.MILLISECONDS); - } - - @Override - public int compareTo(Delayed o) { - return Ints.saturatedCast( - getDelay(TimeUnit.MILLISECONDS) - o.getDelay(TimeUnit.MILLISECONDS)); - } - - @Override - public boolean cancel(boolean mayInterruptIfRunning) { - synchronized (this) { - cancelled.set(true); - return !done.get(); - } - } - - @Override - public boolean isCancelled() { - return cancelled.get(); - } - - @Override - public boolean isDone() { - return done.get(); - } - - @Override - public T get() throws InterruptedException, ExecutionException { - return future.get(); - } - - @Override - public T get(long timeout, TimeUnit unit) - throws InterruptedException, ExecutionException, TimeoutException { - return future.get(timeout, unit); - } - }; - } - - T call() { - T result = null; - synchronized (this) { - if (cancelled.get()) { - return null; - } - try { - result = pendingCallable.call(); - future.set(result); - } catch (Exception e) { - future.setException(e); - } finally { - switch (type) { - case NORMAL: - done.set(true); - break; - case FIXED_DELAY: - this.creationTime = Instant.ofEpochMilli(clock.millisTime()); - schedulePendingCallable(this); - break; - case FIXED_RATE: - this.creationTime = this.creationTime.plus(delay); - schedulePendingCallable(this); - break; - default: - // Nothing to do - } - } - } - return result; - } - - @Override - public int compareTo(PendingCallable other) { - return getScheduledTime().compareTo(other.getScheduledTime()); - } - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/JsonStreamWriterTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/JsonStreamWriterTest.java deleted file mode 100644 index 1f7ec99aa7..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/JsonStreamWriterTest.java +++ /dev/null @@ -1,960 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; - -import com.google.api.core.ApiFuture; -import com.google.api.gax.core.ExecutorProvider; -import com.google.api.gax.core.InstantiatingExecutorProvider; -import com.google.api.gax.core.NoCredentialsProvider; -import com.google.api.gax.grpc.testing.LocalChannelProvider; -import com.google.api.gax.grpc.testing.MockGrpcService; -import com.google.api.gax.grpc.testing.MockServiceHelper; -import com.google.cloud.bigquery.storage.test.JsonTest.ComplexRoot; -import com.google.cloud.bigquery.storage.test.Test.FooType; -import com.google.cloud.bigquery.storage.test.Test.UpdatedFooType; -import com.google.cloud.bigquery.storage.test.Test.UpdatedFooType2; -import com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse; -import com.google.protobuf.ByteString; -import com.google.protobuf.Descriptors.DescriptorValidationException; -import com.google.protobuf.Timestamp; -import java.io.IOException; -import java.util.*; -import java.util.concurrent.ExecutionException; -import java.util.logging.Logger; -import org.json.JSONArray; -import org.json.JSONObject; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.threeten.bp.Instant; - -@RunWith(JUnit4.class) -public class JsonStreamWriterTest { - private static final Logger LOG = Logger.getLogger(JsonStreamWriterTest.class.getName()); - private static final String TEST_STREAM = "projects/p/datasets/d/tables/t/streams/s"; - private static final ExecutorProvider SINGLE_THREAD_EXECUTOR = - InstantiatingExecutorProvider.newBuilder().setExecutorThreadCount(1).build(); - private static LocalChannelProvider channelProvider; - private FakeScheduledExecutorService fakeExecutor; - private FakeBigQueryWrite testBigQueryWrite; - private static MockServiceHelper serviceHelper; - - private final Table.TableFieldSchema FOO = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRING) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("foo") - .build(); - private final Table.TableSchema TABLE_SCHEMA = - Table.TableSchema.newBuilder().addFields(0, FOO).build(); - - private final Table.TableFieldSchema BAR = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRING) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("bar") - .build(); - private final Table.TableFieldSchema BAZ = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRING) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("baz") - .build(); - private final Table.TableSchema UPDATED_TABLE_SCHEMA = - Table.TableSchema.newBuilder().addFields(0, FOO).addFields(1, BAR).build(); - private final Table.TableSchema UPDATED_TABLE_SCHEMA_2 = - Table.TableSchema.newBuilder().addFields(0, FOO).addFields(1, BAR).addFields(2, BAZ).build(); - - private final Table.TableFieldSchema TEST_INT = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.INT64) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_int") - .build(); - private final Table.TableFieldSchema TEST_STRING = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRING) - 
.setMode(Table.TableFieldSchema.Mode.REPEATED) - .setName("test_string") - .build(); - private final Table.TableFieldSchema TEST_BYTES = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.BYTES) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .setName("test_bytes") - .build(); - private final Table.TableFieldSchema TEST_BOOL = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.BOOL) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("test_bool") - .build(); - private final Table.TableFieldSchema TEST_DOUBLE = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.DOUBLE) - .setMode(Table.TableFieldSchema.Mode.REPEATED) - .setName("test_double") - .build(); - private final Table.TableFieldSchema TEST_DATE = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.DATE) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .setName("test_date") - .build(); - private final Table.TableFieldSchema COMPLEXLVL2 = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRUCT) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .addFields(0, TEST_INT) - .setName("complex_lvl2") - .build(); - private final Table.TableFieldSchema COMPLEXLVL1 = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRUCT) - .setMode(Table.TableFieldSchema.Mode.REQUIRED) - .addFields(0, TEST_INT) - .addFields(1, COMPLEXLVL2) - .setName("complex_lvl1") - .build(); - private final Table.TableSchema COMPLEX_TABLE_SCHEMA = - Table.TableSchema.newBuilder() - .addFields(0, TEST_INT) - .addFields(1, TEST_STRING) - .addFields(2, TEST_BYTES) - .addFields(3, TEST_BOOL) - .addFields(4, TEST_DOUBLE) - .addFields(5, TEST_DATE) - .addFields(6, COMPLEXLVL1) - .addFields(7, COMPLEXLVL2) - .build(); - - @Before - public void setUp() throws Exception { - testBigQueryWrite = new FakeBigQueryWrite(); - serviceHelper = - new MockServiceHelper( - UUID.randomUUID().toString(), Arrays.<MockGrpcService>asList(testBigQueryWrite)); - serviceHelper.start(); - channelProvider = serviceHelper.createChannelProvider(); - fakeExecutor = new FakeScheduledExecutorService(); - testBigQueryWrite.setExecutor(fakeExecutor); - Instant time = Instant.now(); - Timestamp timestamp = - Timestamp.newBuilder().setSeconds(time.getEpochSecond()).setNanos(time.getNano()).build(); - // Add enough GetWriteStream responses.
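[Editor's note: aside, not part of this patch. The loop that follows pre-loads several WriteStream responses because the writer issues GetWriteStream lookups against the fake during its lifecycle; pre-loading them in setUp() keeps individual tests short. A minimal writer wired to this harness, with jsonArr a JSONArray of row objects, looks like:]

    try (JsonStreamWriter writer =
        JsonStreamWriter.newBuilder(TEST_STREAM, TABLE_SCHEMA)
            .setChannelProvider(channelProvider)
            .setExecutorProvider(SINGLE_THREAD_EXECUTOR)
            .setCredentialsProvider(NoCredentialsProvider.create())
            .build()) {
      // Queue the append reply on the fake, then write and wait on the returned future.
      testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(0).build());
      ApiFuture<Storage.AppendRowsResponse> future =
          writer.append(jsonArr, -1, /* allowUnknownFields */ false);
      assertEquals(0L, future.get().getOffset());
    }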
- for (int i = 0; i < 4; i++) { - testBigQueryWrite.addResponse( - Stream.WriteStream.newBuilder().setName(TEST_STREAM).setCreateTime(timestamp).build()); - } - } - - @After - public void tearDown() throws Exception { - serviceHelper.stop(); - } - - private JsonStreamWriter.Builder getTestJsonStreamWriterBuilder( - String testStream, Table.TableSchema BQTableSchema) { - return JsonStreamWriter.newBuilder(testStream, BQTableSchema) - .setChannelProvider(channelProvider) - .setExecutorProvider(SINGLE_THREAD_EXECUTOR) - .setCredentialsProvider(NoCredentialsProvider.create()); - } - - @Test - public void testTwoParamNewBuilder_nullSchema() { - try { - getTestJsonStreamWriterBuilder(null, TABLE_SCHEMA); - Assert.fail("expected NullPointerException"); - } catch (NullPointerException e) { - assertEquals(e.getMessage(), "StreamName is null."); - } - } - - @Test - public void testTwoParamNewBuilder_nullStream() { - try { - getTestJsonStreamWriterBuilder(TEST_STREAM, null); - Assert.fail("expected NullPointerException"); - } catch (NullPointerException e) { - assertEquals(e.getMessage(), "TableSchema is null."); - } - } - - @Test - public void testTwoParamNewBuilder() - throws DescriptorValidationException, IOException, InterruptedException { - JsonStreamWriter writer = getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build(); - assertEquals(TEST_STREAM, writer.getStreamName()); - } - - @Test - public void testSingleAppendSimpleJson() throws Exception { - FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { - - testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(0).build()); - - ApiFuture appendFuture = - writer.append(jsonArr, -1, /* allowUnknownFields */ false); - - assertEquals(0L, appendFuture.get().getOffset()); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRows(0), - expectedProto.toByteString()); - } - } - - @Test - public void testSingleAppendMultipleSimpleJson() throws Exception { - FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - JSONObject foo1 = new JSONObject(); - foo1.put("foo", "allen"); - JSONObject foo2 = new JSONObject(); - foo2.put("foo", "allen"); - JSONObject foo3 = new JSONObject(); - foo3.put("foo", "allen"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - jsonArr.put(foo1); - jsonArr.put(foo2); - jsonArr.put(foo3); - - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { - testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(0).build()); - - ApiFuture appendFuture = - writer.append(jsonArr, -1, /* allowUnknownFields */ false); - - assertEquals(0L, appendFuture.get().getOffset()); - assertEquals( - 4, - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - for (int i = 0; i < 4; i++) { - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRows(i), - expectedProto.toByteString()); - } - } - } - - @Test - 
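Every append test in this class follows the same seed-append-verify shape: queue an AppendRowsResponse on the fake service, append a JSONArray, then assert on the offset and on the serialized rows the fake captured. A minimal sketch of that pattern as a single helper (the helper name is illustrative, not part of the original file):

    private void appendAndVerify(JsonStreamWriter writer, JSONArray rows, long expectedOffset)
        throws Exception {
      // Queue the response the fake service will return for this append.
      testBigQueryWrite.addResponse(
          Storage.AppendRowsResponse.newBuilder().setOffset(expectedOffset).build());
      ApiFuture<AppendRowsResponse> future =
          writer.append(rows, -1, /* allowUnknownFields */ false);
      // The future resolves once the fake replies; the offset echoes the queued response.
      assertEquals(expectedOffset, future.get().getOffset());
    }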
public void testMultipleAppendSimpleJson() throws Exception { - FooType expectedProto = FooType.newBuilder().setFoo("allen").build(); - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { - testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(0).build()); - testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(1).build()); - testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(2).build()); - testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(3).build()); - ApiFuture<AppendRowsResponse> appendFuture; - for (int i = 0; i < 4; i++) { - appendFuture = writer.append(jsonArr, -1, /* allowUnknownFields */ false); - - assertEquals((long) i, appendFuture.get().getOffset()); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(i) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(i) - .getProtoRows() - .getRows() - .getSerializedRows(0), - expectedProto.toByteString()); - } - } - } - - @Test - public void testSingleAppendComplexJson() throws Exception { - ComplexRoot expectedProto = - ComplexRoot.newBuilder() - .setTestInt(1) - .addTestString("a") - .addTestString("b") - .addTestString("c") - .setTestBytes(ByteString.copyFrom("hello".getBytes())) - .setTestBool(true) - .addTestDouble(1.1) - .addTestDouble(2.2) - .addTestDouble(3.3) - .addTestDouble(4.4) - .setTestDate(1) - .setComplexLvl1( - com.google.cloud.bigquery.storage.test.JsonTest.ComplexLvl1.newBuilder() - .setTestInt(2) - .setComplexLvl2( - com.google.cloud.bigquery.storage.test.JsonTest.ComplexLvl2.newBuilder() - .setTestInt(3) - .build()) - .build()) - .setComplexLvl2( - com.google.cloud.bigquery.storage.test.JsonTest.ComplexLvl2.newBuilder() - .setTestInt(3) - .build()) - .build(); - JSONObject complex_lvl2 = new JSONObject(); - complex_lvl2.put("test_int", 3); - - JSONObject complex_lvl1 = new JSONObject(); - complex_lvl1.put("test_int", 2); - complex_lvl1.put("complex_lvl2", complex_lvl2); - - JSONObject json = new JSONObject(); - json.put("test_int", 1); - json.put("test_string", new JSONArray(new String[] {"a", "b", "c"})); - json.put("test_bytes", "hello"); - json.put("test_bool", true); - json.put("test_DOUBLe", new JSONArray(new Double[] {1.1, 2.2, 3.3, 4.4})); - json.put("test_date", 1); - json.put("complex_lvl1", complex_lvl1); - json.put("complex_lvl2", complex_lvl2); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(json); - - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, COMPLEX_TABLE_SCHEMA).build()) { - testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(0).build()); - ApiFuture<AppendRowsResponse> appendFuture = - writer.append(jsonArr, -1, /* allowUnknownFields */ false); - - assertEquals(0L, appendFuture.get().getOffset()); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRows(0), - expectedProto.toByteString()); - } - } - - @Test - public void testAppendMultipleSchemaUpdate() throws Exception { - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { - // Add fake response for
FakeBigQueryWrite, first response has updated schema. - testBigQueryWrite.addResponse( - Storage.AppendRowsResponse.newBuilder() - .setOffset(0) - .setUpdatedSchema(UPDATED_TABLE_SCHEMA) - .build()); - testBigQueryWrite.addResponse( - Storage.AppendRowsResponse.newBuilder() - .setOffset(1) - .setUpdatedSchema(UPDATED_TABLE_SCHEMA_2) - .build()); - testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(2).build()); - // First append - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - - ApiFuture<AppendRowsResponse> appendFuture1 = - writer.append(jsonArr, -1, /* allowUnknownFields */ false); - - int millis = 0; - while (millis <= 10000) { - if (writer.getDescriptor().getFields().size() == 2) { - break; - } - Thread.sleep(100); - millis += 100; - } - assertTrue(writer.getDescriptor().getFields().size() == 2); - assertEquals(0L, appendFuture1.get().getOffset()); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRows(0), - FooType.newBuilder().setFoo("allen").build().toByteString()); - - // Second append with updated schema. - JSONObject updatedFoo = new JSONObject(); - updatedFoo.put("foo", "allen"); - updatedFoo.put("bar", "allen2"); - JSONArray updatedJsonArr = new JSONArray(); - updatedJsonArr.put(updatedFoo); - - ApiFuture<AppendRowsResponse> appendFuture2 = - writer.append(updatedJsonArr, -1, /* allowUnknownFields */ false); - - millis = 0; - while (millis <= 10000) { - if (writer.getDescriptor().getFields().size() == 3) { - break; - } - Thread.sleep(100); - millis += 100; - } - assertTrue(writer.getDescriptor().getFields().size() == 3); - assertEquals(1L, appendFuture2.get().getOffset()); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(1) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(1) - .getProtoRows() - .getRows() - .getSerializedRows(0), - UpdatedFooType.newBuilder().setFoo("allen").setBar("allen2").build().toByteString()); - - // Third append with updated schema. - JSONObject updatedFoo2 = new JSONObject(); - updatedFoo2.put("foo", "allen"); - updatedFoo2.put("bar", "allen2"); - updatedFoo2.put("baz", "allen3"); - JSONArray updatedJsonArr2 = new JSONArray(); - updatedJsonArr2.put(updatedFoo2); - - ApiFuture<AppendRowsResponse> appendFuture3 = - writer.append(updatedJsonArr2, -1, /* allowUnknownFields */ false); - - assertEquals(2L, appendFuture3.get().getOffset()); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(2) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(2) - .getProtoRows() - .getRows() - .getSerializedRows(0), - UpdatedFooType2.newBuilder() - .setFoo("allen") - .setBar("allen2") - .setBaz("allen3") - .build() - .toByteString()); - // Check if writer schemas were added in for all three append requests. - assertTrue(testBigQueryWrite.getAppendRequests().get(0).getProtoRows().hasWriterSchema()); - assertTrue(testBigQueryWrite.getAppendRequests().get(1).getProtoRows().hasWriterSchema()); - assertTrue(testBigQueryWrite.getAppendRequests().get(2).getProtoRows().hasWriterSchema()); - } - } - - @Test - // This might be a bug but it is the current behavior. Investigate.
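The error-path tests that follow seed the fake with google.rpc.Status codes rather than offsets; google.rpc.Code defines 6 as ALREADY_EXISTS and 11 as OUT_OF_RANGE. A reference sketch of the two response shapes these tests rely on (variable names are illustrative):

    // ALREADY_EXISTS (6): the rows were already written at this offset; the
    // append future below still resolves successfully instead of failing.
    Storage.AppendRowsResponse alreadyExists =
        Storage.AppendRowsResponse.newBuilder()
            .setError(com.google.rpc.Status.newBuilder().setCode(6).build())
            .build();
    // OUT_OF_RANGE (11): surfaced to the caller as an ExecutionException whose
    // cause message is "OUT_OF_RANGE: ".
    Storage.AppendRowsResponse outOfRange =
        Storage.AppendRowsResponse.newBuilder()
            .setError(com.google.rpc.Status.newBuilder().setCode(11).build())
            .build();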
- public void testAppendAlreadyExists_doesNotThrowException() - throws DescriptorValidationException, IOException, InterruptedException, ExecutionException { - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { - testBigQueryWrite.addResponse( - Storage.AppendRowsResponse.newBuilder() - .setError(com.google.rpc.Status.newBuilder().setCode(6).build()) - .build()); - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - ApiFuture<AppendRowsResponse> appendFuture = - writer.append(jsonArr, -1, /* allowUnknownFields */ false); - appendFuture.get(); - } - } - - @Test - public void testAppendOutOfRangeException() throws Exception { - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { - testBigQueryWrite.addResponse( - Storage.AppendRowsResponse.newBuilder() - .setError(com.google.rpc.Status.newBuilder().setCode(11).build()) - .build()); - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - ApiFuture<AppendRowsResponse> appendFuture = - writer.append(jsonArr, -1, /* allowUnknownFields */ false); - try { - appendFuture.get(); - Assert.fail("expected ExecutionException"); - } catch (ExecutionException ex) { - assertEquals(ex.getCause().getMessage(), "OUT_OF_RANGE: "); - } - } - } - - @Test - public void testAppendOutOfRangeAndUpdateSchema() throws Exception { - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA).build()) { - testBigQueryWrite.addResponse( - Storage.AppendRowsResponse.newBuilder() - .setError(com.google.rpc.Status.newBuilder().setCode(11).build()) - .setUpdatedSchema(UPDATED_TABLE_SCHEMA) - .build()); - testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(0).build()); - - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - ApiFuture<AppendRowsResponse> appendFuture = - writer.append(jsonArr, -1, /* allowUnknownFields */ false); - try { - appendFuture.get(); - Assert.fail("expected ExecutionException"); - } catch (ExecutionException ex) { - assertEquals(ex.getCause().getMessage(), "OUT_OF_RANGE: "); - int millis = 0; - while (millis <= 10000) { - if (writer.getDescriptor().getFields().size() == 2) { - break; - } - Thread.sleep(100); - millis += 100; - } - assertTrue(writer.getDescriptor().getFields().size() == 2); - } - - JSONObject updatedFoo = new JSONObject(); - updatedFoo.put("foo", "allen"); - updatedFoo.put("bar", "allen2"); - JSONArray updatedJsonArr = new JSONArray(); - updatedJsonArr.put(updatedFoo); - - ApiFuture<AppendRowsResponse> appendFuture2 = - writer.append(updatedJsonArr, -1, /* allowUnknownFields */ false); - - assertEquals(0L, appendFuture2.get().getOffset()); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(1) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(1) - .getProtoRows() - .getRows() - .getSerializedRows(0), - UpdatedFooType.newBuilder().setFoo("allen").setBar("allen2").build().toByteString()); - - // Check if writer schemas were added in for both connections.
- assertTrue(testBigQueryWrite.getAppendRequests().get(0).getProtoRows().hasWriterSchema()); - assertTrue(testBigQueryWrite.getAppendRequests().get(1).getProtoRows().hasWriterSchema()); - } - } - - @Test - public void testSchemaUpdateWithNonemptyBatch() throws Exception { - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA) - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(2L) - .build()) - .build()) { - testBigQueryWrite.addResponse( - Storage.AppendRowsResponse.newBuilder() - .setOffset(0) - .setUpdatedSchema(UPDATED_TABLE_SCHEMA) - .build()); - testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(2).build()); - testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(3).build()); - // First append - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - - ApiFuture appendFuture1 = - writer.append(jsonArr, -1, /* allowUnknownFields */ false); - ApiFuture appendFuture2 = - writer.append(jsonArr, -1, /* allowUnknownFields */ false); - ApiFuture appendFuture3 = - writer.append(jsonArr, -1, /* allowUnknownFields */ false); - - assertEquals(0L, appendFuture1.get().getOffset()); - assertEquals(1L, appendFuture2.get().getOffset()); - assertEquals( - 2, - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRows(0), - FooType.newBuilder().setFoo("allen").build().toByteString()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRows(1), - FooType.newBuilder().setFoo("allen").build().toByteString()); - - assertEquals(2L, appendFuture3.get().getOffset()); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(1) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(1) - .getProtoRows() - .getRows() - .getSerializedRows(0), - FooType.newBuilder().setFoo("allen").build().toByteString()); - - int millis = 0; - while (millis <= 10000) { - if (writer.getDescriptor().getFields().size() == 2) { - break; - } - Thread.sleep(100); - millis += 100; - } - assertTrue(writer.getDescriptor().getFields().size() == 2); - - // Second append with updated schema. 
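An updated schema arrives on the append response stream and is applied to the writer asynchronously, which is why these tests poll getDescriptor() rather than asserting immediately. The polling loop above recurs in every schema-update test; a compact equivalent as a helper (the helper name is illustrative, not part of the original file):

    private static void awaitFieldCount(JsonStreamWriter writer, int expectedFields)
        throws InterruptedException {
      // Poll for up to ten seconds while the writer refreshes its descriptor.
      int millis = 0;
      while (millis <= 10000 && writer.getDescriptor().getFields().size() != expectedFields) {
        Thread.sleep(100);
        millis += 100;
      }
      assertEquals(expectedFields, writer.getDescriptor().getFields().size());
    }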
- JSONObject updatedFoo = new JSONObject(); - updatedFoo.put("foo", "allen"); - updatedFoo.put("bar", "allen2"); - JSONArray updatedJsonArr = new JSONArray(); - updatedJsonArr.put(updatedFoo); - - ApiFuture appendFuture4 = - writer.append(updatedJsonArr, -1, /* allowUnknownFields */ false); - - assertEquals(3L, appendFuture4.get().getOffset()); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(2) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(2) - .getProtoRows() - .getRows() - .getSerializedRows(0), - UpdatedFooType.newBuilder().setFoo("allen").setBar("allen2").build().toByteString()); - - assertTrue(testBigQueryWrite.getAppendRequests().get(0).getProtoRows().hasWriterSchema()); - assertTrue( - testBigQueryWrite.getAppendRequests().get(1).getProtoRows().hasWriterSchema() - || testBigQueryWrite.getAppendRequests().get(2).getProtoRows().hasWriterSchema()); - } - } - - @Test - public void testMultiThreadAppendNoSchemaUpdate() throws Exception { - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA) - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(1L) - .build()) - .build()) { - - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - final JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - - final Collection offsetSets = Collections.synchronizedCollection(new HashSet()); - int thread_nums = 5; - Thread[] thread_arr = new Thread[thread_nums]; - for (int i = 0; i < thread_nums; i++) { - testBigQueryWrite.addResponse( - Storage.AppendRowsResponse.newBuilder().setOffset((long) i).build()); - offsetSets.add((long) i); - Thread t = - new Thread( - new Runnable() { - public void run() { - try { - ApiFuture appendFuture = - writer.append(jsonArr, -1, /* allowUnknownFields */ false); - AppendRowsResponse response = appendFuture.get(); - offsetSets.remove(response.getOffset()); - } catch (Exception e) { - LOG.severe("Thread execution failed: " + e.getMessage()); - } - } - }); - thread_arr[i] = t; - t.start(); - } - - for (int i = 0; i < thread_nums; i++) { - thread_arr[i].join(); - } - assertTrue(offsetSets.size() == 0); - for (int i = 0; i < thread_nums; i++) { - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(i) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(i) - .getProtoRows() - .getRows() - .getSerializedRows(0), - FooType.newBuilder().setFoo("allen").build().toByteString()); - } - } - } - - @Test - public void testMultiThreadAppendWithSchemaUpdate() throws Exception { - try (JsonStreamWriter writer = - getTestJsonStreamWriterBuilder(TEST_STREAM, TABLE_SCHEMA) - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(1L) - .build()) - .build()) { - JSONObject foo = new JSONObject(); - foo.put("foo", "allen"); - final JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - - final Collection offsetSets = Collections.synchronizedCollection(new HashSet()); - int numberThreads = 5; - Thread[] thread_arr = new Thread[numberThreads]; - for (int i = 0; i < numberThreads; i++) { - if (i == 2) { - testBigQueryWrite.addResponse( - Storage.AppendRowsResponse.newBuilder() - .setOffset((long) i) - .setUpdatedSchema(UPDATED_TABLE_SCHEMA) - .build()); - } else { - testBigQueryWrite.addResponse( - 
Storage.AppendRowsResponse.newBuilder().setOffset((long) i).build()); - } - - offsetSets.add((long) i); - Thread t = - new Thread( - new Runnable() { - public void run() { - try { - ApiFuture appendFuture = - writer.append(jsonArr, -1, /* allowUnknownFields */ false); - AppendRowsResponse response = appendFuture.get(); - offsetSets.remove(response.getOffset()); - } catch (Exception e) { - LOG.severe("Thread execution failed: " + e.getMessage()); - } - } - }); - thread_arr[i] = t; - t.start(); - } - - for (int i = 0; i < numberThreads; i++) { - thread_arr[i].join(); - } - assertTrue(offsetSets.size() == 0); - for (int i = 0; i < numberThreads; i++) { - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(i) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(i) - .getProtoRows() - .getRows() - .getSerializedRows(0), - FooType.newBuilder().setFoo("allen").build().toByteString()); - } - - int millis = 0; - while (millis <= 10000) { - if (writer.getDescriptor().getFields().size() == 2) { - break; - } - Thread.sleep(100); - millis += 100; - } - assertEquals(2, writer.getDescriptor().getFields().size()); - - foo.put("bar", "allen2"); - final JSONArray jsonArr2 = new JSONArray(); - jsonArr2.put(foo); - - for (int i = numberThreads; i < numberThreads + 5; i++) { - testBigQueryWrite.addResponse( - Storage.AppendRowsResponse.newBuilder().setOffset((long) i).build()); - offsetSets.add((long) i); - Thread t = - new Thread( - new Runnable() { - public void run() { - try { - ApiFuture appendFuture = - writer.append(jsonArr2, -1, /* allowUnknownFields */ false); - AppendRowsResponse response = appendFuture.get(); - offsetSets.remove(response.getOffset()); - } catch (Exception e) { - LOG.severe("Thread execution failed: " + e.getMessage()); - } - } - }); - thread_arr[i - 5] = t; - t.start(); - } - - for (int i = 0; i < numberThreads; i++) { - thread_arr[i].join(); - } - assertTrue(offsetSets.size() == 0); - for (int i = 0; i < numberThreads; i++) { - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(i + 5) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - testBigQueryWrite - .getAppendRequests() - .get(i + 5) - .getProtoRows() - .getRows() - .getSerializedRows(0), - UpdatedFooType.newBuilder().setFoo("allen").setBar("allen2").build().toByteString()); - } - } - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/JsonToProtoMessageTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/JsonToProtoMessageTest.java deleted file mode 100644 index 7108367ea6..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/JsonToProtoMessageTest.java +++ /dev/null @@ -1,750 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
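The file removed next, JsonToProtoMessageTest, exercises JsonToProtoMessage.convertJsonToProtoMessage(Descriptor, JSONObject, allowUnknownFields), which matches JSON keys against a protobuf descriptor (case-insensitively, as testDifferentNameCasing below shows) and produces a DynamicMessage. The basic call shape used throughout the file, with types drawn from its test protos:

    JSONObject json = new JSONObject().put("test_field_type", "test");
    DynamicMessage msg =
        JsonToProtoMessage.convertJsonToProtoMessage(
            StringType.getDescriptor(), json, /* allowUnknownFields */ false);
    // A DynamicMessage and a generated message with the same descriptor compare equal.
    assertEquals(StringType.newBuilder().setTestFieldType("test").build(), msg);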
- */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import static org.junit.Assert.assertEquals; - -import com.google.cloud.bigquery.storage.test.JsonTest.*; -import com.google.cloud.bigquery.storage.test.SchemaTest.*; -import com.google.common.collect.ImmutableMap; -import com.google.protobuf.ByteString; -import com.google.protobuf.Descriptors.Descriptor; -import com.google.protobuf.DynamicMessage; -import com.google.protobuf.Message; -import java.util.ArrayList; -import java.util.Map; -import org.json.JSONArray; -import org.json.JSONObject; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; - -@RunWith(JUnit4.class) -public class JsonToProtoMessageTest { - private static ImmutableMap AllTypesToDebugMessageTest = - new ImmutableMap.Builder() - .put(BoolType.getDescriptor(), "boolean") - .put(BytesType.getDescriptor(), "string") - .put(Int64Type.getDescriptor(), "int64") - .put(Int32Type.getDescriptor(), "int32") - .put(DoubleType.getDescriptor(), "double") - .put(StringType.getDescriptor(), "string") - .put(RepeatedType.getDescriptor(), "array") - .put(ObjectType.getDescriptor(), "object") - .build(); - - private static ImmutableMap AllTypesToCorrectProto = - new ImmutableMap.Builder() - .put( - BoolType.getDescriptor(), - new Message[] {BoolType.newBuilder().setTestFieldType(true).build()}) - .put( - BytesType.getDescriptor(), - new Message[] { - BytesType.newBuilder() - .setTestFieldType(ByteString.copyFrom("test".getBytes())) - .build() - }) - .put( - Int64Type.getDescriptor(), - new Message[] { - Int64Type.newBuilder().setTestFieldType(Long.MAX_VALUE).build(), - Int64Type.newBuilder().setTestFieldType(new Long(Integer.MAX_VALUE)).build() - }) - .put( - Int32Type.getDescriptor(), - new Message[] {Int32Type.newBuilder().setTestFieldType(Integer.MAX_VALUE).build()}) - .put( - DoubleType.getDescriptor(), - new Message[] {DoubleType.newBuilder().setTestFieldType(1.23).build()}) - .put( - StringType.getDescriptor(), - new Message[] {StringType.newBuilder().setTestFieldType("test").build()}) - .put( - RepeatedType.getDescriptor(), - new Message[] { - RepeatedType.newBuilder() - .addAllTestFieldType( - new ArrayList() { - { - add(1L); - add(2L); - add(3L); - } - }) - .build() - }) - .put( - ObjectType.getDescriptor(), - new Message[] { - ObjectType.newBuilder() - .setTestFieldType(ComplexLvl2.newBuilder().setTestInt(1).build()) - .build() - }) - .build(); - - private static ImmutableMap AllRepeatedTypesToDebugMessageTest = - new ImmutableMap.Builder() - .put(RepeatedBool.getDescriptor(), "boolean") - .put(RepeatedBytes.getDescriptor(), "string") - .put(RepeatedInt64.getDescriptor(), "int64") - .put(RepeatedInt32.getDescriptor(), "int32") - .put(RepeatedDouble.getDescriptor(), "double") - .put(RepeatedString.getDescriptor(), "string") - .put(RepeatedObject.getDescriptor(), "object") - .build(); - - private static ImmutableMap AllRepeatedTypesToCorrectProto = - new ImmutableMap.Builder() - .put( - RepeatedBool.getDescriptor(), - new Message[] { - RepeatedBool.newBuilder().addTestRepeated(true).addTestRepeated(false).build() - }) - .put( - RepeatedBytes.getDescriptor(), - new Message[] { - RepeatedBytes.newBuilder() - .addTestRepeated(ByteString.copyFrom("hello".getBytes())) - .addTestRepeated(ByteString.copyFrom("test".getBytes())) - .build() - }) - .put( - RepeatedString.getDescriptor(), - new Message[] { - RepeatedString.newBuilder().addTestRepeated("hello").addTestRepeated("test").build() - }) - .put( - RepeatedInt64.getDescriptor(), - new 
Message[] { - RepeatedInt64.newBuilder() - .addTestRepeated(Long.MAX_VALUE) - .addTestRepeated(Long.MIN_VALUE) - .addTestRepeated(Integer.MAX_VALUE) - .addTestRepeated(Integer.MIN_VALUE) - .addTestRepeated(Short.MAX_VALUE) - .addTestRepeated(Short.MIN_VALUE) - .addTestRepeated(Byte.MAX_VALUE) - .addTestRepeated(Byte.MIN_VALUE) - .addTestRepeated(0) - .build(), - RepeatedInt64.newBuilder() - .addTestRepeated(Integer.MAX_VALUE) - .addTestRepeated(Integer.MIN_VALUE) - .addTestRepeated(Short.MAX_VALUE) - .addTestRepeated(Short.MIN_VALUE) - .addTestRepeated(Byte.MAX_VALUE) - .addTestRepeated(Byte.MIN_VALUE) - .addTestRepeated(0) - .build() - }) - .put( - RepeatedInt32.getDescriptor(), - new Message[] { - RepeatedInt32.newBuilder() - .addTestRepeated(Integer.MAX_VALUE) - .addTestRepeated(Integer.MIN_VALUE) - .addTestRepeated(Short.MAX_VALUE) - .addTestRepeated(Short.MIN_VALUE) - .addTestRepeated(Byte.MAX_VALUE) - .addTestRepeated(Byte.MIN_VALUE) - .addTestRepeated(0) - .build() - }) - .put( - RepeatedDouble.getDescriptor(), - new Message[] { - RepeatedDouble.newBuilder() - .addTestRepeated(Double.MAX_VALUE) - .addTestRepeated(Double.MIN_VALUE) - .addTestRepeated(Float.MAX_VALUE) - .addTestRepeated(Float.MIN_VALUE) - .build(), - RepeatedDouble.newBuilder() - .addTestRepeated(Float.MAX_VALUE) - .addTestRepeated(Float.MIN_VALUE) - .build() - }) - .put( - RepeatedObject.getDescriptor(), - new Message[] { - RepeatedObject.newBuilder() - .addTestRepeated(ComplexLvl2.newBuilder().setTestInt(1).build()) - .addTestRepeated(ComplexLvl2.newBuilder().setTestInt(2).build()) - .addTestRepeated(ComplexLvl2.newBuilder().setTestInt(3).build()) - .build() - }) - .build(); - - private static JSONObject[] simpleJSONObjects = { - new JSONObject().put("test_field_type", Long.MAX_VALUE), - new JSONObject().put("test_field_type", Integer.MAX_VALUE), - new JSONObject().put("test_field_type", 1.23), - new JSONObject().put("test_field_type", true), - new JSONObject().put("test_field_type", "test"), - new JSONObject().put("test_field_type", new JSONArray("[1, 2, 3]")), - new JSONObject().put("test_field_type", new JSONObject().put("test_int", 1)) - }; - - private static JSONObject[] simpleJSONArrays = { - new JSONObject() - .put( - "test_repeated", - new JSONArray( - new Long[] { - Long.MAX_VALUE, - Long.MIN_VALUE, - (long) Integer.MAX_VALUE, - (long) Integer.MIN_VALUE, - (long) Short.MAX_VALUE, - (long) Short.MIN_VALUE, - (long) Byte.MAX_VALUE, - (long) Byte.MIN_VALUE, - 0L - })), - new JSONObject() - .put( - "test_repeated", - new JSONArray( - new Integer[] { - Integer.MAX_VALUE, - Integer.MIN_VALUE, - (int) Short.MAX_VALUE, - (int) Short.MIN_VALUE, - (int) Byte.MAX_VALUE, - (int) Byte.MIN_VALUE, - 0 - })), - new JSONObject() - .put( - "test_repeated", - new JSONArray( - new Double[] { - Double.MAX_VALUE, - Double.MIN_VALUE, - (double) Float.MAX_VALUE, - (double) Float.MIN_VALUE - })), - new JSONObject() - .put("test_repeated", new JSONArray(new Float[] {Float.MAX_VALUE, Float.MIN_VALUE})), - new JSONObject().put("test_repeated", new JSONArray(new Boolean[] {true, false})), - new JSONObject().put("test_repeated", new JSONArray(new String[] {"hello", "test"})), - new JSONObject() - .put( - "test_repeated", - new JSONArray( - new JSONObject[] { - new JSONObject().put("test_int", 1), - new JSONObject().put("test_int", 2), - new JSONObject().put("test_int", 3) - })) - }; - - @Test - public void testDifferentNameCasing() throws Exception { - TestInt64 expectedProto = - 
TestInt64.newBuilder().setByte(1).setShort(1).setInt(1).setLong(1).build(); - - JSONObject json = new JSONObject(); - json.put("bYtE", (byte) 1); - json.put("SHORT", (short) 1); - json.put("inT", 1); - json.put("lONg", 1L); - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestInt64.getDescriptor(), json, false); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testInt64() throws Exception { - TestInt64 expectedProto = - TestInt64.newBuilder().setByte(1).setShort(1).setInt(1).setLong(1).build(); - JSONObject json = new JSONObject(); - json.put("byte", (byte) 1); - json.put("short", (short) 1); - json.put("int", 1); - json.put("long", 1L); - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestInt64.getDescriptor(), json, false); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testInt32() throws Exception { - TestInt32 expectedProto = TestInt32.newBuilder().setByte(1).setShort(1).setInt(1).build(); - JSONObject json = new JSONObject(); - json.put("byte", (byte) 1); - json.put("short", (short) 1); - json.put("int", 1); - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestInt32.getDescriptor(), json, false); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testInt32NotMatchInt64() throws Exception { - JSONObject json = new JSONObject(); - json.put("byte", (byte) 1); - json.put("short", (short) 1); - json.put("int", 1L); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestInt32.getDescriptor(), json, false); - } catch (IllegalArgumentException e) { - assertEquals(e.getMessage(), "JSONObject does not have a int32 field at root.int."); - } - } - - @Test - public void testDouble() throws Exception { - TestDouble expectedProto = TestDouble.newBuilder().setDouble(1.2).setFloat(3.4f).build(); - JSONObject json = new JSONObject(); - json.put("double", 1.2); - json.put("float", 3.4f); - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestDouble.getDescriptor(), json, false); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testAllTypes() throws Exception { - for (Map.Entry entry : AllTypesToDebugMessageTest.entrySet()) { - int success = 0; - for (JSONObject json : simpleJSONObjects) { - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(entry.getKey(), json, false); - assertEquals(protoMsg, AllTypesToCorrectProto.get(entry.getKey())[success]); - success += 1; - } catch (IllegalArgumentException e) { - assertEquals( - e.getMessage(), - "JSONObject does not have a " + entry.getValue() + " field at root.test_field_type."); - } - } - if (entry.getKey() == Int64Type.getDescriptor()) { - assertEquals(2, success); - } else { - assertEquals(1, success); - } - } - } - - @Test - public void testAllRepeatedTypesWithLimits() throws Exception { - for (Map.Entry entry : AllRepeatedTypesToDebugMessageTest.entrySet()) { - int success = 0; - for (JSONObject json : simpleJSONArrays) { - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(entry.getKey(), json, false); - assertEquals(protoMsg, AllRepeatedTypesToCorrectProto.get(entry.getKey())[success]); - success += 1; - } catch (IllegalArgumentException e) { - assertEquals( - e.getMessage(), - "JSONObject does not have a " - + entry.getValue() - + " field at root.test_repeated[0]."); - } - } - if (entry.getKey() == RepeatedInt64.getDescriptor() - || entry.getKey() == RepeatedDouble.getDescriptor()) 
{ - assertEquals(2, success); - } else { - assertEquals(1, success); - } - } - } - - @Test - public void testOptional() throws Exception { - TestInt64 expectedProto = TestInt64.newBuilder().setByte(1).build(); - JSONObject json = new JSONObject(); - json.put("byte", 1); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestInt64.getDescriptor(), json, false); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testRepeatedIsOptional() throws Exception { - TestRepeatedIsOptional expectedProto = - TestRepeatedIsOptional.newBuilder().setRequiredDouble(1.1).build(); - JSONObject json = new JSONObject(); - json.put("required_double", 1.1); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage( - TestRepeatedIsOptional.getDescriptor(), json, false); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testRequired() throws Exception { - JSONObject json = new JSONObject(); - json.put("optional_double", 1.1); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestRequired.getDescriptor(), json, false); - } catch (IllegalArgumentException e) { - assertEquals( - e.getMessage(), "JSONObject does not have the required field root.required_double."); - } - } - - @Test - public void testStructSimple() throws Exception { - MessageType expectedProto = - MessageType.newBuilder() - .setTestFieldType(StringType.newBuilder().setTestFieldType("test").build()) - .build(); - JSONObject stringType = new JSONObject(); - stringType.put("test_field_type", "test"); - JSONObject json = new JSONObject(); - json.put("test_field_type", stringType); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(MessageType.getDescriptor(), json, false); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testStructSimpleFail() throws Exception { - JSONObject stringType = new JSONObject(); - stringType.put("test_field_type", 1); - JSONObject json = new JSONObject(); - json.put("test_field_type", stringType); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(MessageType.getDescriptor(), json, false); - } catch (IllegalArgumentException e) { - assertEquals( - e.getMessage(), - "JSONObject does not have a string field at root.test_field_type.test_field_type."); - } - } - - @Test - public void testStructComplex() throws Exception { - ComplexRoot expectedProto = - ComplexRoot.newBuilder() - .setTestInt(1) - .addTestString("a") - .addTestString("b") - .addTestString("c") - .setTestBytes(ByteString.copyFrom("hello".getBytes())) - .setTestBool(true) - .addTestDouble(1.1) - .addTestDouble(2.2) - .addTestDouble(3.3) - .addTestDouble(4.4) - .setTestDate(1) - .setComplexLvl1( - ComplexLvl1.newBuilder() - .setTestInt(2) - .setComplexLvl2(ComplexLvl2.newBuilder().setTestInt(3).build()) - .build()) - .setComplexLvl2(ComplexLvl2.newBuilder().setTestInt(3).build()) - .build(); - JSONObject complex_lvl2 = new JSONObject(); - complex_lvl2.put("test_int", 3); - - JSONObject complex_lvl1 = new JSONObject(); - complex_lvl1.put("test_int", 2); - complex_lvl1.put("complex_lvl2", complex_lvl2); - - JSONObject json = new JSONObject(); - json.put("test_int", 1); - json.put("test_string", new JSONArray(new String[] {"a", "b", "c"})); - json.put("test_bytes", "hello"); - json.put("test_bool", true); - json.put("test_DOUBLe", new JSONArray(new Double[] {1.1, 2.2, 3.3, 4.4})); - json.put("test_date", 1); - json.put("complex_lvl1", complex_lvl1); - json.put("complex_lvl2", 
complex_lvl2); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(ComplexRoot.getDescriptor(), json, false); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testStructComplexFail() throws Exception { - JSONObject complex_lvl2 = new JSONObject(); - complex_lvl2.put("test_int", 3); - - JSONObject complex_lvl1 = new JSONObject(); - complex_lvl1.put("test_int", "not_int"); - complex_lvl1.put("complex_lvl2", complex_lvl2); - - JSONObject json = new JSONObject(); - json.put("test_int", 1); - json.put("test_string", new JSONArray(new String[] {"a", "b", "c"})); - json.put("test_bytes", "hello"); - json.put("test_bool", true); - json.put("test_double", new JSONArray(new Double[] {1.1, 2.2, 3.3, 4.4})); - json.put("test_date", 1); - json.put("complex_lvl1", complex_lvl1); - json.put("complex_lvl2", complex_lvl2); - - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(ComplexRoot.getDescriptor(), json, false); - } catch (IllegalArgumentException e) { - assertEquals( - e.getMessage(), "JSONObject does not have a int64 field at root.complex_lvl1.test_int."); - } - } - - @Test - public void testRepeatedWithMixedTypes() throws Exception { - JSONObject json = new JSONObject(); - json.put("test_repeated", new JSONArray("[1.1, 2.2, true]")); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(RepeatedDouble.getDescriptor(), json, false); - } catch (IllegalArgumentException e) { - assertEquals( - e.getMessage(), "JSONObject does not have a double field at root.test_repeated[2]."); - } - } - - @Test - public void testNestedRepeatedComplex() throws Exception { - NestedRepeated expectedProto = - NestedRepeated.newBuilder() - .addDouble(1.1) - .addDouble(2.2) - .addDouble(3.3) - .addDouble(4.4) - .addDouble(5.5) - .addInt(1) - .addInt(2) - .addInt(3) - .addInt(4) - .addInt(5) - .setRepeatedString( - RepeatedString.newBuilder() - .addTestRepeated("hello") - .addTestRepeated("this") - .addTestRepeated("is") - .addTestRepeated("a") - .addTestRepeated("test") - .build()) - .build(); - double[] doubleArr = {1.1, 2.2, 3.3, 4.4, 5.5}; - String[] stringArr = {"hello", "this", "is", "a", "test"}; - int[] intArr = {1, 2, 3, 4, 5}; - - JSONObject json = new JSONObject(); - json.put("double", new JSONArray(doubleArr)); - json.put("int", new JSONArray(intArr)); - JSONObject jsonRepeatedString = new JSONObject(); - jsonRepeatedString.put("test_repeated", new JSONArray(stringArr)); - json.put("repeated_string", jsonRepeatedString); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(NestedRepeated.getDescriptor(), json, false); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testNestedRepeatedComplexFail() throws Exception { - double[] doubleArr = {1.1, 2.2, 3.3, 4.4, 5.5}; - Boolean[] fakeStringArr = {true, false}; - int[] intArr = {1, 2, 3, 4, 5}; - - JSONObject json = new JSONObject(); - json.put("double", new JSONArray(doubleArr)); - json.put("int", new JSONArray(intArr)); - JSONObject jsonRepeatedString = new JSONObject(); - jsonRepeatedString.put("test_repeated", new JSONArray(fakeStringArr)); - json.put("repeated_string", jsonRepeatedString); - - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(NestedRepeated.getDescriptor(), json, false); - } catch (IllegalArgumentException e) { - assertEquals( - e.getMessage(), - "JSONObject does not have a string field at root.repeated_string.test_repeated[0]."); - } - } - - @Test - public void 
testAllowUnknownFields() throws Exception { - RepeatedInt64 expectedProto = - RepeatedInt64.newBuilder() - .addTestRepeated(1) - .addTestRepeated(2) - .addTestRepeated(3) - .addTestRepeated(4) - .addTestRepeated(5) - .build(); - JSONObject json = new JSONObject(); - json.put("test_repeated", new JSONArray(new int[] {1, 2, 3, 4, 5})); - json.put("string", "hello"); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(RepeatedInt64.getDescriptor(), json, true); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testEmptySecondLevelObject() throws Exception { - ComplexLvl1 expectedProto = - ComplexLvl1.newBuilder() - .setTestInt(1) - .setComplexLvl2(ComplexLvl2.newBuilder().build()) - .build(); - JSONObject complexLvl2 = new JSONObject(); - JSONObject json = new JSONObject(); - json.put("test_int", 1); - json.put("complex_lvl2", complexLvl2); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(ComplexLvl1.getDescriptor(), json, true); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testAllowUnknownFieldsError() throws Exception { - JSONObject json = new JSONObject(); - json.put("test_repeated", new JSONArray(new int[] {1, 2, 3, 4, 5})); - json.put("string", "hello"); - - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(RepeatedInt64.getDescriptor(), json, false); - } catch (IllegalArgumentException e) { - assertEquals( - e.getMessage(), - "JSONObject has fields unknown to BigQuery: root.string. Set allowUnknownFields to True to allow unknown fields."); - } - } - - @Test - public void testEmptyProtoMessage() throws Exception { - JSONObject json = new JSONObject(); - json.put("test_repeated", new JSONArray(new int[0])); - json.put("string", "hello"); - - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(RepeatedInt64.getDescriptor(), json, true); - } catch (IllegalArgumentException e) { - assertEquals(e.getMessage(), "The created protobuf message is empty."); - } - } - - @Test - public void testEmptyJSONObject() throws Exception { - JSONObject json = new JSONObject(); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(Int64Type.getDescriptor(), json, false); - } catch (IllegalStateException e) { - assertEquals(e.getMessage(), "JSONObject is empty."); - } - } - - @Test - public void testNullJson() throws Exception { - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(Int64Type.getDescriptor(), null, false); - } catch (NullPointerException e) { - assertEquals(e.getMessage(), "JSONObject is null."); - } - } - - @Test - public void testNullDescriptor() throws Exception { - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(null, new JSONObject(), false); - } catch (NullPointerException e) { - assertEquals(e.getMessage(), "Protobuf descriptor is null."); - } - } - - @Test - public void testAllowUnknownFieldsSecondLevel() throws Exception { - JSONObject complex_lvl2 = new JSONObject(); - complex_lvl2.put("no_match", 1); - JSONObject json = new JSONObject(); - json.put("test_int", 1); - json.put("complex_lvl2", complex_lvl2); - - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(ComplexLvl1.getDescriptor(), json, false); - } catch (IllegalArgumentException e) { - assertEquals( - e.getMessage(), - "JSONObject has fields unknown to BigQuery: root.complex_lvl2.no_match. 
Set allowUnknownFields to True to allow unknown fields."); - } - } - - @Test - public void testTopLevelMismatch() throws Exception { - JSONObject json = new JSONObject(); - json.put("no_match", 1.1); - - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage( - TopLevelMismatch.getDescriptor(), json, true); - } catch (IllegalArgumentException e) { - assertEquals( - e.getMessage(), - "There are no matching fields found for the JSONObject and the protocol buffer descriptor."); - } - } - - @Test - public void testTopLevelMatchSecondLevelMismatch() throws Exception { - ComplexLvl1 expectedProto = - ComplexLvl1.newBuilder() - .setTestInt(1) - .setComplexLvl2(ComplexLvl2.newBuilder().build()) - .build(); - JSONObject complex_lvl2 = new JSONObject(); - complex_lvl2.put("no_match", 1); - JSONObject json = new JSONObject(); - json.put("test_int", 1); - json.put("complex_lvl2", complex_lvl2); - - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(ComplexLvl1.getDescriptor(), json, true); - assertEquals(protoMsg, expectedProto); - } - - @Test - public void testJsonNullValue() throws Exception { - JSONObject json = new JSONObject(); - json.put("long", JSONObject.NULL); - json.put("int", 1); - try { - DynamicMessage protoMsg = - JsonToProtoMessage.convertJsonToProtoMessage(TestInt64.getDescriptor(), json, false); - } catch (IllegalArgumentException e) { - assertEquals(e.getMessage(), "JSONObject does not have a int64 field at root.long."); - } - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/JsonWriterCacheTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/JsonWriterCacheTest.java deleted file mode 100644 index c95229e59f..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/JsonWriterCacheTest.java +++ /dev/null @@ -1,202 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Mockito.*; - -import com.google.api.gax.core.NoCredentialsProvider; -import com.google.api.gax.grpc.testing.LocalChannelProvider; -import com.google.api.gax.grpc.testing.MockGrpcService; -import com.google.api.gax.grpc.testing.MockServiceHelper; -import com.google.cloud.bigquery.storage.test.Test.*; -import com.google.protobuf.AbstractMessage; -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.logging.Logger; -import org.junit.*; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(JUnit4.class) -public class JsonWriterCacheTest { - private static final Logger LOG = Logger.getLogger(JsonWriterCacheTest.class.getName()); - - private static final String TEST_TABLE = "projects/p/datasets/d/tables/t"; - private static final String TEST_STREAM = "projects/p/datasets/d/tables/t/streams/s"; - private static final String TEST_STREAM_2 = "projects/p/datasets/d/tables/t/streams/s2"; - private static final String TEST_STREAM_3 = "projects/p/datasets/d/tables/t/streams/s3"; - private static final String TEST_STREAM_4 = "projects/p/datasets/d/tables/t/streams/s4"; - private static final String TEST_TABLE_2 = "projects/p/datasets/d/tables/t2"; - private static final String TEST_STREAM_21 = "projects/p/datasets/d/tables/t2/streams/s1"; - private static final String TEST_TABLE_3 = "projects/p/datasets/d/tables/t3"; - private static final String TEST_STREAM_31 = "projects/p/datasets/d/tables/t3/streams/s1"; - - private static MockBigQueryWrite mockBigQueryWrite; - private static MockServiceHelper serviceHelper; - @Mock private static SchemaCompatibility mockSchemaCheck; - private BigQueryWriteClient client; - private LocalChannelProvider channelProvider; - - private final Table.TableFieldSchema FOO = - Table.TableFieldSchema.newBuilder() - .setType(Table.TableFieldSchema.Type.STRING) - .setMode(Table.TableFieldSchema.Mode.NULLABLE) - .setName("foo") - .build(); - private final Table.TableSchema TABLE_SCHEMA = - Table.TableSchema.newBuilder().addFields(0, FOO).build(); - - @BeforeClass - public static void startStaticServer() { - mockBigQueryWrite = new MockBigQueryWrite(); - serviceHelper = - new MockServiceHelper( - UUID.randomUUID().toString(), Arrays.asList(mockBigQueryWrite)); - serviceHelper.start(); - } - - @AfterClass - public static void stopServer() { - serviceHelper.stop(); - } - - @Before - public void setUp() throws IOException { - serviceHelper.reset(); - channelProvider = serviceHelper.createChannelProvider(); - BigQueryWriteSettings settings = - BigQueryWriteSettings.newBuilder() - .setTransportChannelProvider(channelProvider) - .setCredentialsProvider(NoCredentialsProvider.create()) - .build(); - client = BigQueryWriteClient.create(settings); - MockitoAnnotations.initMocks(this); - } - - /** Response mocks for create a new writer */ - void WriterCreationResponseMock(String testStreamName) { - // Response from CreateWriteStream - Stream.WriteStream expectedResponse = - Stream.WriteStream.newBuilder() - .setName(testStreamName) - .setTableSchema(TABLE_SCHEMA) - .build(); - 
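The WriteStream response queued by this helper is what a later cache miss consumes: each JsonWriterCache.getTableWriter(...) call that misses the cache issues exactly one CreateWriteStream RPC against the mock. The typical pairing in the tests below (a usage sketch; cache comes from the enclosing test):

    WriterCreationResponseMock(TEST_STREAM);                     // queue the reply first
    JsonStreamWriter writer = cache.getTableWriter(TEST_TABLE);  // miss -> CreateWriteStream
    assertEquals(TEST_STREAM, writer.getStreamName());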
mockBigQueryWrite.addResponse(expectedResponse); - } - - @After - public void tearDown() throws Exception { - client.close(); - } - - @Test - public void testRejectBadTableName() throws Exception { - JsonWriterCache cache = JsonWriterCache.getTestInstance(client, 10); - try { - cache.getTableWriter("abc"); - fail(); - } catch (IllegalArgumentException expected) { - assertEquals(expected.getMessage(), "Invalid table name: abc"); - } - } - - @Test - public void testCreateNewWriter() throws Exception { - JsonWriterCache cache = JsonWriterCache.getTestInstance(client, 10); - WriterCreationResponseMock(TEST_STREAM); - JsonStreamWriter writer = cache.getTableWriter(TEST_TABLE); - List<AbstractMessage> actualRequests = mockBigQueryWrite.getRequests(); - assertEquals(1, actualRequests.size()); - assertEquals( - TEST_TABLE, ((Storage.CreateWriteStreamRequest) actualRequests.get(0)).getParent()); - - assertEquals(TEST_STREAM, writer.getStreamName()); - assertEquals(1, cache.cachedTableCount()); - cache.clear(); - } - - @Test - public void testWriterWithDifferentTable() throws Exception { - JsonWriterCache cache = JsonWriterCache.getTestInstance(client, 2); - WriterCreationResponseMock(TEST_STREAM); - WriterCreationResponseMock(TEST_STREAM_21); - JsonStreamWriter writer1 = cache.getTableWriter(TEST_TABLE); - JsonStreamWriter writer2 = cache.getTableWriter(TEST_TABLE_2); - - List<AbstractMessage> actualRequests = mockBigQueryWrite.getRequests(); - assertEquals(2, actualRequests.size()); - assertEquals( - TEST_TABLE, ((Storage.CreateWriteStreamRequest) actualRequests.get(0)).getParent()); - assertEquals( - TEST_TABLE_2, ((Storage.CreateWriteStreamRequest) actualRequests.get(1)).getParent()); - assertEquals(TEST_STREAM, writer1.getStreamName()); - assertEquals(TEST_STREAM_21, writer2.getStreamName()); - assertEquals(2, cache.cachedTableCount()); - - // Still able to get the cached writer for the second table. - JsonStreamWriter writer3 = cache.getTableWriter(TEST_TABLE_2); - Assert.assertEquals(TEST_STREAM_21, writer3.getStreamName()); - - // Create a writer for a third table. - WriterCreationResponseMock(TEST_STREAM_31); - WriterCreationResponseMock(TEST_STREAM); - JsonStreamWriter writer4 = cache.getTableWriter(TEST_TABLE_3); - // This would cause a new stream to be created since the old entry is evicted. - JsonStreamWriter writer5 = cache.getTableWriter(TEST_TABLE); - assertEquals(TEST_STREAM_31, writer4.getStreamName()); - assertEquals(TEST_STREAM, writer5.getStreamName()); - assertEquals(2, cache.cachedTableCount()); - cache.clear(); - } - - @Test - public void testConcurrentAccess() throws Exception { - final JsonWriterCache cache = JsonWriterCache.getTestInstance(client, 2); - // Make sure getting the same table writer in multiple threads only causes create to be called - // once.
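The eviction behavior that testWriterWithDifferentTable above depends on can be summarized as follows (capacity 2; the comments mark which calls reach the mock):

    JsonWriterCache cache = JsonWriterCache.getTestInstance(client, 2);
    cache.getTableWriter(TEST_TABLE);    // miss -> CreateWriteStream #1
    cache.getTableWriter(TEST_TABLE_2);  // miss -> CreateWriteStream #2
    cache.getTableWriter(TEST_TABLE_2);  // hit, served from the cache
    cache.getTableWriter(TEST_TABLE_3);  // miss -> evicts the TEST_TABLE entry
    cache.getTableWriter(TEST_TABLE);    // miss again -> a new stream is created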
- WriterCreationResponseMock(TEST_STREAM); - ExecutorService executor = Executors.newFixedThreadPool(10); - for (int i = 0; i < 10; i++) { - executor.execute( - new Runnable() { - @Override - public void run() { - try { - assertTrue(cache.getTableWriter(TEST_TABLE) != null); - } catch (Exception e) { - fail(e.getMessage()); - } - } - }); - } - executor.shutdown(); - try { - executor.awaitTermination(1, TimeUnit.MINUTES); - } catch (InterruptedException e) { - LOG.info(e.toString()); - } - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWrite.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWrite.java deleted file mode 100644 index f3c55e375c..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWrite.java +++ /dev/null @@ -1,59 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.core.BetaApi; -import com.google.api.gax.grpc.testing.MockGrpcService; -import com.google.protobuf.AbstractMessage; -import io.grpc.ServerServiceDefinition; -import java.util.List; -import javax.annotation.Generated; - -@BetaApi -@Generated("by gapic-generator-java") -public class MockBigQueryWrite implements MockGrpcService { - private final MockBigQueryWriteImpl serviceImpl; - - public MockBigQueryWrite() { - serviceImpl = new MockBigQueryWriteImpl(); - } - - @Override - public List getRequests() { - return serviceImpl.getRequests(); - } - - @Override - public void addResponse(AbstractMessage response) { - serviceImpl.addResponse(response); - } - - @Override - public void addException(Exception exception) { - serviceImpl.addException(exception); - } - - @Override - public ServerServiceDefinition getServiceDefinition() { - return serviceImpl.bindService(); - } - - @Override - public void reset() { - serviceImpl.reset(); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java deleted file mode 100644 index 4db1302718..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java +++ /dev/null @@ -1,206 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
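MockBigQueryWrite above is only the thin MockGrpcService adapter; the request log and FIFO response queue live in MockBigQueryWriteImpl, whose removal follows. The two are wired into a test through MockServiceHelper, mirroring the setUp methods earlier in this patch:

    MockBigQueryWrite mock = new MockBigQueryWrite();
    MockServiceHelper helper =
        new MockServiceHelper(
            UUID.randomUUID().toString(), Arrays.<MockGrpcService>asList(mock));
    helper.start();
    // Clients under test are pointed at this in-process channel.
    LocalChannelProvider channelProvider = helper.createChannelProvider();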
- * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.core.BetaApi; -import com.google.cloud.bigquery.storage.v1alpha2.BigQueryWriteGrpc.BigQueryWriteImplBase; -import com.google.protobuf.AbstractMessage; -import io.grpc.stub.StreamObserver; -import java.util.ArrayList; -import java.util.LinkedList; -import java.util.List; -import java.util.Queue; -import javax.annotation.Generated; - -@BetaApi -@Generated("by gapic-generator-java") -public class MockBigQueryWriteImpl extends BigQueryWriteImplBase { - private List requests; - private Queue responses; - - public MockBigQueryWriteImpl() { - requests = new ArrayList<>(); - responses = new LinkedList<>(); - } - - public List getRequests() { - return requests; - } - - public void addResponse(AbstractMessage response) { - responses.add(response); - } - - public void setResponses(List responses) { - this.responses = new LinkedList(responses); - } - - public void addException(Exception exception) { - responses.add(exception); - } - - public void reset() { - requests = new ArrayList<>(); - responses = new LinkedList<>(); - } - - @Override - public void createWriteStream( - Storage.CreateWriteStreamRequest request, - StreamObserver responseObserver) { - Object response = responses.poll(); - if (response instanceof Stream.WriteStream) { - requests.add(request); - responseObserver.onNext(((Stream.WriteStream) response)); - responseObserver.onCompleted(); - } else if (response instanceof Exception) { - responseObserver.onError(((Exception) response)); - } else { - responseObserver.onError( - new IllegalArgumentException( - String.format( - "Unrecognized response type %s for method CreateWriteStream, expected %s or %s", - response == null ? "null" : response.getClass().getName(), - Stream.WriteStream.class.getName(), - Exception.class.getName()))); - } - } - - @Override - public StreamObserver appendRows( - final StreamObserver responseObserver) { - StreamObserver requestObserver = - new StreamObserver() { - @Override - public void onNext(Storage.AppendRowsRequest value) { - requests.add(value); - final Object response = responses.remove(); - if (response instanceof Storage.AppendRowsResponse) { - responseObserver.onNext(((Storage.AppendRowsResponse) response)); - } else if (response instanceof Exception) { - responseObserver.onError(((Exception) response)); - } else { - responseObserver.onError( - new IllegalArgumentException( - String.format( - "Unrecognized response type %s for method AppendRows, expected %s or %s", - response == null ? 
"null" : response.getClass().getName(), - Storage.AppendRowsResponse.class.getName(), - Exception.class.getName()))); - } - } - - @Override - public void onError(Throwable t) { - responseObserver.onError(t); - } - - @Override - public void onCompleted() { - responseObserver.onCompleted(); - } - }; - return requestObserver; - } - - @Override - public void getWriteStream( - Storage.GetWriteStreamRequest request, StreamObserver responseObserver) { - Object response = responses.poll(); - if (response instanceof Stream.WriteStream) { - requests.add(request); - responseObserver.onNext(((Stream.WriteStream) response)); - responseObserver.onCompleted(); - } else if (response instanceof Exception) { - responseObserver.onError(((Exception) response)); - } else { - responseObserver.onError( - new IllegalArgumentException( - String.format( - "Unrecognized response type %s for method GetWriteStream, expected %s or %s", - response == null ? "null" : response.getClass().getName(), - Stream.WriteStream.class.getName(), - Exception.class.getName()))); - } - } - - @Override - public void finalizeWriteStream( - Storage.FinalizeWriteStreamRequest request, - StreamObserver responseObserver) { - Object response = responses.poll(); - if (response instanceof Storage.FinalizeWriteStreamResponse) { - requests.add(request); - responseObserver.onNext(((Storage.FinalizeWriteStreamResponse) response)); - responseObserver.onCompleted(); - } else if (response instanceof Exception) { - responseObserver.onError(((Exception) response)); - } else { - responseObserver.onError( - new IllegalArgumentException( - String.format( - "Unrecognized response type %s for method FinalizeWriteStream, expected %s or %s", - response == null ? "null" : response.getClass().getName(), - Storage.FinalizeWriteStreamResponse.class.getName(), - Exception.class.getName()))); - } - } - - @Override - public void batchCommitWriteStreams( - Storage.BatchCommitWriteStreamsRequest request, - StreamObserver responseObserver) { - Object response = responses.poll(); - if (response instanceof Storage.BatchCommitWriteStreamsResponse) { - requests.add(request); - responseObserver.onNext(((Storage.BatchCommitWriteStreamsResponse) response)); - responseObserver.onCompleted(); - } else if (response instanceof Exception) { - responseObserver.onError(((Exception) response)); - } else { - responseObserver.onError( - new IllegalArgumentException( - String.format( - "Unrecognized response type %s for method BatchCommitWriteStreams, expected %s or %s", - response == null ? "null" : response.getClass().getName(), - Storage.BatchCommitWriteStreamsResponse.class.getName(), - Exception.class.getName()))); - } - } - - @Override - public void flushRows( - Storage.FlushRowsRequest request, - StreamObserver responseObserver) { - Object response = responses.poll(); - if (response instanceof Storage.FlushRowsResponse) { - requests.add(request); - responseObserver.onNext(((Storage.FlushRowsResponse) response)); - responseObserver.onCompleted(); - } else if (response instanceof Exception) { - responseObserver.onError(((Exception) response)); - } else { - responseObserver.onError( - new IllegalArgumentException( - String.format( - "Unrecognized response type %s for method FlushRows, expected %s or %s", - response == null ? 
"null" : response.getClass().getName(), - Storage.FlushRowsResponse.class.getName(), - Exception.class.getName()))); - } - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoSchemaConverterTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoSchemaConverterTest.java deleted file mode 100644 index 390ed67286..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoSchemaConverterTest.java +++ /dev/null @@ -1,197 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.gax.rpc.InvalidArgumentException; -import com.google.cloud.bigquery.storage.test.Test.*; -import com.google.protobuf.DescriptorProtos.FileDescriptorProto; -import com.google.protobuf.Descriptors; -import org.junit.*; - -public class ProtoSchemaConverterTest { - @Test - public void convertSimple() { - AllSupportedTypes testProto = AllSupportedTypes.newBuilder().setStringValue("abc").build(); - ProtoBufProto.ProtoSchema protoSchema = - ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - Assert.assertEquals( - "name: \"com_google_cloud_bigquery_storage_test_AllSupportedTypes\"\n" - + "field {\n" - + " name: \"int32_value\"\n" - + " number: 1\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_INT32\n" - + "}\n" - + "field {\n" - + " name: \"int64_value\"\n" - + " number: 2\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_INT64\n" - + "}\n" - + "field {\n" - + " name: \"uint32_value\"\n" - + " number: 3\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_UINT32\n" - + "}\n" - + "field {\n" - + " name: \"uint64_value\"\n" - + " number: 4\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_UINT64\n" - + "}\n" - + "field {\n" - + " name: \"float_value\"\n" - + " number: 5\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_FLOAT\n" - + "}\n" - + "field {\n" - + " name: \"double_value\"\n" - + " number: 6\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_DOUBLE\n" - + "}\n" - + "field {\n" - + " name: \"bool_value\"\n" - + " number: 7\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_BOOL\n" - + "}\n" - + "field {\n" - + " name: \"enum_value\"\n" - + " number: 8\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_ENUM\n" - + " type_name: \"com_google_cloud_bigquery_storage_test_TestEnum_E.TestEnum\"\n" - + "}\n" - + "field {\n" - + " name: \"string_value\"\n" - + " number: 9\n" - + " label: LABEL_REQUIRED\n" - + " type: TYPE_STRING\n" - + "}\n" - + "nested_type {\n" - + " name: \"com_google_cloud_bigquery_storage_test_TestEnum_E\"\n" - + " enum_type {\n" - + " name: \"TestEnum\"\n" - + " value {\n" - + " name: \"TestEnum0\"\n" - + " number: 0\n" - + " }\n" - + " value {\n" - + " name: \"TestEnum1\"\n" - + " number: 1\n" - + " }\n" - + " }\n" - + "}\n", - protoSchema.getProtoDescriptor().toString()); - } - - @Test - public void convertNested() 
{ - ComplicateType testProto = ComplicateType.newBuilder().build(); - ProtoBufProto.ProtoSchema protoSchema = - ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - Assert.assertEquals( - "name: \"com_google_cloud_bigquery_storage_test_ComplicateType\"\n" - + "field {\n" - + " name: \"nested_repeated_type\"\n" - + " number: 1\n" - + " label: LABEL_REPEATED\n" - + " type: TYPE_MESSAGE\n" - + " type_name: \"com_google_cloud_bigquery_storage_test_NestedType\"\n" - + "}\n" - + "field {\n" - + " name: \"inner_type\"\n" - + " number: 2\n" - + " label: LABEL_OPTIONAL\n" - + " type: TYPE_MESSAGE\n" - + " type_name: \"com_google_cloud_bigquery_storage_test_InnerType\"\n" - + "}\n" - + "nested_type {\n" - + " name: \"com_google_cloud_bigquery_storage_test_InnerType\"\n" - + " field {\n" - + " name: \"value\"\n" - + " number: 1\n" - + " label: LABEL_REPEATED\n" - + " type: TYPE_STRING\n" - + " }\n" - + "}\n" - + "nested_type {\n" - + " name: \"com_google_cloud_bigquery_storage_test_NestedType\"\n" - + " field {\n" - + " name: \"inner_type\"\n" - + " number: 1\n" - + " label: LABEL_REPEATED\n" - + " type: TYPE_MESSAGE\n" - + " type_name: \"com_google_cloud_bigquery_storage_test_InnerType\"\n" - + " }\n" - + "}\n", - protoSchema.getProtoDescriptor().toString()); - } - - @Test - public void convertRecursive() { - try { - RecursiveType testProto = RecursiveType.newBuilder().build(); - ProtoBufProto.ProtoSchema protoSchema = - ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - Assert.assertEquals( - "Recursive type is not supported:com.google.cloud.bigquery.storage.test.RecursiveType", - e.getMessage()); - } - } - - @Test - public void convertRecursiveTopMessage() { - try { - RecursiveTypeTopMessage testProto = RecursiveTypeTopMessage.newBuilder().build(); - ProtoBufProto.ProtoSchema protoSchema = - ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - Assert.fail("No exception raised"); - } catch (InvalidArgumentException e) { - Assert.assertEquals( - "Recursive type is not supported:com.google.cloud.bigquery.storage.test.RecursiveTypeTopMessage", - e.getMessage()); - } - } - - @Test - public void convertDuplicateType() { - DuplicateType testProto = DuplicateType.newBuilder().build(); - ProtoBufProto.ProtoSchema protoSchema = - ProtoSchemaConverter.convert(testProto.getDescriptorForType()); - - FileDescriptorProto fileDescriptorProto = - FileDescriptorProto.newBuilder() - .setName("foo.proto") - .addMessageType(protoSchema.getProtoDescriptor()) - .build(); - try { - Descriptors.FileDescriptor fs = - Descriptors.FileDescriptor.buildFrom( - fileDescriptorProto, new Descriptors.FileDescriptor[0]); - Descriptors.Descriptor type = - fs.findMessageTypeByName(protoSchema.getProtoDescriptor().getName()); - Assert.assertEquals(4, type.getFields().size()); - } catch (Descriptors.DescriptorValidationException ex) { - Assert.fail("Got unexpected exception: " + ex.getMessage()); - } - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/SchemaCompatibilityTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/SchemaCompatibilityTest.java deleted file mode 100644 index 7ec70e51db..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/SchemaCompatibilityTest.java +++ /dev/null @@ -1,1015 +0,0 @@ -/* - * Copyright 2016 Google LLC - * - * Licensed under the Apache 
License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import static org.junit.Assert.*; -import static org.mockito.Mockito.*; - -import com.google.cloud.bigquery.*; -import com.google.cloud.bigquery.Field; -import com.google.cloud.bigquery.LegacySQLTypeName; -import com.google.cloud.bigquery.Schema; -import com.google.cloud.bigquery.Table; -import com.google.cloud.bigquery.storage.test.SchemaTest.*; -import com.google.cloud.bigquery.storage.test.Test.FooType; -import com.google.protobuf.Descriptors; -import java.io.IOException; -import java.util.Arrays; -import java.util.HashSet; -import javax.annotation.Nullable; -import org.junit.After; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(JUnit4.class) -public class SchemaCompatibilityTest { - @Mock private BigQuery mockBigquery; - @Mock private Table mockBigqueryTable; - Descriptors.Descriptor[] type_descriptors = { - Int32Type.getDescriptor(), - Int64Type.getDescriptor(), - UInt32Type.getDescriptor(), - UInt64Type.getDescriptor(), - Fixed32Type.getDescriptor(), - Fixed64Type.getDescriptor(), - SFixed32Type.getDescriptor(), - SFixed64Type.getDescriptor(), - FloatType.getDescriptor(), - DoubleType.getDescriptor(), - BoolType.getDescriptor(), - BytesType.getDescriptor(), - StringType.getDescriptor(), - EnumType.getDescriptor(), - MessageType.getDescriptor(), - GroupType.getDescriptor() - }; - - @Before - public void setUp() throws IOException { - MockitoAnnotations.initMocks(this); - when(mockBigquery.getTable(any(TableId.class))).thenReturn(mockBigqueryTable); - } - - @After - public void tearDown() { - verifyNoMoreInteractions(mockBigquery); - verifyNoMoreInteractions(mockBigqueryTable); - } - - public void customizeSchema(final Schema schema) { - TableDefinition definition = - new TableDefinition() { - @Override - public Type getType() { - return null; - } - - @Nullable - @Override - public Schema getSchema() { - return schema; - } - - @Override - public Builder toBuilder() { - return null; - } - }; - when(mockBigqueryTable.getDefinition()).thenReturn(definition); - } - - @Test - public void testSuccess() throws Exception { - customizeSchema( - Schema.of( - Field.newBuilder("Foo", LegacySQLTypeName.STRING) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - compact.check("projects/p/datasets/d/tables/t", FooType.getDescriptor(), false); - verify(mockBigquery, times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } - - @Test - public void testBadTableName() throws Exception { - try { - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - compact.check("blah", FooType.getDescriptor(), false); - fail("should fail"); - } catch (IllegalArgumentException expected) { - assertEquals("Invalid table name: blah", expected.getMessage()); 
- } - } - - @Test - public void testSupportedTypes() { - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - for (Descriptors.FieldDescriptor field : SupportedTypes.getDescriptor().getFields()) { - assertTrue(compact.isSupportedType(field)); - } - - for (Descriptors.FieldDescriptor field : NonSupportedTypes.getDescriptor().getFields()) { - assertFalse(compact.isSupportedType(field)); - } - } - - @Test - public void testMap() { - customizeSchema( - Schema.of( - Field.newBuilder("map_value", LegacySQLTypeName.INTEGER) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - Descriptors.Descriptor testMap = NonSupportedMap.getDescriptor(); - String protoName = testMap.getName() + ".map_value"; - try { - compact.check("projects/p/datasets/d/tables/t", testMap, false); - fail("Should not be supported: field contains map"); - } catch (IllegalArgumentException expected) { - assertEquals( - "Proto schema " + protoName + " is not supported: is a map field.", - expected.getMessage()); - } - verify(mockBigquery, times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } - - @Test - public void testNestingSupportedSimple() { - Field BQSupportedNestingLvl2 = - Field.newBuilder("int_value", LegacySQLTypeName.INTEGER) - .setMode(Field.Mode.NULLABLE) - .build(); - customizeSchema( - Schema.of( - Field.newBuilder("int_value", LegacySQLTypeName.INTEGER) - .setMode(Field.Mode.NULLABLE) - .build(), - Field.newBuilder("nesting_value", LegacySQLTypeName.RECORD, BQSupportedNestingLvl2) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - Descriptors.Descriptor testNesting = SupportedNestingLvl1.getDescriptor(); - try { - compact.check("projects/p/datasets/d/tables/t", testNesting, false); - } catch (Exception e) { - fail(e.getMessage()); - } - verify(mockBigquery, times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } - - @Test - public void testNestingSupportedStacked() { - Field BQSupportedNestingLvl2 = - Field.newBuilder("int_value", LegacySQLTypeName.INTEGER) - .setMode(Field.Mode.NULLABLE) - .build(); - customizeSchema( - Schema.of( - Field.newBuilder("int_value", LegacySQLTypeName.INTEGER) - .setMode(Field.Mode.NULLABLE) - .build(), - Field.newBuilder("nesting_value1", LegacySQLTypeName.RECORD, BQSupportedNestingLvl2) - .setMode(Field.Mode.NULLABLE) - .build(), - Field.newBuilder("nesting_value2", LegacySQLTypeName.RECORD, BQSupportedNestingLvl2) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - Descriptors.Descriptor testNesting = SupportedNestingStacked.getDescriptor(); - try { - compact.check("projects/p/datasets/d/tables/t", testNesting, false); - } catch (Exception e) { - fail(e.getMessage()); - } - verify(mockBigquery, times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } - - /* - * This is not the "exact" test, as BigQuery fields cannot be recursive. Instead, this test uses - * two DIFFERENT records with the same name to simulate recursive protos (protos can't have the - * same name anyways unless they are the same proto). 
- */ - @Test - public void testNestingContainsRecursive() { - Field BQNonSupportedNestingRecursive = - Field.newBuilder( - "nesting_value", - LegacySQLTypeName.RECORD, - Field.newBuilder("int_value", LegacySQLTypeName.INTEGER) - .setMode(Field.Mode.NULLABLE) - .build()) - .setMode(Field.Mode.NULLABLE) - .build(); - - customizeSchema( - Schema.of( - Field.newBuilder("int_value", LegacySQLTypeName.INTEGER) - .setMode(Field.Mode.NULLABLE) - .build(), - Field.newBuilder( - "nesting_value", LegacySQLTypeName.RECORD, BQNonSupportedNestingRecursive) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - Descriptors.Descriptor testNesting = NonSupportedNestingContainsRecursive.getDescriptor(); - try { - compact.check("projects/p/datasets/d/tables/t", testNesting, false); - fail("Should not be supported: contains a recursively nested message."); - } catch (IllegalArgumentException expected) { - assertEquals( - "Proto schema " - + testNesting.getName() - + ".nesting_value.nesting_value is not supported: is a recursively nested message.", - expected.getMessage()); - } - verify(mockBigquery, times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } - - @Test - public void testNestingRecursiveLimit() { - Field NonSupportedNestingLvl16 = - Field.newBuilder("test1", LegacySQLTypeName.INTEGER).setMode(Field.Mode.NULLABLE).build(); - Field NonSupportedNestingLvl15 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl16) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl14 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl15) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl13 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl14) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl12 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl13) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl11 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl12) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl10 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl11) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl9 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl10) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl8 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl9) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl7 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl8) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl6 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl7) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl5 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl6) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl4 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl5) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl3 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl4) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl2 = -
Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl3) - .setMode(Field.Mode.NULLABLE) - .build(); - Field NonSupportedNestingLvl1 = - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl2) - .setMode(Field.Mode.NULLABLE) - .build(); - customizeSchema( - Schema.of( - Field.newBuilder("test1", LegacySQLTypeName.RECORD, NonSupportedNestingLvl1) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - Descriptors.Descriptor testNesting = NonSupportedNestingLvl0.getDescriptor(); - try { - compact.check("projects/p/datasets/d/tables/t", testNesting, false); - fail("Should not be supported: contains nested messages of more than 15 levels."); - } catch (IllegalArgumentException expected) { - assertEquals( - "Proto schema " - + testNesting.getName() - + " is not supported: contains nested messages of more than 15 levels.", - expected.getMessage()); - } - verify(mockBigquery, times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } - - @Test - public void testProtoMoreFields() { - Schema customSchema = Schema.of(Field.of("int32_value", LegacySQLTypeName.INTEGER)); - customizeSchema(customSchema); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - - try { - compact.check("projects/p/datasets/d/tables/t", SupportedTypes.getDescriptor(), false); - fail("Should fail: proto has more fields and allowUnknownFields flag is false."); - } catch (IllegalArgumentException expected) { - assertEquals( - "Proto schema " - + SupportedTypes.getDescriptor().getName() - + " has " - + SupportedTypes.getDescriptor().getFields().size() - + " fields, while BQ schema t has " - + 1 - + " fields.", - expected.getMessage()); - } - verify(mockBigquery, times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } - - @Test - public void testBQRepeated() { - customizeSchema( - Schema.of( - Field.newBuilder("repeated_mode", LegacySQLTypeName.INTEGER) - .setMode(Field.Mode.REPEATED) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - compact.check("projects/p/datasets/d/tables/t", ProtoRepeatedBQRepeated.getDescriptor(), false); - try { - compact.check( - "projects/p/datasets/d/tables/t", ProtoOptionalBQRepeated.getDescriptor(), false); - fail("Should fail: BQ schema is repeated, but proto is optional."); - } catch (IllegalArgumentException expected) { - assertEquals( - "Given proto field " - + ProtoOptionalBQRepeated.getDescriptor().getName() - + ".repeated_mode" - + " is not repeated but Big Query field t.repeated_mode is.", - expected.getMessage()); - } - - try { - compact.check( - "projects/p/datasets/d/tables/t", ProtoRequiredBQRepeated.getDescriptor(), false); - fail("Should fail: BQ schema is repeated, but proto is required."); - } catch (IllegalArgumentException expected) { - assertEquals( - "Given proto field " - + ProtoRequiredBQRepeated.getDescriptor().getName() - + ".repeated_mode" - + " is not repeated but Big Query field t.repeated_mode is.", - expected.getMessage()); - } - verify(mockBigquery, times(3)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(3)).getDefinition(); - } - - @Test - public void testBQRequired() { - customizeSchema( - Schema.of( - Field.newBuilder("required_mode", LegacySQLTypeName.INTEGER) - .setMode(Field.Mode.REQUIRED) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - 
compact.check("projects/p/datasets/d/tables/t", ProtoRequiredBQRequired.getDescriptor(), false); - - try { - compact.check("projects/p/datasets/d/tables/t", ProtoNoneBQRequired.getDescriptor(), false); - fail("Should fail: BQ schema is required, but proto does not have this field."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The required Big Query field t.required_mode is missing in the proto schema " - + ProtoNoneBQRequired.getDescriptor().getName() - + ".", - expected.getMessage()); - } - - try { - compact.check( - "projects/p/datasets/d/tables/t", ProtoOptionalBQRequired.getDescriptor(), false); - fail("Should fail: BQ schema is required, but proto is optional."); - } catch (IllegalArgumentException expected) { - assertEquals( - "Given proto field " - + ProtoOptionalBQRequired.getDescriptor().getName() - + ".required_mode is not required but Big Query field t.required_mode is.", - expected.getMessage()); - } - - try { - compact.check( - "projects/p/datasets/d/tables/t", ProtoRepeatedBQRequired.getDescriptor(), false); - fail("Should fail: BQ schema is required, but proto is repeated."); - } catch (IllegalArgumentException expected) { - assertEquals( - "Given proto field " - + ProtoRepeatedBQRequired.getDescriptor().getName() - + ".required_mode is not required but Big Query field t.required_mode is.", - expected.getMessage()); - } - verify(mockBigquery, times(4)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(4)).getDefinition(); - } - - @Test - public void testBQOptional() { - customizeSchema( - Schema.of( - Field.newBuilder("optional_mode", LegacySQLTypeName.INTEGER) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - compact.check("projects/p/datasets/d/tables/t", ProtoOptionalBQOptional.getDescriptor(), false); - compact.check("projects/p/datasets/d/tables/t", ProtoRequiredBQOptional.getDescriptor(), false); - - try { - compact.check( - "projects/p/datasets/d/tables/t", ProtoRepeatedBQOptional.getDescriptor(), false); - fail("Should fail: BQ schema is nullable, but proto field is repeated."); - } catch (IllegalArgumentException expected) { - assertEquals( - "Given proto field " - + ProtoRepeatedBQOptional.getDescriptor().getName() - + ".optional_mode is repeated but Big Query field t.optional_mode is optional.", - expected.getMessage()); - } - - verify(mockBigquery, times(3)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(3)).getDefinition(); - } - - @Test - public void testBQBool() { - customizeSchema( - Schema.of( - Field.newBuilder("test_field_type", LegacySQLTypeName.BOOLEAN) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - HashSet compatible = - new HashSet<>( - Arrays.asList( - Int32Type.getDescriptor(), - Int64Type.getDescriptor(), - UInt32Type.getDescriptor(), - UInt64Type.getDescriptor(), - Fixed32Type.getDescriptor(), - Fixed64Type.getDescriptor(), - SFixed32Type.getDescriptor(), - SFixed64Type.getDescriptor(), - BoolType.getDescriptor())); - - for (Descriptors.Descriptor descriptor : type_descriptors) { - if (compatible.contains(descriptor)) { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - } else { - try { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - fail("Should fail: Proto schema type should not match BQ Boolean."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + 
descriptor.getName() - + ".test_field_type does not have a matching type with the big query field t.test_field_type.", - expected.getMessage()); - } - } - } - verify(mockBigquery, times(16)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(16)).getDefinition(); - } - - @Test - public void testBQBytes() { - customizeSchema( - Schema.of( - Field.newBuilder("test_field_type", LegacySQLTypeName.BYTES) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - HashSet compatible = - new HashSet<>(Arrays.asList(BytesType.getDescriptor())); - - for (Descriptors.Descriptor descriptor : type_descriptors) { - if (compatible.contains(descriptor)) { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - } else { - try { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - fail("Should fail: Proto schema type should not match BQ Bytes."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + descriptor.getName() - + ".test_field_type does not have a matching type with the big query field t.test_field_type.", - expected.getMessage()); - } - } - } - verify(mockBigquery, times(16)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(16)).getDefinition(); - } - - @Test - public void testBQDate() { - customizeSchema( - Schema.of( - Field.newBuilder("test_field_type", LegacySQLTypeName.DATE) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - HashSet compatible = - new HashSet<>( - Arrays.asList( - Int32Type.getDescriptor(), - Int64Type.getDescriptor(), - SFixed32Type.getDescriptor(), - SFixed64Type.getDescriptor())); - - for (Descriptors.Descriptor descriptor : type_descriptors) { - if (compatible.contains(descriptor)) { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - } else { - try { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - fail("Should fail: Proto schema type should not match BQ Date."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + descriptor.getName() - + ".test_field_type does not have a matching type with the big query field t.test_field_type.", - expected.getMessage()); - } - } - } - verify(mockBigquery, times(16)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(16)).getDefinition(); - } - - @Test - public void testBQDatetime() { - customizeSchema( - Schema.of( - Field.newBuilder("test_field_type", LegacySQLTypeName.DATETIME) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - HashSet compatible = - new HashSet<>(Arrays.asList(Int64Type.getDescriptor(), StringType.getDescriptor())); - - for (Descriptors.Descriptor descriptor : type_descriptors) { - if (compatible.contains(descriptor)) { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - } else { - try { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - fail("Should fail: Proto schema type should not match BQ Datetime."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + descriptor.getName() - + ".test_field_type does not have a matching type with the big query field t.test_field_type.", - expected.getMessage()); - } - } - } - verify(mockBigquery, times(16)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(16)).getDefinition(); - } - 
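A note on this block of per-type tests in the deleted file: testBQBool, testBQBytes, testBQDate, and testBQDatetime above, and testBQFloat through testBQTimestamp below, all repeat one pattern: mock a table whose schema is a single nullable column of some BigQuery type, then assert that each proto descriptor in type_descriptors either passes SchemaCompatibility.check or fails with the standard mismatch message. A minimal sketch of that shared loop, assuming it sits inside SchemaCompatibilityTest and reuses its fixtures (customizeSchema, mockBigquery, mockBigqueryTable, type_descriptors); the helper name checkTypeCompatibility is hypothetical and was not part of the deleted class:

private void checkTypeCompatibility(
    LegacySQLTypeName bqType, java.util.Set<Descriptors.Descriptor> compatible) {
  // Mock a table whose schema is a single NULLABLE column of the given BQ type.
  customizeSchema(
      Schema.of(
          Field.newBuilder("test_field_type", bqType)
              .setMode(Field.Mode.NULLABLE)
              .build()));
  SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery);
  for (Descriptors.Descriptor descriptor : type_descriptors) {
    if (compatible.contains(descriptor)) {
      // Compatible proto field types must pass the check without throwing.
      compact.check("projects/p/datasets/d/tables/t", descriptor, false);
    } else {
      try {
        compact.check("projects/p/datasets/d/tables/t", descriptor, false);
        fail("Should fail: proto schema type should not match BQ " + bqType + ".");
      } catch (IllegalArgumentException expected) {
        assertEquals(
            "The proto field "
                + descriptor.getName()
                + ".test_field_type does not have a matching type with the big query"
                + " field t.test_field_type.",
            expected.getMessage());
      }
    }
  }
  // One getTable/getDefinition round trip is expected per descriptor checked.
  verify(mockBigquery, times(type_descriptors.length)).getTable(any(TableId.class));
  verify(mockBigqueryTable, times(type_descriptors.length)).getDefinition();
}

With that helper, testBQFloat below would reduce to checkTypeCompatibility(LegacySQLTypeName.FLOAT, new HashSet<>(Arrays.asList(FloatType.getDescriptor(), DoubleType.getDescriptor()))).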
- @Test - public void testBQFloat() { - customizeSchema( - Schema.of( - Field.newBuilder("test_field_type", LegacySQLTypeName.FLOAT) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - HashSet compatible = - new HashSet<>(Arrays.asList(FloatType.getDescriptor(), DoubleType.getDescriptor())); - - for (Descriptors.Descriptor descriptor : type_descriptors) { - if (compatible.contains(descriptor)) { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - } else { - try { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - fail("Should fail: Proto schema type should not match BQ Float."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + descriptor.getName() - + ".test_field_type does not have a matching type with the big query field t.test_field_type.", - expected.getMessage()); - } - } - } - verify(mockBigquery, times(16)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(16)).getDefinition(); - } - - @Test - public void testBQGeography() { - customizeSchema( - Schema.of( - Field.newBuilder("test_field_type", LegacySQLTypeName.GEOGRAPHY) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - HashSet compatible = - new HashSet<>(Arrays.asList(StringType.getDescriptor())); - - for (Descriptors.Descriptor descriptor : type_descriptors) { - if (compatible.contains(descriptor)) { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - } else { - try { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - fail("Should fail: Proto schema type should not match BQ Geography."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + descriptor.getName() - + ".test_field_type does not have a matching type with the big query field t.test_field_type.", - expected.getMessage()); - } - } - } - verify(mockBigquery, times(16)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(16)).getDefinition(); - } - - @Test - public void testBQInteger() { - customizeSchema( - Schema.of( - Field.newBuilder("test_field_type", LegacySQLTypeName.INTEGER) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - HashSet compatible = - new HashSet<>( - Arrays.asList( - Int32Type.getDescriptor(), - Int64Type.getDescriptor(), - UInt32Type.getDescriptor(), - Fixed32Type.getDescriptor(), - SFixed32Type.getDescriptor(), - SFixed64Type.getDescriptor(), - EnumType.getDescriptor())); - - for (Descriptors.Descriptor descriptor : type_descriptors) { - if (compatible.contains(descriptor)) { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - } else { - try { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - fail("Should fail: Proto schema type should not match BQ Integer."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + descriptor.getName() - + ".test_field_type does not have a matching type with the big query field t.test_field_type.", - expected.getMessage()); - } - } - } - verify(mockBigquery, times(16)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(16)).getDefinition(); - } - - @Test - public void testBQNumeric() { - customizeSchema( - Schema.of( - Field.newBuilder("test_field_type", LegacySQLTypeName.NUMERIC) - .setMode(Field.Mode.NULLABLE) - 
.build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - HashSet compatible = - new HashSet<>( - Arrays.asList( - Int32Type.getDescriptor(), - Int64Type.getDescriptor(), - UInt32Type.getDescriptor(), - UInt64Type.getDescriptor(), - Fixed32Type.getDescriptor(), - Fixed64Type.getDescriptor(), - SFixed32Type.getDescriptor(), - SFixed64Type.getDescriptor(), - BytesType.getDescriptor(), - StringType.getDescriptor(), - FloatType.getDescriptor(), - DoubleType.getDescriptor())); - - for (Descriptors.Descriptor descriptor : type_descriptors) { - if (compatible.contains(descriptor)) { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - } else { - try { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - fail("Should fail: Proto schema type should not match BQ Numeric."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + descriptor.getName() - + ".test_field_type does not have a matching type with the big query field t.test_field_type.", - expected.getMessage()); - } - } - } - verify(mockBigquery, times(16)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(16)).getDefinition(); - } - - @Test - public void testBQRecord() { - Field nestedMessage = - Field.newBuilder("test_field_type", LegacySQLTypeName.STRING) - .setMode(Field.Mode.NULLABLE) - .build(); - customizeSchema( - Schema.of( - Field.newBuilder("test_field_type", LegacySQLTypeName.RECORD, nestedMessage) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - HashSet compatible = - new HashSet<>(Arrays.asList(MessageType.getDescriptor(), GroupType.getDescriptor())); - - for (Descriptors.Descriptor descriptor : type_descriptors) { - if (compatible.contains(descriptor)) { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - } else { - try { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - fail("Should fail: Proto schema type should not match BQ String."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + descriptor.getName() - + ".test_field_type does not have a matching type with the big query field t.test_field_type.", - expected.getMessage()); - } - } - } - verify(mockBigquery, times(16)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(16)).getDefinition(); - } - - @Test - public void testBQRecordMismatch() { - Field nestedMessage1 = - Field.newBuilder("test_field_type", LegacySQLTypeName.INTEGER) - .setMode(Field.Mode.NULLABLE) - .build(); - Field nestedMessage0 = - Field.newBuilder("mismatchlvl1", LegacySQLTypeName.RECORD, nestedMessage1) - .setMode(Field.Mode.NULLABLE) - .build(); - customizeSchema( - Schema.of( - Field.newBuilder("mismatchlvl0", LegacySQLTypeName.RECORD, nestedMessage0) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - try { - compact.check("projects/p/datasets/d/tables/t", MessageTypeMismatch.getDescriptor(), false); - fail("Should fail: Proto schema type should not match BQ String."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + MessageTypeMismatch.getDescriptor().getName() - + ".mismatchlvl0.mismatchlvl1.test_field_type does not have a matching type with the big query field t.mismatchlvl0.mismatchlvl1.test_field_type.", - expected.getMessage()); - } - verify(mockBigquery, 
times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } - - @Test - public void testBQRecordMatch() { - Field nestedMessage1 = - Field.newBuilder("test_field_type", LegacySQLTypeName.STRING) - .setMode(Field.Mode.NULLABLE) - .build(); - Field nestedMessage0 = - Field.newBuilder("mismatchlvl1", LegacySQLTypeName.RECORD, nestedMessage1) - .setMode(Field.Mode.NULLABLE) - .build(); - customizeSchema( - Schema.of( - Field.newBuilder("mismatchlvl0", LegacySQLTypeName.RECORD, nestedMessage0) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - compact.check("projects/p/datasets/d/tables/t", MessageTypeMismatch.getDescriptor(), false); - verify(mockBigquery, times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } - - @Test - public void testBQString() { - customizeSchema( - Schema.of( - Field.newBuilder("test_field_type", LegacySQLTypeName.STRING) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - HashSet compatible = - new HashSet<>(Arrays.asList(StringType.getDescriptor(), EnumType.getDescriptor())); - - for (Descriptors.Descriptor descriptor : type_descriptors) { - if (compatible.contains(descriptor)) { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - } else { - try { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - fail("Should fail: Proto schema type should not match BQ String."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + descriptor.getName() - + ".test_field_type does not have a matching type with the big query field t.test_field_type.", - expected.getMessage()); - } - } - } - verify(mockBigquery, times(16)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(16)).getDefinition(); - } - - @Test - public void testBQTime() { - customizeSchema( - Schema.of( - Field.newBuilder("test_field_type", LegacySQLTypeName.TIME) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - HashSet compatible = - new HashSet<>(Arrays.asList(Int64Type.getDescriptor(), StringType.getDescriptor())); - - for (Descriptors.Descriptor descriptor : type_descriptors) { - if (compatible.contains(descriptor)) { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - } else { - try { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - fail("Should fail: Proto schema type should not match BQ Time."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + descriptor.getName() - + ".test_field_type does not have a matching type with the big query field t.test_field_type.", - expected.getMessage()); - } - } - } - verify(mockBigquery, times(16)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(16)).getDefinition(); - } - - @Test - public void testBQTimestamp() { - customizeSchema( - Schema.of( - Field.newBuilder("test_field_type", LegacySQLTypeName.TIMESTAMP) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - HashSet compatible = - new HashSet<>( - Arrays.asList( - Int32Type.getDescriptor(), - Int64Type.getDescriptor(), - UInt32Type.getDescriptor(), - Fixed32Type.getDescriptor(), - SFixed32Type.getDescriptor(), - SFixed64Type.getDescriptor(), - 
EnumType.getDescriptor())); - - for (Descriptors.Descriptor descriptor : type_descriptors) { - if (compatible.contains(descriptor)) { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - } else { - try { - compact.check("projects/p/datasets/d/tables/t", descriptor, false); - fail("Should fail: Proto schema type should not match BQ Timestamp."); - } catch (IllegalArgumentException expected) { - assertEquals( - "The proto field " - + descriptor.getName() - + ".test_field_type does not have a matching type with the big query field t.test_field_type.", - expected.getMessage()); - } - } - } - verify(mockBigquery, times(16)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(16)).getDefinition(); - } - - /* - * Tests if having no matching fields in the top level causes an error. - */ - @Test - public void testBQTopLevelMismatch() { - customizeSchema( - Schema.of( - Field.newBuilder("test_toplevel_mismatch", LegacySQLTypeName.STRING) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - try { - compact.check("projects/p/datasets/d/tables/t", StringType.getDescriptor(), false); - } catch (IllegalArgumentException expected) { - assertEquals( - "There is no matching fields found for the proto schema " - + StringType.getDescriptor().getName() - + " and the BQ table schema t.", - expected.getMessage()); - } - verify(mockBigquery, times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } - - /* - * Tests if there is at least 1 matching field in the top level. - */ - @Test - public void testBQTopLevelMatch() { - Field nestedMessage0 = - Field.newBuilder("mismatch", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build(); - customizeSchema( - Schema.of( - Field.newBuilder("mismatch", LegacySQLTypeName.RECORD, nestedMessage0) - .setMode(Field.Mode.NULLABLE) - .build(), - Field.newBuilder("match", LegacySQLTypeName.STRING) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - compact.check("projects/p/datasets/d/tables/t", TopLevelMatch.getDescriptor(), false); - verify(mockBigquery, times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } - - @Test - public void testAllowUnknownUnsupportedFields() { - customizeSchema( - Schema.of( - Field.newBuilder("string_value", LegacySQLTypeName.STRING) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - compact.check( - "projects/p/datasets/d/tables/t", AllowUnknownUnsupportedFields.getDescriptor(), true); - verify(mockBigquery, times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } - - @Test - public void testLowerCase() { - customizeSchema( - Schema.of( - Field.newBuilder("tEsT_fIeLd_TyPe", LegacySQLTypeName.STRING) - .setMode(Field.Mode.NULLABLE) - .build())); - SchemaCompatibility compact = SchemaCompatibility.getInstance(mockBigquery); - compact.check("projects/p/datasets/d/tables/t", StringType.getDescriptor(), true); - verify(mockBigquery, times(1)).getTable(any(TableId.class)); - verify(mockBigqueryTable, times(1)).getDefinition(); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/StreamWriterTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/StreamWriterTest.java deleted file 
mode 100644 index aa998725a3..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/StreamWriterTest.java +++ /dev/null @@ -1,837 +0,0 @@ -/* - * Copyright 2016 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import static com.google.common.truth.Truth.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import com.google.api.core.ApiFuture; -import com.google.api.gax.batching.BatchingSettings; -import com.google.api.gax.batching.FlowControlSettings; -import com.google.api.gax.batching.FlowController; -import com.google.api.gax.core.ExecutorProvider; -import com.google.api.gax.core.FixedExecutorProvider; -import com.google.api.gax.core.InstantiatingExecutorProvider; -import com.google.api.gax.core.NoCredentialsProvider; -import com.google.api.gax.grpc.testing.LocalChannelProvider; -import com.google.api.gax.grpc.testing.MockGrpcService; -import com.google.api.gax.grpc.testing.MockServiceHelper; -import com.google.api.gax.retrying.RetrySettings; -import com.google.api.gax.rpc.DataLossException; -import com.google.cloud.bigquery.storage.test.Test.FooType; -import com.google.cloud.bigquery.storage.v1alpha2.Storage.*; -import com.google.common.base.Strings; -import com.google.protobuf.DescriptorProtos; -import com.google.protobuf.Int64Value; -import com.google.protobuf.Timestamp; -import io.grpc.Status; -import io.grpc.StatusRuntimeException; -import java.util.Arrays; -import java.util.UUID; -import java.util.concurrent.ExecutionException; -import java.util.concurrent.TimeUnit; -import java.util.logging.Logger; -import org.junit.After; -import org.junit.Assert; -import org.junit.Before; -import org.junit.Test; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.threeten.bp.Duration; -import org.threeten.bp.Instant; - -@RunWith(JUnit4.class) -public class StreamWriterTest { - private static final Logger LOG = Logger.getLogger(StreamWriterTest.class.getName()); - private static final String TEST_STREAM = "projects/p/datasets/d/tables/t/streams/s"; - private static final ExecutorProvider SINGLE_THREAD_EXECUTOR = - InstantiatingExecutorProvider.newBuilder().setExecutorThreadCount(1).build(); - private static LocalChannelProvider channelProvider; - private FakeScheduledExecutorService fakeExecutor; - private FakeBigQueryWrite testBigQueryWrite; - private static MockServiceHelper serviceHelper; - - @Before - public void setUp() throws Exception { - testBigQueryWrite = new FakeBigQueryWrite(); - serviceHelper = - new MockServiceHelper( - UUID.randomUUID().toString(), Arrays.asList(testBigQueryWrite)); - serviceHelper.start(); - channelProvider = serviceHelper.createChannelProvider(); - fakeExecutor = new FakeScheduledExecutorService(); - testBigQueryWrite.setExecutor(fakeExecutor); - Instant time = 
Instant.now(); - Timestamp timestamp = - Timestamp.newBuilder().setSeconds(time.getEpochSecond()).setNanos(time.getNano()).build(); - // Add enough GetWriteStream response. - for (int i = 0; i < 4; i++) { - testBigQueryWrite.addResponse( - Stream.WriteStream.newBuilder().setName(TEST_STREAM).setCreateTime(timestamp).build()); - } - } - - @After - public void tearDown() throws Exception { - LOG.info("tearDown called"); - serviceHelper.stop(); - } - - private StreamWriter.Builder getTestStreamWriterBuilder(String testStream) { - return StreamWriter.newBuilder(testStream) - .setChannelProvider(channelProvider) - .setExecutorProvider(SINGLE_THREAD_EXECUTOR) - .setCredentialsProvider(NoCredentialsProvider.create()); - } - - private StreamWriter.Builder getTestStreamWriterBuilder() { - return getTestStreamWriterBuilder(TEST_STREAM); - } - - private AppendRowsRequest createAppendRequest(String[] messages, long offset) { - AppendRowsRequest.Builder requestBuilder = AppendRowsRequest.newBuilder(); - AppendRowsRequest.ProtoData.Builder dataBuilder = AppendRowsRequest.ProtoData.newBuilder(); - dataBuilder.setWriterSchema( - ProtoBufProto.ProtoSchema.newBuilder() - .setProtoDescriptor( - DescriptorProtos.DescriptorProto.newBuilder() - .setName("Message") - .addField( - DescriptorProtos.FieldDescriptorProto.newBuilder() - .setName("foo") - .setType(DescriptorProtos.FieldDescriptorProto.Type.TYPE_STRING) - .setNumber(1) - .build()) - .build())); - ProtoBufProto.ProtoRows.Builder rows = ProtoBufProto.ProtoRows.newBuilder(); - for (String message : messages) { - FooType foo = FooType.newBuilder().setFoo(message).build(); - rows.addSerializedRows(foo.toByteString()); - } - if (offset > 0) { - requestBuilder.setOffset(Int64Value.of(offset)); - } - return requestBuilder - .setProtoRows(dataBuilder.setRows(rows.build()).build()) - .setWriteStream(TEST_STREAM) - .build(); - } - - private ApiFuture sendTestMessage(StreamWriter writer, String[] messages) { - return writer.append(createAppendRequest(messages, -1)); - } - - @Test - public void testTableName() throws Exception { - try (StreamWriter writer = getTestStreamWriterBuilder().build()) { - assertEquals("projects/p/datasets/d/tables/t", writer.getTableNameString()); - } - } - - @Test - public void testAppendByDuration() throws Exception { - StreamWriter writer = - getTestStreamWriterBuilder() - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setDelayThreshold(Duration.ofSeconds(5)) - .setElementCountThreshold(10L) - .build()) - .setExecutorProvider(FixedExecutorProvider.create(fakeExecutor)) - .build(); - - testBigQueryWrite.addResponse(Storage.AppendRowsResponse.newBuilder().setOffset(0).build()); - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); - - assertFalse(appendFuture1.isDone()); - assertFalse(appendFuture2.isDone()); - fakeExecutor.advanceTime(Duration.ofSeconds(10)); - - assertEquals(0L, appendFuture1.get().getOffset()); - assertEquals(1L, appendFuture2.get().getOffset()); - - assertEquals(1, testBigQueryWrite.getAppendRequests().size()); - - assertEquals( - 2, - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - true, testBigQueryWrite.getAppendRequests().get(0).getProtoRows().hasWriterSchema()); - writer.close(); - } - - @Test - public void testAppendByNumBatchedMessages() throws Exception { - StreamWriter writer = - 
getTestStreamWriterBuilder() - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(2L) - .setDelayThreshold(Duration.ofSeconds(100)) - .build()) - .build(); - - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(0).build()); - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(2).build()); - - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); - ApiFuture appendFuture3 = sendTestMessage(writer, new String[] {"C"}); - - assertEquals(0L, appendFuture1.get().getOffset()); - assertEquals(1L, appendFuture2.get().getOffset()); - - assertFalse(appendFuture3.isDone()); - - ApiFuture appendFuture4 = sendTestMessage(writer, new String[] {"D"}); - - assertEquals(2L, appendFuture3.get().getOffset()); - assertEquals(3L, appendFuture4.get().getOffset()); - - assertEquals(2, testBigQueryWrite.getAppendRequests().size()); - assertEquals( - 2, - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - true, testBigQueryWrite.getAppendRequests().get(0).getProtoRows().hasWriterSchema()); - assertEquals( - 2, - testBigQueryWrite - .getAppendRequests() - .get(1) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - false, testBigQueryWrite.getAppendRequests().get(1).getProtoRows().hasWriterSchema()); - writer.close(); - } - - @Test - public void testAppendByNumBytes() throws Exception { - StreamWriter writer = - getTestStreamWriterBuilder() - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - // Each message is 32 bytes, setting batch size to 70 bytes allows 2 messages. - .setRequestByteThreshold(70L) - .setDelayThreshold(Duration.ofSeconds(100000)) - .build()) - .build(); - - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(0).build()); - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(2).build()); - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(3).build()); - - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); - ApiFuture appendFuture3 = sendTestMessage(writer, new String[] {"C"}); - - assertEquals(0L, appendFuture1.get().getOffset()); - assertEquals(1L, appendFuture2.get().getOffset()); - assertFalse(appendFuture3.isDone()); - - // This message is big enough to trigger send on the previous message and itself. 
- ApiFuture appendFuture4 = - sendTestMessage(writer, new String[] {Strings.repeat("A", 100)}); - assertEquals(2L, appendFuture3.get().getOffset()); - assertEquals(3L, appendFuture4.get().getOffset()); - - assertEquals(3, testBigQueryWrite.getAppendRequests().size()); - - writer.close(); - } - - @Test - public void testWriteByShutdown() throws Exception { - StreamWriter writer = - getTestStreamWriterBuilder() - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setDelayThreshold(Duration.ofSeconds(100)) - .setElementCountThreshold(10L) - .build()) - .build(); - - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(0L).build()); - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(1L).build()); - - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); - - // Note we are not advancing time or reaching the count threshold but messages should - // still get written by call to shutdown - - writer.close(); - - // Verify the appends completed - assertTrue(appendFuture1.isDone()); - assertTrue(appendFuture2.isDone()); - assertEquals(0L, appendFuture1.get().getOffset()); - assertEquals(1L, appendFuture2.get().getOffset()); - } - - @Test - public void testWriteMixedSizeAndDuration() throws Exception { - try (StreamWriter writer = - getTestStreamWriterBuilder() - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(2L) - .setDelayThreshold(Duration.ofSeconds(5)) - .build()) - .build()) { - - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(0L).build()); - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(2L).build()); - - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - - fakeExecutor.advanceTime(Duration.ofSeconds(2)); - assertFalse(appendFuture1.isDone()); - - ApiFuture appendFuture2 = - sendTestMessage(writer, new String[] {"B", "C"}); - - // Write triggered by batch size - assertEquals(0L, appendFuture1.get().getOffset()); - assertEquals(1L, appendFuture2.get().getOffset()); - - ApiFuture appendFuture3 = sendTestMessage(writer, new String[] {"D"}); - - assertFalse(appendFuture3.isDone()); - - // Write triggered by time - fakeExecutor.advanceTime(Duration.ofSeconds(5)); - - assertEquals(2L, appendFuture3.get().getOffset()); - - assertEquals( - 3, - testBigQueryWrite - .getAppendRequests() - .get(0) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - true, testBigQueryWrite.getAppendRequests().get(0).getProtoRows().hasWriterSchema()); - assertEquals( - 1, - testBigQueryWrite - .getAppendRequests() - .get(1) - .getProtoRows() - .getRows() - .getSerializedRowsCount()); - assertEquals( - false, testBigQueryWrite.getAppendRequests().get(1).getProtoRows().hasWriterSchema()); - } - } - - @Test - public void testFlowControlBehaviorBlock() throws Exception { - StreamWriter writer = - getTestStreamWriterBuilder() - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(1L) - .setFlowControlSettings( - StreamWriter.Builder.DEFAULT_FLOW_CONTROL_SETTINGS - .toBuilder() - .setMaxOutstandingRequestBytes(40L) - .setLimitExceededBehavior(FlowController.LimitExceededBehavior.Block) - .build()) - .build()) - .build(); - - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(2L).build()); - 
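Together, the duration, element-count, and byte-size tests above pin down the batching contract: queued appends are flushed as a single AppendRows request as soon as the element count threshold, the request byte threshold, or the delay threshold fires, whichever comes first, and close() flushes anything still pending. A minimal configuration sketch, assuming the imports already present in this file and a placeholder stream name:

    // Sketch only: the stream name is a placeholder and the thresholds are illustrative.
    StreamWriter batchingWriter =
        StreamWriter.newBuilder("projects/p/datasets/d/tables/t/streams/s")
            .setBatchingSettings(
                StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS
                    .toBuilder()
                    .setElementCountThreshold(2L) // flush after two queued appends...
                    .setRequestByteThreshold(70L) // ...or ~70 pending bytes...
                    .setDelayThreshold(Duration.ofSeconds(5)) // ...or five seconds, first wins
                    .build())
            .build();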
testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(3L).build()); - testBigQueryWrite.setResponseDelay(Duration.ofSeconds(10)); - - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - final StreamWriter writer1 = writer; - Runnable runnable = - new Runnable() { - @Override - public void run() { - ApiFuture appendFuture2 = - sendTestMessage(writer1, new String[] {"B"}); - } - }; - Thread t = new Thread(runnable); - t.start(); - assertEquals(true, t.isAlive()); - assertEquals(false, appendFuture1.isDone()); - // Wait is necessary for response to be scheduled before timer is advanced. - Thread.sleep(5000L); - fakeExecutor.advanceTime(Duration.ofSeconds(10)); - // The first requests gets back while the second one is blocked. - assertEquals(2L, appendFuture1.get().getOffset()); - Thread.sleep(5000L); - // Wait is necessary for response to be scheduled before timer is advanced. - fakeExecutor.advanceTime(Duration.ofSeconds(10)); - t.join(); - writer.close(); - } - - @Test - public void testFlowControlBehaviorException() throws Exception { - try (StreamWriter writer = - getTestStreamWriterBuilder() - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(1L) - .setFlowControlSettings( - StreamWriter.Builder.DEFAULT_FLOW_CONTROL_SETTINGS - .toBuilder() - .setMaxOutstandingElementCount(1L) - .setLimitExceededBehavior( - FlowController.LimitExceededBehavior.ThrowException) - .build()) - .build()) - .build()) { - assertEquals( - 1L, - writer - .getBatchingSettings() - .getFlowControlSettings() - .getMaxOutstandingElementCount() - .longValue()); - - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(1L).build()); - testBigQueryWrite.setResponseDelay(Duration.ofSeconds(10)); - ApiFuture appendFuture1 = sendTestMessage(writer, new String[] {"A"}); - ApiFuture appendFuture2 = sendTestMessage(writer, new String[] {"B"}); - // Wait is necessary for response to be scheduled before timer is advanced. - Thread.sleep(5000L); - fakeExecutor.advanceTime(Duration.ofSeconds(10)); - try { - appendFuture2.get(); - Assert.fail("This should fail"); - } catch (Exception e) { - assertEquals( - "java.util.concurrent.ExecutionException: The maximum number of batch elements: 1 have been reached.", - e.toString()); - } - assertEquals(1L, appendFuture1.get().getOffset()); - } - } - - @Test - public void testStreamReconnectionTransient() throws Exception { - StreamWriter writer = - getTestStreamWriterBuilder() - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setDelayThreshold(Duration.ofSeconds(100000)) - .setElementCountThreshold(1L) - .build()) - .build(); - - StatusRuntimeException transientError = new StatusRuntimeException(Status.UNAVAILABLE); - testBigQueryWrite.addException(transientError); - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(0).build()); - ApiFuture future1 = sendTestMessage(writer, new String[] {"m1"}); - assertEquals(false, future1.isDone()); - // Retry is scheduled to be 7 seconds later. 
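Put differently: a transient status such as UNAVAILABLE is retried internally under the writer's RetrySettings, so the pending append future still completes once the stream reconnects, while a permanent status such as INVALID_ARGUMENT, or a transient one that exhausts the attempt budget, fails the future instead, as the next two tests verify. A sketch of tightening that budget, with illustrative values mirroring the exceed-retry test below:

    // Sketch: cap the retry budget so a persistent UNAVAILABLE surfaces quickly.
    StreamWriter retryWriter =
        StreamWriter.newBuilder("projects/p/datasets/d/tables/t/streams/s")
            .setRetrySettings(
                RetrySettings.newBuilder()
                    .setMaxRetryDelay(Duration.ofMillis(100))
                    .setMaxAttempts(1) // stop retrying after a single attempt
                    .build())
            .build();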
- assertEquals(0L, future1.get().getOffset()); - writer.close(); - } - - @Test - public void testStreamReconnectionPermanant() throws Exception { - StreamWriter writer = - getTestStreamWriterBuilder() - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setDelayThreshold(Duration.ofSeconds(100000)) - .setElementCountThreshold(1L) - .build()) - .build(); - StatusRuntimeException permanentError = new StatusRuntimeException(Status.INVALID_ARGUMENT); - testBigQueryWrite.addException(permanentError); - ApiFuture future2 = sendTestMessage(writer, new String[] {"m2"}); - try { - future2.get(); - Assert.fail("This should fail."); - } catch (ExecutionException e) { - assertEquals(permanentError.toString(), e.getCause().getCause().toString()); - } - writer.close(); - } - - @Test - public void testStreamReconnectionExceedRetry() throws Exception { - StreamWriter writer = - getTestStreamWriterBuilder() - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setDelayThreshold(Duration.ofSeconds(100000)) - .setElementCountThreshold(1L) - .build()) - .setRetrySettings( - RetrySettings.newBuilder() - .setMaxRetryDelay(Duration.ofMillis(100)) - .setMaxAttempts(1) - .build()) - .build(); - assertEquals(1, writer.getRetrySettings().getMaxAttempts()); - StatusRuntimeException transientError = new StatusRuntimeException(Status.UNAVAILABLE); - testBigQueryWrite.addException(transientError); - testBigQueryWrite.addException(transientError); - ApiFuture future3 = sendTestMessage(writer, new String[] {"toomanyretry"}); - try { - future3.get(); - Assert.fail("This should fail."); - } catch (ExecutionException e) { - assertEquals(transientError.toString(), e.getCause().getCause().toString()); - } - writer.close(); - } - - @Test - public void testOffset() throws Exception { - try (StreamWriter writer = - getTestStreamWriterBuilder() - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(2L) - .build()) - .build()) { - - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(10L).build()); - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(13L).build()); - AppendRowsRequest request1 = createAppendRequest(new String[] {"A"}, 10L); - ApiFuture appendFuture1 = writer.append(request1); - AppendRowsRequest request2 = createAppendRequest(new String[] {"B", "C"}, 11L); - ApiFuture appendFuture2 = writer.append(request2); - AppendRowsRequest request3 = createAppendRequest(new String[] {"E", "F"}, 13L); - ApiFuture appendFuture3 = writer.append(request3); - AppendRowsRequest request4 = createAppendRequest(new String[] {"G"}, 15L); - ApiFuture appendFuture4 = writer.append(request4); - assertEquals(10L, appendFuture1.get().getOffset()); - assertEquals(11L, appendFuture2.get().getOffset()); - assertEquals(13L, appendFuture3.get().getOffset()); - assertEquals(15L, appendFuture4.get().getOffset()); - } - } - - @Test - public void testOffsetMismatch() throws Exception { - try (StreamWriter writer = - getTestStreamWriterBuilder() - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(1L) - .build()) - .build()) { - testBigQueryWrite.addResponse(AppendRowsResponse.newBuilder().setOffset(11L).build()); - AppendRowsRequest request1 = createAppendRequest(new String[] {"A"}, 10L); - ApiFuture appendFuture1 = writer.append(request1); - - appendFuture1.get(); - fail("Should throw exception"); - } catch 
(Exception e) { - assertEquals( - "java.lang.IllegalStateException: The append result offset 11 does not match the expected offset 10.", - e.getCause().toString()); - } - } - - @Test - public void testErrorPropagation() throws Exception { - try (StreamWriter writer = - getTestStreamWriterBuilder() - .setExecutorProvider(SINGLE_THREAD_EXECUTOR) - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(1L) - .setDelayThreshold(Duration.ofSeconds(5)) - .build()) - .build()) { - testBigQueryWrite.addException(Status.DATA_LOSS.asException()); - sendTestMessage(writer, new String[] {"A"}).get(); - fail("should throw exception"); - } catch (ExecutionException e) { - assertThat(e.getCause()).isInstanceOf(DataLossException.class); - } - } - - @Test - public void testWriterGetters() throws Exception { - StreamWriter.Builder builder = StreamWriter.newBuilder(TEST_STREAM); - builder.setChannelProvider(channelProvider); - builder.setExecutorProvider(SINGLE_THREAD_EXECUTOR); - builder.setBatchingSettings( - BatchingSettings.newBuilder() - .setRequestByteThreshold(10L) - .setDelayThreshold(Duration.ofMillis(11)) - .setElementCountThreshold(12L) - .setFlowControlSettings( - FlowControlSettings.newBuilder() - .setMaxOutstandingElementCount(100L) - .setMaxOutstandingRequestBytes(1000L) - .setLimitExceededBehavior(FlowController.LimitExceededBehavior.Block) - .build()) - .build()); - builder.setCredentialsProvider(NoCredentialsProvider.create()); - StreamWriter writer = builder.build(); - - assertEquals(TEST_STREAM, writer.getStreamNameString()); - assertEquals(10, (long) writer.getBatchingSettings().getRequestByteThreshold()); - assertEquals(Duration.ofMillis(11), writer.getBatchingSettings().getDelayThreshold()); - assertEquals(12, (long) writer.getBatchingSettings().getElementCountThreshold()); - assertEquals( - FlowController.LimitExceededBehavior.Block, - writer.getBatchingSettings().getFlowControlSettings().getLimitExceededBehavior()); - assertEquals( - 100L, - writer - .getBatchingSettings() - .getFlowControlSettings() - .getMaxOutstandingElementCount() - .longValue()); - assertEquals( - 1000L, - writer - .getBatchingSettings() - .getFlowControlSettings() - .getMaxOutstandingRequestBytes() - .longValue()); - writer.close(); - } - - @Test - public void testBuilderParametersAndDefaults() { - StreamWriter.Builder builder = StreamWriter.newBuilder(TEST_STREAM); - assertEquals(StreamWriter.Builder.DEFAULT_EXECUTOR_PROVIDER, builder.executorProvider); - assertEquals(100 * 1024L, builder.batchingSettings.getRequestByteThreshold().longValue()); - assertEquals(Duration.ofMillis(10), builder.batchingSettings.getDelayThreshold()); - assertEquals(100L, builder.batchingSettings.getElementCountThreshold().longValue()); - assertEquals(StreamWriter.Builder.DEFAULT_RETRY_SETTINGS, builder.retrySettings); - assertEquals(Duration.ofMillis(100), builder.retrySettings.getInitialRetryDelay()); - assertEquals(3, builder.retrySettings.getMaxAttempts()); - } - - @Test - public void testBuilderInvalidArguments() { - StreamWriter.Builder builder = StreamWriter.newBuilder(TEST_STREAM); - - try { - builder.setChannelProvider(null); - fail("Should have thrown an NullPointerException"); - } catch (NullPointerException expected) { - // Expected - } - - try { - builder.setExecutorProvider(null); - fail("Should have thrown an NullPointerException"); - } catch (NullPointerException expected) { - // Expected - } - try { - builder.setBatchingSettings( - 
StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setRequestByteThreshold(null) - .build()); - fail("Should have thrown an NullPointerException"); - } catch (NullPointerException expected) { - // Expected - } - try { - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setRequestByteThreshold(0L) - .build()); - fail("Should have thrown an IllegalArgumentException"); - } catch (IllegalArgumentException expected) { - // Expected - } - try { - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setRequestByteThreshold(-1L) - .build()); - fail("Should have thrown an IllegalArgumentException"); - } catch (IllegalArgumentException expected) { - // Expected - } - - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setDelayThreshold(Duration.ofMillis(1)) - .build()); - try { - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setDelayThreshold(null) - .build()); - fail("Should have thrown an NullPointerException"); - } catch (NullPointerException expected) { - // Expected - } - try { - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setDelayThreshold(Duration.ofMillis(-1)) - .build()); - fail("Should have thrown an IllegalArgumentException"); - } catch (IllegalArgumentException expected) { - // Expected - } - - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(1L) - .build()); - try { - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(null) - .build()); - fail("Should have thrown an NullPointerException"); - } catch (NullPointerException expected) { - // Expected - } - try { - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(0L) - .build()); - fail("Should have thrown an IllegalArgumentException"); - } catch (IllegalArgumentException expected) { - // Expected - } - try { - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(-1L) - .build()); - fail("Should have thrown an IllegalArgumentException"); - } catch (IllegalArgumentException expected) { - // Expected - } - - try { - FlowControlSettings flowControlSettings = - FlowControlSettings.newBuilder().setMaxOutstandingElementCount(-1L).build(); - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setFlowControlSettings(flowControlSettings) - .build()); - fail("Should have thrown an IllegalArgumentException"); - } catch (IllegalArgumentException expected) { - // Expected - } - - try { - FlowControlSettings flowControlSettings = - FlowControlSettings.newBuilder().setMaxOutstandingRequestBytes(-1L).build(); - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setFlowControlSettings(flowControlSettings) - .build()); - fail("Should have thrown an IllegalArgumentException"); - } catch (IllegalArgumentException expected) { - // Expected - } - - try { - FlowControlSettings flowControlSettings = - FlowControlSettings.newBuilder() - .setLimitExceededBehavior(FlowController.LimitExceededBehavior.Ignore) - .build(); - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - 
.setFlowControlSettings(flowControlSettings) - .build()); - fail("Should have thrown an IllegalArgumentException"); - } catch (IllegalArgumentException expected) { - // Expected - } - - try { - FlowControlSettings flowControlSettings = - FlowControlSettings.newBuilder().setLimitExceededBehavior(null).build(); - builder.setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setFlowControlSettings(flowControlSettings) - .build()); - fail("Should have thrown an NullPointerException"); - } catch (NullPointerException expected) { - // Expected - } - } - - @Test - public void testExistingClient() throws Exception { - BigQueryWriteSettings settings = - BigQueryWriteSettings.newBuilder() - .setTransportChannelProvider(channelProvider) - .setCredentialsProvider(NoCredentialsProvider.create()) - .build(); - BigQueryWriteClient client = BigQueryWriteClient.create(settings); - StreamWriter writer = StreamWriter.newBuilder(TEST_STREAM, client).build(); - writer.close(); - assertFalse(client.isShutdown()); - client.shutdown(); - client.awaitTermination(1, TimeUnit.MINUTES); - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/WriterCacheTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/WriterCacheTest.java deleted file mode 100644 index cc62b4ee96..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/WriterCacheTest.java +++ /dev/null @@ -1,243 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; -import static org.mockito.Mockito.*; - -import com.google.api.gax.core.NoCredentialsProvider; -import com.google.api.gax.grpc.testing.LocalChannelProvider; -import com.google.api.gax.grpc.testing.MockGrpcService; -import com.google.api.gax.grpc.testing.MockServiceHelper; -import com.google.cloud.bigquery.storage.test.Test.*; -import com.google.protobuf.AbstractMessage; -import java.io.IOException; -import java.util.Arrays; -import java.util.List; -import java.util.UUID; -import java.util.concurrent.ExecutorService; -import java.util.concurrent.Executors; -import java.util.concurrent.TimeUnit; -import java.util.logging.Logger; -import org.junit.*; -import org.junit.runner.RunWith; -import org.junit.runners.JUnit4; -import org.mockito.Mock; -import org.mockito.MockitoAnnotations; - -@RunWith(JUnit4.class) -public class WriterCacheTest { - private static final Logger LOG = Logger.getLogger(WriterCacheTest.class.getName()); - - private static final String TEST_TABLE = "projects/p/datasets/d/tables/t"; - private static final String TEST_STREAM = "projects/p/datasets/d/tables/t/streams/s"; - private static final String TEST_STREAM_2 = "projects/p/datasets/d/tables/t/streams/s2"; - private static final String TEST_STREAM_3 = "projects/p/datasets/d/tables/t/streams/s3"; - private static final String TEST_STREAM_4 = "projects/p/datasets/d/tables/t/streams/s4"; - private static final String TEST_TABLE_2 = "projects/p/datasets/d/tables/t2"; - private static final String TEST_STREAM_21 = "projects/p/datasets/d/tables/t2/streams/s1"; - private static final String TEST_TABLE_3 = "projects/p/datasets/d/tables/t3"; - private static final String TEST_STREAM_31 = "projects/p/datasets/d/tables/t3/streams/s1"; - - private static MockBigQueryWrite mockBigQueryWrite; - private static MockServiceHelper serviceHelper; - @Mock private static SchemaCompatibility mockSchemaCheck; - private BigQueryWriteClient client; - private LocalChannelProvider channelProvider; - - @BeforeClass - public static void startStaticServer() { - mockBigQueryWrite = new MockBigQueryWrite(); - serviceHelper = - new MockServiceHelper( - UUID.randomUUID().toString(), Arrays.asList(mockBigQueryWrite)); - serviceHelper.start(); - } - - @AfterClass - public static void stopServer() { - serviceHelper.stop(); - } - - @Before - public void setUp() throws IOException { - serviceHelper.reset(); - channelProvider = serviceHelper.createChannelProvider(); - BigQueryWriteSettings settings = - BigQueryWriteSettings.newBuilder() - .setTransportChannelProvider(channelProvider) - .setCredentialsProvider(NoCredentialsProvider.create()) - .build(); - client = BigQueryWriteClient.create(settings); - MockitoAnnotations.initMocks(this); - } - - /** Response mocks for create a new writer */ - void WriterCreationResponseMock(String testStreamName) { - // Response from CreateWriteStream - Stream.WriteStream expectedResponse = - Stream.WriteStream.newBuilder().setName(testStreamName).build(); - mockBigQueryWrite.addResponse(expectedResponse); - } - - @After - public void tearDown() throws Exception { - client.close(); - } - - @Test - public void testRejectBadTableName() throws Exception { - WriterCache cache = WriterCache.getTestInstance(client, 10, mockSchemaCheck); - try { - cache.getTableWriter("abc", FooType.getDescriptor()); - fail(); - } catch (IllegalArgumentException 
expected) { - assertEquals(expected.getMessage(), "Invalid table name: abc"); - } - } - - @Test - public void testCreateNewWriter() throws Exception { - WriterCache cache = WriterCache.getTestInstance(client, 10, mockSchemaCheck); - WriterCreationResponseMock(TEST_STREAM); - StreamWriter writer = cache.getTableWriter(TEST_TABLE, FooType.getDescriptor()); - verify(mockSchemaCheck, times(1)).check(TEST_TABLE, FooType.getDescriptor()); - List actualRequests = mockBigQueryWrite.getRequests(); - assertEquals(1, actualRequests.size()); - assertEquals( - TEST_TABLE, ((Storage.CreateWriteStreamRequest) actualRequests.get(0)).getParent()); - assertEquals( - Stream.WriteStream.Type.COMMITTED, - ((Storage.CreateWriteStreamRequest) actualRequests.get(0)).getWriteStream().getType()); - assertEquals(TEST_TABLE, writer.getTableNameString()); - assertEquals(TEST_STREAM, writer.getStreamNameString()); - assertEquals(1, cache.cachedTableCount()); - cache.clear(); - } - - @Test - public void testWriterWithNewSchema() throws Exception { - WriterCache cache = WriterCache.getTestInstance(client, 10, mockSchemaCheck); - WriterCreationResponseMock(TEST_STREAM); - WriterCreationResponseMock(TEST_STREAM_2); - StreamWriter writer1 = cache.getTableWriter(TEST_TABLE, FooType.getDescriptor()); - verify(mockSchemaCheck, times(1)).check(TEST_TABLE, FooType.getDescriptor()); - - StreamWriter writer2 = cache.getTableWriter(TEST_TABLE, AllSupportedTypes.getDescriptor()); - verify(mockSchemaCheck, times(1)).check(TEST_TABLE, AllSupportedTypes.getDescriptor()); - - List actualRequests = mockBigQueryWrite.getRequests(); - assertEquals(2, actualRequests.size()); - assertEquals( - TEST_TABLE, ((Storage.CreateWriteStreamRequest) actualRequests.get(0)).getParent()); - assertEquals( - TEST_TABLE, ((Storage.CreateWriteStreamRequest) actualRequests.get(1)).getParent()); - assertEquals(TEST_STREAM, writer1.getStreamNameString()); - assertEquals(TEST_STREAM_2, writer2.getStreamNameString()); - assertEquals(1, cache.cachedTableCount()); - - // Still able to get the FooType writer. - StreamWriter writer3 = cache.getTableWriter(TEST_TABLE, FooType.getDescriptor()); - verify(mockSchemaCheck, times(1)).check(TEST_TABLE, FooType.getDescriptor()); - assertEquals(TEST_STREAM, writer3.getStreamNameString()); - - // Create a writer with a even new schema. - WriterCreationResponseMock(TEST_STREAM_3); - WriterCreationResponseMock(TEST_STREAM_4); - StreamWriter writer4 = cache.getTableWriter(TEST_TABLE, NestedType.getDescriptor()); - verify(mockSchemaCheck, times(1)).check(TEST_TABLE, NestedType.getDescriptor()); - - LOG.info("blah"); - // This would cause a new stream to be created since the old entry is evicted. 
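The eviction rule being exercised: the cache keeps only a bounded number of schema entries per table (and a bounded number of tables overall), so asking for a writer whose entry was pushed out creates a brand-new write stream instead of reusing the old one. Condensed into a sketch that mirrors the different-table test further below, using the helper and constants from this file:

    // Sketch: cache sized for two tables; a third table evicts the least recently used.
    WriterCache cache = WriterCache.getTestInstance(client, 2, mockSchemaCheck);
    cache.getTableWriter(TEST_TABLE, FooType.getDescriptor());
    cache.getTableWriter(TEST_TABLE_2, FooType.getDescriptor());
    cache.getTableWriter(TEST_TABLE_3, NestedType.getDescriptor()); // evicts TEST_TABLE
    // Fetching TEST_TABLE again now issues a fresh CreateWriteStream call.
    cache.getTableWriter(TEST_TABLE, FooType.getDescriptor());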
- StreamWriter writer5 = cache.getTableWriter(TEST_TABLE, AllSupportedTypes.getDescriptor()); - verify(mockSchemaCheck, times(2)).check(TEST_TABLE, AllSupportedTypes.getDescriptor()); - assertEquals(TEST_STREAM_3, writer4.getStreamNameString()); - assertEquals(TEST_STREAM_4, writer5.getStreamNameString()); - assertEquals(1, cache.cachedTableCount()); - cache.clear(); - } - - @Test - public void testWriterWithDifferentTable() throws Exception { - WriterCache cache = WriterCache.getTestInstance(client, 2, mockSchemaCheck); - WriterCreationResponseMock(TEST_STREAM); - WriterCreationResponseMock(TEST_STREAM_21); - StreamWriter writer1 = cache.getTableWriter(TEST_TABLE, FooType.getDescriptor()); - StreamWriter writer2 = cache.getTableWriter(TEST_TABLE_2, FooType.getDescriptor()); - verify(mockSchemaCheck, times(1)).check(TEST_TABLE, FooType.getDescriptor()); - verify(mockSchemaCheck, times(1)).check(TEST_TABLE_2, FooType.getDescriptor()); - - List actualRequests = mockBigQueryWrite.getRequests(); - assertEquals(2, actualRequests.size()); - assertEquals( - TEST_TABLE, ((Storage.CreateWriteStreamRequest) actualRequests.get(0)).getParent()); - assertEquals( - TEST_TABLE_2, ((Storage.CreateWriteStreamRequest) actualRequests.get(1)).getParent()); - assertEquals(TEST_STREAM, writer1.getStreamNameString()); - assertEquals(TEST_STREAM_21, writer2.getStreamNameString()); - assertEquals(2, cache.cachedTableCount()); - - // Still able to get the FooType writer. - StreamWriter writer3 = cache.getTableWriter(TEST_TABLE_2, FooType.getDescriptor()); - verify(mockSchemaCheck, times(1)).check(TEST_TABLE_2, FooType.getDescriptor()); - Assert.assertEquals(TEST_STREAM_21, writer3.getStreamNameString()); - - // Create a writer with a even new schema. - WriterCreationResponseMock(TEST_STREAM_31); - WriterCreationResponseMock(TEST_STREAM); - StreamWriter writer4 = cache.getTableWriter(TEST_TABLE_3, NestedType.getDescriptor()); - verify(mockSchemaCheck, times(1)).check(TEST_TABLE_3, NestedType.getDescriptor()); - - // This would cause a new stream to be created since the old entry is evicted. - StreamWriter writer5 = cache.getTableWriter(TEST_TABLE, FooType.getDescriptor()); - verify(mockSchemaCheck, times(2)).check(TEST_TABLE, FooType.getDescriptor()); - - assertEquals(TEST_STREAM_31, writer4.getStreamNameString()); - assertEquals(TEST_STREAM, writer5.getStreamNameString()); - assertEquals(2, cache.cachedTableCount()); - cache.clear(); - } - - @Test - public void testConcurrentAccess() throws Exception { - final WriterCache cache = WriterCache.getTestInstance(client, 2, mockSchemaCheck); - // Make sure getting the same table writer in multiple thread only cause create to be called - // once. 
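The guarantee at stake here: lookups for a (table, schema) pair that is already cached, even from many threads at once, must reuse the existing writer rather than issue additional CreateWriteStream calls. In sketch form, against the same mocked service:

    // Sketch: repeated lookups share one writer; only the first creates a stream.
    StreamWriter first = cache.getTableWriter(TEST_TABLE, FooType.getDescriptor());
    StreamWriter again = cache.getTableWriter(TEST_TABLE, FooType.getDescriptor());
    assertEquals(first.getStreamNameString(), again.getStreamNameString());
    assertEquals(1, mockBigQueryWrite.getRequests().size()); // one CreateWriteStream RPC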
- WriterCreationResponseMock(TEST_STREAM); - ExecutorService executor = Executors.newFixedThreadPool(10); - for (int i = 0; i < 10; i++) { - executor.execute( - new Runnable() { - @Override - public void run() { - try { - assertTrue(cache.getTableWriter(TEST_TABLE, FooType.getDescriptor()) != null); - } catch (Exception e) { - fail(e.getMessage()); - } - } - }); - } - executor.shutdown(); - try { - executor.awaitTermination(1, TimeUnit.MINUTES); - } catch (InterruptedException e) { - LOG.info(e.toString()); - } - } -} diff --git a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/it/ITBigQueryWriteManualClientTest.java b/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/it/ITBigQueryWriteManualClientTest.java deleted file mode 100644 index 720f13f481..0000000000 --- a/google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/it/ITBigQueryWriteManualClientTest.java +++ /dev/null @@ -1,729 +0,0 @@ -/* - * Copyright 2019 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1alpha2.it; - -import static com.google.common.truth.Truth.assertThat; -import static org.junit.Assert.assertEquals; -import static org.junit.Assert.assertFalse; -import static org.junit.Assert.assertTrue; -import static org.junit.Assert.fail; - -import com.google.api.core.ApiFuture; -import com.google.cloud.ServiceOptions; -import com.google.cloud.bigquery.*; -import com.google.cloud.bigquery.Schema; -import com.google.cloud.bigquery.storage.test.SchemaTest.FakeFooType; -import com.google.cloud.bigquery.storage.test.Test.*; -import com.google.cloud.bigquery.storage.v1alpha2.*; -import com.google.cloud.bigquery.storage.v1alpha2.Storage.*; -import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream; -import com.google.cloud.bigquery.testing.RemoteBigQueryHelper; -import com.google.protobuf.Descriptors; -import com.google.protobuf.Int64Value; -import com.google.protobuf.Message; -import java.io.IOException; -import java.util.*; -import java.util.concurrent.*; -import java.util.logging.Logger; -import org.json.JSONArray; -import org.json.JSONObject; -import org.junit.AfterClass; -import org.junit.Assert; -import org.junit.BeforeClass; -import org.junit.Test; -import org.threeten.bp.Duration; - -/** Integration tests for BigQuery Storage API. 
*/ -public class ITBigQueryWriteManualClientTest { - private static final Logger LOG = - Logger.getLogger(ITBigQueryWriteManualClientTest.class.getName()); - private static final String DATASET = RemoteBigQueryHelper.generateDatasetName(); - private static final String TABLE = "testtable"; - private static final String TABLE2 = "complicatedtable"; - private static final String DESCRIPTION = "BigQuery Write Java manual client test dataset"; - - private static BigQueryWriteClient client; - private static TableInfo tableInfo; - private static TableInfo tableInfo2; - private static String tableId; - private static String tableId2; - private static BigQuery bigquery; - - @BeforeClass - public static void beforeClass() throws IOException { - client = BigQueryWriteClient.create(); - - RemoteBigQueryHelper bigqueryHelper = RemoteBigQueryHelper.create(); - bigquery = bigqueryHelper.getOptions().getService(); - DatasetInfo datasetInfo = - DatasetInfo.newBuilder(/* datasetId = */ DATASET).setDescription(DESCRIPTION).build(); - bigquery.create(datasetInfo); - LOG.info("Created test dataset: " + DATASET); - tableInfo = - TableInfo.newBuilder( - TableId.of(DATASET, TABLE), - StandardTableDefinition.of( - Schema.of( - com.google.cloud.bigquery.Field.newBuilder("foo", LegacySQLTypeName.STRING) - .setMode(Field.Mode.NULLABLE) - .build()))) - .build(); - com.google.cloud.bigquery.Field.Builder innerTypeFieldBuilder = - com.google.cloud.bigquery.Field.newBuilder( - "inner_type", - LegacySQLTypeName.RECORD, - com.google.cloud.bigquery.Field.newBuilder("value", LegacySQLTypeName.STRING) - .setMode(Field.Mode.REPEATED) - .build()); - - tableInfo2 = - TableInfo.newBuilder( - TableId.of(DATASET, TABLE2), - StandardTableDefinition.of( - Schema.of( - Field.newBuilder( - "nested_repeated_type", - LegacySQLTypeName.RECORD, - innerTypeFieldBuilder.setMode(Field.Mode.REPEATED).build()) - .setMode(Field.Mode.REPEATED) - .build(), - innerTypeFieldBuilder.setMode(Field.Mode.NULLABLE).build()))) - .build(); - bigquery.create(tableInfo); - bigquery.create(tableInfo2); - tableId = - String.format( - "projects/%s/datasets/%s/tables/%s", - ServiceOptions.getDefaultProjectId(), DATASET, TABLE); - tableId2 = - String.format( - "projects/%s/datasets/%s/tables/%s", - ServiceOptions.getDefaultProjectId(), DATASET, TABLE2); - } - - @AfterClass - public static void afterClass() { - if (client != null) { - client.close(); - } - - if (bigquery != null) { - RemoteBigQueryHelper.forceDelete(bigquery, DATASET); - LOG.info("Deleted test dataset: " + DATASET); - } - } - - private AppendRowsRequest.Builder createAppendRequest(String streamName, String[] messages) { - AppendRowsRequest.Builder requestBuilder = AppendRowsRequest.newBuilder(); - - AppendRowsRequest.ProtoData.Builder dataBuilder = AppendRowsRequest.ProtoData.newBuilder(); - dataBuilder.setWriterSchema(ProtoSchemaConverter.convert(FooType.getDescriptor())); - - ProtoBufProto.ProtoRows.Builder rows = ProtoBufProto.ProtoRows.newBuilder(); - for (String message : messages) { - FooType foo = FooType.newBuilder().setFoo(message).build(); - rows.addSerializedRows(foo.toByteString()); - } - dataBuilder.setRows(rows.build()); - return requestBuilder.setProtoRows(dataBuilder.build()).setWriteStream(streamName); - } - - private AppendRowsRequest.Builder createAppendRequestComplicateType( - String streamName, String[] messages) { - AppendRowsRequest.Builder requestBuilder = AppendRowsRequest.newBuilder(); - - AppendRowsRequest.ProtoData.Builder dataBuilder = 
AppendRowsRequest.ProtoData.newBuilder(); - dataBuilder.setWriterSchema(ProtoSchemaConverter.convert(ComplicateType.getDescriptor())); - - ProtoBufProto.ProtoRows.Builder rows = ProtoBufProto.ProtoRows.newBuilder(); - for (String message : messages) { - ComplicateType foo = - ComplicateType.newBuilder() - .setInnerType(InnerType.newBuilder().addValue(message).addValue(message).build()) - .build(); - rows.addSerializedRows(foo.toByteString()); - } - dataBuilder.setRows(rows.build()); - return requestBuilder.setProtoRows(dataBuilder.build()).setWriteStream(streamName); - } - - @Test - public void testBatchWriteWithCommittedStream() - throws IOException, InterruptedException, ExecutionException { - WriteStream writeStream = - client.createWriteStream( - CreateWriteStreamRequest.newBuilder() - .setParent(tableId) - .setWriteStream( - WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) - .build()); - try (StreamWriter streamWriter = - StreamWriter.newBuilder(writeStream.getName()) - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setRequestByteThreshold(1024 * 1024L) // 1 Mb - .setElementCountThreshold(2L) - .setDelayThreshold(Duration.ofSeconds(2)) - .build()) - .build()) { - LOG.info("Sending one message"); - ApiFuture response = - streamWriter.append( - createAppendRequest(writeStream.getName(), new String[] {"aaa"}).build()); - assertEquals(0, response.get().getOffset()); - - LOG.info("Sending two more messages"); - ApiFuture response1 = - streamWriter.append( - createAppendRequest(writeStream.getName(), new String[] {"bbb", "ccc"}).build()); - ApiFuture response2 = - streamWriter.append( - createAppendRequest(writeStream.getName(), new String[] {"ddd"}).build()); - assertEquals(1, response1.get().getOffset()); - assertEquals(3, response2.get().getOffset()); - - TableResult result = - bigquery.listTableData( - tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L)); - Iterator iter = result.getValues().iterator(); - assertEquals("aaa", iter.next().get(0).getStringValue()); - assertEquals("bbb", iter.next().get(0).getStringValue()); - assertEquals("ccc", iter.next().get(0).getStringValue()); - assertEquals("ddd", iter.next().get(0).getStringValue()); - assertEquals(false, iter.hasNext()); - } - } - - @Test - public void testJsonStreamWriterBatchWriteWithCommittedStream() - throws IOException, InterruptedException, ExecutionException, - Descriptors.DescriptorValidationException { - String tableName = "JsonTable"; - TableInfo tableInfo = - TableInfo.newBuilder( - TableId.of(DATASET, tableName), - StandardTableDefinition.of( - Schema.of( - com.google.cloud.bigquery.Field.newBuilder("foo", LegacySQLTypeName.STRING) - .build()))) - .build(); - bigquery.create(tableInfo); - TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); - WriteStream writeStream = - client.createWriteStream( - CreateWriteStreamRequest.newBuilder() - .setParent(parent.toString()) - .setWriteStream( - WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) - .build()); - try (JsonStreamWriter jsonStreamWriter = - JsonStreamWriter.newBuilder(writeStream.getName(), writeStream.getTableSchema()) - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setRequestByteThreshold(1024 * 1024L) // 1 Mb - .setElementCountThreshold(2L) - .setDelayThreshold(Duration.ofSeconds(2)) - .build()) - .build()) { - LOG.info("Sending one message"); - JSONObject foo = new JSONObject(); - 
foo.put("foo", "aaa"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - - ApiFuture response = - jsonStreamWriter.append(jsonArr, -1, /* allowUnknownFields */ false); - assertEquals(0, response.get().getOffset()); - - LOG.info("Sending two more messages"); - JSONObject foo1 = new JSONObject(); - foo1.put("foo", "bbb"); - JSONObject foo2 = new JSONObject(); - foo2.put("foo", "ccc"); - JSONArray jsonArr1 = new JSONArray(); - jsonArr1.put(foo1); - jsonArr1.put(foo2); - - JSONObject foo3 = new JSONObject(); - foo3.put("foo", "ddd"); - JSONArray jsonArr2 = new JSONArray(); - jsonArr2.put(foo3); - - ApiFuture response1 = - jsonStreamWriter.append(jsonArr1, -1, /* allowUnknownFields */ false); - ApiFuture response2 = - jsonStreamWriter.append(jsonArr2, -1, /* allowUnknownFields */ false); - assertEquals(1, response1.get().getOffset()); - assertEquals(3, response2.get().getOffset()); - - TableResult result = - bigquery.listTableData( - tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L)); - Iterator iter = result.getValues().iterator(); - assertEquals("aaa", iter.next().get(0).getStringValue()); - assertEquals("bbb", iter.next().get(0).getStringValue()); - assertEquals("ccc", iter.next().get(0).getStringValue()); - assertEquals("ddd", iter.next().get(0).getStringValue()); - assertEquals(false, iter.hasNext()); - jsonStreamWriter.close(); - } - } - - @Test - public void testJsonStreamWriterSchemaUpdate() - throws IOException, InterruptedException, ExecutionException, - Descriptors.DescriptorValidationException { - String tableName = "SchemaUpdateTable"; - TableInfo tableInfo = - TableInfo.newBuilder( - TableId.of(DATASET, tableName), - StandardTableDefinition.of( - Schema.of( - com.google.cloud.bigquery.Field.newBuilder("foo", LegacySQLTypeName.STRING) - .build()))) - .build(); - - bigquery.create(tableInfo); - TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); - WriteStream writeStream = - client.createWriteStream( - CreateWriteStreamRequest.newBuilder() - .setParent(parent.toString()) - .setWriteStream( - WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) - .build()); - - try (JsonStreamWriter jsonStreamWriter = - JsonStreamWriter.newBuilder(writeStream.getName(), writeStream.getTableSchema()) - .setBatchingSettings( - StreamWriter.Builder.DEFAULT_BATCHING_SETTINGS - .toBuilder() - .setElementCountThreshold(1L) - .build()) - .build()) { - // 1). Send 1 row - JSONObject foo = new JSONObject(); - foo.put("foo", "aaa"); - JSONArray jsonArr = new JSONArray(); - jsonArr.put(foo); - - ApiFuture response = - jsonStreamWriter.append(jsonArr, -1, /* allowUnknownFields */ false); - assertEquals(0, response.get().getOffset()); - // 2). Schema update and wait until querying it returns a new schema. 
- try { - com.google.cloud.bigquery.Table table = bigquery.getTable(DATASET, tableName); - Schema schema = table.getDefinition().getSchema(); - FieldList fields = schema.getFields(); - Field newField = - Field.newBuilder("bar", LegacySQLTypeName.STRING).setMode(Field.Mode.NULLABLE).build(); - - List fieldList = new ArrayList(); - fieldList.add(fields.get(0)); - fieldList.add(newField); - Schema newSchema = Schema.of(fieldList); - // Update the table with the new schema - com.google.cloud.bigquery.Table updatedTable = - table.toBuilder().setDefinition(StandardTableDefinition.of(newSchema)).build(); - updatedTable.update(); - int millis = 0; - while (millis <= 10000) { - if (newSchema.equals(table.reload().getDefinition().getSchema())) { - break; - } - Thread.sleep(1000); - millis += 1000; - } - newSchema = schema; - LOG.info( - "bar column successfully added to table in " - + millis - + " millis: " - + bigquery.getTable(DATASET, tableName).getDefinition().getSchema()); - } catch (BigQueryException e) { - LOG.severe("bar column was not added. \n" + e.toString()); - } - // 3). Send rows to wait for updatedSchema to be returned. - JSONObject foo2 = new JSONObject(); - foo2.put("foo", "bbb"); - JSONArray jsonArr2 = new JSONArray(); - jsonArr2.put(foo2); - - int next = 0; - for (int i = 1; i < 100; i++) { - ApiFuture response2 = - jsonStreamWriter.append(jsonArr2, -1, /* allowUnknownFields */ false); - assertEquals(i, response2.get().getOffset()); - if (response2.get().hasUpdatedSchema()) { - next = i; - break; - } else { - Thread.sleep(1000); - } - } - - int millis = 0; - while (millis <= 10000) { - if (jsonStreamWriter.getDescriptor().getFields().size() == 2) { - LOG.info("JsonStreamWriter successfully updated internal descriptor!"); - break; - } - Thread.sleep(100); - millis += 100; - } - assertTrue(jsonStreamWriter.getDescriptor().getFields().size() == 2); - // 4). Send rows with updated schema. 
- JSONObject updatedFoo = new JSONObject(); - updatedFoo.put("foo", "ccc"); - updatedFoo.put("bar", "ddd"); - JSONArray updatedJsonArr = new JSONArray(); - updatedJsonArr.put(updatedFoo); - for (int i = 0; i < 10; i++) { - ApiFuture response3 = - jsonStreamWriter.append(updatedJsonArr, -1, /* allowUnknownFields */ false); - assertEquals(next + 1 + i, response3.get().getOffset()); - } - - TableResult result3 = - bigquery.listTableData( - tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L)); - Iterator iter3 = result3.getValues().iterator(); - assertEquals("aaa", iter3.next().get(0).getStringValue()); - for (int j = 1; j <= next; j++) { - assertEquals("bbb", iter3.next().get(0).getStringValue()); - } - for (int j = next + 1; j < next + 1 + 10; j++) { - FieldValueList temp = iter3.next(); - assertEquals("ccc", temp.get(0).getStringValue()); - assertEquals("ddd", temp.get(1).getStringValue()); - } - assertEquals(false, iter3.hasNext()); - } - } - - @Test - public void testComplicateSchemaWithPendingStream() - throws IOException, InterruptedException, ExecutionException { - WriteStream writeStream = - client.createWriteStream( - CreateWriteStreamRequest.newBuilder() - .setParent(tableId2) - .setWriteStream(WriteStream.newBuilder().setType(WriteStream.Type.PENDING).build()) - .build()); - FinalizeWriteStreamResponse finalizeResponse = FinalizeWriteStreamResponse.getDefaultInstance(); - try (StreamWriter streamWriter = StreamWriter.newBuilder(writeStream.getName()).build()) { - LOG.info("Sending two messages"); - ApiFuture response = - streamWriter.append( - createAppendRequestComplicateType(writeStream.getName(), new String[] {"aaa"}) - .setOffset(Int64Value.of(0L)) - .build()); - assertEquals(0, response.get().getOffset()); - - ApiFuture response2 = - streamWriter.append( - createAppendRequestComplicateType(writeStream.getName(), new String[] {"bbb"}) - .setOffset(Int64Value.of(1L)) - .build()); - assertEquals(1, response2.get().getOffset()); - - // Nothing showed up since rows are not committed. - TableResult result = - bigquery.listTableData( - tableInfo2.getTableId(), BigQuery.TableDataListOption.startIndex(0L)); - Iterator iter = result.getValues().iterator(); - assertEquals(false, iter.hasNext()); - - finalizeResponse = - client.finalizeWriteStream( - FinalizeWriteStreamRequest.newBuilder().setName(writeStream.getName()).build()); - - ApiFuture response3 = - streamWriter.append( - createAppendRequestComplicateType(writeStream.getName(), new String[] {"ccc"}) - .setOffset(Int64Value.of(2L)) - .build()); - try { - assertEquals(2, response3.get().getOffset()); - fail("Append to finalized stream should fail."); - } catch (Exception expected) { - // The exception thrown is not stable. Opened a bug to fix it. - LOG.info("Got exception: " + expected.toString()); - } - } - // Finalize row count is not populated. - assertEquals(2, finalizeResponse.getRowCount()); - BatchCommitWriteStreamsResponse batchCommitWriteStreamsResponse = - client.batchCommitWriteStreams( - BatchCommitWriteStreamsRequest.newBuilder() - .setParent(tableId2) - .addWriteStreams(writeStream.getName()) - .build()); - assertEquals(true, batchCommitWriteStreamsResponse.hasCommitTime()); - TableResult queryResult = - bigquery.query( - QueryJobConfiguration.newBuilder("SELECT * from " + DATASET + '.' 
+ TABLE2).build()); - Iterator queryIter = queryResult.getValues().iterator(); - assertTrue(queryIter.hasNext()); - assertEquals( - "[FieldValue{attribute=REPEATED, value=[FieldValue{attribute=PRIMITIVE, value=aaa}, FieldValue{attribute=PRIMITIVE, value=aaa}]}]", - queryIter.next().get(1).getRepeatedValue().toString()); - assertEquals( - "[FieldValue{attribute=REPEATED, value=[FieldValue{attribute=PRIMITIVE, value=bbb}, FieldValue{attribute=PRIMITIVE, value=bbb}]}]", - queryIter.next().get(1).getRepeatedValue().toString()); - assertFalse(queryIter.hasNext()); - } - - @Test - public void testStreamError() throws IOException, InterruptedException, ExecutionException { - WriteStream writeStream = - client.createWriteStream( - CreateWriteStreamRequest.newBuilder() - .setParent(tableId) - .setWriteStream( - WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) - .build()); - try (StreamWriter streamWriter = StreamWriter.newBuilder(writeStream.getName()).build()) { - AppendRowsRequest request = - createAppendRequest(writeStream.getName(), new String[] {"aaa"}).build(); - request - .toBuilder() - .setProtoRows(request.getProtoRows().toBuilder().clearWriterSchema().build()) - .build(); - ApiFuture response = - streamWriter.append( - createAppendRequest(writeStream.getName(), new String[] {"aaa"}).build()); - assertEquals(0L, response.get().getOffset()); - // Send in a bogus stream name should cause in connection error. - ApiFuture response2 = - streamWriter.append( - createAppendRequest(writeStream.getName(), new String[] {"aaa"}) - .setOffset(Int64Value.of(100L)) - .build()); - try { - response2.get().getOffset(); - Assert.fail("Should fail"); - } catch (ExecutionException e) { - assertThat(e.getCause().getMessage()) - .contains("OUT_OF_RANGE: The offset is beyond stream, expected offset 1, received 100"); - } - // We can keep sending requests on the same stream. - ApiFuture response3 = - streamWriter.append( - createAppendRequest(writeStream.getName(), new String[] {"aaa"}).build()); - assertEquals(1L, response3.get().getOffset()); - } finally { - } - } - - @Test - public void testStreamReconnect() throws IOException, InterruptedException, ExecutionException { - WriteStream writeStream = - client.createWriteStream( - CreateWriteStreamRequest.newBuilder() - .setParent(tableId) - .setWriteStream( - WriteStream.newBuilder().setType(WriteStream.Type.COMMITTED).build()) - .build()); - try (StreamWriter streamWriter = StreamWriter.newBuilder(writeStream.getName()).build()) { - ApiFuture response = - streamWriter.append( - createAppendRequest(writeStream.getName(), new String[] {"aaa"}) - .setOffset(Int64Value.of(0L)) - .build()); - assertEquals(0L, response.get().getOffset()); - } - - try (StreamWriter streamWriter = StreamWriter.newBuilder(writeStream.getName()).build()) { - // Currently there is a bug that reconnection must wait 5 seconds to get the real row count. 
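The reconnect contract pinned down by these two writers: a COMMITTED stream outlives any single StreamWriter, and on a fresh writer the server validates explicit offsets against the rows already committed, which is what makes offset pinning a dedup mechanism across reconnects. A sketch of the failure mode, mirroring the OUT_OF_RANGE assertion in testStreamError above:

    // Sketch: skipping past the committed end is rejected rather than silently gapped.
    ApiFuture<AppendRowsResponse> ahead =
        streamWriter.append(
            createAppendRequest(writeStream.getName(), new String[] {"x"})
                .setOffset(Int64Value.of(100L)) // committed end is 1, so this fails
                .build());
    try {
      ahead.get();
    } catch (ExecutionException e) {
      // Cause message: "OUT_OF_RANGE: The offset is beyond stream, expected offset 1..."
    }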
- Thread.sleep(5000L); - ApiFuture response = - streamWriter.append( - createAppendRequest(writeStream.getName(), new String[] {"bbb"}) - .setOffset(Int64Value.of(1L)) - .build()); - assertEquals(1L, response.get().getOffset()); - } - } - - class CallAppend implements Runnable { - List> resultList; - List messages; - - CallAppend(List> resultList, List messages) { - this.resultList = resultList; - this.messages = messages; - } - - @Override - public void run() { - try { - LOG.info("size: " + resultList.size()); - resultList.add(DirectWriter.append(tableId, messages)); - } catch (Exception e) { - fail("Unexpected Exception: " + e.toString()); - } - } - } - - @Test - public void testDirectWrite() - throws IOException, InterruptedException, ExecutionException, - Descriptors.DescriptorValidationException { - final FooType fa = FooType.newBuilder().setFoo("aaa").build(); - final FooType fb = FooType.newBuilder().setFoo("bbb").build(); - Set expectedOffset = new HashSet<>(); - for (int i = 0; i < 10; i++) { - expectedOffset.add(Long.valueOf(i * 2)); - } - ExecutorService executor = Executors.newFixedThreadPool(10); - List> responses = new ArrayList<>(); - Callable callable = - new Callable() { - @Override - public Long call() throws IOException, InterruptedException, ExecutionException { - ApiFuture result = DirectWriter.append(tableId, Arrays.asList(fa, fb)); - return result.get(); - } - }; - for (int i = 0; i < 10; i++) { - responses.add(executor.submit(callable)); - } - for (Future response : responses) { - assertTrue(expectedOffset.remove(response.get())); - } - assertTrue(expectedOffset.isEmpty()); - - JSONObject a_json = new JSONObject(); - a_json.put("foo", "aaa"); - JSONObject b_json = new JSONObject(); - b_json.put("foo", "bbb"); - final JSONArray jsonArr = new JSONArray(); - jsonArr.put(a_json); - jsonArr.put(b_json); - - expectedOffset = new HashSet<>(); - for (int i = 0; i < 10; i++) { - expectedOffset.add(Long.valueOf(i * 2)); - } - executor = Executors.newFixedThreadPool(10); - responses = new ArrayList<>(); - callable = - new Callable() { - @Override - public Long call() - throws IOException, InterruptedException, ExecutionException, - Descriptors.DescriptorValidationException { - ApiFuture result = DirectWriter.append(tableId, jsonArr); - return result.get(); - } - }; - for (int i = 0; i < 10; i++) { - responses.add(executor.submit(callable)); - } - for (Future response : responses) { - assertTrue(expectedOffset.remove(response.get())); - } - assertTrue(expectedOffset.isEmpty()); - executor.shutdown(); - try { - executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); - } catch (InterruptedException e) { - LOG.info(e.toString()); - } - - DirectWriter.clearCache(); - } - - @Test - public void testDirectWriteFail() throws IOException, InterruptedException, ExecutionException { - final FakeFooType fa = FakeFooType.newBuilder().setFoo(100).build(); - Set expectedOffset = new HashSet<>(); - for (int i = 0; i < 10; i++) { - expectedOffset.add(Long.valueOf(i)); - } - ExecutorService executor = Executors.newFixedThreadPool(10); - List> responses = new ArrayList<>(); - Callable callable = - new Callable() { - @Override - public Long call() - throws IOException, InterruptedException, ExecutionException, - IllegalArgumentException { - ApiFuture result = DirectWriter.append(tableId, Arrays.asList(fa)); - return result.get(); - } - }; - - for (int i = 0; i < 10; i++) { - responses.add(executor.submit(callable)); - } - for (Future response : responses) { - try { - response.get(); - } 
catch (ExecutionException e) { - assertEquals( - "The proto field FakeFooType.foo does not have a matching type with the big query field testtable.foo.", - e.getCause().getMessage()); - } - } - executor.shutdown(); - try { - executor.awaitTermination(Long.MAX_VALUE, TimeUnit.NANOSECONDS); - } catch (InterruptedException e) { - LOG.info(e.toString()); - } - DirectWriter.clearCache(); - } - - @Test - public void testFlushRows() throws IOException, InterruptedException, ExecutionException { - String tableName = "BufferTable"; - TableInfo tableInfo = - TableInfo.newBuilder( - TableId.of(DATASET, tableName), - StandardTableDefinition.of( - Schema.of( - com.google.cloud.bigquery.Field.newBuilder("foo", LegacySQLTypeName.STRING) - .build()))) - .build(); - bigquery.create(tableInfo); - TableName parent = TableName.of(ServiceOptions.getDefaultProjectId(), DATASET, tableName); - WriteStream writeStream = - client.createWriteStream( - CreateWriteStreamRequest.newBuilder() - .setParent(parent.toString()) - .setWriteStream(WriteStream.newBuilder().setType(WriteStream.Type.BUFFERED).build()) - .build()); - try (StreamWriter streamWriter = StreamWriter.newBuilder(writeStream.getName()).build()) { - ApiFuture response = - streamWriter.append( - createAppendRequest(writeStream.getName(), new String[] {"aaa"}) - .setOffset(Int64Value.of(0L)) - .build()); - assertEquals(0L, response.get().getOffset()); - streamWriter.flush(0); - } - TableResult result = - bigquery.listTableData(tableInfo.getTableId(), BigQuery.TableDataListOption.startIndex(0L)); - Iterator iter = result.getValues().iterator(); - assertEquals("aaa", iter.next().get(0).getStringValue()); - assertEquals(false, iter.hasNext()); - } -} diff --git a/grpc-google-cloud-bigquerystorage-v1alpha2/pom.xml b/grpc-google-cloud-bigquerystorage-v1alpha2/pom.xml deleted file mode 100644 index 9971a4929e..0000000000 --- a/grpc-google-cloud-bigquerystorage-v1alpha2/pom.xml +++ /dev/null @@ -1,50 +0,0 @@ - - 4.0.0 - com.google.api.grpc - grpc-google-cloud-bigquerystorage-v1alpha2 - 0.118.2-SNAPSHOT - grpc-google-cloud-bigquerystorage-v1alpha2 - GRPC library for grpc-google-cloud-bigquerystorage-v1alpha2 - - com.google.cloud - google-cloud-bigquerystorage-parent - 1.18.2-SNAPSHOT - - - - io.grpc - grpc-api - - - io.grpc - grpc-stub - - - io.grpc - grpc-protobuf - - - com.google.protobuf - protobuf-java - - - com.google.api.grpc - proto-google-cloud-bigquerystorage-v1alpha2 - - - com.google.guava - guava - - - - - - - org.codehaus.mojo - flatten-maven-plugin - - - - diff --git a/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java b/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java deleted file mode 100644 index e075df92de..0000000000 --- a/grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java +++ /dev/null @@ -1,1076 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -package com.google.cloud.bigquery.storage.v1alpha2; - -import static io.grpc.MethodDescriptor.generateFullMethodName; - -/** - * - * - * <pre>
- * BigQuery Write API.
- * The Write API can be used to write data to BigQuery.
- * </pre>
- */ -@javax.annotation.Generated( - value = "by gRPC proto compiler", - comments = "Source: google/cloud/bigquery/storage/v1alpha2/storage.proto") -@java.lang.Deprecated -public final class BigQueryWriteGrpc { - - private BigQueryWriteGrpc() {} - - public static final String SERVICE_NAME = "google.cloud.bigquery.storage.v1alpha2.BigQueryWrite"; - - // Static method descriptors that strictly reflect the proto. - private static volatile io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> - getCreateWriteStreamMethod; - - @io.grpc.stub.annotations.RpcMethod( - fullMethodName = SERVICE_NAME + '/' + "CreateWriteStream", - requestType = - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest.class, - responseType = com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.class, - methodType = io.grpc.MethodDescriptor.MethodType.UNARY) - public static io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> - getCreateWriteStreamMethod() { - io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> - getCreateWriteStreamMethod; - if ((getCreateWriteStreamMethod = BigQueryWriteGrpc.getCreateWriteStreamMethod) == null) { - synchronized (BigQueryWriteGrpc.class) { - if ((getCreateWriteStreamMethod = BigQueryWriteGrpc.getCreateWriteStreamMethod) == null) { - BigQueryWriteGrpc.getCreateWriteStreamMethod = - getCreateWriteStreamMethod = - io.grpc.MethodDescriptor - . - newBuilder() - .setType(io.grpc.MethodDescriptor.MethodType.UNARY) - .setFullMethodName(generateFullMethodName(SERVICE_NAME, "CreateWriteStream")) - .setSampledToLocalTracing(true) - .setRequestMarshaller( - io.grpc.protobuf.ProtoUtils.marshaller( - com.google.cloud.bigquery.storage.v1alpha2.Storage - .CreateWriteStreamRequest.getDefaultInstance())) - .setResponseMarshaller( - io.grpc.protobuf.ProtoUtils.marshaller( - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream - .getDefaultInstance())) - .setSchemaDescriptor( - new BigQueryWriteMethodDescriptorSupplier("CreateWriteStream")) - .build(); - } - } - } - return getCreateWriteStreamMethod; - } - - private static volatile io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse> - getAppendRowsMethod; - - @io.grpc.stub.annotations.RpcMethod( - fullMethodName = SERVICE_NAME + '/' + "AppendRows", - requestType = com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.class, - responseType = com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.class, - methodType = io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) - public static io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse> - getAppendRowsMethod() { - io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse> - getAppendRowsMethod; - if ((getAppendRowsMethod = BigQueryWriteGrpc.getAppendRowsMethod) == null) { - synchronized (BigQueryWriteGrpc.class) { - if ((getAppendRowsMethod = 
BigQueryWriteGrpc.getAppendRowsMethod) == null) { - BigQueryWriteGrpc.getAppendRowsMethod = - getAppendRowsMethod = - io.grpc.MethodDescriptor - . - newBuilder() - .setType(io.grpc.MethodDescriptor.MethodType.BIDI_STREAMING) - .setFullMethodName(generateFullMethodName(SERVICE_NAME, "AppendRows")) - .setSampledToLocalTracing(true) - .setRequestMarshaller( - io.grpc.protobuf.ProtoUtils.marshaller( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - .getDefaultInstance())) - .setResponseMarshaller( - io.grpc.protobuf.ProtoUtils.marshaller( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse - .getDefaultInstance())) - .setSchemaDescriptor(new BigQueryWriteMethodDescriptorSupplier("AppendRows")) - .build(); - } - } - } - return getAppendRowsMethod; - } - - private static volatile io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> - getGetWriteStreamMethod; - - @io.grpc.stub.annotations.RpcMethod( - fullMethodName = SERVICE_NAME + '/' + "GetWriteStream", - requestType = com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.class, - responseType = com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.class, - methodType = io.grpc.MethodDescriptor.MethodType.UNARY) - public static io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> - getGetWriteStreamMethod() { - io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> - getGetWriteStreamMethod; - if ((getGetWriteStreamMethod = BigQueryWriteGrpc.getGetWriteStreamMethod) == null) { - synchronized (BigQueryWriteGrpc.class) { - if ((getGetWriteStreamMethod = BigQueryWriteGrpc.getGetWriteStreamMethod) == null) { - BigQueryWriteGrpc.getGetWriteStreamMethod = - getGetWriteStreamMethod = - io.grpc.MethodDescriptor - . 
- newBuilder() - .setType(io.grpc.MethodDescriptor.MethodType.UNARY) - .setFullMethodName(generateFullMethodName(SERVICE_NAME, "GetWriteStream")) - .setSampledToLocalTracing(true) - .setRequestMarshaller( - io.grpc.protobuf.ProtoUtils.marshaller( - com.google.cloud.bigquery.storage.v1alpha2.Storage - .GetWriteStreamRequest.getDefaultInstance())) - .setResponseMarshaller( - io.grpc.protobuf.ProtoUtils.marshaller( - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream - .getDefaultInstance())) - .setSchemaDescriptor( - new BigQueryWriteMethodDescriptorSupplier("GetWriteStream")) - .build(); - } - } - } - return getGetWriteStreamMethod; - } - - private static volatile io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse> - getFinalizeWriteStreamMethod; - - @io.grpc.stub.annotations.RpcMethod( - fullMethodName = SERVICE_NAME + '/' + "FinalizeWriteStream", - requestType = - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest.class, - responseType = - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse.class, - methodType = io.grpc.MethodDescriptor.MethodType.UNARY) - public static io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse> - getFinalizeWriteStreamMethod() { - io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse> - getFinalizeWriteStreamMethod; - if ((getFinalizeWriteStreamMethod = BigQueryWriteGrpc.getFinalizeWriteStreamMethod) == null) { - synchronized (BigQueryWriteGrpc.class) { - if ((getFinalizeWriteStreamMethod = BigQueryWriteGrpc.getFinalizeWriteStreamMethod) - == null) { - BigQueryWriteGrpc.getFinalizeWriteStreamMethod = - getFinalizeWriteStreamMethod = - io.grpc.MethodDescriptor - . 
- newBuilder() - .setType(io.grpc.MethodDescriptor.MethodType.UNARY) - .setFullMethodName( - generateFullMethodName(SERVICE_NAME, "FinalizeWriteStream")) - .setSampledToLocalTracing(true) - .setRequestMarshaller( - io.grpc.protobuf.ProtoUtils.marshaller( - com.google.cloud.bigquery.storage.v1alpha2.Storage - .FinalizeWriteStreamRequest.getDefaultInstance())) - .setResponseMarshaller( - io.grpc.protobuf.ProtoUtils.marshaller( - com.google.cloud.bigquery.storage.v1alpha2.Storage - .FinalizeWriteStreamResponse.getDefaultInstance())) - .setSchemaDescriptor( - new BigQueryWriteMethodDescriptorSupplier("FinalizeWriteStream")) - .build(); - } - } - } - return getFinalizeWriteStreamMethod; - } - - private static volatile io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse> - getBatchCommitWriteStreamsMethod; - - @io.grpc.stub.annotations.RpcMethod( - fullMethodName = SERVICE_NAME + '/' + "BatchCommitWriteStreams", - requestType = - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest.class, - responseType = - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse.class, - methodType = io.grpc.MethodDescriptor.MethodType.UNARY) - public static io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse> - getBatchCommitWriteStreamsMethod() { - io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse> - getBatchCommitWriteStreamsMethod; - if ((getBatchCommitWriteStreamsMethod = BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod) - == null) { - synchronized (BigQueryWriteGrpc.class) { - if ((getBatchCommitWriteStreamsMethod = BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod) - == null) { - BigQueryWriteGrpc.getBatchCommitWriteStreamsMethod = - getBatchCommitWriteStreamsMethod = - io.grpc.MethodDescriptor - . 
- newBuilder() - .setType(io.grpc.MethodDescriptor.MethodType.UNARY) - .setFullMethodName( - generateFullMethodName(SERVICE_NAME, "BatchCommitWriteStreams")) - .setSampledToLocalTracing(true) - .setRequestMarshaller( - io.grpc.protobuf.ProtoUtils.marshaller( - com.google.cloud.bigquery.storage.v1alpha2.Storage - .BatchCommitWriteStreamsRequest.getDefaultInstance())) - .setResponseMarshaller( - io.grpc.protobuf.ProtoUtils.marshaller( - com.google.cloud.bigquery.storage.v1alpha2.Storage - .BatchCommitWriteStreamsResponse.getDefaultInstance())) - .setSchemaDescriptor( - new BigQueryWriteMethodDescriptorSupplier("BatchCommitWriteStreams")) - .build(); - } - } - } - return getBatchCommitWriteStreamsMethod; - } - - private static volatile io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse> - getFlushRowsMethod; - - @io.grpc.stub.annotations.RpcMethod( - fullMethodName = SERVICE_NAME + '/' + "FlushRows", - requestType = com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest.class, - responseType = com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse.class, - methodType = io.grpc.MethodDescriptor.MethodType.UNARY) - public static io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse> - getFlushRowsMethod() { - io.grpc.MethodDescriptor< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse> - getFlushRowsMethod; - if ((getFlushRowsMethod = BigQueryWriteGrpc.getFlushRowsMethod) == null) { - synchronized (BigQueryWriteGrpc.class) { - if ((getFlushRowsMethod = BigQueryWriteGrpc.getFlushRowsMethod) == null) { - BigQueryWriteGrpc.getFlushRowsMethod = - getFlushRowsMethod = - io.grpc.MethodDescriptor - . 
- newBuilder() - .setType(io.grpc.MethodDescriptor.MethodType.UNARY) - .setFullMethodName(generateFullMethodName(SERVICE_NAME, "FlushRows")) - .setSampledToLocalTracing(true) - .setRequestMarshaller( - io.grpc.protobuf.ProtoUtils.marshaller( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest - .getDefaultInstance())) - .setResponseMarshaller( - io.grpc.protobuf.ProtoUtils.marshaller( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse - .getDefaultInstance())) - .setSchemaDescriptor(new BigQueryWriteMethodDescriptorSupplier("FlushRows")) - .build(); - } - } - } - return getFlushRowsMethod; - } - - /** Creates a new async stub that supports all call types for the service */ - public static BigQueryWriteStub newStub(io.grpc.Channel channel) { - io.grpc.stub.AbstractStub.StubFactory factory = - new io.grpc.stub.AbstractStub.StubFactory() { - @java.lang.Override - public BigQueryWriteStub newStub( - io.grpc.Channel channel, io.grpc.CallOptions callOptions) { - return new BigQueryWriteStub(channel, callOptions); - } - }; - return BigQueryWriteStub.newStub(factory, channel); - } - - /** - * Creates a new blocking-style stub that supports unary and streaming output calls on the service - */ - public static BigQueryWriteBlockingStub newBlockingStub(io.grpc.Channel channel) { - io.grpc.stub.AbstractStub.StubFactory factory = - new io.grpc.stub.AbstractStub.StubFactory() { - @java.lang.Override - public BigQueryWriteBlockingStub newStub( - io.grpc.Channel channel, io.grpc.CallOptions callOptions) { - return new BigQueryWriteBlockingStub(channel, callOptions); - } - }; - return BigQueryWriteBlockingStub.newStub(factory, channel); - } - - /** Creates a new ListenableFuture-style stub that supports unary calls on the service */ - public static BigQueryWriteFutureStub newFutureStub(io.grpc.Channel channel) { - io.grpc.stub.AbstractStub.StubFactory factory = - new io.grpc.stub.AbstractStub.StubFactory() { - @java.lang.Override - public BigQueryWriteFutureStub newStub( - io.grpc.Channel channel, io.grpc.CallOptions callOptions) { - return new BigQueryWriteFutureStub(channel, callOptions); - } - }; - return BigQueryWriteFutureStub.newStub(factory, channel); - } - - /** - * - * - *
-   * BigQuery Write API.
-   * The Write API can be used to write data to BigQuery.
-   * 
- */ - @java.lang.Deprecated - public abstract static class BigQueryWriteImplBase implements io.grpc.BindableService { - - /** - * - * - *
-     * Creates a write stream to the given table.
-     * Additionally, every table has a special COMMITTED stream named '_default'
-     * to which data can be written. This stream doesn't need to be created using
-     * CreateWriteStream. It is a stream that can be used simultaneously by any
-     * number of clients. Data written to this stream is considered committed as
-     * soon as an acknowledgement is received.
-     * 
- */ - public void createWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest request, - io.grpc.stub.StreamObserver - responseObserver) { - io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( - getCreateWriteStreamMethod(), responseObserver); - } - - /** - * - * - *
-     * Appends data to the given stream.
-     * If `offset` is specified, the `offset` is checked against the end of the
-     * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
-     * attempt is made to append to an offset beyond the current end of the stream,
-     * or `ALREADY_EXISTS` if the user provides an `offset` that has already been
-     * written to. The user can retry with an adjusted offset within the same RPC
-     * stream. If `offset` is not specified, the append happens at the end of the
-     * stream.
-     * The response contains the offset at which the append happened. Responses
-     * are received in the same order in which requests are sent. There will be
-     * one response for each successful request. If the `offset` is not set in the
-     * response, it means the append didn't happen due to an error. If one request
-     * fails, all subsequent requests will also fail until a successful request
-     * is made again.
-     * If the stream is of `PENDING` type, data will only be available for read
-     * operations after the stream is committed.
-     * 
- */ - public io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest> - appendRows( - io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse> - responseObserver) { - return io.grpc.stub.ServerCalls.asyncUnimplementedStreamingCall( - getAppendRowsMethod(), responseObserver); - } - - /** - * - * - *
-     * Gets a write stream.
-     * 
- */ - public void getWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest request, - io.grpc.stub.StreamObserver - responseObserver) { - io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( - getGetWriteStreamMethod(), responseObserver); - } - - /** - * - * - *
-     * Finalize a write stream so that no new data can be appended to the
-     * stream. Finalize is not supported on the '_default' stream.
-     * 
- */ - public void finalizeWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest request, - io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse> - responseObserver) { - io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( - getFinalizeWriteStreamMethod(), responseObserver); - } - - /** - * - * - *
-     * Atomically commits a group of `PENDING` streams that belong to the same
-     * `parent` table.
-     * Streams must be finalized before commit and cannot be committed multiple
-     * times. Once a stream is committed, data in the stream becomes available
-     * for read operations.
-     * 
- */ - public void batchCommitWriteStreams( - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest request, - io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse> - responseObserver) { - io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall( - getBatchCommitWriteStreamsMethod(), responseObserver); - } - - /** - * - * - *
-     * Flushes rows to a BUFFERED stream.
-     * If users are appending rows to a BUFFERED stream, a flush operation is
-     * required in order for the rows to become available for reading. A
-     * flush operation flushes rows, from any previously flushed offset in a
-     * BUFFERED stream, up to the offset specified in the request.
-     * Flush is not supported on the _default stream, since it is not BUFFERED.
-     * 
- */ - public void flushRows( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest request, - io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse> - responseObserver) { - io.grpc.stub.ServerCalls.asyncUnimplementedUnaryCall(getFlushRowsMethod(), responseObserver); - } - - @java.lang.Override - public final io.grpc.ServerServiceDefinition bindService() { - return io.grpc.ServerServiceDefinition.builder(getServiceDescriptor()) - .addMethod( - getCreateWriteStreamMethod(), - io.grpc.stub.ServerCalls.asyncUnaryCall( - new MethodHandlers< - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream>( - this, METHODID_CREATE_WRITE_STREAM))) - .addMethod( - getAppendRowsMethod(), - io.grpc.stub.ServerCalls.asyncBidiStreamingCall( - new MethodHandlers< - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse>( - this, METHODID_APPEND_ROWS))) - .addMethod( - getGetWriteStreamMethod(), - io.grpc.stub.ServerCalls.asyncUnaryCall( - new MethodHandlers< - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream>( - this, METHODID_GET_WRITE_STREAM))) - .addMethod( - getFinalizeWriteStreamMethod(), - io.grpc.stub.ServerCalls.asyncUnaryCall( - new MethodHandlers< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage - .FinalizeWriteStreamResponse>(this, METHODID_FINALIZE_WRITE_STREAM))) - .addMethod( - getBatchCommitWriteStreamsMethod(), - io.grpc.stub.ServerCalls.asyncUnaryCall( - new MethodHandlers< - com.google.cloud.bigquery.storage.v1alpha2.Storage - .BatchCommitWriteStreamsRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage - .BatchCommitWriteStreamsResponse>( - this, METHODID_BATCH_COMMIT_WRITE_STREAMS))) - .addMethod( - getFlushRowsMethod(), - io.grpc.stub.ServerCalls.asyncUnaryCall( - new MethodHandlers< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse>( - this, METHODID_FLUSH_ROWS))) - .build(); - } - } - - /** - * - * - *
-   * BigQuery Write API.
-   * The Write API can be used to write data to BigQuery.
-   * 
- */ - @java.lang.Deprecated - public static final class BigQueryWriteStub - extends io.grpc.stub.AbstractAsyncStub { - private BigQueryWriteStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { - super(channel, callOptions); - } - - @java.lang.Override - protected BigQueryWriteStub build(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { - return new BigQueryWriteStub(channel, callOptions); - } - - /** - * - * - *
-     * Creates a write stream to the given table.
-     * Additionally, every table has a special COMMITTED stream named '_default'
-     * to which data can be written. This stream doesn't need to be created using
-     * CreateWriteStream. It is a stream that can be used simultaneously by any
-     * number of clients. Data written to this stream is considered committed as
-     * soon as an acknowledgement is received.
-     * 
- */ - public void createWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest request, - io.grpc.stub.StreamObserver - responseObserver) { - io.grpc.stub.ClientCalls.asyncUnaryCall( - getChannel().newCall(getCreateWriteStreamMethod(), getCallOptions()), - request, - responseObserver); - } - - /** - * - * - *
-     * Appends data to the given stream.
-     * If `offset` is specified, the `offset` is checked against the end of the
-     * stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an
-     * attempt is made to append to an offset beyond the current end of the stream,
-     * or `ALREADY_EXISTS` if the user provides an `offset` that has already been
-     * written to. The user can retry with an adjusted offset within the same RPC
-     * stream. If `offset` is not specified, the append happens at the end of the
-     * stream.
-     * The response contains the offset at which the append happened. Responses
-     * are received in the same order in which requests are sent. There will be
-     * one response for each successful request. If the `offset` is not set in the
-     * response, it means the append didn't happen due to an error. If one request
-     * fails, all subsequent requests will also fail until a successful request
-     * is made again.
-     * If the stream is of `PENDING` type, data will only be available for read
-     * operations after the stream is committed.
-     * 
- */ - public io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest> - appendRows( - io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse> - responseObserver) { - return io.grpc.stub.ClientCalls.asyncBidiStreamingCall( - getChannel().newCall(getAppendRowsMethod(), getCallOptions()), responseObserver); - } - - /** - * - * - *
-     * Gets a write stream.
-     * 
- */ - public void getWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest request, - io.grpc.stub.StreamObserver - responseObserver) { - io.grpc.stub.ClientCalls.asyncUnaryCall( - getChannel().newCall(getGetWriteStreamMethod(), getCallOptions()), - request, - responseObserver); - } - - /** - * - * - *
-     * Finalize a write stream so that no new data can be appended to the
-     * stream. Finalize is not supported on the '_default' stream.
-     * 
- */ - public void finalizeWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest request, - io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse> - responseObserver) { - io.grpc.stub.ClientCalls.asyncUnaryCall( - getChannel().newCall(getFinalizeWriteStreamMethod(), getCallOptions()), - request, - responseObserver); - } - - /** - * - * - *
-     * Atomically commits a group of `PENDING` streams that belong to the same
-     * `parent` table.
-     * Streams must be finalized before commit and cannot be committed multiple
-     * times. Once a stream is committed, data in the stream becomes available
-     * for read operations.
-     * 
- */ - public void batchCommitWriteStreams( - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest request, - io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse> - responseObserver) { - io.grpc.stub.ClientCalls.asyncUnaryCall( - getChannel().newCall(getBatchCommitWriteStreamsMethod(), getCallOptions()), - request, - responseObserver); - } - - /** - * - * - *
-     * Flushes rows to a BUFFERED stream.
-     * If users are appending rows to a BUFFERED stream, a flush operation is
-     * required in order for the rows to become available for reading. A
-     * flush operation flushes rows, from any previously flushed offset in a
-     * BUFFERED stream, up to the offset specified in the request.
-     * Flush is not supported on the _default stream, since it is not BUFFERED.
-     * 
- */ - public void flushRows( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest request, - io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse> - responseObserver) { - io.grpc.stub.ClientCalls.asyncUnaryCall( - getChannel().newCall(getFlushRowsMethod(), getCallOptions()), request, responseObserver); - } - } - - /** - * - * - *
-   * BigQuery Write API.
-   * The Write API can be used to write data to BigQuery.
-   * 
- */ - @java.lang.Deprecated - public static final class BigQueryWriteBlockingStub - extends io.grpc.stub.AbstractBlockingStub { - private BigQueryWriteBlockingStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { - super(channel, callOptions); - } - - @java.lang.Override - protected BigQueryWriteBlockingStub build( - io.grpc.Channel channel, io.grpc.CallOptions callOptions) { - return new BigQueryWriteBlockingStub(channel, callOptions); - } - - /** - * - * - *
-     * Creates a write stream to the given table.
-     * Additionally, every table has a special COMMITTED stream named '_default'
-     * to which data can be written. This stream doesn't need to be created using
-     * CreateWriteStream. It is a stream that can be used simultaneously by any
-     * number of clients. Data written to this stream is considered committed as
-     * soon as an acknowledgement is received.
-     * 
- */ - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream createWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest request) { - return io.grpc.stub.ClientCalls.blockingUnaryCall( - getChannel(), getCreateWriteStreamMethod(), getCallOptions(), request); - } - - /** - * - * - *
-     * Gets a write stream.
-     * 
- */ - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream getWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest request) { - return io.grpc.stub.ClientCalls.blockingUnaryCall( - getChannel(), getGetWriteStreamMethod(), getCallOptions(), request); - } - - /** - * - * - *
-     * Finalize a write stream so that no new data can be appended to the
-     * stream. Finalize is not supported on the '_default' stream.
-     * 
- */ - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - finalizeWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest request) { - return io.grpc.stub.ClientCalls.blockingUnaryCall( - getChannel(), getFinalizeWriteStreamMethod(), getCallOptions(), request); - } - - /** - * - * - *
-     * Atomically commits a group of `PENDING` streams that belong to the same
-     * `parent` table.
-     * Streams must be finalized before commit and cannot be committed multiple
-     * times. Once a stream is committed, data in the stream becomes available
-     * for read operations.
-     * 
- */ - public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - batchCommitWriteStreams( - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - request) { - return io.grpc.stub.ClientCalls.blockingUnaryCall( - getChannel(), getBatchCommitWriteStreamsMethod(), getCallOptions(), request); - } - - /** - * - * - *
-     * Flushes rows to a BUFFERED stream.
-     * If users are appending rows to a BUFFERED stream, a flush operation is
-     * required in order for the rows to become available for reading. A
-     * flush operation flushes rows, from any previously flushed offset in a
-     * BUFFERED stream, up to the offset specified in the request.
-     * Flush is not supported on the _default stream, since it is not BUFFERED.
-     * 
- */ - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse flushRows( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest request) { - return io.grpc.stub.ClientCalls.blockingUnaryCall( - getChannel(), getFlushRowsMethod(), getCallOptions(), request); - } - } - - /** - * - * - *
-   * BigQuery Write API.
-   * The Write API can be used to write data to BigQuery.
-   * 
- */ - @java.lang.Deprecated - public static final class BigQueryWriteFutureStub - extends io.grpc.stub.AbstractFutureStub { - private BigQueryWriteFutureStub(io.grpc.Channel channel, io.grpc.CallOptions callOptions) { - super(channel, callOptions); - } - - @java.lang.Override - protected BigQueryWriteFutureStub build( - io.grpc.Channel channel, io.grpc.CallOptions callOptions) { - return new BigQueryWriteFutureStub(channel, callOptions); - } - - /** - * - * - *
-     * Creates a write stream to the given table.
-     * Additionally, every table has a special COMMITTED stream named '_default'
-     * to which data can be written. This stream doesn't need to be created using
-     * CreateWriteStream. It is a stream that can be used simultaneously by any
-     * number of clients. Data written to this stream is considered committed as
-     * soon as an acknowledgement is received.
-     * 
- */ - public com.google.common.util.concurrent.ListenableFuture< - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> - createWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest request) { - return io.grpc.stub.ClientCalls.futureUnaryCall( - getChannel().newCall(getCreateWriteStreamMethod(), getCallOptions()), request); - } - - /** - * - * - *
-     * Gets a write stream.
-     * 
- */ - public com.google.common.util.concurrent.ListenableFuture< - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream> - getWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest request) { - return io.grpc.stub.ClientCalls.futureUnaryCall( - getChannel().newCall(getGetWriteStreamMethod(), getCallOptions()), request); - } - - /** - * - * - *
-     * Finalize a write stream so that no new data can be appended to the
-     * stream. Finalize is not supported on the '_default' stream.
-     * 
- */ - public com.google.common.util.concurrent.ListenableFuture< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse> - finalizeWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest request) { - return io.grpc.stub.ClientCalls.futureUnaryCall( - getChannel().newCall(getFinalizeWriteStreamMethod(), getCallOptions()), request); - } - - /** - * - * - *
-     * Atomically commits a group of `PENDING` streams that belong to the same
-     * `parent` table.
-     * Streams must be finalized before commit and cannot be committed multiple
-     * times. Once a stream is committed, data in the stream becomes available
-     * for read operations.
-     * 
- */ - public com.google.common.util.concurrent.ListenableFuture< - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse> - batchCommitWriteStreams( - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - request) { - return io.grpc.stub.ClientCalls.futureUnaryCall( - getChannel().newCall(getBatchCommitWriteStreamsMethod(), getCallOptions()), request); - } - - /** - * - * - *
-     * Flushes rows to a BUFFERED stream.
-     * If users are appending rows to a BUFFERED stream, a flush operation is
-     * required in order for the rows to become available for reading. A
-     * flush operation flushes rows, from any previously flushed offset in a
-     * BUFFERED stream, up to the offset specified in the request.
-     * Flush is not supported on the _default stream, since it is not BUFFERED.
-     * 
- */ - public com.google.common.util.concurrent.ListenableFuture< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse> - flushRows(com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest request) { - return io.grpc.stub.ClientCalls.futureUnaryCall( - getChannel().newCall(getFlushRowsMethod(), getCallOptions()), request); - } - } - - private static final int METHODID_CREATE_WRITE_STREAM = 0; - private static final int METHODID_GET_WRITE_STREAM = 1; - private static final int METHODID_FINALIZE_WRITE_STREAM = 2; - private static final int METHODID_BATCH_COMMIT_WRITE_STREAMS = 3; - private static final int METHODID_FLUSH_ROWS = 4; - private static final int METHODID_APPEND_ROWS = 5; - - private static final class MethodHandlers - implements io.grpc.stub.ServerCalls.UnaryMethod, - io.grpc.stub.ServerCalls.ServerStreamingMethod, - io.grpc.stub.ServerCalls.ClientStreamingMethod, - io.grpc.stub.ServerCalls.BidiStreamingMethod { - private final BigQueryWriteImplBase serviceImpl; - private final int methodId; - - MethodHandlers(BigQueryWriteImplBase serviceImpl, int methodId) { - this.serviceImpl = serviceImpl; - this.methodId = methodId; - } - - @java.lang.Override - @java.lang.SuppressWarnings("unchecked") - public void invoke(Req request, io.grpc.stub.StreamObserver responseObserver) { - switch (methodId) { - case METHODID_CREATE_WRITE_STREAM: - serviceImpl.createWriteStream( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest) request, - (io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream>) - responseObserver); - break; - case METHODID_GET_WRITE_STREAM: - serviceImpl.getWriteStream( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest) request, - (io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream>) - responseObserver); - break; - case METHODID_FINALIZE_WRITE_STREAM: - serviceImpl.finalizeWriteStream( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest) - request, - (io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage - .FinalizeWriteStreamResponse>) - responseObserver); - break; - case METHODID_BATCH_COMMIT_WRITE_STREAMS: - serviceImpl.batchCommitWriteStreams( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest) - request, - (io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage - .BatchCommitWriteStreamsResponse>) - responseObserver); - break; - case METHODID_FLUSH_ROWS: - serviceImpl.flushRows( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest) request, - (io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse>) - responseObserver); - break; - default: - throw new AssertionError(); - } - } - - @java.lang.Override - @java.lang.SuppressWarnings("unchecked") - public io.grpc.stub.StreamObserver invoke( - io.grpc.stub.StreamObserver responseObserver) { - switch (methodId) { - case METHODID_APPEND_ROWS: - return (io.grpc.stub.StreamObserver) - serviceImpl.appendRows( - (io.grpc.stub.StreamObserver< - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse>) - responseObserver); - default: - throw new AssertionError(); - } - } - } - - private abstract static class BigQueryWriteBaseDescriptorSupplier - implements io.grpc.protobuf.ProtoFileDescriptorSupplier, - io.grpc.protobuf.ProtoServiceDescriptorSupplier { - 
BigQueryWriteBaseDescriptorSupplier() {} - - @java.lang.Override - public com.google.protobuf.Descriptors.FileDescriptor getFileDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage.getDescriptor(); - } - - @java.lang.Override - public com.google.protobuf.Descriptors.ServiceDescriptor getServiceDescriptor() { - return getFileDescriptor().findServiceByName("BigQueryWrite"); - } - } - - private static final class BigQueryWriteFileDescriptorSupplier - extends BigQueryWriteBaseDescriptorSupplier { - BigQueryWriteFileDescriptorSupplier() {} - } - - private static final class BigQueryWriteMethodDescriptorSupplier - extends BigQueryWriteBaseDescriptorSupplier - implements io.grpc.protobuf.ProtoMethodDescriptorSupplier { - private final String methodName; - - BigQueryWriteMethodDescriptorSupplier(String methodName) { - this.methodName = methodName; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.MethodDescriptor getMethodDescriptor() { - return getServiceDescriptor().findMethodByName(methodName); - } - } - - private static volatile io.grpc.ServiceDescriptor serviceDescriptor; - - public static io.grpc.ServiceDescriptor getServiceDescriptor() { - io.grpc.ServiceDescriptor result = serviceDescriptor; - if (result == null) { - synchronized (BigQueryWriteGrpc.class) { - result = serviceDescriptor; - if (result == null) { - serviceDescriptor = - result = - io.grpc.ServiceDescriptor.newBuilder(SERVICE_NAME) - .setSchemaDescriptor(new BigQueryWriteFileDescriptorSupplier()) - .addMethod(getCreateWriteStreamMethod()) - .addMethod(getAppendRowsMethod()) - .addMethod(getGetWriteStreamMethod()) - .addMethod(getFinalizeWriteStreamMethod()) - .addMethod(getBatchCommitWriteStreamsMethod()) - .addMethod(getFlushRowsMethod()) - .build(); - } - } - } - return result; - } -} diff --git a/pom.xml b/pom.xml index ffa6d0d6c8..00ba5debef 100644 --- a/pom.xml +++ b/pom.xml @@ -94,11 +94,6 @@ google-cloud-bigquery 1.128.0 - - com.google.api.grpc - proto-google-cloud-bigquerystorage-v1alpha2 - 0.118.2-SNAPSHOT - com.google.api.grpc proto-google-cloud-bigquerystorage-v1beta1 @@ -114,11 +109,6 @@ proto-google-cloud-bigquerystorage-v1 1.18.2-SNAPSHOT - - com.google.api.grpc - grpc-google-cloud-bigquerystorage-v1alpha2 - 0.118.2-SNAPSHOT - com.google.api.grpc grpc-google-cloud-bigquerystorage-v1beta1 @@ -186,11 +176,9 @@ - proto-google-cloud-bigquerystorage-v1alpha2 proto-google-cloud-bigquerystorage-v1beta1 proto-google-cloud-bigquerystorage-v1beta2 proto-google-cloud-bigquerystorage-v1 - grpc-google-cloud-bigquerystorage-v1alpha2 grpc-google-cloud-bigquerystorage-v1beta1 grpc-google-cloud-bigquerystorage-v1beta2 grpc-google-cloud-bigquerystorage-v1 diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/pom.xml b/proto-google-cloud-bigquerystorage-v1alpha2/pom.xml deleted file mode 100644 index 5b6b0bf010..0000000000 --- a/proto-google-cloud-bigquerystorage-v1alpha2/pom.xml +++ /dev/null @@ -1,42 +0,0 @@ - - 4.0.0 - com.google.api.grpc - proto-google-cloud-bigquerystorage-v1alpha2 - 0.118.2-SNAPSHOT - proto-google-cloud-bigquerystorage-v1alpha2 - PROTO library for proto-google-cloud-bigquerystorage-v1alpha2 - - com.google.cloud - google-cloud-bigquerystorage-parent - 1.18.2-SNAPSHOT - - - - com.google.protobuf - protobuf-java - - - com.google.api.grpc - proto-google-common-protos - - - com.google.api - api-common - - - com.google.guava - guava - - - - - - - org.codehaus.mojo - flatten-maven-plugin - - - - diff --git 
a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoBufProto.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoBufProto.java deleted file mode 100644 index 4730fde0cd..0000000000 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoBufProto.java +++ /dev/null @@ -1,1691 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: google/cloud/bigquery/storage/v1alpha2/protobuf.proto - -package com.google.cloud.bigquery.storage.v1alpha2; - -public final class ProtoBufProto { - private ProtoBufProto() {} - - public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} - - public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { - registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); - } - - public interface ProtoSchemaOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.ProtoSchema) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * Descriptor for input message. The descriptor has to be self-contained,
-     * including all the nested types, except for proto buffer well-known types
-     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-     * and zetasql public protos
-     * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-     * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - * - * @return Whether the protoDescriptor field is set. - */ - boolean hasProtoDescriptor(); - /** - * - * - *
-     * Descriptor for input message. The descriptor has to be self-contained,
-     * including all the nested types, except for proto buffer well-known types
-     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-     * and zetasql public protos
-     * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-     * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - * - * @return The protoDescriptor. - */ - com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor(); - /** - * - * - *
-     * Descriptor for input message. The descriptor has to be self-contained,
-     * including all the nested types, except for proto buffer well-known types
-     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-     * and zetasql public protos
-     * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-     * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - */ - com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder getProtoDescriptorOrBuilder(); - } - /** - * - * - *
-   * Protobuf schema is an API representation of the proto buffer schema.
-   * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.ProtoSchema} - */ - public static final class ProtoSchema extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.ProtoSchema) - ProtoSchemaOrBuilder { - private static final long serialVersionUID = 0L; - // Use ProtoSchema.newBuilder() to construct. - private ProtoSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private ProtoSchema() {} - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new ProtoSchema(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private ProtoSchema( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: - { - com.google.protobuf.DescriptorProtos.DescriptorProto.Builder subBuilder = null; - if (protoDescriptor_ != null) { - subBuilder = protoDescriptor_.toBuilder(); - } - protoDescriptor_ = - input.readMessage( - com.google.protobuf.DescriptorProtos.DescriptorProto.PARSER, - extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(protoDescriptor_); - protoDescriptor_ = subBuilder.buildPartial(); - } - - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto - .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto - .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.class, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder.class); - } - - public static final int PROTO_DESCRIPTOR_FIELD_NUMBER = 1; - private com.google.protobuf.DescriptorProtos.DescriptorProto protoDescriptor_; - /** - * - * - *
-     * Descriptor for input message. The descriptor has to be self-contained,
-     * including all the nested types, except for proto buffer well-known types
-     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-     * and zetasql public protos
-     * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-     * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - * - * @return Whether the protoDescriptor field is set. - */ - @java.lang.Override - public boolean hasProtoDescriptor() { - return protoDescriptor_ != null; - } - /** - * - * - *
-     * Descriptor for input message. The descriptor has to be self-contained,
-     * including all the nested types, except for proto buffer well-known types
-     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-     * and zetasql public protos
-     * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-     * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - * - * @return The protoDescriptor. - */ - @java.lang.Override - public com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor() { - return protoDescriptor_ == null - ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() - : protoDescriptor_; - } - /** - * - * - *
-     * Descriptor for input message. The descriptor has to be self-contained,
-     * including all the nested types, except for proto buffer well-known types
-     * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-     * and zetasql public protos
-     * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-     * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - */ - @java.lang.Override - public com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder - getProtoDescriptorOrBuilder() { - return getProtoDescriptor(); - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - if (hasProtoDescriptor()) { - if (!getProtoDescriptor().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (protoDescriptor_ != null) { - output.writeMessage(1, getProtoDescriptor()); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (protoDescriptor_ != null) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getProtoDescriptor()); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema other = - (com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema) obj; - - if (hasProtoDescriptor() != other.hasProtoDescriptor()) return false; - if (hasProtoDescriptor()) { - if (!getProtoDescriptor().equals(other.getProtoDescriptor())) return false; - } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasProtoDescriptor()) { - hash = (37 * hash) + PROTO_DESCRIPTOR_FIELD_NUMBER; - hash = (53 * hash) + getProtoDescriptor().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( - java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( - byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( - byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( - java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( - com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Protobuf schema is an API representation of the proto buffer schema.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.ProtoSchema} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.ProtoSchema) - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto - .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto - .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.class, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder.class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - if (protoDescriptorBuilder_ == null) { - protoDescriptor_ = null; - } else { - protoDescriptor_ = null; - protoDescriptorBuilder_ = null; - } - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto - .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema build() { - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema result = - buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema result = - new com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema(this); - if (protoDescriptorBuilder_ == null) { - result.protoDescriptor_ = protoDescriptor_; - } else { - result.protoDescriptor_ = protoDescriptorBuilder_.build(); - } - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema) other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - .getDefaultInstance()) return this; - if (other.hasProtoDescriptor()) { - mergeProtoDescriptor(other.getProtoDescriptor()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - if (hasProtoDescriptor()) { - if (!getProtoDescriptor().isInitialized()) { - return false; - } - } - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private com.google.protobuf.DescriptorProtos.DescriptorProto protoDescriptor_; - private com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.DescriptorProtos.DescriptorProto, - com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, - com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder> - protoDescriptorBuilder_; - /** - * - * - *
-       * Descriptor for input message. The descriptor has to be self-contained,
-       * including all the nested types, except for protocol buffer well-known types
-       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-       * and zetasql public protos
-       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-       * 
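
For context, the self-containment requirement above means a caller could not attach an arbitrary compiled descriptor directly; nested and imported types (other than the exempted well-known and ZetaSQL protos) had to be inlined into the DescriptorProto first. A minimal sketch, valid only for a flat message with no nested or imported types (general flattening was the job of the library's converter code):

    import com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema;
    import com.google.protobuf.Descriptors.Descriptor;

    // Sketch: wrap a message descriptor in a ProtoSchema. Only sufficient
    // for a message type with no nested or imported dependencies; anything
    // else must be inlined into the DescriptorProto before sending.
    static ProtoSchema toProtoSchema(Descriptor descriptor) {
      return ProtoSchema.newBuilder()
          .setProtoDescriptor(descriptor.toProto())
          .build();
    }
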
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - * - * @return Whether the protoDescriptor field is set. - */ - public boolean hasProtoDescriptor() { - return protoDescriptorBuilder_ != null || protoDescriptor_ != null; - } - /** - * - * - *
-       * Descriptor for input message. The descriptor has to be self-contained,
-       * including all the nested types, except for protocol buffer well-known types
-       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-       * and zetasql public protos
-       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-       * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - * - * @return The protoDescriptor. - */ - public com.google.protobuf.DescriptorProtos.DescriptorProto getProtoDescriptor() { - if (protoDescriptorBuilder_ == null) { - return protoDescriptor_ == null - ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() - : protoDescriptor_; - } else { - return protoDescriptorBuilder_.getMessage(); - } - } - /** - * - * - *
-       * Descriptor for input message. The descriptor has to be self-contained,
-       * including all the nested types, except for protocol buffer well-known types
-       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-       * and zetasql public protos
-       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-       * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - */ - public Builder setProtoDescriptor( - com.google.protobuf.DescriptorProtos.DescriptorProto value) { - if (protoDescriptorBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - protoDescriptor_ = value; - onChanged(); - } else { - protoDescriptorBuilder_.setMessage(value); - } - - return this; - } - /** - * - * - *
-       * Descriptor for input message. The descriptor has to be self-contained,
-       * including all the nested types, except for protocol buffer well-known types
-       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-       * and zetasql public protos
-       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-       * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - */ - public Builder setProtoDescriptor( - com.google.protobuf.DescriptorProtos.DescriptorProto.Builder builderForValue) { - if (protoDescriptorBuilder_ == null) { - protoDescriptor_ = builderForValue.build(); - onChanged(); - } else { - protoDescriptorBuilder_.setMessage(builderForValue.build()); - } - - return this; - } - /** - * - * - *
-       * Descriptor for input message. The descriptor has to be self-contained,
-       * including all the nested types, except for protocol buffer well-known types
-       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-       * and zetasql public protos
-       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-       * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - */ - public Builder mergeProtoDescriptor( - com.google.protobuf.DescriptorProtos.DescriptorProto value) { - if (protoDescriptorBuilder_ == null) { - if (protoDescriptor_ != null) { - protoDescriptor_ = - com.google.protobuf.DescriptorProtos.DescriptorProto.newBuilder(protoDescriptor_) - .mergeFrom(value) - .buildPartial(); - } else { - protoDescriptor_ = value; - } - onChanged(); - } else { - protoDescriptorBuilder_.mergeFrom(value); - } - - return this; - } - /** - * - * - *
-       * Descriptor for input message. The descriptor has to be self-contained,
-       * including all the nested types, except for protocol buffer well-known types
-       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-       * and zetasql public protos
-       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-       * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - */ - public Builder clearProtoDescriptor() { - if (protoDescriptorBuilder_ == null) { - protoDescriptor_ = null; - onChanged(); - } else { - protoDescriptor_ = null; - protoDescriptorBuilder_ = null; - } - - return this; - } - /** - * - * - *
-       * Descriptor for input message. The descriptor has to be self-contained,
-       * including all the nested types, except for protocol buffer well-known types
-       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-       * and zetasql public protos
-       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-       * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - */ - public com.google.protobuf.DescriptorProtos.DescriptorProto.Builder - getProtoDescriptorBuilder() { - - onChanged(); - return getProtoDescriptorFieldBuilder().getBuilder(); - } - /** - * - * - *
-       * Descriptor for input message. The descriptor has to be self-contained,
-       * including all the nested types, except for protocol buffer well-known types
-       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-       * and zetasql public protos
-       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-       * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - */ - public com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder - getProtoDescriptorOrBuilder() { - if (protoDescriptorBuilder_ != null) { - return protoDescriptorBuilder_.getMessageOrBuilder(); - } else { - return protoDescriptor_ == null - ? com.google.protobuf.DescriptorProtos.DescriptorProto.getDefaultInstance() - : protoDescriptor_; - } - } - /** - * - * - *
-       * Descriptor for input message. The descriptor has to be self-contained,
-       * including all the nested types, except for protocol buffer well-known types
-       * (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf)
-       * and zetasql public protos
-       * (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
-       * 
- * - * .google.protobuf.DescriptorProto proto_descriptor = 1; - */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.DescriptorProtos.DescriptorProto, - com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, - com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder> - getProtoDescriptorFieldBuilder() { - if (protoDescriptorBuilder_ == null) { - protoDescriptorBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.DescriptorProtos.DescriptorProto, - com.google.protobuf.DescriptorProtos.DescriptorProto.Builder, - com.google.protobuf.DescriptorProtos.DescriptorProtoOrBuilder>( - getProtoDescriptor(), getParentForChildren(), isClean()); - protoDescriptor_ = null; - } - return protoDescriptorBuilder_; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.ProtoSchema) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.ProtoSchema) - private static final com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public ProtoSchema parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ProtoSchema(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - public interface ProtoRowsOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.ProtoRows) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * A sequence of rows serialized as a Protocol Buffer.
-     * See https://developers.google.com/protocol-buffers/docs/overview for more
-     * information on deserializing this field.
-     * 
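
In practice a writer filled this field by serializing each row message to its wire bytes; all rows had to conform to the ProtoSchema previously sent on the stream. A hedged sketch (the toProtoRows helper name is illustrative, not part of the library):

    import com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows;
    import com.google.protobuf.Message;

    // Sketch: each serialized_rows entry is one row message's wire encoding.
    static ProtoRows toProtoRows(Iterable<? extends Message> rows) {
      ProtoRows.Builder builder = ProtoRows.newBuilder();
      for (Message row : rows) {
        builder.addSerializedRows(row.toByteString());
      }
      return builder.build();
    }
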
- * - * repeated bytes serialized_rows = 1; - * - * @return A list containing the serializedRows. - */ - java.util.List getSerializedRowsList(); - /** - * - * - *
-     * A sequence of rows serialized as a Protocol Buffer.
-     * See https://developers.google.com/protocol-buffers/docs/overview for more
-     * information on deserializing this field.
-     * 
- * - * repeated bytes serialized_rows = 1; - * - * @return The count of serializedRows. - */ - int getSerializedRowsCount(); - /** - * - * - *
-     * A sequence of rows serialized as a Protocol Buffer.
-     * See https://developers.google.com/protocol-buffers/docs/overview for more
-     * information on deserializing this field.
-     * 
- * - * repeated bytes serialized_rows = 1; - * - * @param index The index of the element to return. - * @return The serializedRows at the given index. - */ - com.google.protobuf.ByteString getSerializedRows(int index); - } - /** - * - * - *
-   * Protobuf rows.
-   * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.ProtoRows} - */ - public static final class ProtoRows extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.ProtoRows) - ProtoRowsOrBuilder { - private static final long serialVersionUID = 0L; - // Use ProtoRows.newBuilder() to construct. - private ProtoRows(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private ProtoRows() { - serializedRows_ = java.util.Collections.emptyList(); - } - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new ProtoRows(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private ProtoRows( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: - { - if (!((mutable_bitField0_ & 0x00000001) != 0)) { - serializedRows_ = new java.util.ArrayList(); - mutable_bitField0_ |= 0x00000001; - } - serializedRows_.add(input.readBytes()); - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) != 0)) { - serializedRows_ = java.util.Collections.unmodifiableList(serializedRows_); // C - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto - .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto - .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.class, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder.class); - } - - public static final int SERIALIZED_ROWS_FIELD_NUMBER = 1; - private java.util.List serializedRows_; - /** - * - * - *
-     * A sequence of rows serialized as a Protocol Buffer.
-     * See https://developers.google.com/protocol-buffers/docs/overview for more
-     * information on deserializing this field.
-     * 
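
On the reading side, each entry decodes against the descriptor carried by the stream's ProtoSchema. A sketch, where rowDescriptor is a hypothetical com.google.protobuf.Descriptors.Descriptor rebuilt from that schema beforehand:

    import com.google.protobuf.DynamicMessage;

    // Sketch: decode the first row; rowDescriptor (hypothetical) must be
    // reconstructed from the ProtoSchema's DescriptorProto in advance.
    DynamicMessage row =
        DynamicMessage.parseFrom(rowDescriptor, protoRows.getSerializedRows(0));
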
- * - * repeated bytes serialized_rows = 1; - * - * @return A list containing the serializedRows. - */ - @java.lang.Override - public java.util.List getSerializedRowsList() { - return serializedRows_; - } - /** - * - * - *
-     * A sequence of rows serialized as a Protocol Buffer.
-     * See https://developers.google.com/protocol-buffers/docs/overview for more
-     * information on deserializing this field.
-     * 
- * - * repeated bytes serialized_rows = 1; - * - * @return The count of serializedRows. - */ - public int getSerializedRowsCount() { - return serializedRows_.size(); - } - /** - * - * - *
-     * A sequence of rows serialized as a Protocol Buffer.
-     * See https://developers.google.com/protocol-buffers/docs/overview for more
-     * information on deserializing this field.
-     * 
- * - * repeated bytes serialized_rows = 1; - * - * @param index The index of the element to return. - * @return The serializedRows at the given index. - */ - public com.google.protobuf.ByteString getSerializedRows(int index) { - return serializedRows_.get(index); - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - for (int i = 0; i < serializedRows_.size(); i++) { - output.writeBytes(1, serializedRows_.get(i)); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - { - int dataSize = 0; - for (int i = 0; i < serializedRows_.size(); i++) { - dataSize += - com.google.protobuf.CodedOutputStream.computeBytesSizeNoTag(serializedRows_.get(i)); - } - size += dataSize; - size += 1 * getSerializedRowsList().size(); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows other = - (com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows) obj; - - if (!getSerializedRowsList().equals(other.getSerializedRowsList())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (getSerializedRowsCount() > 0) { - hash = (37 * hash) + SERIALIZED_ROWS_FIELD_NUMBER; - hash = (53 * hash) + getSerializedRowsList().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( - java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( - byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return 
PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( - byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( - java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows - parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( - com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Protobuf rows.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.ProtoRows} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.ProtoRows) - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto - .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto - .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.class, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder.class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - serializedRows_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto - .internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows build() { - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows result = - new com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows(this); - int from_bitField0_ = bitField0_; - if (((bitField0_ & 0x00000001) != 0)) { - serializedRows_ = java.util.Collections.unmodifiableList(serializedRows_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.serializedRows_ = serializedRows_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder 
clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows) other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows - .getDefaultInstance()) return this; - if (!other.serializedRows_.isEmpty()) { - if (serializedRows_.isEmpty()) { - serializedRows_ = other.serializedRows_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureSerializedRowsIsMutable(); - serializedRows_.addAll(other.serializedRows_); - } - onChanged(); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private int bitField0_; - - private java.util.List serializedRows_ = - java.util.Collections.emptyList(); - - private void ensureSerializedRowsIsMutable() { - if (!((bitField0_ & 0x00000001) != 0)) { - serializedRows_ = - new java.util.ArrayList(serializedRows_); - bitField0_ |= 0x00000001; - } - } - /** - * - * - *
-       * A sequence of rows serialized as a Protocol Buffer.
-       * See https://developers.google.com/protocol-buffers/docs/overview for more
-       * information on deserializing this field.
-       * 
- * - * repeated bytes serialized_rows = 1; - * - * @return A list containing the serializedRows. - */ - public java.util.List getSerializedRowsList() { - return ((bitField0_ & 0x00000001) != 0) - ? java.util.Collections.unmodifiableList(serializedRows_) - : serializedRows_; - } - /** - * - * - *
-       * A sequence of rows serialized as a Protocol Buffer.
-       * See https://developers.google.com/protocol-buffers/docs/overview for more
-       * information on deserializing this field.
-       * 
- * - * repeated bytes serialized_rows = 1; - * - * @return The count of serializedRows. - */ - public int getSerializedRowsCount() { - return serializedRows_.size(); - } - /** - * - * - *
-       * A sequence of rows serialized as a Protocol Buffer.
-       * See https://developers.google.com/protocol-buffers/docs/overview for more
-       * information on deserializing this field.
-       * 
- * - * repeated bytes serialized_rows = 1; - * - * @param index The index of the element to return. - * @return The serializedRows at the given index. - */ - public com.google.protobuf.ByteString getSerializedRows(int index) { - return serializedRows_.get(index); - } - /** - * - * - *
-       * A sequence of rows serialized as a Protocol Buffer.
-       * See https://developers.google.com/protocol-buffers/docs/overview for more
-       * information on deserializing this field.
-       * 
- * - * repeated bytes serialized_rows = 1; - * - * @param index The index to set the value at. - * @param value The serializedRows to set. - * @return This builder for chaining. - */ - public Builder setSerializedRows(int index, com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensureSerializedRowsIsMutable(); - serializedRows_.set(index, value); - onChanged(); - return this; - } - /** - * - * - *
-       * A sequence of rows serialized as a Protocol Buffer.
-       * See https://developers.google.com/protocol-buffers/docs/overview for more
-       * information on deserializing this field.
-       * 
- * - * repeated bytes serialized_rows = 1; - * - * @param value The serializedRows to add. - * @return This builder for chaining. - */ - public Builder addSerializedRows(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - ensureSerializedRowsIsMutable(); - serializedRows_.add(value); - onChanged(); - return this; - } - /** - * - * - *
-       * A sequence of rows serialized as a Protocol Buffer.
-       * See https://developers.google.com/protocol-buffers/docs/overview for more
-       * information on deserializing this field.
-       * 
- * - * repeated bytes serialized_rows = 1; - * - * @param values The serializedRows to add. - * @return This builder for chaining. - */ - public Builder addAllSerializedRows( - java.lang.Iterable values) { - ensureSerializedRowsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll(values, serializedRows_); - onChanged(); - return this; - } - /** - * - * - *
-       * A sequence of rows serialized as a Protocol Buffer.
-       * See https://developers.google.com/protocol-buffers/docs/overview for more
-       * information on deserializing this field.
-       * 
- * - * repeated bytes serialized_rows = 1; - * - * @return This builder for chaining. - */ - public Builder clearSerializedRows() { - serializedRows_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.ProtoRows) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.ProtoRows) - private static final com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public ProtoRows parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ProtoRows(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { - return descriptor; - } - - private static com.google.protobuf.Descriptors.FileDescriptor descriptor; - - static { - java.lang.String[] descriptorData = { - "\n5google/cloud/bigquery/storage/v1alpha2" - + "/protobuf.proto\022&google.cloud.bigquery.s" - + "torage.v1alpha2\032 google/protobuf/descrip" - + "tor.proto\"I\n\013ProtoSchema\022:\n\020proto_descri" - + "ptor\030\001 \001(\0132 .google.protobuf.DescriptorP" - + "roto\"$\n\tProtoRows\022\027\n\017serialized_rows\030\001 \003" - + "(\014B\212\001\n*com.google.cloud.bigquery.storage" - + ".v1alpha2B\rProtoBufProtoZMgoogle.golang." 
- + "org/genproto/googleapis/cloud/bigquery/s" - + "torage/v1alpha2;storageb\006proto3" - }; - descriptor = - com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( - descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - com.google.protobuf.DescriptorProtos.getDescriptor(), - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoSchema_descriptor, - new java.lang.String[] { - "ProtoDescriptor", - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_ProtoRows_descriptor, - new java.lang.String[] { - "SerializedRows", - }); - com.google.protobuf.DescriptorProtos.getDescriptor(); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java deleted file mode 100644 index e478e9aac2..0000000000 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java +++ /dev/null @@ -1,10844 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: google/cloud/bigquery/storage/v1alpha2/storage.proto - -package com.google.cloud.bigquery.storage.v1alpha2; - -public final class Storage { - private Storage() {} - - public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} - - public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { - registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); - } - - public interface CreateWriteStreamRequestOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * Required. Reference to the table to which the stream belongs, in the format
-     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
-     * 
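
The parent value is plain string interpolation over the three resource components; a small sketch with hypothetical identifiers:

    // Sketch: building the required parent resource name.
    String parent =
        String.format(
            "projects/%s/datasets/%s/tables/%s",
            "my-project", "my_dataset", "my_table");
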
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The parent. - */ - java.lang.String getParent(); - /** - * - * - *
-     * Required. Reference to the table to which the stream belongs, in the format
-     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
-     * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for parent. - */ - com.google.protobuf.ByteString getParentBytes(); - - /** - * - * - *
-     * Required. Stream to be created.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return Whether the writeStream field is set. - */ - boolean hasWriteStream(); - /** - * - * - *
-     * Required. Stream to be created.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return The writeStream. - */ - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream getWriteStream(); - /** - * - * - *
-     * Required. Stream to be created.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - */ - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder - getWriteStreamOrBuilder(); - } - /** - * - * - *
-   * Request message for `CreateWriteStream`.
-   * 
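
Assembling the two required fields, a request against this (now-removed) surface would have looked roughly like the following; the stream type shown is an assumption about the caller's intent:

    import com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest;
    import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;

    // Sketch: parent and write_stream are both REQUIRED.
    CreateWriteStreamRequest request =
        CreateWriteStreamRequest.newBuilder()
            .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
            .setWriteStream(
                WriteStream.newBuilder()
                    .setType(WriteStream.Type.COMMITTED) // assumed enum value
                    .build())
            .build();
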
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest} - */ - public static final class CreateWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest) - CreateWriteStreamRequestOrBuilder { - private static final long serialVersionUID = 0L; - // Use CreateWriteStreamRequest.newBuilder() to construct. - private CreateWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private CreateWriteStreamRequest() { - parent_ = ""; - } - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new CreateWriteStreamRequest(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private CreateWriteStreamRequest( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: - { - java.lang.String s = input.readStringRequireUtf8(); - - parent_ = s; - break; - } - case 18: - { - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder subBuilder = - null; - if (writeStream_ != null) { - subBuilder = writeStream_.toBuilder(); - } - writeStream_ = - input.readMessage( - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.parser(), - extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(writeStream_); - writeStream_ = subBuilder.buildPartial(); - } - - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest.Builder - .class); - } - - public static final int PARENT_FIELD_NUMBER = 1; - private volatile java.lang.Object parent_; - /** - * - * - *
-     * Required. Reference to the table to which the stream belongs, in the format
-     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
-     * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The parent. - */ - @java.lang.Override - public java.lang.String getParent() { - java.lang.Object ref = parent_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - parent_ = s; - return s; - } - } - /** - * - * - *
-     * Required. Reference to the table to which the stream belongs, in the format
-     * of `projects/{project}/datasets/{dataset}/tables/{table}`.
-     * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for parent. - */ - @java.lang.Override - public com.google.protobuf.ByteString getParentBytes() { - java.lang.Object ref = parent_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - parent_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int WRITE_STREAM_FIELD_NUMBER = 2; - private com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream writeStream_; - /** - * - * - *
-     * Required. Stream to be created.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return Whether the writeStream field is set. - */ - @java.lang.Override - public boolean hasWriteStream() { - return writeStream_ != null; - } - /** - * - * - *
-     * Required. Stream to be created.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return The writeStream. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream getWriteStream() { - return writeStream_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.getDefaultInstance() - : writeStream_; - } - /** - * - * - *
-     * Required. Stream to be created.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder - getWriteStreamOrBuilder() { - return getWriteStream(); - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (!getParentBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); - } - if (writeStream_ != null) { - output.writeMessage(2, getWriteStream()); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (!getParentBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); - } - if (writeStream_ != null) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getWriteStream()); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj - instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest other = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest) obj; - - if (!getParent().equals(other.getParent())) return false; - if (hasWriteStream() != other.hasWriteStream()) return false; - if (hasWriteStream()) { - if (!getWriteStream().equals(other.getWriteStream())) return false; - } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (37 * hash) + PARENT_FIELD_NUMBER; - hash = (53 * hash) + getParent().hashCode(); - if (hasWriteStream()) { - hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; - hash = (53 * hash) + getWriteStream().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - parseFrom(com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - parseFrom( - com.google.protobuf.ByteString 
data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Request message for `CreateWriteStream`.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest) - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest.Builder - .class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - parent_ = ""; - - if (writeStreamBuilder_ == null) { - writeStream_ = null; - } else { - writeStream_ = null; - writeStreamBuilder_ = null; - } - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest build() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest result = - buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest result = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest(this); - result.parent_ = parent_; - if (writeStreamBuilder_ == null) { - result.writeStream_ = writeStream_; - } else { - result.writeStream_ = writeStreamBuilder_.build(); - } - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder 
clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other - instanceof - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest) other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - .getDefaultInstance()) return this; - if (!other.getParent().isEmpty()) { - parent_ = other.parent_; - onChanged(); - } - if (other.hasWriteStream()) { - mergeWriteStream(other.getWriteStream()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest parsedMessage = - null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private java.lang.Object parent_ = ""; - /** - * - * - *
-       * Required. Reference to the table to which the stream belongs, in the format
-       * of `projects/{project}/datasets/{dataset}/tables/{table}`.
-       * 
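For reference, a minimal sketch of how the removed v1alpha2 builder was typically driven to create a write stream. The project, dataset, and table ids are placeholders, and only setParent/setWriteStream are taken from the deleted code below:

    import com.google.cloud.bigquery.storage.v1alpha2.Storage;
    import com.google.cloud.bigquery.storage.v1alpha2.Stream;

    Storage.CreateWriteStreamRequest request =
        Storage.CreateWriteStreamRequest.newBuilder()
            // Parent table, in the format documented above.
            .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
            // The stream to be created (default values here, for brevity).
            .setWriteStream(Stream.WriteStream.newBuilder().build())
            .build();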
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The parent. - */ - public java.lang.String getParent() { - java.lang.Object ref = parent_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - parent_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * - * - *
-       * Required. Reference to the table to which the stream belongs, in the format
-       * of `projects/{project}/datasets/{dataset}/tables/{table}`.
-       * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for parent. - */ - public com.google.protobuf.ByteString getParentBytes() { - java.lang.Object ref = parent_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - parent_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * - * - *
-       * Required. Reference to the table to which the stream belongs, in the format
-       * of `projects/{project}/datasets/{dataset}/tables/{table}`.
-       * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @param value The parent to set. - * @return This builder for chaining. - */ - public Builder setParent(java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - parent_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Required. Reference to the table to which the stream belongs, in the format
-       * of `projects/{project}/datasets/{dataset}/tables/{table}`.
-       * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return This builder for chaining. - */ - public Builder clearParent() { - - parent_ = getDefaultInstance().getParent(); - onChanged(); - return this; - } - /** - * - * - *
-       * Required. Reference to the table to which the stream belongs, in the format
-       * of `projects/{project}/datasets/{dataset}/tables/{table}`.
-       * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @param value The bytes for parent to set. - * @return This builder for chaining. - */ - public Builder setParentBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - parent_ = value; - onChanged(); - return this; - } - - private com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream writeStream_; - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder> - writeStreamBuilder_; - /** - * - * - *
-       * Required. Stream to be created.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return Whether the writeStream field is set. - */ - public boolean hasWriteStream() { - return writeStreamBuilder_ != null || writeStream_ != null; - } - /** - * - * - *
-       * Required. Stream to be created.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return The writeStream. - */ - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream getWriteStream() { - if (writeStreamBuilder_ == null) { - return writeStream_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.getDefaultInstance() - : writeStream_; - } else { - return writeStreamBuilder_.getMessage(); - } - } - /** - * - * - *
-       * Required. Stream to be created.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - */ - public Builder setWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream value) { - if (writeStreamBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - writeStream_ = value; - onChanged(); - } else { - writeStreamBuilder_.setMessage(value); - } - - return this; - } - /** - * - * - *
-       * Required. Stream to be created.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - */ - public Builder setWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder builderForValue) { - if (writeStreamBuilder_ == null) { - writeStream_ = builderForValue.build(); - onChanged(); - } else { - writeStreamBuilder_.setMessage(builderForValue.build()); - } - - return this; - } - /** - * - * - *
-       * Required. Stream to be created.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - */ - public Builder mergeWriteStream( - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream value) { - if (writeStreamBuilder_ == null) { - if (writeStream_ != null) { - writeStream_ = - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.newBuilder( - writeStream_) - .mergeFrom(value) - .buildPartial(); - } else { - writeStream_ = value; - } - onChanged(); - } else { - writeStreamBuilder_.mergeFrom(value); - } - - return this; - } - /** - * - * - *
-       * Required. Stream to be created.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - */ - public Builder clearWriteStream() { - if (writeStreamBuilder_ == null) { - writeStream_ = null; - onChanged(); - } else { - writeStream_ = null; - writeStreamBuilder_ = null; - } - - return this; - } - /** - * - * - *
-       * Required. Stream to be created.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - */ - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder - getWriteStreamBuilder() { - - onChanged(); - return getWriteStreamFieldBuilder().getBuilder(); - } - /** - * - * - *
-       * Required. Stream to be created.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - */ - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder - getWriteStreamOrBuilder() { - if (writeStreamBuilder_ != null) { - return writeStreamBuilder_.getMessageOrBuilder(); - } else { - return writeStream_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.getDefaultInstance() - : writeStream_; - } - } - /** - * - * - *
-       * Required. Stream to be created.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream write_stream = 2 [(.google.api.field_behavior) = REQUIRED]; - * - */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder> - getWriteStreamFieldBuilder() { - if (writeStreamBuilder_ == null) { - writeStreamBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder>( - getWriteStream(), getParentForChildren(), isClean()); - writeStream_ = null; - } - return writeStreamBuilder_; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.CreateWriteStreamRequest) - private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public CreateWriteStreamRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new CreateWriteStreamRequest(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.CreateWriteStreamRequest - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - public interface AppendRowsRequestOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * Required. The stream that is the target of the append operation. This value must be
-     * specified for the initial request. If subsequent requests specify the
-     * stream name, it must equal the value provided in the first request.
-     * To write to the _default stream, populate this field with a string in the
-     * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
-     * 
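As a sketch of the `_default` stream convention just described, assuming the generated AppendRowsRequest builder exposes the conventional setWriteStream(String) setter, which this hunk does not show:

    Storage.AppendRowsRequest request =
        Storage.AppendRowsRequest.newBuilder()
            // Append to the table's default stream; no explicit
            // CreateWriteStream call is required for this path.
            .setWriteStream(
                "projects/my-project/datasets/my_dataset/tables/my_table/_default")
            .build();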
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The writeStream. - */ - java.lang.String getWriteStream(); - /** - * - * - *
-     * Required. The stream that is the target of the append operation. This value must be
-     * specified for the initial request. If subsequent requests specify the
-     * stream name, it must equal the value provided in the first request.
-     * To write to the _default stream, populate this field with a string in the
-     * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
-     * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for writeStream. - */ - com.google.protobuf.ByteString getWriteStreamBytes(); - - /** - * - * - *
-     * Optional. If present, the write is only performed if the next append offset is the same
-     * as the provided value. If not present, the write is performed at the
-     * current end of stream. Specifying a value for this field is not allowed
-     * when calling AppendRows for the '_default' stream.
-     * 
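A hedged sketch of the offset check described above: the expected append offset is wrapped in a google.protobuf.Int64Value so the server performs the write only when the stream's next offset matches (setOffset is the conventional generated setter and is assumed here, as this hunk does not show the builder):

    import com.google.protobuf.Int64Value;

    long expectedOffset = 42L; // placeholder value
    Storage.AppendRowsRequest request =
        Storage.AppendRowsRequest.newBuilder()
            .setWriteStream("projects/p/datasets/d/tables/t/streams/s")
            // Reject the append unless the stream's next offset is exactly 42.
            .setOffset(Int64Value.newBuilder().setValue(expectedOffset).build())
            .build();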
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return Whether the offset field is set. - */ - boolean hasOffset(); - /** - * - * - *
-     * Optional. If present, the write is only performed if the next append offset is the same
-     * as the provided value. If not present, the write is performed at the
-     * current end of stream. Specifying a value for this field is not allowed
-     * when calling AppendRows for the '_default' stream.
-     * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return The offset. - */ - com.google.protobuf.Int64Value getOffset(); - /** - * - * - *
-     * Optional. If present, the write is only performed if the next append offset is the same
-     * as the provided value. If not present, the write is performed at the
-     * current end of stream. Specifying a value for this field is not allowed
-     * when calling AppendRows for the '_default' stream.
-     * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder(); - - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - * - * @return Whether the protoRows field is set. - */ - boolean hasProtoRows(); - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - * - * @return The protoRows. - */ - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData getProtoRows(); - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - */ - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoDataOrBuilder - getProtoRowsOrBuilder(); - - /** - * - * - *
-     * Only the setting from the initial request is respected. If true, unknown
-     * input fields are dropped. Otherwise, unknown fields cause the append to
-     * fail. The default value is false.
-     * 
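Because only the first request's value is respected, a writer that wants unknown input fields to be dropped must set the flag on the initial request of the connection. A sketch, assuming the conventional setIgnoreUnknownFields setter for the bool field above:

    Storage.AppendRowsRequest first =
        Storage.AppendRowsRequest.newBuilder()
            .setWriteStream("projects/p/datasets/d/tables/t/_default")
            // Must be set on the FIRST request; values on later
            // requests in the same connection are ignored.
            .setIgnoreUnknownFields(true)
            .build();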
- * - * bool ignore_unknown_fields = 5; - * - * @return The ignoreUnknownFields. - */ - boolean getIgnoreUnknownFields(); - - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.RowsCase - getRowsCase(); - } - /** - * - * - *
-   * Request message for `AppendRows`.
-   * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest} - */ - public static final class AppendRowsRequest extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest) - AppendRowsRequestOrBuilder { - private static final long serialVersionUID = 0L; - // Use AppendRowsRequest.newBuilder() to construct. - private AppendRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private AppendRowsRequest() { - writeStream_ = ""; - } - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new AppendRowsRequest(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private AppendRowsRequest( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: - { - java.lang.String s = input.readStringRequireUtf8(); - - writeStream_ = s; - break; - } - case 18: - { - com.google.protobuf.Int64Value.Builder subBuilder = null; - if (offset_ != null) { - subBuilder = offset_.toBuilder(); - } - offset_ = - input.readMessage(com.google.protobuf.Int64Value.parser(), extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(offset_); - offset_ = subBuilder.buildPartial(); - } - - break; - } - case 34: - { - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .Builder - subBuilder = null; - if (rowsCase_ == 4) { - subBuilder = - ((com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - .ProtoData) - rows_) - .toBuilder(); - } - rows_ = - input.readMessage( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - .ProtoData.parser(), - extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - .ProtoData) - rows_); - rows_ = subBuilder.buildPartial(); - } - rowsCase_ = 4; - break; - } - case 40: - { - ignoreUnknownFields_ = input.readBool(); - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - 
.internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.Builder.class); - } - - public interface ProtoDataOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-       * Proto schema used to serialize the data.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - * - * @return Whether the writerSchema field is set. - */ - boolean hasWriterSchema(); - /** - * - * - *
-       * Proto schema used to serialize the data.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - * - * @return The writerSchema. - */ - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema getWriterSchema(); - /** - * - * - *
-       * Proto schema used to serialize the data.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - */ - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder - getWriterSchemaOrBuilder(); - - /** - * - * - *
-       * Serialized row data in protobuf message format.
-       * 
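Combining the two ProtoData fields, a hedged sketch; setWriterSchema and setRows appear in the deleted builder below, while proto_descriptor and serialized_rows are assumed from the deleted protobuf.proto rather than shown in this hunk:

    import com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto;
    import com.google.protobuf.ByteString;
    import com.google.protobuf.DescriptorProtos.DescriptorProto;

    DescriptorProto descriptor = DescriptorProto.getDefaultInstance(); // placeholder schema
    ByteString rowBytes = ByteString.EMPTY; // placeholder serialized row

    Storage.AppendRowsRequest.ProtoData data =
        Storage.AppendRowsRequest.ProtoData.newBuilder()
            // Proto schema describing the serialized row messages.
            .setWriterSchema(
                ProtoBufProto.ProtoSchema.newBuilder().setProtoDescriptor(descriptor))
            // One ByteString per serialized row message.
            .setRows(ProtoBufProto.ProtoRows.newBuilder().addSerializedRows(rowBytes))
            .build();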
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - * - * @return Whether the rows field is set. - */ - boolean hasRows(); - /** - * - * - *
-       * Serialized row data in protobuf message format.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - * - * @return The rows. - */ - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows getRows(); - /** - * - * - *
-       * Serialized row data in protobuf message format.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - */ - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder - getRowsOrBuilder(); - } - /** Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData} */ - public static final class ProtoData extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData) - ProtoDataOrBuilder { - private static final long serialVersionUID = 0L; - // Use ProtoData.newBuilder() to construct. - private ProtoData(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private ProtoData() {} - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new ProtoData(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private ProtoData( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: - { - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder - subBuilder = null; - if (writerSchema_ != null) { - subBuilder = writerSchema_.toBuilder(); - } - writerSchema_ = - input.readMessage( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - .parser(), - extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(writerSchema_); - writerSchema_ = subBuilder.buildPartial(); - } - - break; - } - case 18: - { - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder - subBuilder = null; - if (rows_ != null) { - subBuilder = rows_.toBuilder(); - } - rows_ = - input.readMessage( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows - .parser(), - extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(rows_); - rows_ = subBuilder.buildPartial(); - } - - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e) - .setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_fieldAccessorTable - .ensureFieldAccessorsInitialized( - 
com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .Builder.class); - } - - public static final int WRITER_SCHEMA_FIELD_NUMBER = 1; - private com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema writerSchema_; - /** - * - * - *
-       * Proto schema used to serialize the data.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - * - * @return Whether the writerSchema field is set. - */ - @java.lang.Override - public boolean hasWriterSchema() { - return writerSchema_ != null; - } - /** - * - * - *
-       * Proto schema used to serialize the data.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - * - * @return The writerSchema. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - getWriterSchema() { - return writerSchema_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - .getDefaultInstance() - : writerSchema_; - } - /** - * - * - *
-       * Proto schema used to serialize the data.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder - getWriterSchemaOrBuilder() { - return getWriterSchema(); - } - - public static final int ROWS_FIELD_NUMBER = 2; - private com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows rows_; - /** - * - * - *
-       * Serialized row data in protobuf message format.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - * - * @return Whether the rows field is set. - */ - @java.lang.Override - public boolean hasRows() { - return rows_ != null; - } - /** - * - * - *
-       * Serialized row data in protobuf message format.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - * - * @return The rows. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows getRows() { - return rows_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows - .getDefaultInstance() - : rows_; - } - /** - * - * - *
-       * Serialized row data in protobuf message format.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder - getRowsOrBuilder() { - return getRows(); - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - if (hasWriterSchema()) { - if (!getWriterSchema().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (writerSchema_ != null) { - output.writeMessage(1, getWriterSchema()); - } - if (rows_ != null) { - output.writeMessage(2, getRows()); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (writerSchema_ != null) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getWriterSchema()); - } - if (rows_ != null) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getRows()); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj - instanceof - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData other = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) obj; - - if (hasWriterSchema() != other.hasWriterSchema()) return false; - if (hasWriterSchema()) { - if (!getWriterSchema().equals(other.getWriterSchema())) return false; - } - if (hasRows() != other.hasRows()) return false; - if (hasRows()) { - if (!getRows().equals(other.getRows())) return false; - } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasWriterSchema()) { - hash = (37 * hash) + WRITER_SCHEMA_FIELD_NUMBER; - hash = (53 * hash) + getWriterSchema().hashCode(); - } - if (hasRows()) { - hash = (37 * hash) + ROWS_FIELD_NUMBER; - hash = (53 * hash) + getRows().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parseFrom(com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static 
com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parseFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parseDelimitedFrom( - java.io.InputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData) - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoDataOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .Builder.class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - if (writerSchemaBuilder_ == null) { - writerSchema_ = null; - } else { - writerSchema_ = null; - writerSchemaBuilder_ = null; - } - if (rowsBuilder_ == null) { - rows_ = null; - } else { - rows_ = null; - rowsBuilder_ = null; - } - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - build() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData result = - buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData result = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData( - this); - if (writerSchemaBuilder_ == null) { - result.writerSchema_ = writerSchema_; - } else { - result.writerSchema_ = writerSchemaBuilder_.build(); - } - if (rowsBuilder_ == 
null) { - result.rows_ = rows_; - } else { - result.rows_ = rowsBuilder_.build(); - } - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other - instanceof - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) - other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .getDefaultInstance()) return this; - if (other.hasWriterSchema()) { - mergeWriterSchema(other.getWriterSchema()); - } - if (other.hasRows()) { - mergeRows(other.getRows()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - if (hasWriterSchema()) { - if (!getWriterSchema().isInitialized()) { - return false; - } - } - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema writerSchema_; - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder> - writerSchemaBuilder_; - /** - * - * - *
-         * Proto schema used to serialize the data.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - * - * @return Whether the writerSchema field is set. - */ - public boolean hasWriterSchema() { - return writerSchemaBuilder_ != null || writerSchema_ != null; - } - /** - * - * - *
-         * Proto schema used to serialize the data.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - * - * @return The writerSchema. - */ - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - getWriterSchema() { - if (writerSchemaBuilder_ == null) { - return writerSchema_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - .getDefaultInstance() - : writerSchema_; - } else { - return writerSchemaBuilder_.getMessage(); - } - } - /** - * - * - *
-         * Proto schema used to serialize the data.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - */ - public Builder setWriterSchema( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema value) { - if (writerSchemaBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - writerSchema_ = value; - onChanged(); - } else { - writerSchemaBuilder_.setMessage(value); - } - - return this; - } - /** - * - * - *
-         * Proto schema used to serialize the data.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - */ - public Builder setWriterSchema( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder - builderForValue) { - if (writerSchemaBuilder_ == null) { - writerSchema_ = builderForValue.build(); - onChanged(); - } else { - writerSchemaBuilder_.setMessage(builderForValue.build()); - } - - return this; - } - /** - * - * - *
-         * Proto schema used to serialize the data.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - */ - public Builder mergeWriterSchema( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema value) { - if (writerSchemaBuilder_ == null) { - if (writerSchema_ != null) { - writerSchema_ = - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.newBuilder( - writerSchema_) - .mergeFrom(value) - .buildPartial(); - } else { - writerSchema_ = value; - } - onChanged(); - } else { - writerSchemaBuilder_.mergeFrom(value); - } - - return this; - } - /** - * - * - *
-         * Proto schema used to serialize the data.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - */ - public Builder clearWriterSchema() { - if (writerSchemaBuilder_ == null) { - writerSchema_ = null; - onChanged(); - } else { - writerSchema_ = null; - writerSchemaBuilder_ = null; - } - - return this; - } - /** - * - * - *
-         * Proto schema used to serialize the data.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - */ - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder - getWriterSchemaBuilder() { - - onChanged(); - return getWriterSchemaFieldBuilder().getBuilder(); - } - /** - * - * - *
-         * Proto schema used to serialize the data.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - */ - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder - getWriterSchemaOrBuilder() { - if (writerSchemaBuilder_ != null) { - return writerSchemaBuilder_.getMessageOrBuilder(); - } else { - return writerSchema_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema - .getDefaultInstance() - : writerSchema_; - } - } - /** - * - * - *
-         * Proto schema used to serialize the data.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoSchema writer_schema = 1; - */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder> - getWriterSchemaFieldBuilder() { - if (writerSchemaBuilder_ == null) { - writerSchemaBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoSchemaOrBuilder>( - getWriterSchema(), getParentForChildren(), isClean()); - writerSchema_ = null; - } - return writerSchemaBuilder_; - } - - private com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows rows_; - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder> - rowsBuilder_; - /** - * - * - *
-         * Serialized row data in protobuf message format.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - * - * @return Whether the rows field is set. - */ - public boolean hasRows() { - return rowsBuilder_ != null || rows_ != null; - } - /** - * - * - *
-         * Serialized row data in protobuf message format.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - * - * @return The rows. - */ - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows getRows() { - if (rowsBuilder_ == null) { - return rows_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows - .getDefaultInstance() - : rows_; - } else { - return rowsBuilder_.getMessage(); - } - } - /** - * - * - *
-         * Serialized row data in protobuf message format.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - */ - public Builder setRows( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows value) { - if (rowsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - rows_ = value; - onChanged(); - } else { - rowsBuilder_.setMessage(value); - } - - return this; - } - /** - * - * - *
-         * Serialized row data in protobuf message format.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - */ - public Builder setRows( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder - builderForValue) { - if (rowsBuilder_ == null) { - rows_ = builderForValue.build(); - onChanged(); - } else { - rowsBuilder_.setMessage(builderForValue.build()); - } - - return this; - } - /** - * - * - *
-         * Serialized row data in protobuf message format.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - */ - public Builder mergeRows( - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows value) { - if (rowsBuilder_ == null) { - if (rows_ != null) { - rows_ = - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.newBuilder( - rows_) - .mergeFrom(value) - .buildPartial(); - } else { - rows_ = value; - } - onChanged(); - } else { - rowsBuilder_.mergeFrom(value); - } - - return this; - } - /** - * - * - *
-         * Serialized row data in protobuf message format.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - */ - public Builder clearRows() { - if (rowsBuilder_ == null) { - rows_ = null; - onChanged(); - } else { - rows_ = null; - rowsBuilder_ = null; - } - - return this; - } - /** - * - * - *
-         * Serialized row data in protobuf message format.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - */ - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder - getRowsBuilder() { - - onChanged(); - return getRowsFieldBuilder().getBuilder(); - } - /** - * - * - *
-         * Serialized row data in protobuf message format.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - */ - public com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder - getRowsOrBuilder() { - if (rowsBuilder_ != null) { - return rowsBuilder_.getMessageOrBuilder(); - } else { - return rows_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows - .getDefaultInstance() - : rows_; - } - } - /** - * - * - *
-         * Serialized row data in protobuf message format.
-         * 
- * - * .google.cloud.bigquery.storage.v1alpha2.ProtoRows rows = 2; - */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder> - getRowsFieldBuilder() { - if (rowsBuilder_ == null) { - rowsBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRows.Builder, - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.ProtoRowsOrBuilder>( - getRows(), getParentForChildren(), isClean()); - rows_ = null; - } - return rowsBuilder_; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData) - private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - .ProtoData - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public ProtoData parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new ProtoData(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - private int rowsCase_ = 0; - private java.lang.Object rows_; - - public enum RowsCase - implements - com.google.protobuf.Internal.EnumLite, - com.google.protobuf.AbstractMessage.InternalOneOfEnum { - PROTO_ROWS(4), - ROWS_NOT_SET(0); - private final int value; - - private RowsCase(int value) { - this.value = value; - } - /** - * @param value The number of the enum to look for. - * @return The enum associated with the given number. - * @deprecated Use {@link #forNumber(int)} instead. 
- */ - @java.lang.Deprecated - public static RowsCase valueOf(int value) { - return forNumber(value); - } - - public static RowsCase forNumber(int value) { - switch (value) { - case 4: - return PROTO_ROWS; - case 0: - return ROWS_NOT_SET; - default: - return null; - } - } - - public int getNumber() { - return this.value; - } - }; - - public RowsCase getRowsCase() { - return RowsCase.forNumber(rowsCase_); - } - - public static final int WRITE_STREAM_FIELD_NUMBER = 1; - private volatile java.lang.Object writeStream_; - /** - * - * - *
-     * Required. The stream that is the target of the append operation. This value must be
-     * specified for the initial request. If subsequent requests specify the
-     * stream name, it must equal the value provided in the first request.
-     * To write to the _default stream, populate this field with a string in the
-     * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
-     * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The writeStream. - */ - @java.lang.Override - public java.lang.String getWriteStream() { - java.lang.Object ref = writeStream_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - writeStream_ = s; - return s; - } - } - /** - * - * - *
-     * Required. The stream that is the target of the append operation. This value must be
-     * specified for the initial request. If subsequent requests specify the
-     * stream name, it must equal the value provided in the first request.
-     * To write to the _default stream, populate this field with a string in the
-     * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
-     * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for writeStream. - */ - @java.lang.Override - public com.google.protobuf.ByteString getWriteStreamBytes() { - java.lang.Object ref = writeStream_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - writeStream_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int OFFSET_FIELD_NUMBER = 2; - private com.google.protobuf.Int64Value offset_; - /** - * - * - *
-     * Optional. If present, the write is only performed if the next append offset is the same
-     * as the provided value. If not present, the write is performed at the
-     * current end of stream. Specifying a value for this field is not allowed
-     * when calling AppendRows for the '_default' stream.
-     * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return Whether the offset field is set. - */ - @java.lang.Override - public boolean hasOffset() { - return offset_ != null; - } - /** - * - * - *
-     * Optional. If present, the write is only performed if the next append offset is the same
-     * as the provided value. If not present, the write is performed at the
-     * current end of stream. Specifying a value for this field is not allowed
-     * when calling AppendRows for the '_default' stream.
-     * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return The offset. - */ - @java.lang.Override - public com.google.protobuf.Int64Value getOffset() { - return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; - } - /** - * - * - *
-     * Optional. If present, the write is only performed if the next append offset is the same
-     * as the provided value. If not present, the write is performed at the
-     * current end of stream. Specifying a value for this field is not allowed
-     * when calling AppendRows for the '_default' stream.
-     * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - @java.lang.Override - public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { - return getOffset(); - } - - public static final int PROTO_ROWS_FIELD_NUMBER = 4; - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - * - * @return Whether the protoRows field is set. - */ - @java.lang.Override - public boolean hasProtoRows() { - return rowsCase_ == 4; - } - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - * - * @return The protoRows. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - getProtoRows() { - if (rowsCase_ == 4) { - return (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) - rows_; - } - return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .getDefaultInstance(); - } - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoDataOrBuilder - getProtoRowsOrBuilder() { - if (rowsCase_ == 4) { - return (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) - rows_; - } - return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .getDefaultInstance(); - } - - public static final int IGNORE_UNKNOWN_FIELDS_FIELD_NUMBER = 5; - private boolean ignoreUnknownFields_; - /** - * - * - *
-     * Only the setting from the initial request is respected. If true, unknown
-     * input fields are dropped. Otherwise, unknown fields cause the append to
-     * fail. The default value is false.
-     * 
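To close the loop, the generated serialize/parse pair and the rows oneof discriminator (all present in the deleted code) can be exercised as below; parseFrom throws InvalidProtocolBufferException, hence the method wrapper:

    static Storage.AppendRowsRequest.ProtoData protoRowsOf(Storage.AppendRowsRequest request)
        throws com.google.protobuf.InvalidProtocolBufferException {
      // Wire round trip through the generated parser.
      byte[] bytes = request.toByteArray();
      Storage.AppendRowsRequest parsed = Storage.AppendRowsRequest.parseFrom(bytes);
      // The oneof case tells us which rows variant is populated.
      if (parsed.getRowsCase() == Storage.AppendRowsRequest.RowsCase.PROTO_ROWS) {
        return parsed.getProtoRows();
      }
      return Storage.AppendRowsRequest.ProtoData.getDefaultInstance();
    }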
- * - * bool ignore_unknown_fields = 5; - * - * @return The ignoreUnknownFields. - */ - @java.lang.Override - public boolean getIgnoreUnknownFields() { - return ignoreUnknownFields_; - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - if (hasProtoRows()) { - if (!getProtoRows().isInitialized()) { - memoizedIsInitialized = 0; - return false; - } - } - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (!getWriteStreamBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, writeStream_); - } - if (offset_ != null) { - output.writeMessage(2, getOffset()); - } - if (rowsCase_ == 4) { - output.writeMessage( - 4, - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) rows_); - } - if (ignoreUnknownFields_ != false) { - output.writeBool(5, ignoreUnknownFields_); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (!getWriteStreamBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, writeStream_); - } - if (offset_ != null) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(2, getOffset()); - } - if (rowsCase_ == 4) { - size += - com.google.protobuf.CodedOutputStream.computeMessageSize( - 4, - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) - rows_); - } - if (ignoreUnknownFields_ != false) { - size += com.google.protobuf.CodedOutputStream.computeBoolSize(5, ignoreUnknownFields_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest other = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest) obj; - - if (!getWriteStream().equals(other.getWriteStream())) return false; - if (hasOffset() != other.hasOffset()) return false; - if (hasOffset()) { - if (!getOffset().equals(other.getOffset())) return false; - } - if (getIgnoreUnknownFields() != other.getIgnoreUnknownFields()) return false; - if (!getRowsCase().equals(other.getRowsCase())) return false; - switch (rowsCase_) { - case 4: - if (!getProtoRows().equals(other.getProtoRows())) return false; - break; - case 0: - default: - } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; - hash = (53 * hash) + getWriteStream().hashCode(); - if (hasOffset()) { - hash = (37 * hash) + OFFSET_FIELD_NUMBER; - hash = (53 * hash) + getOffset().hashCode(); - } - hash = (37 * hash) + IGNORE_UNKNOWN_FIELDS_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashBoolean(getIgnoreUnknownFields()); - switch (rowsCase_) { - case 4: - hash = (37 * hash) + 
PROTO_ROWS_FIELD_NUMBER; - hash = (53 * hash) + getProtoRows().hashCode(); - break; - case 0: - default: - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( - java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( - byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( - byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( - java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( - com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, 
input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Request message for `AppendRows`.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest) - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.Builder.class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - writeStream_ = ""; - - if (offsetBuilder_ == null) { - offset_ = null; - } else { - offset_ = null; - offsetBuilder_ = null; - } - ignoreUnknownFields_ = false; - - rowsCase_ = 0; - rows_ = null; - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest build() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest result = - buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest result = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest(this); - result.writeStream_ = writeStream_; - if (offsetBuilder_ == null) { - result.offset_ = offset_; - } else { - result.offset_ = offsetBuilder_.build(); - } - if (rowsCase_ == 4) { - if (protoRowsBuilder_ == null) { - result.rows_ = rows_; - } else { - result.rows_ = protoRowsBuilder_.build(); - } - } - result.ignoreUnknownFields_ = ignoreUnknownFields_; - result.rowsCase_ = rowsCase_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, 
java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest) other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - .getDefaultInstance()) return this; - if (!other.getWriteStream().isEmpty()) { - writeStream_ = other.writeStream_; - onChanged(); - } - if (other.hasOffset()) { - mergeOffset(other.getOffset()); - } - if (other.getIgnoreUnknownFields() != false) { - setIgnoreUnknownFields(other.getIgnoreUnknownFields()); - } - switch (other.getRowsCase()) { - case PROTO_ROWS: - { - mergeProtoRows(other.getProtoRows()); - break; - } - case ROWS_NOT_SET: - { - break; - } - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - if (hasProtoRows()) { - if (!getProtoRows().isInitialized()) { - return false; - } - } - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private int rowsCase_ = 0; - private java.lang.Object rows_; - - public RowsCase getRowsCase() { - return RowsCase.forNumber(rowsCase_); - } - - public Builder clearRows() { - rowsCase_ = 0; - rows_ = null; - onChanged(); - return this; - } - - private java.lang.Object writeStream_ = ""; - /** - * - * - *
-       * Required. The stream that is the target of the append operation. This value must be
-       * specified for the initial request. If subsequent requests specify the
-       * stream name, it must equal the value provided in the first request.
-       * To write to the _default stream, populate this field with a string in the
-       * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
-       * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The writeStream. - */ - public java.lang.String getWriteStream() { - java.lang.Object ref = writeStream_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - writeStream_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * - * - *
-       * Required. The stream that is the target of the append operation. This value must be
-       * specified for the initial request. If subsequent requests specify the
-       * stream name, it must equal the value provided in the first request.
-       * To write to the _default stream, populate this field with a string in the
-       * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
-       * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for writeStream. - */ - public com.google.protobuf.ByteString getWriteStreamBytes() { - java.lang.Object ref = writeStream_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - writeStream_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * - * - *
-       * Required. The stream that is the target of the append operation. This value must be
-       * specified for the initial request. If subsequent requests specify the
-       * stream name, it must equal the value provided in the first request.
-       * To write to the _default stream, populate this field with a string in the
-       * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
-       * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @param value The writeStream to set. - * @return This builder for chaining. - */ - public Builder setWriteStream(java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - writeStream_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Required. The stream that is the target of the append operation. This value must be
-       * specified for the initial request. If subsequent requests specify the
-       * stream name, it must equal the value provided in the first request.
-       * To write to the _default stream, populate this field with a string in the
-       * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
-       * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return This builder for chaining. - */ - public Builder clearWriteStream() { - - writeStream_ = getDefaultInstance().getWriteStream(); - onChanged(); - return this; - } - /** - * - * - *
-       * Required. The stream that is the target of the append operation. This value must be
-       * specified for the initial request. If subsequent requests specify the
-       * stream name, it must equal the value provided in the first request.
-       * To write to the _default stream, populate this field with a string in the
-       * format `projects/{project}/datasets/{dataset}/tables/{table}/_default`.
-       * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @param value The bytes for writeStream to set. - * @return This builder for chaining. - */ - public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - writeStream_ = value; - onChanged(); - return this; - } - - private com.google.protobuf.Int64Value offset_; - private com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.Int64Value, - com.google.protobuf.Int64Value.Builder, - com.google.protobuf.Int64ValueOrBuilder> - offsetBuilder_; - /** - * - * - *
-       * Optional. If present, the write is only performed if the next append offset is the same
-       * as the provided value. If not present, the write is performed at the
-       * current end of stream. Specifying a value for this field is not allowed
-       * when calling AppendRows for the '_default' stream.
-       * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return Whether the offset field is set. - */ - public boolean hasOffset() { - return offsetBuilder_ != null || offset_ != null; - } - /** - * - * - *
-       * Optional. If present, the write is only performed if the next append offset is the same
-       * as the provided value. If not present, the write is performed at the
-       * current end of stream. Specifying a value for this field is not allowed
-       * when calling AppendRows for the '_default' stream.
-       * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return The offset. - */ - public com.google.protobuf.Int64Value getOffset() { - if (offsetBuilder_ == null) { - return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; - } else { - return offsetBuilder_.getMessage(); - } - } - /** - * - * - *
-       * Optional. If present, the write is only performed if the next append offset is the same
-       * as the provided value. If not present, the write is performed at the
-       * current end of stream. Specifying a value for this field is not allowed
-       * when calling AppendRows for the '_default' stream.
-       * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder setOffset(com.google.protobuf.Int64Value value) { - if (offsetBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - offset_ = value; - onChanged(); - } else { - offsetBuilder_.setMessage(value); - } - - return this; - } - /** - * - * - *
-       * Optional. If present, the write is only performed if the next append offset is the same
-       * as the provided value. If not present, the write is performed at the
-       * current end of stream. Specifying a value for this field is not allowed
-       * when calling AppendRows for the '_default' stream.
-       * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder setOffset(com.google.protobuf.Int64Value.Builder builderForValue) { - if (offsetBuilder_ == null) { - offset_ = builderForValue.build(); - onChanged(); - } else { - offsetBuilder_.setMessage(builderForValue.build()); - } - - return this; - } - /** - * - * - *
-       * Optional. If present, the write is only performed if the next append offset is the same
-       * as the provided value. If not present, the write is performed at the
-       * current end of stream. Specifying a value for this field is not allowed
-       * when calling AppendRows for the '_default' stream.
-       * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder mergeOffset(com.google.protobuf.Int64Value value) { - if (offsetBuilder_ == null) { - if (offset_ != null) { - offset_ = - com.google.protobuf.Int64Value.newBuilder(offset_).mergeFrom(value).buildPartial(); - } else { - offset_ = value; - } - onChanged(); - } else { - offsetBuilder_.mergeFrom(value); - } - - return this; - } - /** - * - * - *
-       * Optional. If present, the write is only performed if the next append offset is the same
-       * as the provided value. If not present, the write is performed at the
-       * current end of stream. Specifying a value for this field is not allowed
-       * when calling AppendRows for the '_default' stream.
-       * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder clearOffset() { - if (offsetBuilder_ == null) { - offset_ = null; - onChanged(); - } else { - offset_ = null; - offsetBuilder_ = null; - } - - return this; - } - /** - * - * - *
-       * Optional. If present, the write is only performed if the next append offset is the same
-       * as the provided value. If not present, the write is performed at the
-       * current end of stream. Specifying a value for this field is not allowed
-       * when calling AppendRows for the '_default' stream.
-       * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public com.google.protobuf.Int64Value.Builder getOffsetBuilder() { - - onChanged(); - return getOffsetFieldBuilder().getBuilder(); - } - /** - * - * - *
-       * Optional. If present, the write is only performed if the next append offset is the same
-       * as the provided value. If not present, the write is performed at the
-       * current end of stream. Specifying a value for this field is not allowed
-       * when calling AppendRows for the '_default' stream.
-       * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public com.google.protobuf.Int64ValueOrBuilder getOffsetOrBuilder() { - if (offsetBuilder_ != null) { - return offsetBuilder_.getMessageOrBuilder(); - } else { - return offset_ == null ? com.google.protobuf.Int64Value.getDefaultInstance() : offset_; - } - } - /** - * - * - *
-       * Optional. If present, the write is only performed if the next append offset is the same
-       * as the provided value. If not present, the write is performed at the
-       * current end of stream. Specifying a value for this field is not allowed
-       * when calling AppendRows for the '_default' stream.
-       * 
- * - * .google.protobuf.Int64Value offset = 2 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.Int64Value, - com.google.protobuf.Int64Value.Builder, - com.google.protobuf.Int64ValueOrBuilder> - getOffsetFieldBuilder() { - if (offsetBuilder_ == null) { - offsetBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.Int64Value, - com.google.protobuf.Int64Value.Builder, - com.google.protobuf.Int64ValueOrBuilder>( - getOffset(), getParentForChildren(), isClean()); - offset_ = null; - } - return offsetBuilder_; - } - - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .Builder, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - .ProtoDataOrBuilder> - protoRowsBuilder_; - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - * - * @return Whether the protoRows field is set. - */ - @java.lang.Override - public boolean hasProtoRows() { - return rowsCase_ == 4; - } - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - * - * @return The protoRows. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - getProtoRows() { - if (protoRowsBuilder_ == null) { - if (rowsCase_ == 4) { - return (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) - rows_; - } - return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .getDefaultInstance(); - } else { - if (rowsCase_ == 4) { - return protoRowsBuilder_.getMessage(); - } - return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .getDefaultInstance(); - } - } - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - */ - public Builder setProtoRows( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData value) { - if (protoRowsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - rows_ = value; - onChanged(); - } else { - protoRowsBuilder_.setMessage(value); - } - rowsCase_ = 4; - return this; - } - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - */ - public Builder setProtoRows( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData.Builder - builderForValue) { - if (protoRowsBuilder_ == null) { - rows_ = builderForValue.build(); - onChanged(); - } else { - protoRowsBuilder_.setMessage(builderForValue.build()); - } - rowsCase_ = 4; - return this; - } - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - */ - public Builder mergeProtoRows( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData value) { - if (protoRowsBuilder_ == null) { - if (rowsCase_ == 4 - && rows_ - != com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .getDefaultInstance()) { - rows_ = - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .newBuilder( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - .ProtoData) - rows_) - .mergeFrom(value) - .buildPartial(); - } else { - rows_ = value; - } - onChanged(); - } else { - if (rowsCase_ == 4) { - 
protoRowsBuilder_.mergeFrom(value); - } - protoRowsBuilder_.setMessage(value); - } - rowsCase_ = 4; - return this; - } - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - */ - public Builder clearProtoRows() { - if (protoRowsBuilder_ == null) { - if (rowsCase_ == 4) { - rowsCase_ = 0; - rows_ = null; - onChanged(); - } - } else { - if (rowsCase_ == 4) { - rowsCase_ = 0; - rows_ = null; - } - protoRowsBuilder_.clear(); - } - return this; - } - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - */ - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData.Builder - getProtoRowsBuilder() { - return getProtoRowsFieldBuilder().getBuilder(); - } - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoDataOrBuilder - getProtoRowsOrBuilder() { - if ((rowsCase_ == 4) && (protoRowsBuilder_ != null)) { - return protoRowsBuilder_.getMessageOrBuilder(); - } else { - if (rowsCase_ == 4) { - return (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) - rows_; - } - return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .getDefaultInstance(); - } - } - /** - * .google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest.ProtoData proto_rows = 4; - * - */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .Builder, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - .ProtoDataOrBuilder> - getProtoRowsFieldBuilder() { - if (protoRowsBuilder_ == null) { - if (!(rowsCase_ == 4)) { - rows_ = - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .getDefaultInstance(); - } - protoRowsBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData - .Builder, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - .ProtoDataOrBuilder>( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest.ProtoData) - rows_, - getParentForChildren(), - isClean()); - rows_ = null; - } - rowsCase_ = 4; - onChanged(); - ; - return protoRowsBuilder_; - } - - private boolean ignoreUnknownFields_; - /** - * - * - *
-       * Only the setting on the initial request is respected. If true, unknown
-       * input fields are dropped. Otherwise, extra fields cause the append to
-       * fail. Defaults to false.
-       * 
- * - * bool ignore_unknown_fields = 5; - * - * @return The ignoreUnknownFields. - */ - @java.lang.Override - public boolean getIgnoreUnknownFields() { - return ignoreUnknownFields_; - } - /** - * - * - *
-       * Only the setting on the initial request is respected. If true, unknown
-       * input fields are dropped. Otherwise, extra fields cause the append to
-       * fail. Defaults to false.
-       * 
- * - * bool ignore_unknown_fields = 5; - * - * @param value The ignoreUnknownFields to set. - * @return This builder for chaining. - */ - public Builder setIgnoreUnknownFields(boolean value) { - - ignoreUnknownFields_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Only the setting on the initial request is respected. If true, unknown
-       * input fields are dropped. Otherwise, extra fields cause the append to
-       * fail. Defaults to false.
-       * 
- * - * bool ignore_unknown_fields = 5; - * - * @return This builder for chaining. - */ - public Builder clearIgnoreUnknownFields() { - - ignoreUnknownFields_ = false; - onChanged(); - return this; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.AppendRowsRequest) - private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public AppendRowsRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new AppendRowsRequest(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsRequest - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - public interface AppendRowsResponseOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * The row offset at which the last append occurred.
-     * 
- * - * int64 offset = 1; - * - * @return Whether the offset field is set. - */ - boolean hasOffset(); - /** - * - * - *
-     * The row offset at which the last append occurred.
-     * 
- * - * int64 offset = 1; - * - * @return The offset. - */ - long getOffset(); - - /** - * - * - *
-     * Error in case of append failure. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when an offset is specified; the row is already
-     *   appended, so it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when an offset is specified; the specified offset
-     *   is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by a malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   appending without an offset.
-     * ABORTED: request processing is aborted because of prior failures; the
-     *   request can be retried once the previous failure is fixed.
-     * INTERNAL: server-side errors that can be retried.
-     * 
- * - * .google.rpc.Status error = 2; - * - * @return Whether the error field is set. - */ - boolean hasError(); - /** - * - * - *
-     * Error in case of append failure. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when an offset is specified; the row is already
-     *   appended, so it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when an offset is specified; the specified offset
-     *   is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by a malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   appending without an offset.
-     * ABORTED: request processing is aborted because of prior failures; the
-     *   request can be retried once the previous failure is fixed.
-     * INTERNAL: server-side errors that can be retried.
-     * 
- * - * .google.rpc.Status error = 2; - * - * @return The error. - */ - com.google.rpc.Status getError(); - /** - * - * - *
-     * Error in case of append failure. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when an offset is specified; the row is already
-     *   appended, so it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when an offset is specified; the specified offset
-     *   is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by a malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   appending without an offset.
-     * ABORTED: request processing is aborted because of prior failures; the
-     *   request can be retried once the previous failure is fixed.
-     * INTERNAL: server-side errors that can be retried.
-     * 
- * - * .google.rpc.Status error = 2; - */ - com.google.rpc.StatusOrBuilder getErrorOrBuilder(); - - /** - * - * - *
-     * If the backend detects a schema update, it is passed to the user so that
-     * the user can input messages of the new type. It will be empty when there
-     * are no schema updates.
-     * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - * - * @return Whether the updatedSchema field is set. - */ - boolean hasUpdatedSchema(); - /** - * - * - *
-     * If the backend detects a schema update, it is passed to the user so that
-     * the user can input messages of the new type. It will be empty when there
-     * are no schema updates.
-     * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - * - * @return The updatedSchema. - */ - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema getUpdatedSchema(); - /** - * - * - *
-     * If the backend detects a schema update, it is passed to the user so that
-     * the user can input messages of the new type. It will be empty when there
-     * are no schema updates.
-     * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - */ - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder - getUpdatedSchemaOrBuilder(); - - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.ResponseCase - getResponseCase(); - } - /** - * - * - *
-   * Response message for `AppendRows`.
-   * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse} - */ - public static final class AppendRowsResponse extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse) - AppendRowsResponseOrBuilder { - private static final long serialVersionUID = 0L; - // Use AppendRowsResponse.newBuilder() to construct. - private AppendRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private AppendRowsResponse() {} - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new AppendRowsResponse(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private AppendRowsResponse( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 8: - { - responseCase_ = 1; - response_ = input.readInt64(); - break; - } - case 18: - { - com.google.rpc.Status.Builder subBuilder = null; - if (responseCase_ == 2) { - subBuilder = ((com.google.rpc.Status) response_).toBuilder(); - } - response_ = input.readMessage(com.google.rpc.Status.parser(), extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom((com.google.rpc.Status) response_); - response_ = subBuilder.buildPartial(); - } - responseCase_ = 2; - break; - } - case 26: - { - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder subBuilder = - null; - if (updatedSchema_ != null) { - subBuilder = updatedSchema_.toBuilder(); - } - updatedSchema_ = - input.readMessage( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.parser(), - extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(updatedSchema_); - updatedSchema_ = subBuilder.buildPartial(); - } - - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.class, - 
com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.Builder.class); - } - - private int responseCase_ = 0; - private java.lang.Object response_; - - public enum ResponseCase - implements - com.google.protobuf.Internal.EnumLite, - com.google.protobuf.AbstractMessage.InternalOneOfEnum { - OFFSET(1), - ERROR(2), - RESPONSE_NOT_SET(0); - private final int value; - - private ResponseCase(int value) { - this.value = value; - } - /** - * @param value The number of the enum to look for. - * @return The enum associated with the given number. - * @deprecated Use {@link #forNumber(int)} instead. - */ - @java.lang.Deprecated - public static ResponseCase valueOf(int value) { - return forNumber(value); - } - - public static ResponseCase forNumber(int value) { - switch (value) { - case 1: - return OFFSET; - case 2: - return ERROR; - case 0: - return RESPONSE_NOT_SET; - default: - return null; - } - } - - public int getNumber() { - return this.value; - } - }; - - public ResponseCase getResponseCase() { - return ResponseCase.forNumber(responseCase_); - } - - public static final int OFFSET_FIELD_NUMBER = 1; - /** - * - * - *
-     * The row offset at which the last append occurred.
-     * 
- * - * int64 offset = 1; - * - * @return Whether the offset field is set. - */ - @java.lang.Override - public boolean hasOffset() { - return responseCase_ == 1; - } - /** - * - * - *
-     * The row offset at which the last append occurred.
-     * 
- * - * int64 offset = 1; - * - * @return The offset. - */ - @java.lang.Override - public long getOffset() { - if (responseCase_ == 1) { - return (java.lang.Long) response_; - } - return 0L; - } - - public static final int ERROR_FIELD_NUMBER = 2; - /** - * - * - *
-     * Error in case of append failure. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when an offset is specified; the row is already
-     *   appended, so it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when an offset is specified; the specified offset
-     *   is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by a malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   appending without an offset.
-     * ABORTED: request processing is aborted because of prior failures; the
-     *   request can be retried once the previous failure is fixed.
-     * INTERNAL: server-side errors that can be retried.
-     * 
- * - * .google.rpc.Status error = 2; - * - * @return Whether the error field is set. - */ - @java.lang.Override - public boolean hasError() { - return responseCase_ == 2; - } - /** - * - * - *
-     * Error in case of append failure. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when an offset is specified; the row is already
-     *   appended, so it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when an offset is specified; the specified offset
-     *   is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by a malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   appending without an offset.
-     * ABORTED: request processing is aborted because of prior failures; the
-     *   request can be retried once the previous failure is fixed.
-     * INTERNAL: server-side errors that can be retried.
-     * 
- * - * .google.rpc.Status error = 2; - * - * @return The error. - */ - @java.lang.Override - public com.google.rpc.Status getError() { - if (responseCase_ == 2) { - return (com.google.rpc.Status) response_; - } - return com.google.rpc.Status.getDefaultInstance(); - } - /** - * - * - *
-     * Error in case of append failure. If set, it means rows are not accepted
-     * into the system. Users can retry or continue with other requests within
-     * the same connection.
-     * ALREADY_EXISTS: happens when an offset is specified; the row is already
-     *   appended, so it is safe to ignore this error.
-     * OUT_OF_RANGE: happens when an offset is specified; the specified offset
-     *   is beyond the end of the stream.
-     * INVALID_ARGUMENT: error caused by a malformed request or data.
-     * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when
-     *   appending without an offset.
-     * ABORTED: request processing is aborted because of prior failures; the
-     *   request can be retried once the previous failure is fixed.
-     * INTERNAL: server-side errors that can be retried.
-     * 
- * - * .google.rpc.Status error = 2; - */ - @java.lang.Override - public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { - if (responseCase_ == 2) { - return (com.google.rpc.Status) response_; - } - return com.google.rpc.Status.getDefaultInstance(); - } - - public static final int UPDATED_SCHEMA_FIELD_NUMBER = 3; - private com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema updatedSchema_; - /** - * - * - *
-     * If the backend detects a schema update, it is passed to the user so that
-     * the user can input messages of the new type. It will be empty when there
-     * are no schema updates.
-     * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - * - * @return Whether the updatedSchema field is set. - */ - @java.lang.Override - public boolean hasUpdatedSchema() { - return updatedSchema_ != null; - } - /** - * - * - *
-     * If the backend detects a schema update, it is passed to the user so that
-     * the user can input messages of the new type. It will be empty when there
-     * are no schema updates.
-     * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - * - * @return The updatedSchema. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema getUpdatedSchema() { - return updatedSchema_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance() - : updatedSchema_; - } - /** - * - * - *
-     * If the backend detects a schema update, it is passed to the user so that
-     * the user can input messages of the new type. It will be empty when there
-     * are no schema updates.
-     * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder - getUpdatedSchemaOrBuilder() { - return getUpdatedSchema(); - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (responseCase_ == 1) { - output.writeInt64(1, (long) ((java.lang.Long) response_)); - } - if (responseCase_ == 2) { - output.writeMessage(2, (com.google.rpc.Status) response_); - } - if (updatedSchema_ != null) { - output.writeMessage(3, getUpdatedSchema()); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (responseCase_ == 1) { - size += - com.google.protobuf.CodedOutputStream.computeInt64Size( - 1, (long) ((java.lang.Long) response_)); - } - if (responseCase_ == 2) { - size += - com.google.protobuf.CodedOutputStream.computeMessageSize( - 2, (com.google.rpc.Status) response_); - } - if (updatedSchema_ != null) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getUpdatedSchema()); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse other = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse) obj; - - if (hasUpdatedSchema() != other.hasUpdatedSchema()) return false; - if (hasUpdatedSchema()) { - if (!getUpdatedSchema().equals(other.getUpdatedSchema())) return false; - } - if (!getResponseCase().equals(other.getResponseCase())) return false; - switch (responseCase_) { - case 1: - if (getOffset() != other.getOffset()) return false; - break; - case 2: - if (!getError().equals(other.getError())) return false; - break; - case 0: - default: - } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasUpdatedSchema()) { - hash = (37 * hash) + UPDATED_SCHEMA_FIELD_NUMBER; - hash = (53 * hash) + getUpdatedSchema().hashCode(); - } - switch (responseCase_) { - case 1: - hash = (37 * hash) + OFFSET_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset()); - break; - case 2: - hash = (37 * hash) + ERROR_FIELD_NUMBER; - hash = (53 * hash) + getError().hashCode(); - break; - case 0: - default: - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( - java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse 
parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( - byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( - byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( - java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse - parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( - com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? 
new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Response message for `AppendRows`.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse) - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.Builder - .class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - if (updatedSchemaBuilder_ == null) { - updatedSchema_ = null; - } else { - updatedSchema_ = null; - updatedSchemaBuilder_ = null; - } - responseCase_ = 0; - response_ = null; - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse build() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse result = - buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse result = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse(this); - if (responseCase_ == 1) { - result.response_ = response_; - } - if (responseCase_ == 2) { - if (errorBuilder_ == null) { - result.response_ = response_; - } else { - result.response_ = errorBuilder_.build(); - } - } - if (updatedSchemaBuilder_ == null) { - result.updatedSchema_ = updatedSchema_; - } else { - result.updatedSchema_ = updatedSchemaBuilder_.build(); - } - result.responseCase_ = responseCase_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - 
com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other - instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse) other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse - .getDefaultInstance()) return this; - if (other.hasUpdatedSchema()) { - mergeUpdatedSchema(other.getUpdatedSchema()); - } - switch (other.getResponseCase()) { - case OFFSET: - { - setOffset(other.getOffset()); - break; - } - case ERROR: - { - mergeError(other.getError()); - break; - } - case RESPONSE_NOT_SET: - { - break; - } - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private int responseCase_ = 0; - private java.lang.Object response_; - - public ResponseCase getResponseCase() { - return ResponseCase.forNumber(responseCase_); - } - - public Builder clearResponse() { - responseCase_ = 0; - response_ = null; - onChanged(); - return this; - } - - /** - * - * - *
-       * The row offset at which the last append occurred.
-       * 
- * - * int64 offset = 1; - * - * @return Whether the offset field is set. - */ - public boolean hasOffset() { - return responseCase_ == 1; - } - /** - * - * - *
-       * The row offset at which the last append occurred.
-       * 
- * - * int64 offset = 1; - * - * @return The offset. - */ - public long getOffset() { - if (responseCase_ == 1) { - return (java.lang.Long) response_; - } - return 0L; - } - /** - * - * - *
-       * The row offset at which the last append occurred.
-       * 
- * - * int64 offset = 1; - * - * @param value The offset to set. - * @return This builder for chaining. - */ - public Builder setOffset(long value) { - responseCase_ = 1; - response_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * The row offset at which the last append occurred.
-       * 
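For reference, a minimal sketch (not part of this patch) of how a caller could branch on this oneof using the accessors generated here; the -1 sentinel below is an arbitrary illustration, not part of the API:

    // Sketch against the removed v1alpha2 API: read the response oneof.
    static long offsetOrDefault(
        com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse response) {
      switch (response.getResponseCase()) {
        case OFFSET:
          // The row offset at which the last append occurred.
          return response.getOffset();
        case ERROR:
        case RESPONSE_NOT_SET:
        default:
          return -1L; // arbitrary sentinel: no offset reported
      }
    }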
- * - * int64 offset = 1; - * - * @return This builder for chaining. - */ - public Builder clearOffset() { - if (responseCase_ == 1) { - responseCase_ = 0; - response_ = null; - onChanged(); - } - return this; - } - - private com.google.protobuf.SingleFieldBuilderV3< - com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> - errorBuilder_; - /** - * - * - *
-       * Error in case of an append failure. If set, it means the rows were not
-       * accepted into the system, and the user can retry or continue with other
-       * requests within the same connection.
-       * ALREADY_EXISTS: happens when an offset is specified; the row has
-       *   already been appended, so it is safe to ignore this error.
-       * OUT_OF_RANGE: happens when an offset is specified; the specified
-       *   offset is beyond the end of the stream.
-       * INVALID_ARGUMENT: error caused by a malformed request or data.
-       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
-       *   when appending without an offset.
-       * ABORTED: request processing was aborted because of prior failures; the
-       *   request can be retried once the previous failure is fixed.
-       * INTERNAL: server-side errors that can be retried.
-       * 
- * - * .google.rpc.Status error = 2; - * - * @return Whether the error field is set. - */ - @java.lang.Override - public boolean hasError() { - return responseCase_ == 2; - } - /** - * - * - *
-       * Error in case of an append failure. If set, it means the rows were not
-       * accepted into the system, and the user can retry or continue with other
-       * requests within the same connection.
-       * ALREADY_EXISTS: happens when an offset is specified; the row has
-       *   already been appended, so it is safe to ignore this error.
-       * OUT_OF_RANGE: happens when an offset is specified; the specified
-       *   offset is beyond the end of the stream.
-       * INVALID_ARGUMENT: error caused by a malformed request or data.
-       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
-       *   when appending without an offset.
-       * ABORTED: request processing was aborted because of prior failures; the
-       *   request can be retried once the previous failure is fixed.
-       * INTERNAL: server-side errors that can be retried.
-       * 
- * - * .google.rpc.Status error = 2; - * - * @return The error. - */ - @java.lang.Override - public com.google.rpc.Status getError() { - if (errorBuilder_ == null) { - if (responseCase_ == 2) { - return (com.google.rpc.Status) response_; - } - return com.google.rpc.Status.getDefaultInstance(); - } else { - if (responseCase_ == 2) { - return errorBuilder_.getMessage(); - } - return com.google.rpc.Status.getDefaultInstance(); - } - } - /** - * - * - *
-       * Error in case of an append failure. If set, it means the rows were not
-       * accepted into the system, and the user can retry or continue with other
-       * requests within the same connection.
-       * ALREADY_EXISTS: happens when an offset is specified; the row has
-       *   already been appended, so it is safe to ignore this error.
-       * OUT_OF_RANGE: happens when an offset is specified; the specified
-       *   offset is beyond the end of the stream.
-       * INVALID_ARGUMENT: error caused by a malformed request or data.
-       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
-       *   when appending without an offset.
-       * ABORTED: request processing was aborted because of prior failures; the
-       *   request can be retried once the previous failure is fixed.
-       * INTERNAL: server-side errors that can be retried.
-       * 
- * - * .google.rpc.Status error = 2; - */ - public Builder setError(com.google.rpc.Status value) { - if (errorBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - response_ = value; - onChanged(); - } else { - errorBuilder_.setMessage(value); - } - responseCase_ = 2; - return this; - } - /** - * - * - *
-       * Error in case of an append failure. If set, it means the rows were not
-       * accepted into the system, and the user can retry or continue with other
-       * requests within the same connection.
-       * ALREADY_EXISTS: happens when an offset is specified; the row has
-       *   already been appended, so it is safe to ignore this error.
-       * OUT_OF_RANGE: happens when an offset is specified; the specified
-       *   offset is beyond the end of the stream.
-       * INVALID_ARGUMENT: error caused by a malformed request or data.
-       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
-       *   when appending without an offset.
-       * ABORTED: request processing was aborted because of prior failures; the
-       *   request can be retried once the previous failure is fixed.
-       * INTERNAL: server-side errors that can be retried.
-       * 
- * - * .google.rpc.Status error = 2; - */ - public Builder setError(com.google.rpc.Status.Builder builderForValue) { - if (errorBuilder_ == null) { - response_ = builderForValue.build(); - onChanged(); - } else { - errorBuilder_.setMessage(builderForValue.build()); - } - responseCase_ = 2; - return this; - } - /** - * - * - *
-       * Error in case of an append failure. If set, it means the rows were not
-       * accepted into the system, and the user can retry or continue with other
-       * requests within the same connection.
-       * ALREADY_EXISTS: happens when an offset is specified; the row has
-       *   already been appended, so it is safe to ignore this error.
-       * OUT_OF_RANGE: happens when an offset is specified; the specified
-       *   offset is beyond the end of the stream.
-       * INVALID_ARGUMENT: error caused by a malformed request or data.
-       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
-       *   when appending without an offset.
-       * ABORTED: request processing was aborted because of prior failures; the
-       *   request can be retried once the previous failure is fixed.
-       * INTERNAL: server-side errors that can be retried.
-       * 
- * - * .google.rpc.Status error = 2; - */ - public Builder mergeError(com.google.rpc.Status value) { - if (errorBuilder_ == null) { - if (responseCase_ == 2 && response_ != com.google.rpc.Status.getDefaultInstance()) { - response_ = - com.google.rpc.Status.newBuilder((com.google.rpc.Status) response_) - .mergeFrom(value) - .buildPartial(); - } else { - response_ = value; - } - onChanged(); - } else { - if (responseCase_ == 2) { - errorBuilder_.mergeFrom(value); - } - errorBuilder_.setMessage(value); - } - responseCase_ = 2; - return this; - } - /** - * - * - *
-       * Error in case of an append failure. If set, it means the rows were not
-       * accepted into the system, and the user can retry or continue with other
-       * requests within the same connection.
-       * ALREADY_EXISTS: happens when an offset is specified; the row has
-       *   already been appended, so it is safe to ignore this error.
-       * OUT_OF_RANGE: happens when an offset is specified; the specified
-       *   offset is beyond the end of the stream.
-       * INVALID_ARGUMENT: error caused by a malformed request or data.
-       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
-       *   when appending without an offset.
-       * ABORTED: request processing was aborted because of prior failures; the
-       *   request can be retried once the previous failure is fixed.
-       * INTERNAL: server-side errors that can be retried.
-       * 
- * - * .google.rpc.Status error = 2; - */ - public Builder clearError() { - if (errorBuilder_ == null) { - if (responseCase_ == 2) { - responseCase_ = 0; - response_ = null; - onChanged(); - } - } else { - if (responseCase_ == 2) { - responseCase_ = 0; - response_ = null; - } - errorBuilder_.clear(); - } - return this; - } - /** - * - * - *
-       * Error in case of an append failure. If set, it means the rows were not
-       * accepted into the system, and the user can retry or continue with other
-       * requests within the same connection.
-       * ALREADY_EXISTS: happens when an offset is specified; the row has
-       *   already been appended, so it is safe to ignore this error.
-       * OUT_OF_RANGE: happens when an offset is specified; the specified
-       *   offset is beyond the end of the stream.
-       * INVALID_ARGUMENT: error caused by a malformed request or data.
-       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
-       *   when appending without an offset.
-       * ABORTED: request processing was aborted because of prior failures; the
-       *   request can be retried once the previous failure is fixed.
-       * INTERNAL: server-side errors that can be retried.
-       * 
- * - * .google.rpc.Status error = 2; - */ - public com.google.rpc.Status.Builder getErrorBuilder() { - return getErrorFieldBuilder().getBuilder(); - } - /** - * - * - *
-       * Error in case of an append failure. If set, it means the rows were not
-       * accepted into the system, and the user can retry or continue with other
-       * requests within the same connection.
-       * ALREADY_EXISTS: happens when an offset is specified; the row has
-       *   already been appended, so it is safe to ignore this error.
-       * OUT_OF_RANGE: happens when an offset is specified; the specified
-       *   offset is beyond the end of the stream.
-       * INVALID_ARGUMENT: error caused by a malformed request or data.
-       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
-       *   when appending without an offset.
-       * ABORTED: request processing was aborted because of prior failures; the
-       *   request can be retried once the previous failure is fixed.
-       * INTERNAL: server-side errors that can be retried.
-       * 
- * - * .google.rpc.Status error = 2; - */ - @java.lang.Override - public com.google.rpc.StatusOrBuilder getErrorOrBuilder() { - if ((responseCase_ == 2) && (errorBuilder_ != null)) { - return errorBuilder_.getMessageOrBuilder(); - } else { - if (responseCase_ == 2) { - return (com.google.rpc.Status) response_; - } - return com.google.rpc.Status.getDefaultInstance(); - } - } - /** - * - * - *
-       * Error in case of an append failure. If set, it means the rows were not
-       * accepted into the system, and the user can retry or continue with other
-       * requests within the same connection.
-       * ALREADY_EXISTS: happens when an offset is specified; the row has
-       *   already been appended, so it is safe to ignore this error.
-       * OUT_OF_RANGE: happens when an offset is specified; the specified
-       *   offset is beyond the end of the stream.
-       * INVALID_ARGUMENT: error caused by a malformed request or data.
-       * RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens
-       *   when appending without an offset.
-       * ABORTED: request processing was aborted because of prior failures; the
-       *   request can be retried once the previous failure is fixed.
-       * INTERNAL: server-side errors that can be retried.
-       * 
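As a rough sketch of the retry guidance above, assuming the com.google.rpc.Code enum from the standard RPC protos is available to map the numeric code carried by google.rpc.Status:

    // Sketch: classify an append error per the guidance documented above.
    static boolean isRetryable(com.google.rpc.Status error) {
      com.google.rpc.Code code = com.google.rpc.Code.forNumber(error.getCode());
      if (code == null) {
        return false; // unrecognized code: treat as non-retryable
      }
      switch (code) {
        case ALREADY_EXISTS: // row already appended at this offset; safe to ignore
          return false;
        case RESOURCE_EXHAUSTED: // throttled; back off and retry
        case ABORTED: // retry once the prior failure is fixed
        case INTERNAL: // retryable server-side error
          return true;
        default: // OUT_OF_RANGE, INVALID_ARGUMENT, ... are not retryable
          return false;
      }
    }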
- * - * .google.rpc.Status error = 2; - */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.rpc.Status, com.google.rpc.Status.Builder, com.google.rpc.StatusOrBuilder> - getErrorFieldBuilder() { - if (errorBuilder_ == null) { - if (!(responseCase_ == 2)) { - response_ = com.google.rpc.Status.getDefaultInstance(); - } - errorBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.rpc.Status, - com.google.rpc.Status.Builder, - com.google.rpc.StatusOrBuilder>( - (com.google.rpc.Status) response_, getParentForChildren(), isClean()); - response_ = null; - } - responseCase_ = 2; - onChanged(); - ; - return errorBuilder_; - } - - private com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema updatedSchema_; - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder> - updatedSchemaBuilder_; - /** - * - * - *
-       * If the backend detects a schema update, it is passed to the user so that
-       * the user can use it to input messages of the new type. It will be empty
-       * when there are no schema updates.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - * - * @return Whether the updatedSchema field is set. - */ - public boolean hasUpdatedSchema() { - return updatedSchemaBuilder_ != null || updatedSchema_ != null; - } - /** - * - * - *
-       * If the backend detects a schema update, it is passed to the user so that
-       * the user can use it to input messages of the new type. It will be empty
-       * when there are no schema updates.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - * - * @return The updatedSchema. - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema getUpdatedSchema() { - if (updatedSchemaBuilder_ == null) { - return updatedSchema_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance() - : updatedSchema_; - } else { - return updatedSchemaBuilder_.getMessage(); - } - } - /** - * - * - *
-       * If the backend detects a schema update, it is passed to the user so that
-       * the user can use it to input messages of the new type. It will be empty
-       * when there are no schema updates.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - */ - public Builder setUpdatedSchema( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema value) { - if (updatedSchemaBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - updatedSchema_ = value; - onChanged(); - } else { - updatedSchemaBuilder_.setMessage(value); - } - - return this; - } - /** - * - * - *
-       * If the backend detects a schema update, it is passed to the user so that
-       * the user can use it to input messages of the new type. It will be empty
-       * when there are no schema updates.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - */ - public Builder setUpdatedSchema( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder builderForValue) { - if (updatedSchemaBuilder_ == null) { - updatedSchema_ = builderForValue.build(); - onChanged(); - } else { - updatedSchemaBuilder_.setMessage(builderForValue.build()); - } - - return this; - } - /** - * - * - *
-       * If the backend detects a schema update, it is passed to the user so that
-       * the user can use it to input messages of the new type. It will be empty
-       * when there are no schema updates.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - */ - public Builder mergeUpdatedSchema( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema value) { - if (updatedSchemaBuilder_ == null) { - if (updatedSchema_ != null) { - updatedSchema_ = - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.newBuilder( - updatedSchema_) - .mergeFrom(value) - .buildPartial(); - } else { - updatedSchema_ = value; - } - onChanged(); - } else { - updatedSchemaBuilder_.mergeFrom(value); - } - - return this; - } - /** - * - * - *
-       * If the backend detects a schema update, it is passed to the user so that
-       * the user can use it to input messages of the new type. It will be empty
-       * when there are no schema updates.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - */ - public Builder clearUpdatedSchema() { - if (updatedSchemaBuilder_ == null) { - updatedSchema_ = null; - onChanged(); - } else { - updatedSchema_ = null; - updatedSchemaBuilder_ = null; - } - - return this; - } - /** - * - * - *
-       * If the backend detects a schema update, it is passed to the user so that
-       * the user can use it to input messages of the new type. It will be empty
-       * when there are no schema updates.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder - getUpdatedSchemaBuilder() { - - onChanged(); - return getUpdatedSchemaFieldBuilder().getBuilder(); - } - /** - * - * - *
-       * If the backend detects a schema update, it is passed to the user so that
-       * the user can use it to input messages of the new type. It will be empty
-       * when there are no schema updates.
-       * 
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder - getUpdatedSchemaOrBuilder() { - if (updatedSchemaBuilder_ != null) { - return updatedSchemaBuilder_.getMessageOrBuilder(); - } else { - return updatedSchema_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance() - : updatedSchema_; - } - } - /** - * - * - *
-       * If the backend detects a schema update, it is passed to the user so that
-       * the user can use it to input messages of the new type. It will be empty
-       * when there are no schema updates.
-       * 
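A minimal sketch of consuming the reported schema change through the accessors generated here; the actual descriptor rebuild is only indicated by a comment:

    // Sketch: pick up a schema change reported on the removed v1alpha2 response.
    static void maybeRefreshSchema(
        com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse response) {
      if (response.hasUpdatedSchema()) {
        com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema updated =
            response.getUpdatedSchema();
        // A real writer would rebuild its message descriptor from `updated`
        // before the next append.
        System.out.println("updated schema: " + updated);
      }
    }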
- * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema updated_schema = 3; - */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder> - getUpdatedSchemaFieldBuilder() { - if (updatedSchemaBuilder_ == null) { - updatedSchemaBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder>( - getUpdatedSchema(), getParentForChildren(), isClean()); - updatedSchema_ = null; - } - return updatedSchemaBuilder_; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.AppendRowsResponse) - private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public AppendRowsResponse parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new AppendRowsResponse(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.AppendRowsResponse - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - public interface GetWriteStreamRequestOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * Required. Name of the stream to get, in the form of
-     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-     * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The name. - */ - java.lang.String getName(); - /** - * - * - *
-     * Required. Name of the stream to get, in the form of
-     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-     * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for name. - */ - com.google.protobuf.ByteString getNameBytes(); - } - /** - * - * - *
-   * Request message for `GetWriteStream`.
-   * 
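For illustration, such a request could be built with the generated accessors shown in this diff; the project, dataset, table, and stream segments below are placeholders:

    // Sketch: build a GetWriteStreamRequest for the removed v1alpha2 surface.
    com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest request =
        com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.newBuilder()
            .setName(
                "projects/my-project/datasets/my_dataset/tables/my_table/streams/my-stream")
            .build();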
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest} - */ - public static final class GetWriteStreamRequest extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest) - GetWriteStreamRequestOrBuilder { - private static final long serialVersionUID = 0L; - // Use GetWriteStreamRequest.newBuilder() to construct. - private GetWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private GetWriteStreamRequest() { - name_ = ""; - } - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new GetWriteStreamRequest(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private GetWriteStreamRequest( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: - { - java.lang.String s = input.readStringRequireUtf8(); - - name_ = s; - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.Builder - .class); - } - - public static final int NAME_FIELD_NUMBER = 1; - private volatile java.lang.Object name_; - /** - * - * - *
-     * Required. Name of the stream to get, in the form of
-     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-     * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The name. - */ - @java.lang.Override - public java.lang.String getName() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - name_ = s; - return s; - } - } - /** - * - * - *
-     * Required. Name of the stream to get, in the form of
-     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-     * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for name. - */ - @java.lang.Override - public com.google.protobuf.ByteString getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (!getNameBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (!getNameBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj - instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest other = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest) obj; - - if (!getName().equals(other.getName())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (37 * hash) + NAME_FIELD_NUMBER; - hash = (53 * hash) + getName().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - parseFrom(com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - parseFrom(byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Request message for `GetWriteStream`.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest) - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.Builder - .class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - name_ = ""; - - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest build() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest result = - buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest result = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest(this); - result.name_ = name_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int 
index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other - instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest) other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - .getDefaultInstance()) return this; - if (!other.getName().isEmpty()) { - name_ = other.name_; - onChanged(); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest parsedMessage = - null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private java.lang.Object name_ = ""; - /** - * - * - *
-       * Required. Name of the stream to get, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The name. - */ - public java.lang.String getName() { - java.lang.Object ref = name_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - name_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * - * - *
-       * Required. Name of the stream to get, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for name. - */ - public com.google.protobuf.ByteString getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * - * - *
-       * Required. Name of the stream to get, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @param value The name to set. - * @return This builder for chaining. - */ - public Builder setName(java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - name_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Required. Name of the stream to get, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return This builder for chaining. - */ - public Builder clearName() { - - name_ = getDefaultInstance().getName(); - onChanged(); - return this; - } - /** - * - * - *
-       * Required. Name of the stream to get, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @param value The bytes for name to set. - * @return This builder for chaining. - */ - public Builder setNameBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - name_ = value; - onChanged(); - return this; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.GetWriteStreamRequest) - private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public GetWriteStreamRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new GetWriteStreamRequest(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.GetWriteStreamRequest - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - public interface BatchCommitWriteStreamsRequestOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * Required. Parent table that all the streams should belong to, in the form of
-     * `projects/{project}/datasets/{dataset}/tables/{table}`.
-     * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The parent. - */ - java.lang.String getParent(); - /** - * - * - *
-     * Required. Parent table that all the streams should belong to, in the form of
-     * `projects/{project}/datasets/{dataset}/tables/{table}`.
-     * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for parent. - */ - com.google.protobuf.ByteString getParentBytes(); - - /** - * - * - *
-     * Required. The group of streams that will be committed atomically.
-     * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @return A list containing the writeStreams. - */ - java.util.List getWriteStreamsList(); - /** - * - * - *
-     * Required. The group of streams that will be committed atomically.
-     * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @return The count of writeStreams. - */ - int getWriteStreamsCount(); - /** - * - * - *
-     * Required. The group of streams that will be committed atomically.
-     * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @param index The index of the element to return. - * @return The writeStreams at the given index. - */ - java.lang.String getWriteStreams(int index); - /** - * - * - *
-     * Required. The group of streams that will be committed atomically.
-     * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @param index The index of the value to return. - * @return The bytes of the writeStreams at the given index. - */ - com.google.protobuf.ByteString getWriteStreamsBytes(int index); - } - /** - * - * - *
-   * Request message for `BatchCommitWriteStreams`.
-   * 
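A hedged sketch of building this request; setParent and addWriteStreams are assumed here as the setters protoc generates for a string field and a repeated string field (the relevant Builder methods are not shown in this excerpt):

    // Sketch: commit a group of v1alpha2 write streams atomically.
    com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest commit =
        com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest
            .newBuilder()
            .setParent("projects/my-project/datasets/my_dataset/tables/my_table")
            .addWriteStreams(
                "projects/my-project/datasets/my_dataset/tables/my_table/streams/stream-1")
            .addWriteStreams(
                "projects/my-project/datasets/my_dataset/tables/my_table/streams/stream-2")
            .build();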
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest} - */ - public static final class BatchCommitWriteStreamsRequest - extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest) - BatchCommitWriteStreamsRequestOrBuilder { - private static final long serialVersionUID = 0L; - // Use BatchCommitWriteStreamsRequest.newBuilder() to construct. - private BatchCommitWriteStreamsRequest( - com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private BatchCommitWriteStreamsRequest() { - parent_ = ""; - writeStreams_ = com.google.protobuf.LazyStringArrayList.EMPTY; - } - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new BatchCommitWriteStreamsRequest(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private BatchCommitWriteStreamsRequest( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: - { - java.lang.String s = input.readStringRequireUtf8(); - - parent_ = s; - break; - } - case 18: - { - java.lang.String s = input.readStringRequireUtf8(); - if (!((mutable_bitField0_ & 0x00000001) != 0)) { - writeStreams_ = new com.google.protobuf.LazyStringArrayList(); - mutable_bitField0_ |= 0x00000001; - } - writeStreams_.add(s); - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) != 0)) { - writeStreams_ = writeStreams_.getUnmodifiableView(); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - .class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - .Builder.class); - } - - public static final int PARENT_FIELD_NUMBER = 1; - private volatile java.lang.Object parent_; - /** - * - * - *
-     * Required. Parent table that all the streams should belong to, in the form of
-     * `projects/{project}/datasets/{dataset}/tables/{table}`.
-     * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The parent. - */ - @java.lang.Override - public java.lang.String getParent() { - java.lang.Object ref = parent_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - parent_ = s; - return s; - } - } - /** - * - * - *
-     * Required. Parent table that all the streams should belong to, in the form of
-     * `projects/{project}/datasets/{dataset}/tables/{table}`.
-     * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for parent. - */ - @java.lang.Override - public com.google.protobuf.ByteString getParentBytes() { - java.lang.Object ref = parent_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - parent_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int WRITE_STREAMS_FIELD_NUMBER = 2; - private com.google.protobuf.LazyStringList writeStreams_; - /** - * - * - *
-     * Required. The group of streams that will be committed atomically.
-     * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @return A list containing the writeStreams. - */ - public com.google.protobuf.ProtocolStringList getWriteStreamsList() { - return writeStreams_; - } - /** - * - * - *
-     * Required. The group of streams that will be committed atomically.
-     * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @return The count of writeStreams. - */ - public int getWriteStreamsCount() { - return writeStreams_.size(); - } - /** - * - * - *
-     * Required. The group of streams that will be committed atomically.
-     * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @param index The index of the element to return. - * @return The writeStreams at the given index. - */ - public java.lang.String getWriteStreams(int index) { - return writeStreams_.get(index); - } - /** - * - * - *
-     * Required. The group of streams that will be committed atomically.
-     * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @param index The index of the value to return. - * @return The bytes of the writeStreams at the given index. - */ - public com.google.protobuf.ByteString getWriteStreamsBytes(int index) { - return writeStreams_.getByteString(index); - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (!getParentBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, parent_); - } - for (int i = 0; i < writeStreams_.size(); i++) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 2, writeStreams_.getRaw(i)); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (!getParentBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, parent_); - } - { - int dataSize = 0; - for (int i = 0; i < writeStreams_.size(); i++) { - dataSize += computeStringSizeNoTag(writeStreams_.getRaw(i)); - } - size += dataSize; - size += 1 * getWriteStreamsList().size(); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj - instanceof - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest other = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest) obj; - - if (!getParent().equals(other.getParent())) return false; - if (!getWriteStreamsList().equals(other.getWriteStreamsList())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (37 * hash) + PARENT_FIELD_NUMBER; - hash = (53 * hash) + getParent().hashCode(); - if (getWriteStreamsCount() > 0) { - hash = (37 * hash) + WRITE_STREAMS_FIELD_NUMBER; - hash = (53 * hash) + getWriteStreamsList().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parseFrom(com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return 
PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Request message for `BatchCommitWriteStreams`.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest) - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - .class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - .Builder.class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - parent_ = ""; - - writeStreams_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - build() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest result = - buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest result = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest( - this); - int from_bitField0_ = bitField0_; - result.parent_ = parent_; - if (((bitField0_ & 0x00000001) != 0)) { - writeStreams_ = writeStreams_.getUnmodifiableView(); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.writeStreams_ = writeStreams_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, 
java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other - instanceof - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest) - other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - .getDefaultInstance()) return this; - if (!other.getParent().isEmpty()) { - parent_ = other.parent_; - onChanged(); - } - if (!other.writeStreams_.isEmpty()) { - if (writeStreams_.isEmpty()) { - writeStreams_ = other.writeStreams_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureWriteStreamsIsMutable(); - writeStreams_.addAll(other.writeStreams_); - } - onChanged(); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private int bitField0_; - - private java.lang.Object parent_ = ""; - /** - * - * - *
-       * Required. Parent table that all the streams should belong to, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}`.
-       * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The parent. - */ - public java.lang.String getParent() { - java.lang.Object ref = parent_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - parent_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * - * - *
-       * Required. Parent table that all the streams should belong to, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}`.
-       * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for parent. - */ - public com.google.protobuf.ByteString getParentBytes() { - java.lang.Object ref = parent_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - parent_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * - * - *
-       * Required. Parent table that all the streams should belong to, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}`.
-       * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @param value The parent to set. - * @return This builder for chaining. - */ - public Builder setParent(java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - parent_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Required. Parent table that all the streams should belong to, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}`.
-       * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return This builder for chaining. - */ - public Builder clearParent() { - - parent_ = getDefaultInstance().getParent(); - onChanged(); - return this; - } - /** - * - * - *
-       * Required. Parent table that all the streams should belong to, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}`.
-       * 
- * - * - * string parent = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @param value The bytes for parent to set. - * @return This builder for chaining. - */ - public Builder setParentBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - parent_ = value; - onChanged(); - return this; - } - - private com.google.protobuf.LazyStringList writeStreams_ = - com.google.protobuf.LazyStringArrayList.EMPTY; - - private void ensureWriteStreamsIsMutable() { - if (!((bitField0_ & 0x00000001) != 0)) { - writeStreams_ = new com.google.protobuf.LazyStringArrayList(writeStreams_); - bitField0_ |= 0x00000001; - } - } - /** - * - * - *
-       * Required. The group of streams that will be committed atomically.
-       * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @return A list containing the writeStreams. - */ - public com.google.protobuf.ProtocolStringList getWriteStreamsList() { - return writeStreams_.getUnmodifiableView(); - } - /** - * - * - *
-       * Required. The group of streams that will be committed atomically.
-       * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @return The count of writeStreams. - */ - public int getWriteStreamsCount() { - return writeStreams_.size(); - } - /** - * - * - *
-       * Required. The group of streams that will be committed atomically.
-       * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @param index The index of the element to return. - * @return The writeStreams at the given index. - */ - public java.lang.String getWriteStreams(int index) { - return writeStreams_.get(index); - } - /** - * - * - *
-       * Required. The group of streams that will be committed atomically.
-       * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @param index The index of the value to return. - * @return The bytes of the writeStreams at the given index. - */ - public com.google.protobuf.ByteString getWriteStreamsBytes(int index) { - return writeStreams_.getByteString(index); - } - /** - * - * - *
-       * Required. The group of streams that will be committed atomically.
-       * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @param index The index to set the value at. - * @param value The writeStreams to set. - * @return This builder for chaining. - */ - public Builder setWriteStreams(int index, java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureWriteStreamsIsMutable(); - writeStreams_.set(index, value); - onChanged(); - return this; - } - /** - * - * - *
-       * Required. The group of streams that will be committed atomically.
-       * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @param value The writeStreams to add. - * @return This builder for chaining. - */ - public Builder addWriteStreams(java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - ensureWriteStreamsIsMutable(); - writeStreams_.add(value); - onChanged(); - return this; - } - /** - * - * - *
-       * Required. The group of streams that will be committed atomically.
-       * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @param values The writeStreams to add. - * @return This builder for chaining. - */ - public Builder addAllWriteStreams(java.lang.Iterable values) { - ensureWriteStreamsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll(values, writeStreams_); - onChanged(); - return this; - } - /** - * - * - *
-       * Required. The group of streams that will be committed atomically.
-       * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @return This builder for chaining. - */ - public Builder clearWriteStreams() { - writeStreams_ = com.google.protobuf.LazyStringArrayList.EMPTY; - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - return this; - } - /** - * - * - *
-       * Required. The group of streams that will be committed atomically.
-       * 
- * - * repeated string write_streams = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * @param value The bytes of the writeStreams to add. - * @return This builder for chaining. - */ - public Builder addWriteStreamsBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - ensureWriteStreamsIsMutable(); - writeStreams_.add(value); - onChanged(); - return this; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsRequest) - private static final com.google.cloud.bigquery.storage.v1alpha2.Storage - .BatchCommitWriteStreamsRequest - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public BatchCommitWriteStreamsRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new BatchCommitWriteStreamsRequest(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsRequest - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - public interface BatchCommitWriteStreamsResponseOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * The time at which streams were committed in microseconds granularity.
-     * 
- * - * .google.protobuf.Timestamp commit_time = 1; - * - * @return Whether the commitTime field is set. - */ - boolean hasCommitTime(); - /** - * - * - *
-     * The time at which streams were committed in microseconds granularity.
-     * 
- * - * .google.protobuf.Timestamp commit_time = 1; - * - * @return The commitTime. - */ - com.google.protobuf.Timestamp getCommitTime(); - /** - * - * - *
-     * The time at which streams were committed in microseconds granularity.
-     * 
- * - * .google.protobuf.Timestamp commit_time = 1; - */ - com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder(); - } - /** - * - * - *
-   * Response message for `BatchCommitWriteStreams`.
-   * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse} - */ - public static final class BatchCommitWriteStreamsResponse - extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse) - BatchCommitWriteStreamsResponseOrBuilder { - private static final long serialVersionUID = 0L; - // Use BatchCommitWriteStreamsResponse.newBuilder() to construct. - private BatchCommitWriteStreamsResponse( - com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private BatchCommitWriteStreamsResponse() {} - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new BatchCommitWriteStreamsResponse(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private BatchCommitWriteStreamsResponse( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: - { - com.google.protobuf.Timestamp.Builder subBuilder = null; - if (commitTime_ != null) { - subBuilder = commitTime_.toBuilder(); - } - commitTime_ = - input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(commitTime_); - commitTime_ = subBuilder.buildPartial(); - } - - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - .class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - .Builder.class); - } - - public static final int COMMIT_TIME_FIELD_NUMBER = 1; - private com.google.protobuf.Timestamp commitTime_; - /** - * - * - *
-     * The time at which streams were committed in microseconds granularity.
-     * 
- * - * .google.protobuf.Timestamp commit_time = 1; - * - * @return Whether the commitTime field is set. - */ - @java.lang.Override - public boolean hasCommitTime() { - return commitTime_ != null; - } - /** - * - * - *
-     * The time at which streams were committed in microseconds granularity.
-     * 
- * - * .google.protobuf.Timestamp commit_time = 1; - * - * @return The commitTime. - */ - @java.lang.Override - public com.google.protobuf.Timestamp getCommitTime() { - return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; - } - /** - * - * - *
-     * The time at which streams were committed in microseconds granularity.
-     * 
- * - * .google.protobuf.Timestamp commit_time = 1; - */ - @java.lang.Override - public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { - return getCommitTime(); - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (commitTime_ != null) { - output.writeMessage(1, getCommitTime()); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (commitTime_ != null) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, getCommitTime()); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj - instanceof - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse other = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse) obj; - - if (hasCommitTime() != other.hasCommitTime()) return false; - if (hasCommitTime()) { - if (!getCommitTime().equals(other.getCommitTime())) return false; - } - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (hasCommitTime()) { - hash = (37 * hash) + COMMIT_TIME_FIELD_NUMBER; - hash = (53 * hash) + getCommitTime().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parseFrom(com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static 
com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Response message for `BatchCommitWriteStreams`.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse) - com.google.cloud.bigquery.storage.v1alpha2.Storage - .BatchCommitWriteStreamsResponseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - .class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - .Builder.class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - if (commitTimeBuilder_ == null) { - commitTime_ = null; - } else { - commitTime_ = null; - commitTimeBuilder_ = null; - } - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - build() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse result = - buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse result = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse( - this); - if (commitTimeBuilder_ == null) { - result.commitTime_ = commitTime_; - } else { - result.commitTime_ = commitTimeBuilder_.build(); - } - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - 
public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other - instanceof - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse) - other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - .getDefaultInstance()) return this; - if (other.hasCommitTime()) { - mergeCommitTime(other.getCommitTime()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private com.google.protobuf.Timestamp commitTime_; - private com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.Timestamp, - com.google.protobuf.Timestamp.Builder, - com.google.protobuf.TimestampOrBuilder> - commitTimeBuilder_; - /** - * - * - *
-       * The time at which streams were committed in microseconds granularity.
-       * 
- * - * .google.protobuf.Timestamp commit_time = 1; - * - * @return Whether the commitTime field is set. - */ - public boolean hasCommitTime() { - return commitTimeBuilder_ != null || commitTime_ != null; - } - /** - * - * - *
-       * The time at which streams were committed in microseconds granularity.
-       * 
- * - * .google.protobuf.Timestamp commit_time = 1; - * - * @return The commitTime. - */ - public com.google.protobuf.Timestamp getCommitTime() { - if (commitTimeBuilder_ == null) { - return commitTime_ == null - ? com.google.protobuf.Timestamp.getDefaultInstance() - : commitTime_; - } else { - return commitTimeBuilder_.getMessage(); - } - } - /** - * - * - *
-       * The time at which streams were committed in microseconds granularity.
-       * 
- * - * .google.protobuf.Timestamp commit_time = 1; - */ - public Builder setCommitTime(com.google.protobuf.Timestamp value) { - if (commitTimeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - commitTime_ = value; - onChanged(); - } else { - commitTimeBuilder_.setMessage(value); - } - - return this; - } - /** - * - * - *
-       * The time at which streams were committed in microseconds granularity.
-       * 
- * - * .google.protobuf.Timestamp commit_time = 1; - */ - public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForValue) { - if (commitTimeBuilder_ == null) { - commitTime_ = builderForValue.build(); - onChanged(); - } else { - commitTimeBuilder_.setMessage(builderForValue.build()); - } - - return this; - } - /** - * - * - *
-       * The time at which streams were committed in microseconds granularity.
-       * 
- * - * .google.protobuf.Timestamp commit_time = 1; - */ - public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { - if (commitTimeBuilder_ == null) { - if (commitTime_ != null) { - commitTime_ = - com.google.protobuf.Timestamp.newBuilder(commitTime_) - .mergeFrom(value) - .buildPartial(); - } else { - commitTime_ = value; - } - onChanged(); - } else { - commitTimeBuilder_.mergeFrom(value); - } - - return this; - } - /** - * - * - *
-       * The time at which streams were committed in microseconds granularity.
-       * 
- * - * .google.protobuf.Timestamp commit_time = 1; - */ - public Builder clearCommitTime() { - if (commitTimeBuilder_ == null) { - commitTime_ = null; - onChanged(); - } else { - commitTime_ = null; - commitTimeBuilder_ = null; - } - - return this; - } - /** - * - * - *
-       * The time at which streams were committed in microseconds granularity.
-       * 
- * - * .google.protobuf.Timestamp commit_time = 1; - */ - public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { - - onChanged(); - return getCommitTimeFieldBuilder().getBuilder(); - } - /** - * - * - *
-       * The time at which streams were committed in microseconds granularity.
-       * 
- * - * .google.protobuf.Timestamp commit_time = 1; - */ - public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { - if (commitTimeBuilder_ != null) { - return commitTimeBuilder_.getMessageOrBuilder(); - } else { - return commitTime_ == null - ? com.google.protobuf.Timestamp.getDefaultInstance() - : commitTime_; - } - } - /** - * - * - *
-       * The time at which streams were committed in microseconds granularity.
-       * 
- * - * .google.protobuf.Timestamp commit_time = 1; - */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.Timestamp, - com.google.protobuf.Timestamp.Builder, - com.google.protobuf.TimestampOrBuilder> - getCommitTimeFieldBuilder() { - if (commitTimeBuilder_ == null) { - commitTimeBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.Timestamp, - com.google.protobuf.Timestamp.Builder, - com.google.protobuf.TimestampOrBuilder>( - getCommitTime(), getParentForChildren(), isClean()); - commitTime_ = null; - } - return commitTimeBuilder_; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.BatchCommitWriteStreamsResponse) - private static final com.google.cloud.bigquery.storage.v1alpha2.Storage - .BatchCommitWriteStreamsResponse - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public BatchCommitWriteStreamsResponse parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new BatchCommitWriteStreamsResponse(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.BatchCommitWriteStreamsResponse - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - public interface FinalizeWriteStreamRequestOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * Required. Name of the stream to finalize, in the form of
-     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-     * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The name. - */ - java.lang.String getName(); - /** - * - * - *
-     * Required. Name of the stream to finalize, in the form of
-     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-     * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for name. - */ - com.google.protobuf.ByteString getNameBytes(); - } - /** - * - * - *
-   * Request message for invoking `FinalizeWriteStream`.
-   * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest} - */ - public static final class FinalizeWriteStreamRequest - extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest) - FinalizeWriteStreamRequestOrBuilder { - private static final long serialVersionUID = 0L; - // Use FinalizeWriteStreamRequest.newBuilder() to construct. - private FinalizeWriteStreamRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private FinalizeWriteStreamRequest() { - name_ = ""; - } - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new FinalizeWriteStreamRequest(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private FinalizeWriteStreamRequest( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: - { - java.lang.String s = input.readStringRequireUtf8(); - - name_ = s; - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest.Builder - .class); - } - - public static final int NAME_FIELD_NUMBER = 1; - private volatile java.lang.Object name_; - /** - * - * - *
-     * Required. Name of the stream to finalize, in the form of
-     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-     * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The name. - */ - @java.lang.Override - public java.lang.String getName() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - name_ = s; - return s; - } - } - /** - * - * - *
-     * Required. Name of the stream to finalize, in the form of
-     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-     * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for name. - */ - @java.lang.Override - public com.google.protobuf.ByteString getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (!getNameBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (!getNameBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj - instanceof - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest other = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest) obj; - - if (!getName().equals(other.getName())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (37 * hash) + NAME_FIELD_NUMBER; - hash = (53 * hash) + getName().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - parseFrom(com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - 
parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Request message for invoking `FinalizeWriteStream`.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest) - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - .Builder.class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - name_ = ""; - - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest build() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest result = - buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest result = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest(this); - result.name_ = name_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder 
setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other - instanceof - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest) - other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - .getDefaultInstance()) return this; - if (!other.getName().isEmpty()) { - name_ = other.name_; - onChanged(); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private java.lang.Object name_ = ""; - /** - * - * - *
-       * Required. Name of the stream to finalize, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The name. - */ - public java.lang.String getName() { - java.lang.Object ref = name_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - name_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * - * - *
-       * Required. Name of the stream to finalize, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for name. - */ - public com.google.protobuf.ByteString getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * - * - *
-       * Required. Name of the stream to finalize, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @param value The name to set. - * @return This builder for chaining. - */ - public Builder setName(java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - name_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Required. Name of the stream to finalize, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return This builder for chaining. - */ - public Builder clearName() { - - name_ = getDefaultInstance().getName(); - onChanged(); - return this; - } - /** - * - * - *
-       * Required. Name of the stream to finalize, in the form of
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * - * string name = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @param value The bytes for name to set. - * @return This builder for chaining. - */ - public Builder setNameBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - name_ = value; - onChanged(); - return this; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamRequest) - private static final com.google.cloud.bigquery.storage.v1alpha2.Storage - .FinalizeWriteStreamRequest - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public FinalizeWriteStreamRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new FinalizeWriteStreamRequest(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamRequest - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - public interface FinalizeWriteStreamResponseOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * Number of rows in the finalized stream.
-     * 
- * - * int64 row_count = 1; - * - * @return The rowCount. - */ - long getRowCount(); - } - /** - * - * - *
-   * Response message for `FinalizeWriteStream`.
-   * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse} - */ - public static final class FinalizeWriteStreamResponse - extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse) - FinalizeWriteStreamResponseOrBuilder { - private static final long serialVersionUID = 0L; - // Use FinalizeWriteStreamResponse.newBuilder() to construct. - private FinalizeWriteStreamResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private FinalizeWriteStreamResponse() {} - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new FinalizeWriteStreamResponse(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private FinalizeWriteStreamResponse( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 8: - { - rowCount_ = input.readInt64(); - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse.Builder - .class); - } - - public static final int ROW_COUNT_FIELD_NUMBER = 1; - private long rowCount_; - /** - * - * - *
-     * Number of rows in the finalized stream.
-     * 
- * - * int64 row_count = 1; - * - * @return The rowCount. - */ - @java.lang.Override - public long getRowCount() { - return rowCount_; - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (rowCount_ != 0L) { - output.writeInt64(1, rowCount_); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (rowCount_ != 0L) { - size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, rowCount_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj - instanceof - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse other = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse) obj; - - if (getRowCount() != other.getRowCount()) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (37 * hash) + ROW_COUNT_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getRowCount()); - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parseFrom(java.nio.ByteBuffer data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parseFrom(com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parseFrom(byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parseFrom(byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return 
PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parseFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parseFrom(com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Response message for `FinalizeWriteStream`.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse) - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - .class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - .Builder.class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - rowCount_ = 0L; - - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - build() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse result = - buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse result = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse( - this); - result.rowCount_ = rowCount_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - 
@java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other - instanceof - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse) - other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - .getDefaultInstance()) return this; - if (other.getRowCount() != 0L) { - setRowCount(other.getRowCount()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private long rowCount_; - /** - * - * - *
-       * Number of rows in the finalized stream.
-       * 
- * - * int64 row_count = 1; - * - * @return The rowCount. - */ - @java.lang.Override - public long getRowCount() { - return rowCount_; - } - /** - * - * - *
-       * Number of rows in the finalized stream.
-       * 
- * - * int64 row_count = 1; - * - * @param value The rowCount to set. - * @return This builder for chaining. - */ - public Builder setRowCount(long value) { - - rowCount_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Number of rows in the finalized stream.
-       * 
- * - * int64 row_count = 1; - * - * @return This builder for chaining. - */ - public Builder clearRowCount() { - - rowCount_ = 0L; - onChanged(); - return this; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.FinalizeWriteStreamResponse) - private static final com.google.cloud.bigquery.storage.v1alpha2.Storage - .FinalizeWriteStreamResponse - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public FinalizeWriteStreamResponse parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new FinalizeWriteStreamResponse(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FinalizeWriteStreamResponse - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - public interface FlushRowsRequestOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * Required. The stream that is the target of the flush operation.
-     * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The writeStream. - */ - java.lang.String getWriteStream(); - /** - * - * - *
-     * Required. The stream that is the target of the flush operation.
-     * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for writeStream. - */ - com.google.protobuf.ByteString getWriteStreamBytes(); - - /** - * - * - *
-     * Ending offset of the flush operation. Rows before this offset (including
-     * this offset) will be flushed.
-     * 
- * - * int64 offset = 2; - * - * @return The offset. - */ - long getOffset(); - } - /** - * - * - *
-   * Request message for `FlushRows`.
-   * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest} - */ - public static final class FlushRowsRequest extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest) - FlushRowsRequestOrBuilder { - private static final long serialVersionUID = 0L; - // Use FlushRowsRequest.newBuilder() to construct. - private FlushRowsRequest(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private FlushRowsRequest() { - writeStream_ = ""; - } - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new FlushRowsRequest(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private FlushRowsRequest( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: - { - java.lang.String s = input.readStringRequireUtf8(); - - writeStream_ = s; - break; - } - case 16: - { - offset_ = input.readInt64(); - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest.Builder.class); - } - - public static final int WRITE_STREAM_FIELD_NUMBER = 1; - private volatile java.lang.Object writeStream_; - /** - * - * - *
-     * Required. The stream that is the target of the flush operation.
-     * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The writeStream. - */ - @java.lang.Override - public java.lang.String getWriteStream() { - java.lang.Object ref = writeStream_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - writeStream_ = s; - return s; - } - } - /** - * - * - *
-     * Required. The stream that is the target of the flush operation.
-     * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for writeStream. - */ - @java.lang.Override - public com.google.protobuf.ByteString getWriteStreamBytes() { - java.lang.Object ref = writeStream_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - writeStream_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int OFFSET_FIELD_NUMBER = 2; - private long offset_; - /** - * - * - *
-     * Ending offset of the flush operation. Rows before this offset (including
-     * this offset) will be flushed.
-     * 
- * - * int64 offset = 2; - * - * @return The offset. - */ - @java.lang.Override - public long getOffset() { - return offset_; - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (!getWriteStreamBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, writeStream_); - } - if (offset_ != 0L) { - output.writeInt64(2, offset_); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (!getWriteStreamBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, writeStream_); - } - if (offset_ != 0L) { - size += com.google.protobuf.CodedOutputStream.computeInt64Size(2, offset_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest other = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest) obj; - - if (!getWriteStream().equals(other.getWriteStream())) return false; - if (getOffset() != other.getOffset()) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (37 * hash) + WRITE_STREAM_FIELD_NUMBER; - hash = (53 * hash) + getWriteStream().hashCode(); - hash = (37 * hash) + OFFSET_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset()); - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom( - java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom( - byte[] data) throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom( - byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom( - java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest - parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom( - com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Request message for `FlushRows`.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest) - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequestOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest.Builder.class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - writeStream_ = ""; - - offset_ = 0L; - - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest build() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest result = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest(this); - result.writeStream_ = writeStream_; - result.offset_ = offset_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - 
java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest) other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest - .getDefaultInstance()) return this; - if (!other.getWriteStream().isEmpty()) { - writeStream_ = other.writeStream_; - onChanged(); - } - if (other.getOffset() != 0L) { - setOffset(other.getOffset()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private java.lang.Object writeStream_ = ""; - /** - * - * - *
-       * Required. The stream that is the target of the flush operation.
-       * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The writeStream. - */ - public java.lang.String getWriteStream() { - java.lang.Object ref = writeStream_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - writeStream_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * - * - *
-       * Required. The stream that is the target of the flush operation.
-       * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return The bytes for writeStream. - */ - public com.google.protobuf.ByteString getWriteStreamBytes() { - java.lang.Object ref = writeStream_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - writeStream_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * - * - *
-       * Required. The stream that is the target of the flush operation.
-       * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @param value The writeStream to set. - * @return This builder for chaining. - */ - public Builder setWriteStream(java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - writeStream_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Required. The stream that is the target of the flush operation.
-       * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @return This builder for chaining. - */ - public Builder clearWriteStream() { - - writeStream_ = getDefaultInstance().getWriteStream(); - onChanged(); - return this; - } - /** - * - * - *
-       * Required. The stream that is the target of the flush operation.
-       * 
- * - * - * string write_stream = 1 [(.google.api.field_behavior) = REQUIRED, (.google.api.resource_reference) = { ... } - * - * - * @param value The bytes for writeStream to set. - * @return This builder for chaining. - */ - public Builder setWriteStreamBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - writeStream_ = value; - onChanged(); - return this; - } - - private long offset_; - /** - * - * - *
-       * Ending offset of the flush operation. Rows before this offset (including
-       * this offset) will be flushed.
-       * 
- * - * int64 offset = 2; - * - * @return The offset. - */ - @java.lang.Override - public long getOffset() { - return offset_; - } - /** - * - * - *
-       * Ending offset of the flush operation. Rows before this offset (including
-       * this offset) will be flushed.
-       * 
- * - * int64 offset = 2; - * - * @param value The offset to set. - * @return This builder for chaining. - */ - public Builder setOffset(long value) { - - offset_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Ending offset of the flush operation. Rows before this offset (including
-       * this offset) will be flushed.
-       * 
- * - * int64 offset = 2; - * - * @return This builder for chaining. - */ - public Builder clearOffset() { - - offset_ = 0L; - onChanged(); - return this; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.FlushRowsRequest) - private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public FlushRowsRequest parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new FlushRowsRequest(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsRequest - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - public interface FlushRowsResponseOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * The rows before this offset (including this offset) are flushed.
-     * 
- * - * int64 offset = 1; - * - * @return The offset. - */ - long getOffset(); - } - /** - * - * - *
-   * Response message for `FlushRows`.
-   * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse} - */ - public static final class FlushRowsResponse extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse) - FlushRowsResponseOrBuilder { - private static final long serialVersionUID = 0L; - // Use FlushRowsResponse.newBuilder() to construct. - private FlushRowsResponse(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private FlushRowsResponse() {} - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new FlushRowsResponse(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private FlushRowsResponse( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 8: - { - offset_ = input.readInt64(); - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse.Builder.class); - } - - public static final int OFFSET_FIELD_NUMBER = 1; - private long offset_; - /** - * - * - *
-     * The rows before this offset (including this offset) are flushed.
-     * 
- * - * int64 offset = 1; - * - * @return The offset. - */ - @java.lang.Override - public long getOffset() { - return offset_; - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (offset_ != 0L) { - output.writeInt64(1, offset_); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (offset_ != 0L) { - size += com.google.protobuf.CodedOutputStream.computeInt64Size(1, offset_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse other = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse) obj; - - if (getOffset() != other.getOffset()) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (37 * hash) + OFFSET_FIELD_NUMBER; - hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getOffset()); - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom( - java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom( - byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom( - byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static 
com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom( - java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse - parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse - parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom( - com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Response message for `FlushRows`.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse) - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponseOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse.class, - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse.Builder.class); - } - - // Construct using - // com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - offset_ = 0L; - - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage - .internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse - .getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse build() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse result = - buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse result = - new com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse(this); - result.offset_ = offset_; - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return 
super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse) { - return mergeFrom( - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse) other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse - .getDefaultInstance()) return this; - if (other.getOffset() != 0L) { - setOffset(other.getOffset()); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private long offset_; - /** - * - * - *
-       * The rows before this offset (including this offset) are flushed.
-       * 
- * - * int64 offset = 1; - * - * @return The offset. - */ - @java.lang.Override - public long getOffset() { - return offset_; - } - /** - * - * - *
-       * The rows before this offset (including this offset) are flushed.
-       * 
- * - * int64 offset = 1; - * - * @param value The offset to set. - * @return This builder for chaining. - */ - public Builder setOffset(long value) { - - offset_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * The rows before this offset (including this offset) are flushed.
-       * 
- * - * int64 offset = 1; - * - * @return This builder for chaining. - */ - public Builder clearOffset() { - - offset_ = 0L; - onChanged(); - return this; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.FlushRowsResponse) - private static final com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public FlushRowsResponse parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new FlushRowsResponse(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Storage.FlushRowsResponse - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_fieldAccessorTable; - 
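// For context on the message whose builder is deleted just above:
// FlushRowsResponse carries a single int64 offset, and rows up to and
// including that offset are flushed (per its javadoc). A minimal sketch of
// typical use; it relies only on methods that appear in the deleted code
// (newBuilder, setOffset, build, getOffset), and the offset value is
// illustrative rather than taken from any real response.
static long flushRowsResponseSketch() {
  Storage.FlushRowsResponse response =
      Storage.FlushRowsResponse.newBuilder()
          .setOffset(42L) // rows 0..42 inclusive are reported as flushed
          .build();
  return response.getOffset(); // 42
}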
private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { - return descriptor; - } - - private static com.google.protobuf.Descriptors.FileDescriptor descriptor; - - static { - java.lang.String[] descriptorData = { - "\n4google/cloud/bigquery/storage/v1alpha2" - + "/storage.proto\022&google.cloud.bigquery.st" - + "orage.v1alpha2\032\034google/api/annotations.p" - + "roto\032\027google/api/client.proto\032\037google/ap" - + "i/field_behavior.proto\032\031google/api/resou" - + "rce.proto\0325google/cloud/bigquery/storage" - + "/v1alpha2/protobuf.proto\0323google/cloud/b" - + "igquery/storage/v1alpha2/stream.proto\0322g" - + "oogle/cloud/bigquery/storage/v1alpha2/ta" - + "ble.proto\032\033google/protobuf/empty.proto\032\037" - + "google/protobuf/timestamp.proto\032\036google/" - + "protobuf/wrappers.proto\032\027google/rpc/stat" - + "us.proto\"\250\001\n\030CreateWriteStreamRequest\022<\n" - + "\006parent\030\001 \001(\tB,\340A\002\372A&\n$bigquerystorage.g" - + "oogleapis.com/Table\022N\n\014write_stream\030\002 \001(" - + "\01323.google.cloud.bigquery.storage.v1alph" - + "a2.WriteStreamB\003\340A\002\"\254\003\n\021AppendRowsReques" - + "t\022H\n\014write_stream\030\001 \001(\tB2\340A\002\372A,\n*bigquer" - + "ystorage.googleapis.com/WriteStream\0220\n\006o" - + "ffset\030\002 \001(\0132\033.google.protobuf.Int64Value" - + "B\003\340A\001\022Y\n\nproto_rows\030\004 \001(\0132C.google.cloud" - + ".bigquery.storage.v1alpha2.AppendRowsReq" - + 
"uest.ProtoDataH\000\022\035\n\025ignore_unknown_field" - + "s\030\005 \001(\010\032\230\001\n\tProtoData\022J\n\rwriter_schema\030\001" - + " \001(\01323.google.cloud.bigquery.storage.v1a" - + "lpha2.ProtoSchema\022?\n\004rows\030\002 \001(\01321.google" - + ".cloud.bigquery.storage.v1alpha2.ProtoRo" - + "wsB\006\n\004rows\"\244\001\n\022AppendRowsResponse\022\020\n\006off" - + "set\030\001 \001(\003H\000\022#\n\005error\030\002 \001(\0132\022.google.rpc." - + "StatusH\000\022K\n\016updated_schema\030\003 \001(\01323.googl" - + "e.cloud.bigquery.storage.v1alpha2.TableS" - + "chemaB\n\n\010response\"Y\n\025GetWriteStreamReque" - + "st\022@\n\004name\030\001 \001(\tB2\340A\002\372A,\n*bigquerystorag" - + "e.googleapis.com/WriteStream\"z\n\036BatchCom" - + "mitWriteStreamsRequest\022<\n\006parent\030\001 \001(\tB," - + "\340A\002\372A&\n$bigquerystorage.googleapis.com/T" - + "able\022\032\n\rwrite_streams\030\002 \003(\tB\003\340A\002\"R\n\037Batc" - + "hCommitWriteStreamsResponse\022/\n\013commit_ti" - + "me\030\001 \001(\0132\032.google.protobuf.Timestamp\"^\n\032" - + "FinalizeWriteStreamRequest\022@\n\004name\030\001 \001(\t" - + "B2\340A\002\372A,\n*bigquerystorage.googleapis.com" - + "/WriteStream\"0\n\033FinalizeWriteStreamRespo" - + "nse\022\021\n\trow_count\030\001 \001(\003\"l\n\020FlushRowsReque" - + "st\022H\n\014write_stream\030\001 \001(\tB2\340A\002\372A,\n*bigque" - + "rystorage.googleapis.com/WriteStream\022\016\n\006" - + "offset\030\002 \001(\003\"#\n\021FlushRowsResponse\022\016\n\006off" - + "set\030\001 \001(\0032\253\014\n\rBigQueryWrite\022\351\001\n\021CreateWr" - + "iteStream\022@.google.cloud.bigquery.storag" - + "e.v1alpha2.CreateWriteStreamRequest\0323.go" - + "ogle.cloud.bigquery.storage.v1alpha2.Wri" - + "teStream\"]\202\323\344\223\002A\"1/v1alpha2/{parent=proj" - + "ects/*/datasets/*/tables/*}:\014write_strea" - + "m\332A\023parent,write_stream\022\344\001\n\nAppendRows\0229" - + ".google.cloud.bigquery.storage.v1alpha2." - + "AppendRowsRequest\032:.google.cloud.bigquer" - + "y.storage.v1alpha2.AppendRowsResponse\"[\202" - + "\323\344\223\002F\"A/v1alpha2/{write_stream=projects/" - + "*/datasets/*/tables/*/streams/*}:\001*\332A\014wr" - + "ite_stream(\0010\001\022\321\001\n\016GetWriteStream\022=.goog" - + "le.cloud.bigquery.storage.v1alpha2.GetWr" - + "iteStreamRequest\0323.google.cloud.bigquery" - + ".storage.v1alpha2.WriteStream\"K\202\323\344\223\002>\"9/" - + "v1alpha2/{name=projects/*/datasets/*/tab" - + "les/*/streams/*}:\001*\332A\004name\022\353\001\n\023FinalizeW" - + "riteStream\022B.google.cloud.bigquery.stora" - + "ge.v1alpha2.FinalizeWriteStreamRequest\032C" - + ".google.cloud.bigquery.storage.v1alpha2." 
- + "FinalizeWriteStreamResponse\"K\202\323\344\223\002>\"9/v1" - + "alpha2/{name=projects/*/datasets/*/table" - + "s/*/streams/*}:\001*\332A\004name\022\356\001\n\027BatchCommit" - + "WriteStreams\022F.google.cloud.bigquery.sto" - + "rage.v1alpha2.BatchCommitWriteStreamsReq" - + "uest\032G.google.cloud.bigquery.storage.v1a" - + "lpha2.BatchCommitWriteStreamsResponse\"B\202" - + "\323\344\223\0023\0221/v1alpha2/{parent=projects/*/data" - + "sets/*/tables/*}\332A\006parent\022\335\001\n\tFlushRows\022" - + "8.google.cloud.bigquery.storage.v1alpha2" - + ".FlushRowsRequest\0329.google.cloud.bigquer" - + "y.storage.v1alpha2.FlushRowsResponse\"[\202\323" - + "\344\223\002F\"A/v1alpha2/{write_stream=projects/*" - + "/datasets/*/tables/*/streams/*}:\001*\332A\014wri" - + "te_stream\032\263\001\210\002\001\312A\036bigquerystorage.google" - + "apis.com\322A\213\001https://www.googleapis.com/a" - + "uth/bigquery,https://www.googleapis.com/" - + "auth/bigquery.insertdata,https://www.goo" - + "gleapis.com/auth/cloud-platformB{\n*com.g" - + "oogle.cloud.bigquery.storage.v1alpha2ZMg" - + "oogle.golang.org/genproto/googleapis/clo" - + "ud/bigquery/storage/v1alpha2;storageb\006pr" - + "oto3" - }; - descriptor = - com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( - descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - com.google.api.AnnotationsProto.getDescriptor(), - com.google.api.ClientProto.getDescriptor(), - com.google.api.FieldBehaviorProto.getDescriptor(), - com.google.api.ResourceProto.getDescriptor(), - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.getDescriptor(), - com.google.cloud.bigquery.storage.v1alpha2.Stream.getDescriptor(), - com.google.cloud.bigquery.storage.v1alpha2.Table.getDescriptor(), - com.google.protobuf.EmptyProto.getDescriptor(), - com.google.protobuf.TimestampProto.getDescriptor(), - com.google.protobuf.WrappersProto.getDescriptor(), - com.google.rpc.StatusProto.getDescriptor(), - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_CreateWriteStreamRequest_descriptor, - new java.lang.String[] { - "Parent", "WriteStream", - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor, - new java.lang.String[] { - "WriteStream", "Offset", "ProtoRows", "IgnoreUnknownFields", "Rows", - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_descriptor = - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_descriptor - .getNestedTypes() - .get(0); - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsRequest_ProtoData_descriptor, - new java.lang.String[] { - "WriterSchema", "Rows", - }); - 
internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_descriptor = - getDescriptor().getMessageTypes().get(2); - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_AppendRowsResponse_descriptor, - new java.lang.String[] { - "Offset", "Error", "UpdatedSchema", "Response", - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_descriptor = - getDescriptor().getMessageTypes().get(3); - internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_GetWriteStreamRequest_descriptor, - new java.lang.String[] { - "Name", - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_descriptor = - getDescriptor().getMessageTypes().get(4); - internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsRequest_descriptor, - new java.lang.String[] { - "Parent", "WriteStreams", - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_descriptor = - getDescriptor().getMessageTypes().get(5); - internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_BatchCommitWriteStreamsResponse_descriptor, - new java.lang.String[] { - "CommitTime", - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_descriptor = - getDescriptor().getMessageTypes().get(6); - internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamRequest_descriptor, - new java.lang.String[] { - "Name", - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor = - getDescriptor().getMessageTypes().get(7); - internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_FinalizeWriteStreamResponse_descriptor, - new java.lang.String[] { - "RowCount", - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_descriptor = - getDescriptor().getMessageTypes().get(8); - internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsRequest_descriptor, - new java.lang.String[] { - "WriteStream", "Offset", - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_descriptor = - getDescriptor().getMessageTypes().get(9); - internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - 
internal_static_google_cloud_bigquery_storage_v1alpha2_FlushRowsResponse_descriptor, - new java.lang.String[] { - "Offset", - }); - com.google.protobuf.ExtensionRegistry registry = - com.google.protobuf.ExtensionRegistry.newInstance(); - registry.add(com.google.api.ClientProto.defaultHost); - registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); - registry.add(com.google.api.AnnotationsProto.http); - registry.add(com.google.api.ClientProto.methodSignature); - registry.add(com.google.api.ClientProto.oauthScopes); - registry.add(com.google.api.ResourceProto.resourceReference); - com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( - descriptor, registry); - com.google.api.AnnotationsProto.getDescriptor(); - com.google.api.ClientProto.getDescriptor(); - com.google.api.FieldBehaviorProto.getDescriptor(); - com.google.api.ResourceProto.getDescriptor(); - com.google.cloud.bigquery.storage.v1alpha2.ProtoBufProto.getDescriptor(); - com.google.cloud.bigquery.storage.v1alpha2.Stream.getDescriptor(); - com.google.cloud.bigquery.storage.v1alpha2.Table.getDescriptor(); - com.google.protobuf.EmptyProto.getDescriptor(); - com.google.protobuf.TimestampProto.getDescriptor(); - com.google.protobuf.WrappersProto.getDescriptor(); - com.google.rpc.StatusProto.getDescriptor(); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java deleted file mode 100644 index a072fab6ad..0000000000 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java +++ /dev/null @@ -1,2430 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ -// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: google/cloud/bigquery/storage/v1alpha2/stream.proto - -package com.google.cloud.bigquery.storage.v1alpha2; - -public final class Stream { - private Stream() {} - - public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} - - public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { - registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); - } - - public interface WriteStreamOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.WriteStream) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * Output only. Name of the stream, in the form
-     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-     * 
- * - * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * @return The name. - */ - java.lang.String getName(); - /** - * - * - *
-     * Output only. Name of the stream, in the form
-     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-     * 
- * - * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * @return The bytes for name. - */ - com.google.protobuf.ByteString getNameBytes(); - - /** - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; - * - * - * @return The enum numeric value on the wire for type. - */ - int getTypeValue(); - /** - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; - * - * - * @return The type. - */ - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type getType(); - - /** - * - * - *
-     * Output only. Create time of the stream. For the _default stream, this is the
-     * creation_time of the table.
-     * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return Whether the createTime field is set. - */ - boolean hasCreateTime(); - /** - * - * - *
-     * Output only. Create time of the stream. For the _default stream, this is the
-     * creation_time of the table.
-     * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return The createTime. - */ - com.google.protobuf.Timestamp getCreateTime(); - /** - * - * - *
-     * Output only. Create time of the stream. For the _default stream, this is the
-     * creation_time of the table.
-     * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder(); - - /** - * - * - *
-     * Output only. Commit time of the stream.
-     * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-     * means it is not committed.
-     * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return Whether the commitTime field is set. - */ - boolean hasCommitTime(); - /** - * - * - *
-     * Output only. Commit time of the stream.
-     * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-     * means it is not committed.
-     * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return The commitTime. - */ - com.google.protobuf.Timestamp getCommitTime(); - /** - * - * - *
-     * Output only. Commit time of the stream.
-     * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-     * means it is not committed.
-     * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder(); - - /** - * - * - *
-     * Output only. The schema of the destination table. It is only returned in
-     * `CreateWriteStream` response. Caller should generate data that's
-     * compatible with this schema to send in initial `AppendRowsRequest`.
-     * The table schema could go out of date during the life time of the stream.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return Whether the tableSchema field is set. - */ - boolean hasTableSchema(); - /** - * - * - *
-     * Output only. The schema of the destination table. It is only returned in
-     * `CreateWriteStream` response. Caller should generate data that's
-     * compatible with this schema to send in initial `AppendRowsRequest`.
-     * The table schema could go out of date during the life time of the stream.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return The tableSchema. - */ - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema getTableSchema(); - /** - * - * - *
-     * Output only. The schema of the destination table. It is only returned in
-     * `CreateWriteStream` response. Caller should generate data that's
-     * compatible with this schema to send in initial `AppendRowsRequest`.
-     * The table schema could go out of date during the life time of the stream.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder getTableSchemaOrBuilder(); - - /** - * - * - *
-     * Id set by client to annotate its identity.
-     * 
- * - * string external_id = 6; - * - * @return The externalId. - */ - java.lang.String getExternalId(); - /** - * - * - *
-     * Id set by client to annotate its identity.
-     * 
- * - * string external_id = 6; - * - * @return The bytes for externalId. - */ - com.google.protobuf.ByteString getExternalIdBytes(); - } - /** - * - * - *
-   * Information about a single stream that gets data inside the storage system.
-   * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.WriteStream} - */ - public static final class WriteStream extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.WriteStream) - WriteStreamOrBuilder { - private static final long serialVersionUID = 0L; - // Use WriteStream.newBuilder() to construct. - private WriteStream(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private WriteStream() { - name_ = ""; - type_ = 0; - externalId_ = ""; - } - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new WriteStream(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private WriteStream( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: - { - java.lang.String s = input.readStringRequireUtf8(); - - name_ = s; - break; - } - case 16: - { - int rawValue = input.readEnum(); - - type_ = rawValue; - break; - } - case 26: - { - com.google.protobuf.Timestamp.Builder subBuilder = null; - if (createTime_ != null) { - subBuilder = createTime_.toBuilder(); - } - createTime_ = - input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(createTime_); - createTime_ = subBuilder.buildPartial(); - } - - break; - } - case 34: - { - com.google.protobuf.Timestamp.Builder subBuilder = null; - if (commitTime_ != null) { - subBuilder = commitTime_.toBuilder(); - } - commitTime_ = - input.readMessage(com.google.protobuf.Timestamp.parser(), extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(commitTime_); - commitTime_ = subBuilder.buildPartial(); - } - - break; - } - case 42: - { - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder subBuilder = - null; - if (tableSchema_ != null) { - subBuilder = tableSchema_.toBuilder(); - } - tableSchema_ = - input.readMessage( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.parser(), - extensionRegistry); - if (subBuilder != null) { - subBuilder.mergeFrom(tableSchema_); - tableSchema_ = subBuilder.buildPartial(); - } - - break; - } - case 50: - { - java.lang.String s = input.readStringRequireUtf8(); - - externalId_ = s; - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Stream - 
.internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Stream - .internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.class, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder.class); - } - - /** Protobuf enum {@code google.cloud.bigquery.storage.v1alpha2.WriteStream.Type} */ - public enum Type implements com.google.protobuf.ProtocolMessageEnum { - /** - * - * - *
-       * Unknown type.
-       * 
- * - * TYPE_UNSPECIFIED = 0; - */ - TYPE_UNSPECIFIED(0), - /** - * - * - *
-       * Data will commit automatically and appear as soon as the write is
-       * acknowledged.
-       * 
- * - * COMMITTED = 1; - */ - COMMITTED(1), - /** - * - * - *
-       * Data is invisible until the stream is committed.
-       * 
- * - * PENDING = 2; - */ - PENDING(2), - /** - * - * - *
-       * Data is only visible up to the offset to which it was flushed.
-       * 
- * - * BUFFERED = 3; - */ - BUFFERED(3), - UNRECOGNIZED(-1), - ; - - /** - * - * - *
-       * Unknown type.
-       * 
- * - * TYPE_UNSPECIFIED = 0; - */ - public static final int TYPE_UNSPECIFIED_VALUE = 0; - /** - * - * - *
-       * Data will commit automatically and appear as soon as the write is
-       * acknowledged.
-       * 
- * - * COMMITTED = 1; - */ - public static final int COMMITTED_VALUE = 1; - /** - * - * - *
-       * Data is invisible until the stream is committed.
-       * 
- * - * PENDING = 2; - */ - public static final int PENDING_VALUE = 2; - /** - * - * - *
-       * Data is only visible up to the offset to which it was flushed.
-       * 
- * - * BUFFERED = 3; - */ - public static final int BUFFERED_VALUE = 3; - - public final int getNumber() { - if (this == UNRECOGNIZED) { - throw new java.lang.IllegalArgumentException( - "Can't get the number of an unknown enum value."); - } - return value; - } - - /** - * @param value The numeric wire value of the corresponding enum entry. - * @return The enum associated with the given numeric wire value. - * @deprecated Use {@link #forNumber(int)} instead. - */ - @java.lang.Deprecated - public static Type valueOf(int value) { - return forNumber(value); - } - - /** - * @param value The numeric wire value of the corresponding enum entry. - * @return The enum associated with the given numeric wire value. - */ - public static Type forNumber(int value) { - switch (value) { - case 0: - return TYPE_UNSPECIFIED; - case 1: - return COMMITTED; - case 2: - return PENDING; - case 3: - return BUFFERED; - default: - return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { - return internalValueMap; - } - - private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public Type findValueByNumber(int number) { - return Type.forNumber(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { - if (this == UNRECOGNIZED) { - throw new java.lang.IllegalStateException( - "Can't get the descriptor of an unrecognized enum value."); - } - return getDescriptor().getValues().get(ordinal()); - } - - public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { - return getDescriptor(); - } - - public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.getDescriptor() - .getEnumTypes() - .get(0); - } - - private static final Type[] VALUES = values(); - - public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); - } - if (desc.getIndex() == -1) { - return UNRECOGNIZED; - } - return VALUES[desc.getIndex()]; - } - - private final int value; - - private Type(int value) { - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1alpha2.WriteStream.Type) - } - - public static final int NAME_FIELD_NUMBER = 1; - private volatile java.lang.Object name_; - /** - * - * - *
-     * Output only. Name of the stream, in the form
-     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-     * 
- * - * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * @return The name. - */ - @java.lang.Override - public java.lang.String getName() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - name_ = s; - return s; - } - } - /** - * - * - *
-     * Output only. Name of the stream, in the form
-     * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-     * 
- * - * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * @return The bytes for name. - */ - @java.lang.Override - public com.google.protobuf.ByteString getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int TYPE_FIELD_NUMBER = 2; - private int type_; - /** - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; - * - * - * @return The enum numeric value on the wire for type. - */ - @java.lang.Override - public int getTypeValue() { - return type_; - } - /** - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; - * - * - * @return The type. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type getType() { - @SuppressWarnings("deprecation") - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type result = - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type.valueOf(type_); - return result == null - ? com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type.UNRECOGNIZED - : result; - } - - public static final int CREATE_TIME_FIELD_NUMBER = 3; - private com.google.protobuf.Timestamp createTime_; - /** - * - * - *
-     * Output only. Create time of the stream. For the _default stream, this is the
-     * creation_time of the table.
-     * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return Whether the createTime field is set. - */ - @java.lang.Override - public boolean hasCreateTime() { - return createTime_ != null; - } - /** - * - * - *
-     * Output only. Create time of the stream. For the _default stream, this is the
-     * creation_time of the table.
-     * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return The createTime. - */ - @java.lang.Override - public com.google.protobuf.Timestamp getCreateTime() { - return createTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : createTime_; - } - /** - * - * - *
-     * Output only. Create time of the stream. For the _default stream, this is the
-     * creation_time of the table.
-     * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - @java.lang.Override - public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { - return getCreateTime(); - } - - public static final int COMMIT_TIME_FIELD_NUMBER = 4; - private com.google.protobuf.Timestamp commitTime_; - /** - * - * - *
-     * Output only. Commit time of the stream.
-     * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-     * means it is not committed.
-     * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return Whether the commitTime field is set. - */ - @java.lang.Override - public boolean hasCommitTime() { - return commitTime_ != null; - } - /** - * - * - *
-     * Output only. Commit time of the stream.
-     * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-     * means it is not committed.
-     * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return The commitTime. - */ - @java.lang.Override - public com.google.protobuf.Timestamp getCommitTime() { - return commitTime_ == null ? com.google.protobuf.Timestamp.getDefaultInstance() : commitTime_; - } - /** - * - * - *
-     * Output only. Commit time of the stream.
-     * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-     * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-     * means it is not committed.
-     * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - @java.lang.Override - public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { - return getCommitTime(); - } - - public static final int TABLE_SCHEMA_FIELD_NUMBER = 5; - private com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema tableSchema_; - /** - * - * - *
-     * Output only. The schema of the destination table. It is only returned in
-     * `CreateWriteStream` response. Caller should generate data that's
-     * compatible with this schema to send in initial `AppendRowsRequest`.
-     * The table schema could go out of date during the life time of the stream.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return Whether the tableSchema field is set. - */ - @java.lang.Override - public boolean hasTableSchema() { - return tableSchema_ != null; - } - /** - * - * - *
-     * Output only. The schema of the destination table. It is only returned in
-     * `CreateWriteStream` response. Caller should generate data that's
-     * compatible with this schema to send in initial `AppendRowsRequest`.
-     * The table schema could go out of date during the life time of the stream.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return The tableSchema. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema getTableSchema() { - return tableSchema_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance() - : tableSchema_; - } - /** - * - * - *
-     * Output only. The schema of the destination table. It is only returned in
-     * `CreateWriteStream` response. Caller should generate data that's
-     * compatible with this schema to send in initial `AppendRowsRequest`.
-     * The table schema could go out of date during the life time of the stream.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder - getTableSchemaOrBuilder() { - return getTableSchema(); - } - - public static final int EXTERNAL_ID_FIELD_NUMBER = 6; - private volatile java.lang.Object externalId_; - /** - * - * - *
-     * Id set by client to annotate its identity.
-     * 
- * - * string external_id = 6; - * - * @return The externalId. - */ - @java.lang.Override - public java.lang.String getExternalId() { - java.lang.Object ref = externalId_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - externalId_ = s; - return s; - } - } - /** - * - * - *
-     * Id set by client to annotate its identity.
-     * 
- * - * string external_id = 6; - * - * @return The bytes for externalId. - */ - @java.lang.Override - public com.google.protobuf.ByteString getExternalIdBytes() { - java.lang.Object ref = externalId_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - externalId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - if (!getNameBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_); - } - if (type_ - != com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type.TYPE_UNSPECIFIED - .getNumber()) { - output.writeEnum(2, type_); - } - if (createTime_ != null) { - output.writeMessage(3, getCreateTime()); - } - if (commitTime_ != null) { - output.writeMessage(4, getCommitTime()); - } - if (tableSchema_ != null) { - output.writeMessage(5, getTableSchema()); - } - if (!getExternalIdBytes().isEmpty()) { - com.google.protobuf.GeneratedMessageV3.writeString(output, 6, externalId_); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - if (!getNameBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_); - } - if (type_ - != com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type.TYPE_UNSPECIFIED - .getNumber()) { - size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, type_); - } - if (createTime_ != null) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(3, getCreateTime()); - } - if (commitTime_ != null) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, getCommitTime()); - } - if (tableSchema_ != null) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(5, getTableSchema()); - } - if (!getExternalIdBytes().isEmpty()) { - size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, externalId_); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream other = - (com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream) obj; - - if (!getName().equals(other.getName())) return false; - if (type_ != other.type_) return false; - if (hasCreateTime() != other.hasCreateTime()) return false; - if (hasCreateTime()) { - if (!getCreateTime().equals(other.getCreateTime())) return false; - } - if (hasCommitTime() != other.hasCommitTime()) return false; - if (hasCommitTime()) { - if (!getCommitTime().equals(other.getCommitTime())) return false; - } - if (hasTableSchema() != other.hasTableSchema()) return false; - if (hasTableSchema()) { - if (!getTableSchema().equals(other.getTableSchema())) return false; - } - if (!getExternalId().equals(other.getExternalId())) 
return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - hash = (37 * hash) + NAME_FIELD_NUMBER; - hash = (53 * hash) + getName().hashCode(); - hash = (37 * hash) + TYPE_FIELD_NUMBER; - hash = (53 * hash) + type_; - if (hasCreateTime()) { - hash = (37 * hash) + CREATE_TIME_FIELD_NUMBER; - hash = (53 * hash) + getCreateTime().hashCode(); - } - if (hasCommitTime()) { - hash = (37 * hash) + COMMIT_TIME_FIELD_NUMBER; - hash = (53 * hash) + getCommitTime().hashCode(); - } - if (hasTableSchema()) { - hash = (37 * hash) + TABLE_SCHEMA_FIELD_NUMBER; - hash = (53 * hash) + getTableSchema().hashCode(); - } - hash = (37 * hash) + EXTERNAL_ID_FIELD_NUMBER; - hash = (53 * hash) + getExternalId().hashCode(); - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( - java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( - byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( - byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( - java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseDelimitedFrom( - java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite 
extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( - com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Information about a single stream that gets data inside the storage system.
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.WriteStream} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.WriteStream) - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStreamOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Stream - .internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Stream - .internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.class, - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Builder.class); - } - - // Construct using com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {} - } - - @java.lang.Override - public Builder clear() { - super.clear(); - name_ = ""; - - type_ = 0; - - if (createTimeBuilder_ == null) { - createTime_ = null; - } else { - createTime_ = null; - createTimeBuilder_ = null; - } - if (commitTimeBuilder_ == null) { - commitTime_ = null; - } else { - commitTime_ = null; - commitTimeBuilder_ = null; - } - if (tableSchemaBuilder_ == null) { - tableSchema_ = null; - } else { - tableSchema_ = null; - tableSchemaBuilder_ = null; - } - externalId_ = ""; - - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Stream - .internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream build() { - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream result = - new com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream(this); - result.name_ = name_; - result.type_ = type_; - if (createTimeBuilder_ == null) { - result.createTime_ = createTime_; - } else { - result.createTime_ = createTimeBuilder_.build(); - } - if (commitTimeBuilder_ == null) { - result.commitTime_ = commitTime_; - } else { - result.commitTime_ = commitTimeBuilder_.build(); - } - if (tableSchemaBuilder_ == null) { - result.tableSchema_ = tableSchema_; - } else { - result.tableSchema_ = tableSchemaBuilder_.build(); - } - result.externalId_ = externalId_; - 
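// A note on the WriteStream semantics removed with this class, restating the
// javadoc above: a COMMITTED stream carries a commit_time equal to its
// create_time, while a PENDING stream has an empty commit_time until it is
// committed. A sketch of the check a caller would make; the `stream` value is
// assumed to come from the removed BigQueryWriteClient, which is not shown here:
//
//   Stream.WriteStream stream = ...; // e.g. returned by createWriteStream
//   boolean committed = stream.hasCommitTime();
//   Stream.WriteStream.Type type = stream.getType(); // COMMITTED, PENDING, or BUFFERED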
onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - @java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream) { - return mergeFrom((com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream) other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom( - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.getDefaultInstance()) - return this; - if (!other.getName().isEmpty()) { - name_ = other.name_; - onChanged(); - } - if (other.type_ != 0) { - setTypeValue(other.getTypeValue()); - } - if (other.hasCreateTime()) { - mergeCreateTime(other.getCreateTime()); - } - if (other.hasCommitTime()) { - mergeCommitTime(other.getCommitTime()); - } - if (other.hasTableSchema()) { - mergeTableSchema(other.getTableSchema()); - } - if (!other.getExternalId().isEmpty()) { - externalId_ = other.externalId_; - onChanged(); - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private java.lang.Object name_ = ""; - /** - * - * - *
-       * Output only. Name of the stream, in the form
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * @return The name. - */ - public java.lang.String getName() { - java.lang.Object ref = name_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - name_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * - * - *
-       * Output only. Name of the stream, in the form
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * @return The bytes for name. - */ - public com.google.protobuf.ByteString getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * - * - *
-       * Output only. Name of the stream, in the form
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * @param value The name to set. - * @return This builder for chaining. - */ - public Builder setName(java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - name_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Output only. Name of the stream, in the form
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * @return This builder for chaining. - */ - public Builder clearName() { - - name_ = getDefaultInstance().getName(); - onChanged(); - return this; - } - /** - * - * - *
-       * Output only. Name of the stream, in the form
-       * `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`.
-       * 
- * - * string name = 1 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * @param value The bytes for name to set. - * @return This builder for chaining. - */ - public Builder setNameBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - name_ = value; - onChanged(); - return this; - } - - private int type_ = 0; - /** - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; - * - * - * @return The enum numeric value on the wire for type. - */ - @java.lang.Override - public int getTypeValue() { - return type_; - } - /** - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; - * - * - * @param value The enum numeric value on the wire for type to set. - * @return This builder for chaining. - */ - public Builder setTypeValue(int value) { - - type_ = value; - onChanged(); - return this; - } - /** - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; - * - * - * @return The type. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type getType() { - @SuppressWarnings("deprecation") - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type result = - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type.valueOf(type_); - return result == null - ? com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type.UNRECOGNIZED - : result; - } - /** - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; - * - * - * @param value The type to set. - * @return This builder for chaining. - */ - public Builder setType( - com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream.Type value) { - if (value == null) { - throw new NullPointerException(); - } - - type_ = value.getNumber(); - onChanged(); - return this; - } - /** - * - * .google.cloud.bigquery.storage.v1alpha2.WriteStream.Type type = 2 [(.google.api.field_behavior) = IMMUTABLE]; - * - * - * @return This builder for chaining. - */ - public Builder clearType() { - - type_ = 0; - onChanged(); - return this; - } - - private com.google.protobuf.Timestamp createTime_; - private com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.Timestamp, - com.google.protobuf.Timestamp.Builder, - com.google.protobuf.TimestampOrBuilder> - createTimeBuilder_; - /** - * - * - *
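The string accessors deleted above follow the generated lazy-decoding pattern: name_ holds either a String or a ByteString, and getName()/getNameBytes() convert on demand and cache the result. A short sketch (the resource name literal is hypothetical; name is OUTPUT_ONLY, so servers normally populate it):

    import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;
    import com.google.protobuf.ByteString;

    public class NameAccessorSketch {
      public static void main(String[] args) {
        WriteStream ws = WriteStream.newBuilder()
            .setName("projects/p/datasets/d/tables/t/streams/s") // hypothetical value
            .build();
        // Both views come from the same cached name_ field; the UTF-8 conversion
        // in getName()/getNameBytes() happens at most once per direction.
        String name = ws.getName();
        ByteString bytes = ws.getNameBytes();
        System.out.println(name.equals(bytes.toStringUtf8())); // true
      }
    }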
-       * Output only. Create time of the stream. For the _default stream, this is the
-       * creation_time of the table.
-       * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return Whether the createTime field is set. - */ - public boolean hasCreateTime() { - return createTimeBuilder_ != null || createTime_ != null; - } - /** - * - * - *
-       * Output only. Create time of the stream. For the _default stream, this is the
-       * creation_time of the table.
-       * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return The createTime. - */ - public com.google.protobuf.Timestamp getCreateTime() { - if (createTimeBuilder_ == null) { - return createTime_ == null - ? com.google.protobuf.Timestamp.getDefaultInstance() - : createTime_; - } else { - return createTimeBuilder_.getMessage(); - } - } - /** - * - * - *
-       * Output only. Create time of the stream. For the _default stream, this is the
-       * creation_time of the table.
-       * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public Builder setCreateTime(com.google.protobuf.Timestamp value) { - if (createTimeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - createTime_ = value; - onChanged(); - } else { - createTimeBuilder_.setMessage(value); - } - - return this; - } - /** - * - * - *
-       * Output only. Create time of the stream. For the _default stream, this is the
-       * creation_time of the table.
-       * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public Builder setCreateTime(com.google.protobuf.Timestamp.Builder builderForValue) { - if (createTimeBuilder_ == null) { - createTime_ = builderForValue.build(); - onChanged(); - } else { - createTimeBuilder_.setMessage(builderForValue.build()); - } - - return this; - } - /** - * - * - *
-       * Output only. Create time of the stream. For the _default stream, this is the
-       * creation_time of the table.
-       * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public Builder mergeCreateTime(com.google.protobuf.Timestamp value) { - if (createTimeBuilder_ == null) { - if (createTime_ != null) { - createTime_ = - com.google.protobuf.Timestamp.newBuilder(createTime_) - .mergeFrom(value) - .buildPartial(); - } else { - createTime_ = value; - } - onChanged(); - } else { - createTimeBuilder_.mergeFrom(value); - } - - return this; - } - /** - * - * - *
-       * Output only. Create time of the stream. For the _default stream, this is the
-       * creation_time of the table.
-       * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public Builder clearCreateTime() { - if (createTimeBuilder_ == null) { - createTime_ = null; - onChanged(); - } else { - createTime_ = null; - createTimeBuilder_ = null; - } - - return this; - } - /** - * - * - *
-       * Output only. Create time of the stream. For the _default stream, this is the
-       * creation_time of the table.
-       * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public com.google.protobuf.Timestamp.Builder getCreateTimeBuilder() { - - onChanged(); - return getCreateTimeFieldBuilder().getBuilder(); - } - /** - * - * - *
-       * Output only. Create time of the stream. For the _default stream, this is the
-       * creation_time of the table.
-       * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public com.google.protobuf.TimestampOrBuilder getCreateTimeOrBuilder() { - if (createTimeBuilder_ != null) { - return createTimeBuilder_.getMessageOrBuilder(); - } else { - return createTime_ == null - ? com.google.protobuf.Timestamp.getDefaultInstance() - : createTime_; - } - } - /** - * - * - *
-       * Output only. Create time of the stream. For the _default stream, this is the
-       * creation_time of the table.
-       * 
- * - * - * .google.protobuf.Timestamp create_time = 3 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.Timestamp, - com.google.protobuf.Timestamp.Builder, - com.google.protobuf.TimestampOrBuilder> - getCreateTimeFieldBuilder() { - if (createTimeBuilder_ == null) { - createTimeBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.Timestamp, - com.google.protobuf.Timestamp.Builder, - com.google.protobuf.TimestampOrBuilder>( - getCreateTime(), getParentForChildren(), isClean()); - createTime_ = null; - } - return createTimeBuilder_; - } - - private com.google.protobuf.Timestamp commitTime_; - private com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.Timestamp, - com.google.protobuf.Timestamp.Builder, - com.google.protobuf.TimestampOrBuilder> - commitTimeBuilder_; - /** - * - * - *
-       * Output only. Commit time of the stream.
-       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-       * means it is not committed.
-       * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return Whether the commitTime field is set. - */ - public boolean hasCommitTime() { - return commitTimeBuilder_ != null || commitTime_ != null; - } - /** - * - * - *
-       * Output only. Commit time of the stream.
-       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-       * means it is not committed.
-       * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return The commitTime. - */ - public com.google.protobuf.Timestamp getCommitTime() { - if (commitTimeBuilder_ == null) { - return commitTime_ == null - ? com.google.protobuf.Timestamp.getDefaultInstance() - : commitTime_; - } else { - return commitTimeBuilder_.getMessage(); - } - } - /** - * - * - *
-       * Output only. Commit time of the stream.
-       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-       * means it is not committed.
-       * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public Builder setCommitTime(com.google.protobuf.Timestamp value) { - if (commitTimeBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - commitTime_ = value; - onChanged(); - } else { - commitTimeBuilder_.setMessage(value); - } - - return this; - } - /** - * - * - *
-       * Output only. Commit time of the stream.
-       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-       * means it is not committed.
-       * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public Builder setCommitTime(com.google.protobuf.Timestamp.Builder builderForValue) { - if (commitTimeBuilder_ == null) { - commitTime_ = builderForValue.build(); - onChanged(); - } else { - commitTimeBuilder_.setMessage(builderForValue.build()); - } - - return this; - } - /** - * - * - *
-       * Output only. Commit time of the stream.
-       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-       * means it is not committed.
-       * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public Builder mergeCommitTime(com.google.protobuf.Timestamp value) { - if (commitTimeBuilder_ == null) { - if (commitTime_ != null) { - commitTime_ = - com.google.protobuf.Timestamp.newBuilder(commitTime_) - .mergeFrom(value) - .buildPartial(); - } else { - commitTime_ = value; - } - onChanged(); - } else { - commitTimeBuilder_.mergeFrom(value); - } - - return this; - } - /** - * - * - *
-       * Output only. Commit time of the stream.
-       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-       * means it is not committed.
-       * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public Builder clearCommitTime() { - if (commitTimeBuilder_ == null) { - commitTime_ = null; - onChanged(); - } else { - commitTime_ = null; - commitTimeBuilder_ = null; - } - - return this; - } - /** - * - * - *
-       * Output only. Commit time of the stream.
-       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-       * means it is not committed.
-       * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public com.google.protobuf.Timestamp.Builder getCommitTimeBuilder() { - - onChanged(); - return getCommitTimeFieldBuilder().getBuilder(); - } - /** - * - * - *
-       * Output only. Commit time of the stream.
-       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-       * means it is not committed.
-       * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public com.google.protobuf.TimestampOrBuilder getCommitTimeOrBuilder() { - if (commitTimeBuilder_ != null) { - return commitTimeBuilder_.getMessageOrBuilder(); - } else { - return commitTime_ == null - ? com.google.protobuf.Timestamp.getDefaultInstance() - : commitTime_; - } - } - /** - * - * - *
-       * Output only. Commit time of the stream.
-       * If a stream is of `COMMITTED` type, then it will have a commit_time same as
-       * `create_time`. If the stream is of `PENDING` type, commit_time being empty
-       * means it is not committed.
-       * 
- * - * - * .google.protobuf.Timestamp commit_time = 4 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.Timestamp, - com.google.protobuf.Timestamp.Builder, - com.google.protobuf.TimestampOrBuilder> - getCommitTimeFieldBuilder() { - if (commitTimeBuilder_ == null) { - commitTimeBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.protobuf.Timestamp, - com.google.protobuf.Timestamp.Builder, - com.google.protobuf.TimestampOrBuilder>( - getCommitTime(), getParentForChildren(), isClean()); - commitTime_ = null; - } - return commitTimeBuilder_; - } - - private com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema tableSchema_; - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder> - tableSchemaBuilder_; - /** - * - * - *
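The commit_time documentation deleted above pins down the stream's commit semantics: a COMMITTED stream reports a commit_time equal to create_time, while a PENDING stream with commit_time unset has not been committed yet. A hedged sketch of that check against the removed classes:

    import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;

    public class CommitStateSketch {
      // Per the deleted field comment: for PENDING streams, an unset
      // commit_time means the stream has not been committed yet.
      static boolean isCommitted(WriteStream ws) {
        return ws.getType() != WriteStream.Type.PENDING || ws.hasCommitTime();
      }
    }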
-       * Output only. The schema of the destination table. It is only returned in
-       * `CreateWriteStream` response. Caller should generate data that's
-       * compatible with this schema to send in initial `AppendRowsRequest`.
-       * The table schema could go out of date during the lifetime of the stream.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return Whether the tableSchema field is set. - */ - public boolean hasTableSchema() { - return tableSchemaBuilder_ != null || tableSchema_ != null; - } - /** - * - * - *
-       * Output only. The schema of the destination table. It is only returned in
-       * `CreateWriteStream` response. Caller should generate data that's
-       * compatible with this schema to send in initial `AppendRowsRequest`.
-       * The table schema could go out of date during the lifetime of the stream.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - * - * @return The tableSchema. - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema getTableSchema() { - if (tableSchemaBuilder_ == null) { - return tableSchema_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance() - : tableSchema_; - } else { - return tableSchemaBuilder_.getMessage(); - } - } - /** - * - * - *
-       * Output only. The schema of the destination table. It is only returned in
-       * `CreateWriteStream` response. Caller should generate data that's
-       * compatible with this schema to send in initial `AppendRowsRequest`.
-       * The table schema could go out of date during the lifetime of the stream.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public Builder setTableSchema( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema value) { - if (tableSchemaBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - tableSchema_ = value; - onChanged(); - } else { - tableSchemaBuilder_.setMessage(value); - } - - return this; - } - /** - * - * - *
-       * Output only. The schema of the destination table. It is only returned in
-       * `CreateWriteStream` response. Caller should generate data that's
-       * compatible with this schema to send in initial `AppendRowsRequest`.
-       * The table schema could go out of date during the lifetime of the stream.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public Builder setTableSchema( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder builderForValue) { - if (tableSchemaBuilder_ == null) { - tableSchema_ = builderForValue.build(); - onChanged(); - } else { - tableSchemaBuilder_.setMessage(builderForValue.build()); - } - - return this; - } - /** - * - * - *
-       * Output only. The schema of the destination table. It is only returned in
-       * `CreateWriteStream` response. Caller should generate data that's
-       * compatible with this schema to send in initial `AppendRowsRequest`.
-       * The table schema could go out of date during the lifetime of the stream.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public Builder mergeTableSchema( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema value) { - if (tableSchemaBuilder_ == null) { - if (tableSchema_ != null) { - tableSchema_ = - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.newBuilder( - tableSchema_) - .mergeFrom(value) - .buildPartial(); - } else { - tableSchema_ = value; - } - onChanged(); - } else { - tableSchemaBuilder_.mergeFrom(value); - } - - return this; - } - /** - * - * - *
-       * Output only. The schema of the destination table. It is only returned in
-       * `CreateWriteStream` response. Caller should generate data that's
-       * compatible with this schema to send in initial `AppendRowsRequest`.
-       * The table schema could go out of date during the lifetime of the stream.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public Builder clearTableSchema() { - if (tableSchemaBuilder_ == null) { - tableSchema_ = null; - onChanged(); - } else { - tableSchema_ = null; - tableSchemaBuilder_ = null; - } - - return this; - } - /** - * - * - *
-       * Output only. The schema of the destination table. It is only returned in
-       * `CreateWriteStream` response. Caller should generate data that's
-       * compatible with this schema to send in initial `AppendRowsRequest`.
-       * The table schema could go out of date during the lifetime of the stream.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder - getTableSchemaBuilder() { - - onChanged(); - return getTableSchemaFieldBuilder().getBuilder(); - } - /** - * - * - *
-       * Output only. The schema of the destination table. It is only returned in
-       * `CreateWriteStream` response. Caller should generate data that's
-       * compatible with this schema to send in initial `AppendRowsRequest`.
-       * The table schema could go out of date during the lifetime of the stream.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder - getTableSchemaOrBuilder() { - if (tableSchemaBuilder_ != null) { - return tableSchemaBuilder_.getMessageOrBuilder(); - } else { - return tableSchema_ == null - ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance() - : tableSchema_; - } - } - /** - * - * - *
-       * Output only. The schema of the destination table. It is only returned in
-       * `CreateWriteStream` response. Caller should generate data that's
-       * compatible with this schema to send in initial `AppendRowsRequest`.
-       * The table schema could go out of date during the lifetime of the stream.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableSchema table_schema = 5 [(.google.api.field_behavior) = OUTPUT_ONLY]; - * - */ - private com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder> - getTableSchemaFieldBuilder() { - if (tableSchemaBuilder_ == null) { - tableSchemaBuilder_ = - new com.google.protobuf.SingleFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder>( - getTableSchema(), getParentForChildren(), isClean()); - tableSchema_ = null; - } - return tableSchemaBuilder_; - } - - private java.lang.Object externalId_ = ""; - /** - * - * - *
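As the table_schema comments deleted above note, the schema is only returned on a CreateWriteStream response, and callers were expected to shape their initial AppendRowsRequest to match it. A sketch that walks whatever schema came back; the `stream` argument is assumed to be a server response, since locally built messages leave this OUTPUT_ONLY field unset:

    import com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream;
    import com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema;

    public class SchemaWalkSketch {
      static void printFields(WriteStream stream) {
        if (!stream.hasTableSchema()) {
          return; // only CreateWriteStream responses carry the schema
        }
        for (TableFieldSchema field : stream.getTableSchema().getFieldsList()) {
          System.out.println(field.getName() + " : " + field.getType());
        }
      }
    }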
-       * Id set by client to annotate its identity.
-       * 
- * - * string external_id = 6; - * - * @return The externalId. - */ - public java.lang.String getExternalId() { - java.lang.Object ref = externalId_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - externalId_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * - * - *
-       * Id set by client to annotate its identity.
-       * 
- * - * string external_id = 6; - * - * @return The bytes for externalId. - */ - public com.google.protobuf.ByteString getExternalIdBytes() { - java.lang.Object ref = externalId_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - externalId_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * - * - *
-       * Id set by client to annotate its identity.
-       * 
- * - * string external_id = 6; - * - * @param value The externalId to set. - * @return This builder for chaining. - */ - public Builder setExternalId(java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - externalId_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Id set by client to annotate its identity.
-       * 
- * - * string external_id = 6; - * - * @return This builder for chaining. - */ - public Builder clearExternalId() { - - externalId_ = getDefaultInstance().getExternalId(); - onChanged(); - return this; - } - /** - * - * - *
-       * Id set by client to annotate its identity.
-       * 
- * - * string external_id = 6; - * - * @param value The bytes for externalId to set. - * @return This builder for chaining. - */ - public Builder setExternalIdBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - externalId_ = value; - onChanged(); - return this; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.WriteStream) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.WriteStream) - private static final com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public WriteStream parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new WriteStream(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Stream.WriteStream - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { - return descriptor; - } - - private static com.google.protobuf.Descriptors.FileDescriptor descriptor; - - static { - java.lang.String[] descriptorData = { - "\n3google/cloud/bigquery/storage/v1alpha2" - + "/stream.proto\022&google.cloud.bigquery.sto" - + "rage.v1alpha2\032\037google/api/field_behavior" - + ".proto\032\031google/api/resource.proto\0322googl" - + "e/cloud/bigquery/storage/v1alpha2/table." - + "proto\032\037google/protobuf/timestamp.proto\"\376" - + "\003\n\013WriteStream\022\021\n\004name\030\001 \001(\tB\003\340A\003\022K\n\004typ" - + "e\030\002 \001(\01628.google.cloud.bigquery.storage." 
- + "v1alpha2.WriteStream.TypeB\003\340A\005\0224\n\013create" - + "_time\030\003 \001(\0132\032.google.protobuf.TimestampB" - + "\003\340A\003\0224\n\013commit_time\030\004 \001(\0132\032.google.proto" - + "buf.TimestampB\003\340A\003\022N\n\014table_schema\030\005 \001(\013" - + "23.google.cloud.bigquery.storage.v1alpha" - + "2.TableSchemaB\003\340A\003\022\023\n\013external_id\030\006 \001(\t\"" - + "F\n\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\r\n\tCOMMITT" - + "ED\020\001\022\013\n\007PENDING\020\002\022\014\n\010BUFFERED\020\003:v\352As\n*bi" - + "gquerystorage.googleapis.com/WriteStream" - + "\022Eprojects/{project}/datasets/{dataset}/" - + "tables/{table}/streams/{stream}B\332\001\n*com." - + "google.cloud.bigquery.storage.v1alpha2ZM" - + "google.golang.org/genproto/googleapis/cl" - + "oud/bigquery/storage/v1alpha2;storage\352A\\" - + "\n$bigquerystorage.googleapis.com/Table\0224" - + "projects/{project}/datasets/{dataset}/ta" - + "bles/{table}b\006proto3" - }; - descriptor = - com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( - descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - com.google.api.FieldBehaviorProto.getDescriptor(), - com.google.api.ResourceProto.getDescriptor(), - com.google.cloud.bigquery.storage.v1alpha2.Table.getDescriptor(), - com.google.protobuf.TimestampProto.getDescriptor(), - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_WriteStream_descriptor, - new java.lang.String[] { - "Name", "Type", "CreateTime", "CommitTime", "TableSchema", "ExternalId", - }); - com.google.protobuf.ExtensionRegistry registry = - com.google.protobuf.ExtensionRegistry.newInstance(); - registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); - registry.add(com.google.api.ResourceProto.resource); - registry.add(com.google.api.ResourceProto.resourceDefinition); - com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( - descriptor, registry); - com.google.api.FieldBehaviorProto.getDescriptor(); - com.google.api.ResourceProto.getDescriptor(); - com.google.cloud.bigquery.storage.v1alpha2.Table.getDescriptor(); - com.google.protobuf.TimestampProto.getDescriptor(); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java deleted file mode 100644 index 6b6e6d4bbc..0000000000 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java +++ /dev/null @@ -1,3559 +0,0 @@ -/* - * Copyright 2020 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
- * See the License for the specific language governing permissions and - * limitations under the License. - */ -// Generated by the protocol buffer compiler. DO NOT EDIT! -// source: google/cloud/bigquery/storage/v1alpha2/table.proto - -package com.google.cloud.bigquery.storage.v1alpha2; - -public final class Table { - private Table() {} - - public static void registerAllExtensions(com.google.protobuf.ExtensionRegistryLite registry) {} - - public static void registerAllExtensions(com.google.protobuf.ExtensionRegistry registry) { - registerAllExtensions((com.google.protobuf.ExtensionRegistryLite) registry); - } - - public interface TableSchemaOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.TableSchema) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * Describes the fields in a table.
-     * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - java.util.List - getFieldsList(); - /** - * - * - *
-     * Describes the fields in a table.
-     * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema getFields(int index); - /** - * - * - *
-     * Describes the fields in a table.
-     * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - int getFieldsCount(); - /** - * - * - *
-     * Describes the fields in a table.
-     * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - java.util.List< - ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> - getFieldsOrBuilderList(); - /** - * - * - *
-     * Describes the fields in a table.
-     * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder getFieldsOrBuilder( - int index); - } - /** - * - * - *
-   * Schema of a table
-   * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.TableSchema} - */ - public static final class TableSchema extends com.google.protobuf.GeneratedMessageV3 - implements - // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.TableSchema) - TableSchemaOrBuilder { - private static final long serialVersionUID = 0L; - // Use TableSchema.newBuilder() to construct. - private TableSchema(com.google.protobuf.GeneratedMessageV3.Builder builder) { - super(builder); - } - - private TableSchema() { - fields_ = java.util.Collections.emptyList(); - } - - @java.lang.Override - @SuppressWarnings({"unused"}) - protected java.lang.Object newInstance(UnusedPrivateParameter unused) { - return new TableSchema(); - } - - @java.lang.Override - public final com.google.protobuf.UnknownFieldSet getUnknownFields() { - return this.unknownFields; - } - - private TableSchema( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - this(); - if (extensionRegistry == null) { - throw new java.lang.NullPointerException(); - } - int mutable_bitField0_ = 0; - com.google.protobuf.UnknownFieldSet.Builder unknownFields = - com.google.protobuf.UnknownFieldSet.newBuilder(); - try { - boolean done = false; - while (!done) { - int tag = input.readTag(); - switch (tag) { - case 0: - done = true; - break; - case 10: - { - if (!((mutable_bitField0_ & 0x00000001) != 0)) { - fields_ = - new java.util.ArrayList< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema>(); - mutable_bitField0_ |= 0x00000001; - } - fields_.add( - input.readMessage( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.parser(), - extensionRegistry)); - break; - } - default: - { - if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) { - done = true; - } - break; - } - } - } - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - throw e.setUnfinishedMessage(this); - } catch (java.io.IOException e) { - throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this); - } finally { - if (((mutable_bitField0_ & 0x00000001) != 0)) { - fields_ = java.util.Collections.unmodifiableList(fields_); - } - this.unknownFields = unknownFields.build(); - makeExtensionsImmutable(); - } - } - - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Table - .internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Table - .internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.class, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder.class); - } - - public static final int FIELDS_FIELD_NUMBER = 1; - private java.util.List - fields_; - /** - * - * - *
-     * Describes the fields in a table.
-     * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - @java.lang.Override - public java.util.List - getFieldsList() { - return fields_; - } - /** - * - * - *
-     * Describes the fields in a table.
-     * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - @java.lang.Override - public java.util.List< - ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> - getFieldsOrBuilderList() { - return fields_; - } - /** - * - * - *
-     * Describes the fields in a table.
-     * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - @java.lang.Override - public int getFieldsCount() { - return fields_.size(); - } - /** - * - * - *
-     * Describes the fields in a table.
-     * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema getFields(int index) { - return fields_.get(index); - } - /** - * - * - *
-     * Describes the fields in a table.
-     * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder - getFieldsOrBuilder(int index) { - return fields_.get(index); - } - - private byte memoizedIsInitialized = -1; - - @java.lang.Override - public final boolean isInitialized() { - byte isInitialized = memoizedIsInitialized; - if (isInitialized == 1) return true; - if (isInitialized == 0) return false; - - memoizedIsInitialized = 1; - return true; - } - - @java.lang.Override - public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException { - for (int i = 0; i < fields_.size(); i++) { - output.writeMessage(1, fields_.get(i)); - } - unknownFields.writeTo(output); - } - - @java.lang.Override - public int getSerializedSize() { - int size = memoizedSize; - if (size != -1) return size; - - size = 0; - for (int i = 0; i < fields_.size(); i++) { - size += com.google.protobuf.CodedOutputStream.computeMessageSize(1, fields_.get(i)); - } - size += unknownFields.getSerializedSize(); - memoizedSize = size; - return size; - } - - @java.lang.Override - public boolean equals(final java.lang.Object obj) { - if (obj == this) { - return true; - } - if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema)) { - return super.equals(obj); - } - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema other = - (com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema) obj; - - if (!getFieldsList().equals(other.getFieldsList())) return false; - if (!unknownFields.equals(other.unknownFields)) return false; - return true; - } - - @java.lang.Override - public int hashCode() { - if (memoizedHashCode != 0) { - return memoizedHashCode; - } - int hash = 41; - hash = (19 * hash) + getDescriptor().hashCode(); - if (getFieldsCount() > 0) { - hash = (37 * hash) + FIELDS_FIELD_NUMBER; - hash = (53 * hash) + getFieldsList().hashCode(); - } - hash = (29 * hash) + unknownFields.hashCode(); - memoizedHashCode = hash; - return hash; - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( - java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( - java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( - com.google.protobuf.ByteString data) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( - com.google.protobuf.ByteString data, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( - byte[] data) throws com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( - byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws 
com.google.protobuf.InvalidProtocolBufferException { - return PARSER.parseFrom(data, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( - java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseDelimitedFrom( - java.io.InputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseDelimitedFrom( - java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException( - PARSER, input, extensionRegistry); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( - com.google.protobuf.CodedInputStream input) throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parseFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - return com.google.protobuf.GeneratedMessageV3.parseWithIOException( - PARSER, input, extensionRegistry); - } - - @java.lang.Override - public Builder newBuilderForType() { - return newBuilder(); - } - - public static Builder newBuilder() { - return DEFAULT_INSTANCE.toBuilder(); - } - - public static Builder newBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema prototype) { - return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype); - } - - @java.lang.Override - public Builder toBuilder() { - return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this); - } - - @java.lang.Override - protected Builder newBuilderForType( - com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - Builder builder = new Builder(parent); - return builder; - } - /** - * - * - *
-     * Schema of a table
-     * 
- * - * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.TableSchema} - */ - public static final class Builder - extends com.google.protobuf.GeneratedMessageV3.Builder - implements - // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.TableSchema) - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchemaOrBuilder { - public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Table - .internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_descriptor; - } - - @java.lang.Override - protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internalGetFieldAccessorTable() { - return com.google.cloud.bigquery.storage.v1alpha2.Table - .internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_fieldAccessorTable - .ensureFieldAccessorsInitialized( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.class, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.Builder.class); - } - - // Construct using com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.newBuilder() - private Builder() { - maybeForceBuilderInitialization(); - } - - private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) { - super(parent); - maybeForceBuilderInitialization(); - } - - private void maybeForceBuilderInitialization() { - if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) { - getFieldsFieldBuilder(); - } - } - - @java.lang.Override - public Builder clear() { - super.clear(); - if (fieldsBuilder_ == null) { - fields_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - } else { - fieldsBuilder_.clear(); - } - return this; - } - - @java.lang.Override - public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Table - .internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_descriptor; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema - getDefaultInstanceForType() { - return com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance(); - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema build() { - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema result = buildPartial(); - if (!result.isInitialized()) { - throw newUninitializedMessageException(result); - } - return result; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema buildPartial() { - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema result = - new com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema(this); - int from_bitField0_ = bitField0_; - if (fieldsBuilder_ == null) { - if (((bitField0_ & 0x00000001) != 0)) { - fields_ = java.util.Collections.unmodifiableList(fields_); - bitField0_ = (bitField0_ & ~0x00000001); - } - result.fields_ = fields_; - } else { - result.fields_ = fieldsBuilder_.build(); - } - onBuilt(); - return result; - } - - @java.lang.Override - public Builder clone() { - return super.clone(); - } - - @java.lang.Override - public Builder setField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.setField(field, value); - } - - @java.lang.Override - public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) { - return super.clearField(field); - } - - 
@java.lang.Override - public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) { - return super.clearOneof(oneof); - } - - @java.lang.Override - public Builder setRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, - int index, - java.lang.Object value) { - return super.setRepeatedField(field, index, value); - } - - @java.lang.Override - public Builder addRepeatedField( - com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) { - return super.addRepeatedField(field, value); - } - - @java.lang.Override - public Builder mergeFrom(com.google.protobuf.Message other) { - if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema) { - return mergeFrom((com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema) other); - } else { - super.mergeFrom(other); - return this; - } - } - - public Builder mergeFrom(com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema other) { - if (other - == com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema.getDefaultInstance()) - return this; - if (fieldsBuilder_ == null) { - if (!other.fields_.isEmpty()) { - if (fields_.isEmpty()) { - fields_ = other.fields_; - bitField0_ = (bitField0_ & ~0x00000001); - } else { - ensureFieldsIsMutable(); - fields_.addAll(other.fields_); - } - onChanged(); - } - } else { - if (!other.fields_.isEmpty()) { - if (fieldsBuilder_.isEmpty()) { - fieldsBuilder_.dispose(); - fieldsBuilder_ = null; - fields_ = other.fields_; - bitField0_ = (bitField0_ & ~0x00000001); - fieldsBuilder_ = - com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders - ? getFieldsFieldBuilder() - : null; - } else { - fieldsBuilder_.addAllMessages(other.fields_); - } - } - } - this.mergeUnknownFields(other.unknownFields); - onChanged(); - return this; - } - - @java.lang.Override - public final boolean isInitialized() { - return true; - } - - @java.lang.Override - public Builder mergeFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws java.io.IOException { - com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema parsedMessage = null; - try { - parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry); - } catch (com.google.protobuf.InvalidProtocolBufferException e) { - parsedMessage = - (com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema) - e.getUnfinishedMessage(); - throw e.unwrapIOException(); - } finally { - if (parsedMessage != null) { - mergeFrom(parsedMessage); - } - } - return this; - } - - private int bitField0_; - - private java.util.List - fields_ = java.util.Collections.emptyList(); - - private void ensureFieldsIsMutable() { - if (!((bitField0_ & 0x00000001) != 0)) { - fields_ = - new java.util.ArrayList< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema>(fields_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> - fieldsBuilder_; - - /** - * - * - *
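The repeated-field plumbing deleted above is the standard generated pattern: fields_ starts as an immutable empty list, ensureFieldsIsMutable() copies it into an ArrayList on the first write, and the list migrates into a RepeatedFieldBuilderV3 once sub-builders are requested. A small builder-side sketch, assuming the name/type/mode setters from the removed table.proto and a hypothetical column name:

    import com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema;
    import com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema;

    public class TableSchemaBuilderSketch {
      public static void main(String[] args) {
        // addFields(...) routes through ensureFieldsIsMutable() until a
        // sub-builder is requested, after which RepeatedFieldBuilderV3 takes over.
        TableSchema schema = TableSchema.newBuilder()
            .addFields(TableFieldSchema.newBuilder()
                .setName("event_id") // hypothetical column
                .setType(TableFieldSchema.Type.STRING)
                .setMode(TableFieldSchema.Mode.REQUIRED))
            .build();
        System.out.println(schema.getFieldsCount()); // 1
      }
    }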
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public java.util.List - getFieldsList() { - if (fieldsBuilder_ == null) { - return java.util.Collections.unmodifiableList(fields_); - } else { - return fieldsBuilder_.getMessageList(); - } - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public int getFieldsCount() { - if (fieldsBuilder_ == null) { - return fields_.size(); - } else { - return fieldsBuilder_.getCount(); - } - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema getFields( - int index) { - if (fieldsBuilder_ == null) { - return fields_.get(index); - } else { - return fieldsBuilder_.getMessage(index); - } - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public Builder setFields( - int index, com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema value) { - if (fieldsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFieldsIsMutable(); - fields_.set(index, value); - onChanged(); - } else { - fieldsBuilder_.setMessage(index, value); - } - return this; - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public Builder setFields( - int index, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder - builderForValue) { - if (fieldsBuilder_ == null) { - ensureFieldsIsMutable(); - fields_.set(index, builderForValue.build()); - onChanged(); - } else { - fieldsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public Builder addFields( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema value) { - if (fieldsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFieldsIsMutable(); - fields_.add(value); - onChanged(); - } else { - fieldsBuilder_.addMessage(value); - } - return this; - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public Builder addFields( - int index, com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema value) { - if (fieldsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFieldsIsMutable(); - fields_.add(index, value); - onChanged(); - } else { - fieldsBuilder_.addMessage(index, value); - } - return this; - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public Builder addFields( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder - builderForValue) { - if (fieldsBuilder_ == null) { - ensureFieldsIsMutable(); - fields_.add(builderForValue.build()); - onChanged(); - } else { - fieldsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public Builder addFields( - int index, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder - builderForValue) { - if (fieldsBuilder_ == null) { - ensureFieldsIsMutable(); - fields_.add(index, builderForValue.build()); - onChanged(); - } else { - fieldsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public Builder addAllFields( - java.lang.Iterable< - ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema> - values) { - if (fieldsBuilder_ == null) { - ensureFieldsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_); - onChanged(); - } else { - fieldsBuilder_.addAllMessages(values); - } - return this; - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public Builder clearFields() { - if (fieldsBuilder_ == null) { - fields_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - fieldsBuilder_.clear(); - } - return this; - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public Builder removeFields(int index) { - if (fieldsBuilder_ == null) { - ensureFieldsIsMutable(); - fields_.remove(index); - onChanged(); - } else { - fieldsBuilder_.remove(index); - } - return this; - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder - getFieldsBuilder(int index) { - return getFieldsFieldBuilder().getBuilder(index); - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder - getFieldsOrBuilder(int index) { - if (fieldsBuilder_ == null) { - return fields_.get(index); - } else { - return fieldsBuilder_.getMessageOrBuilder(index); - } - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public java.util.List< - ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> - getFieldsOrBuilderList() { - if (fieldsBuilder_ != null) { - return fieldsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(fields_); - } - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder - addFieldsBuilder() { - return getFieldsFieldBuilder() - .addBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema - .getDefaultInstance()); - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder - addFieldsBuilder(int index) { - return getFieldsFieldBuilder() - .addBuilder( - index, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema - .getDefaultInstance()); - } - /** - * - * - *
-       * Describes the fields in a table.
-       * 
- * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 1; - */ - public java.util.List< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder> - getFieldsBuilderList() { - return getFieldsFieldBuilder().getBuilderList(); - } - - private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> - getFieldsFieldBuilder() { - if (fieldsBuilder_ == null) { - fieldsBuilder_ = - new com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder>( - fields_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); - fields_ = null; - } - return fieldsBuilder_; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.TableSchema) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.TableSchema) - private static final com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser PARSER = - new com.google.protobuf.AbstractParser() { - @java.lang.Override - public TableSchema parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new TableSchema(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableSchema - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - public interface TableFieldSchemaOrBuilder - extends - // @@protoc_insertion_point(interface_extends:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema) - com.google.protobuf.MessageOrBuilder { - - /** - * - * - *
-     * Required. The field name. The name must contain only letters (a-z, A-Z),
-     * numbers (0-9), or underscores (_), and must start with a letter or
-     * underscore. The maximum length is 128 characters.
-     * 
- * - * string name = 1 [(.google.api.field_behavior) = REQUIRED]; - * - * @return The name. - */ - java.lang.String getName(); - /** - * - * - *
-     * Required. The field name. The name must contain only letters (a-z, A-Z),
-     * numbers (0-9), or underscores (_), and must start with a letter or
-     * underscore. The maximum length is 128 characters.
-     * 
- * - * string name = 1 [(.google.api.field_behavior) = REQUIRED]; - * - * @return The bytes for name. - */ - com.google.protobuf.ByteString getNameBytes(); - - /** - * - * - *
-     * Required. The field data type.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return The enum numeric value on the wire for type. - */ - int getTypeValue(); - /** - * - * - *
-     * Required. The field data type.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return The type. - */ - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type getType(); - - /** - * - * - *
-     * Optional. The field mode. The default value is NULLABLE.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return The enum numeric value on the wire for mode. - */ - int getModeValue(); - /** - * - * - *
-     * Optional. The field mode. The default value is NULLABLE.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return The mode. - */ - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode getMode(); - - /** - * - * - *
-     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-     * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - java.util.List - getFieldsList(); - /** - * - * - *
-     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-     * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema getFields(int index); - /** - * - * - *
-     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-     * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - int getFieldsCount(); - /** - * - * - *
-     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-     * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - java.util.List< - ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> - getFieldsOrBuilderList(); - /** - * - * - *
-     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-     * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder getFieldsOrBuilder( - int index); - - /** - * - * - *
-     * Optional. The field description. The maximum length is 1,024 characters.
-     * 
- * - * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; - * - * @return The description. - */ - java.lang.String getDescription(); - /** - * - * - *
-     * Optional. The field description. The maximum length is 1,024 characters.
-     * 
- * - * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; - * - * @return The bytes for description. - */ - com.google.protobuf.ByteString getDescriptionBytes(); - } - /** - * - * - *
-   * A field in TableSchema
-   * 
-   *
-   * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.TableFieldSchema}
-   */
-  public static final class TableFieldSchema extends com.google.protobuf.GeneratedMessageV3
-      implements
-      // @@protoc_insertion_point(message_implements:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema)
-      TableFieldSchemaOrBuilder {
-    private static final long serialVersionUID = 0L;
-    // Use TableFieldSchema.newBuilder() to construct.
-    private TableFieldSchema(com.google.protobuf.GeneratedMessageV3.Builder<?> builder) {
-      super(builder);
-    }
-
-    private TableFieldSchema() {
-      name_ = "";
-      type_ = 0;
-      mode_ = 0;
-      fields_ = java.util.Collections.emptyList();
-      description_ = "";
-    }
-
-    @java.lang.Override
-    @SuppressWarnings({"unused"})
-    protected java.lang.Object newInstance(UnusedPrivateParameter unused) {
-      return new TableFieldSchema();
-    }
-
-    @java.lang.Override
-    public final com.google.protobuf.UnknownFieldSet getUnknownFields() {
-      return this.unknownFields;
-    }
-
-    private TableFieldSchema(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      this();
-      if (extensionRegistry == null) {
-        throw new java.lang.NullPointerException();
-      }
-      int mutable_bitField0_ = 0;
-      com.google.protobuf.UnknownFieldSet.Builder unknownFields =
-          com.google.protobuf.UnknownFieldSet.newBuilder();
-      try {
-        boolean done = false;
-        while (!done) {
-          int tag = input.readTag();
-          switch (tag) {
-            case 0:
-              done = true;
-              break;
-            case 10:
-              {
-                java.lang.String s = input.readStringRequireUtf8();
-
-                name_ = s;
-                break;
-              }
-            case 16:
-              {
-                int rawValue = input.readEnum();
-
-                type_ = rawValue;
-                break;
-              }
-            case 24:
-              {
-                int rawValue = input.readEnum();
-
-                mode_ = rawValue;
-                break;
-              }
-            case 34:
-              {
-                if (!((mutable_bitField0_ & 0x00000001) != 0)) {
-                  fields_ =
-                      new java.util.ArrayList<
-                          com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema>();
-                  mutable_bitField0_ |= 0x00000001;
-                }
-                fields_.add(
-                    input.readMessage(
-                        com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.parser(),
-                        extensionRegistry));
-                break;
-              }
-            case 50:
-              {
-                java.lang.String s = input.readStringRequireUtf8();
-
-                description_ = s;
-                break;
-              }
-            default:
-              {
-                if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
-                  done = true;
-                }
-                break;
-              }
-          }
-        }
-      } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-        throw e.setUnfinishedMessage(this);
-      } catch (java.io.IOException e) {
-        throw new com.google.protobuf.InvalidProtocolBufferException(e).setUnfinishedMessage(this);
-      } finally {
-        if (((mutable_bitField0_ & 0x00000001) != 0)) {
-          fields_ = java.util.Collections.unmodifiableList(fields_);
-        }
-        this.unknownFields = unknownFields.build();
-        makeExtensionsImmutable();
-      }
-    }
-
-    public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
-      return com.google.cloud.bigquery.storage.v1alpha2.Table
-          .internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_descriptor;
-    }
-
-    @java.lang.Override
-    protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-        internalGetFieldAccessorTable() {
-      return com.google.cloud.bigquery.storage.v1alpha2.Table
-          .internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_fieldAccessorTable
-          .ensureFieldAccessorsInitialized(
-              com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.class,
-              com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder.class);
-    }
-
-    /** Protobuf enum {@code google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type} */
-    public enum Type implements com.google.protobuf.ProtocolMessageEnum {
-      /**
-       *
-       *
-       *
-       * Illegal value
-       * 
- * - * TYPE_UNSPECIFIED = 0; - */ - TYPE_UNSPECIFIED(0), - /** - * - * - *
-       * 64K, UTF8
-       * 
- * - * STRING = 1; - */ - STRING(1), - /** - * - * - *
-       * 64-bit signed
-       * 
- * - * INT64 = 2; - */ - INT64(2), - /** - * - * - *
-       * 64-bit IEEE floating point
-       * 
- * - * DOUBLE = 3; - */ - DOUBLE(3), - /** - * - * - *
-       * Aggregate type
-       * 
- * - * STRUCT = 4; - */ - STRUCT(4), - /** - * - * - *
-       * 64K, Binary
-       * 
- * - * BYTES = 5; - */ - BYTES(5), - /** - * - * - *
-       * 2-valued
-       * 
- * - * BOOL = 6; - */ - BOOL(6), - /** - * - * - *
-       * 64-bit signed usec since UTC epoch
-       * 
- * - * TIMESTAMP = 7; - */ - TIMESTAMP(7), - /** - * - * - *
-       * Civil date - Year, Month, Day
-       * 
- * - * DATE = 8; - */ - DATE(8), - /** - * - * - *
-       * Civil time - Hour, Minute, Second, Microseconds
-       * 
- * - * TIME = 9; - */ - TIME(9), - /** - * - * - *
-       * Combination of civil date and civil time
-       * 
- * - * DATETIME = 10; - */ - DATETIME(10), - /** - * - * - *
-       * Geography object
-       * 
- * - * GEOGRAPHY = 11; - */ - GEOGRAPHY(11), - /** - * - * - *
-       * Numeric value
-       * 
- * - * NUMERIC = 12; - */ - NUMERIC(12), - UNRECOGNIZED(-1), - ; - - /** - * - * - *
-       * Illegal value
-       * 
- * - * TYPE_UNSPECIFIED = 0; - */ - public static final int TYPE_UNSPECIFIED_VALUE = 0; - /** - * - * - *
-       * 64K, UTF8
-       * 
- * - * STRING = 1; - */ - public static final int STRING_VALUE = 1; - /** - * - * - *
-       * 64-bit signed
-       * 
- * - * INT64 = 2; - */ - public static final int INT64_VALUE = 2; - /** - * - * - *
-       * 64-bit IEEE floating point
-       * 
- * - * DOUBLE = 3; - */ - public static final int DOUBLE_VALUE = 3; - /** - * - * - *
-       * Aggregate type
-       * 
- * - * STRUCT = 4; - */ - public static final int STRUCT_VALUE = 4; - /** - * - * - *
-       * 64K, Binary
-       * 
- * - * BYTES = 5; - */ - public static final int BYTES_VALUE = 5; - /** - * - * - *
-       * 2-valued
-       * 
- * - * BOOL = 6; - */ - public static final int BOOL_VALUE = 6; - /** - * - * - *
-       * 64-bit signed usec since UTC epoch
-       * 
- * - * TIMESTAMP = 7; - */ - public static final int TIMESTAMP_VALUE = 7; - /** - * - * - *
-       * Civil date - Year, Month, Day
-       * 
- * - * DATE = 8; - */ - public static final int DATE_VALUE = 8; - /** - * - * - *
-       * Civil time - Hour, Minute, Second, Microseconds
-       * 
- * - * TIME = 9; - */ - public static final int TIME_VALUE = 9; - /** - * - * - *
-       * Combination of civil date and civil time
-       * 
- * - * DATETIME = 10; - */ - public static final int DATETIME_VALUE = 10; - /** - * - * - *
-       * Geography object
-       * 
- * - * GEOGRAPHY = 11; - */ - public static final int GEOGRAPHY_VALUE = 11; - /** - * - * - *
-       * Numeric value
-       * 
- * - * NUMERIC = 12; - */ - public static final int NUMERIC_VALUE = 12; - - public final int getNumber() { - if (this == UNRECOGNIZED) { - throw new java.lang.IllegalArgumentException( - "Can't get the number of an unknown enum value."); - } - return value; - } - - /** - * @param value The numeric wire value of the corresponding enum entry. - * @return The enum associated with the given numeric wire value. - * @deprecated Use {@link #forNumber(int)} instead. - */ - @java.lang.Deprecated - public static Type valueOf(int value) { - return forNumber(value); - } - - /** - * @param value The numeric wire value of the corresponding enum entry. - * @return The enum associated with the given numeric wire value. - */ - public static Type forNumber(int value) { - switch (value) { - case 0: - return TYPE_UNSPECIFIED; - case 1: - return STRING; - case 2: - return INT64; - case 3: - return DOUBLE; - case 4: - return STRUCT; - case 5: - return BYTES; - case 6: - return BOOL; - case 7: - return TIMESTAMP; - case 8: - return DATE; - case 9: - return TIME; - case 10: - return DATETIME; - case 11: - return GEOGRAPHY; - case 12: - return NUMERIC; - default: - return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { - return internalValueMap; - } - - private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public Type findValueByNumber(int number) { - return Type.forNumber(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { - if (this == UNRECOGNIZED) { - throw new java.lang.IllegalStateException( - "Can't get the descriptor of an unrecognized enum value."); - } - return getDescriptor().getValues().get(ordinal()); - } - - public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { - return getDescriptor(); - } - - public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.getDescriptor() - .getEnumTypes() - .get(0); - } - - private static final Type[] VALUES = values(); - - public static Type valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); - } - if (desc.getIndex() == -1) { - return UNRECOGNIZED; - } - return VALUES[desc.getIndex()]; - } - - private final int value; - - private Type(int value) { - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type) - } - - /** Protobuf enum {@code google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode} */ - public enum Mode implements com.google.protobuf.ProtocolMessageEnum { - /** - * - * - *
-       * Illegal value
-       * 
- * - * MODE_UNSPECIFIED = 0; - */ - MODE_UNSPECIFIED(0), - /** NULLABLE = 1; */ - NULLABLE(1), - /** REQUIRED = 2; */ - REQUIRED(2), - /** REPEATED = 3; */ - REPEATED(3), - UNRECOGNIZED(-1), - ; - - /** - * - * - *
-       * Illegal value
-       * 
- * - * MODE_UNSPECIFIED = 0; - */ - public static final int MODE_UNSPECIFIED_VALUE = 0; - /** NULLABLE = 1; */ - public static final int NULLABLE_VALUE = 1; - /** REQUIRED = 2; */ - public static final int REQUIRED_VALUE = 2; - /** REPEATED = 3; */ - public static final int REPEATED_VALUE = 3; - - public final int getNumber() { - if (this == UNRECOGNIZED) { - throw new java.lang.IllegalArgumentException( - "Can't get the number of an unknown enum value."); - } - return value; - } - - /** - * @param value The numeric wire value of the corresponding enum entry. - * @return The enum associated with the given numeric wire value. - * @deprecated Use {@link #forNumber(int)} instead. - */ - @java.lang.Deprecated - public static Mode valueOf(int value) { - return forNumber(value); - } - - /** - * @param value The numeric wire value of the corresponding enum entry. - * @return The enum associated with the given numeric wire value. - */ - public static Mode forNumber(int value) { - switch (value) { - case 0: - return MODE_UNSPECIFIED; - case 1: - return NULLABLE; - case 2: - return REQUIRED; - case 3: - return REPEATED; - default: - return null; - } - } - - public static com.google.protobuf.Internal.EnumLiteMap internalGetValueMap() { - return internalValueMap; - } - - private static final com.google.protobuf.Internal.EnumLiteMap internalValueMap = - new com.google.protobuf.Internal.EnumLiteMap() { - public Mode findValueByNumber(int number) { - return Mode.forNumber(number); - } - }; - - public final com.google.protobuf.Descriptors.EnumValueDescriptor getValueDescriptor() { - if (this == UNRECOGNIZED) { - throw new java.lang.IllegalStateException( - "Can't get the descriptor of an unrecognized enum value."); - } - return getDescriptor().getValues().get(ordinal()); - } - - public final com.google.protobuf.Descriptors.EnumDescriptor getDescriptorForType() { - return getDescriptor(); - } - - public static final com.google.protobuf.Descriptors.EnumDescriptor getDescriptor() { - return com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.getDescriptor() - .getEnumTypes() - .get(1); - } - - private static final Mode[] VALUES = values(); - - public static Mode valueOf(com.google.protobuf.Descriptors.EnumValueDescriptor desc) { - if (desc.getType() != getDescriptor()) { - throw new java.lang.IllegalArgumentException("EnumValueDescriptor is not for this type."); - } - if (desc.getIndex() == -1) { - return UNRECOGNIZED; - } - return VALUES[desc.getIndex()]; - } - - private final int value; - - private Mode(int value) { - this.value = value; - } - - // @@protoc_insertion_point(enum_scope:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode) - } - - public static final int NAME_FIELD_NUMBER = 1; - private volatile java.lang.Object name_; - /** - * - * - *
-     * Required. The field name. The name must contain only letters (a-z, A-Z),
-     * numbers (0-9), or underscores (_), and must start with a letter or
-     * underscore. The maximum length is 128 characters.
-     * 
- * - * string name = 1 [(.google.api.field_behavior) = REQUIRED]; - * - * @return The name. - */ - @java.lang.Override - public java.lang.String getName() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - name_ = s; - return s; - } - } - /** - * - * - *
-     * Required. The field name. The name must contain only letters (a-z, A-Z),
-     * numbers (0-9), or underscores (_), and must start with a letter or
-     * underscore. The maximum length is 128 characters.
-     * 
- * - * string name = 1 [(.google.api.field_behavior) = REQUIRED]; - * - * @return The bytes for name. - */ - @java.lang.Override - public com.google.protobuf.ByteString getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof java.lang.String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - - public static final int TYPE_FIELD_NUMBER = 2; - private int type_; - /** - * - * - *
-     * Required. The field data type.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return The enum numeric value on the wire for type. - */ - @java.lang.Override - public int getTypeValue() { - return type_; - } - /** - * - * - *
-     * Required. The field data type.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return The type. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type getType() { - @SuppressWarnings("deprecation") - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type result = - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type.valueOf(type_); - return result == null - ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type.UNRECOGNIZED - : result; - } - - public static final int MODE_FIELD_NUMBER = 3; - private int mode_; - /** - * - * - *
-     * Optional. The field mode. The default value is NULLABLE.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return The enum numeric value on the wire for mode. - */ - @java.lang.Override - public int getModeValue() { - return mode_; - } - /** - * - * - *
-     * Optional. The field mode. The default value is NULLABLE.
-     * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return The mode. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode getMode() { - @SuppressWarnings("deprecation") - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode result = - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode.valueOf(mode_); - return result == null - ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode.UNRECOGNIZED - : result; - } - - public static final int FIELDS_FIELD_NUMBER = 4; - private java.util.List - fields_; - /** - * - * - *
-     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-     * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - @java.lang.Override - public java.util.List - getFieldsList() { - return fields_; - } - /** - * - * - *
-     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-     * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - @java.lang.Override - public java.util.List< - ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> - getFieldsOrBuilderList() { - return fields_; - } - /** - * - * - *
-     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-     * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - @java.lang.Override - public int getFieldsCount() { - return fields_.size(); - } - /** - * - * - *
-     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-     * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema getFields(int index) { - return fields_.get(index); - } - /** - * - * - *
-     * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-     * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder - getFieldsOrBuilder(int index) { - return fields_.get(index); - } - - public static final int DESCRIPTION_FIELD_NUMBER = 6; - private volatile java.lang.Object description_; - /** - * - * - *
-     * Optional. The field description. The maximum length is 1,024 characters.
-     * 
- * - * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; - * - * @return The description. - */ - @java.lang.Override - public java.lang.String getDescription() { - java.lang.Object ref = description_; - if (ref instanceof java.lang.String) { - return (java.lang.String) ref; - } else { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - description_ = s; - return s; - } - } - /** - * - * - *
-     * Optional. The field description. The maximum length is 1,024 characters.
-     * 
-     *
-     * string description = 6 [(.google.api.field_behavior) = OPTIONAL];
-     *
-     * @return The bytes for description.
-     */
-    @java.lang.Override
-    public com.google.protobuf.ByteString getDescriptionBytes() {
-      java.lang.Object ref = description_;
-      if (ref instanceof java.lang.String) {
-        com.google.protobuf.ByteString b =
-            com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref);
-        description_ = b;
-        return b;
-      } else {
-        return (com.google.protobuf.ByteString) ref;
-      }
-    }
-
-    private byte memoizedIsInitialized = -1;
-
-    @java.lang.Override
-    public final boolean isInitialized() {
-      byte isInitialized = memoizedIsInitialized;
-      if (isInitialized == 1) return true;
-      if (isInitialized == 0) return false;
-
-      memoizedIsInitialized = 1;
-      return true;
-    }
-
-    @java.lang.Override
-    public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io.IOException {
-      if (!getNameBytes().isEmpty()) {
-        com.google.protobuf.GeneratedMessageV3.writeString(output, 1, name_);
-      }
-      if (type_
-          != com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type.TYPE_UNSPECIFIED
-              .getNumber()) {
-        output.writeEnum(2, type_);
-      }
-      if (mode_
-          != com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode.MODE_UNSPECIFIED
-              .getNumber()) {
-        output.writeEnum(3, mode_);
-      }
-      for (int i = 0; i < fields_.size(); i++) {
-        output.writeMessage(4, fields_.get(i));
-      }
-      if (!getDescriptionBytes().isEmpty()) {
-        com.google.protobuf.GeneratedMessageV3.writeString(output, 6, description_);
-      }
-      unknownFields.writeTo(output);
-    }
-
-    @java.lang.Override
-    public int getSerializedSize() {
-      int size = memoizedSize;
-      if (size != -1) return size;
-
-      size = 0;
-      if (!getNameBytes().isEmpty()) {
-        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(1, name_);
-      }
-      if (type_
-          != com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type.TYPE_UNSPECIFIED
-              .getNumber()) {
-        size += com.google.protobuf.CodedOutputStream.computeEnumSize(2, type_);
-      }
-      if (mode_
-          != com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode.MODE_UNSPECIFIED
-              .getNumber()) {
-        size += com.google.protobuf.CodedOutputStream.computeEnumSize(3, mode_);
-      }
-      for (int i = 0; i < fields_.size(); i++) {
-        size += com.google.protobuf.CodedOutputStream.computeMessageSize(4, fields_.get(i));
-      }
-      if (!getDescriptionBytes().isEmpty()) {
-        size += com.google.protobuf.GeneratedMessageV3.computeStringSize(6, description_);
-      }
-      size += unknownFields.getSerializedSize();
-      memoizedSize = size;
-      return size;
-    }
-
-    @java.lang.Override
-    public boolean equals(final java.lang.Object obj) {
-      if (obj == this) {
-        return true;
-      }
-      if (!(obj instanceof com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema)) {
-        return super.equals(obj);
-      }
-      com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema other =
-          (com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema) obj;
-
-      if (!getName().equals(other.getName())) return false;
-      if (type_ != other.type_) return false;
-      if (mode_ != other.mode_) return false;
-      if (!getFieldsList().equals(other.getFieldsList())) return false;
-      if (!getDescription().equals(other.getDescription())) return false;
-      if (!unknownFields.equals(other.unknownFields)) return false;
-      return true;
-    }
-
-    @java.lang.Override
-    public int hashCode() {
-      if (memoizedHashCode != 0) {
-        return memoizedHashCode;
-      }
-      int hash = 41;
-      hash = (19 * hash) + getDescriptor().hashCode();
-      hash = (37 * hash) + NAME_FIELD_NUMBER;
-      hash = (53 * hash) + getName().hashCode();
-      hash = (37 * hash) + TYPE_FIELD_NUMBER;
-      hash = (53 * hash) + type_;
-      hash = (37 * hash) + MODE_FIELD_NUMBER;
-      hash = (53 * hash) + mode_;
-      if (getFieldsCount() > 0) {
-        hash = (37 * hash) + FIELDS_FIELD_NUMBER;
-        hash = (53 * hash) + getFieldsList().hashCode();
-      }
-      hash = (37 * hash) + DESCRIPTION_FIELD_NUMBER;
-      hash = (53 * hash) + getDescription().hashCode();
-      hash = (29 * hash) + unknownFields.hashCode();
-      memoizedHashCode = hash;
-      return hash;
-    }
-
-    public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom(
-        java.nio.ByteBuffer data) throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-
-    public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom(
-        java.nio.ByteBuffer data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-
-    public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom(
-        com.google.protobuf.ByteString data)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-
-    public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom(
-        com.google.protobuf.ByteString data,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-
-    public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom(
-        byte[] data) throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data);
-    }
-
-    public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom(
-        byte[] data, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws com.google.protobuf.InvalidProtocolBufferException {
-      return PARSER.parseFrom(data, extensionRegistry);
-    }
-
-    public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom(
-        java.io.InputStream input) throws java.io.IOException {
-      return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
-    }
-
-    public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom(
-        java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
-          PARSER, input, extensionRegistry);
-    }
-
-    public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema
-        parseDelimitedFrom(java.io.InputStream input) throws java.io.IOException {
-      return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(PARSER, input);
-    }
-
-    public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema
-        parseDelimitedFrom(
-            java.io.InputStream input, com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-            throws java.io.IOException {
-      return com.google.protobuf.GeneratedMessageV3.parseDelimitedWithIOException(
-          PARSER, input, extensionRegistry);
-    }
-
-    public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom(
-        com.google.protobuf.CodedInputStream input) throws java.io.IOException {
-      return com.google.protobuf.GeneratedMessageV3.parseWithIOException(PARSER, input);
-    }
-
-    public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parseFrom(
-        com.google.protobuf.CodedInputStream input,
-        com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-        throws java.io.IOException {
-      return com.google.protobuf.GeneratedMessageV3.parseWithIOException(
-          PARSER, input, extensionRegistry);
-    }
-
-    @java.lang.Override
-    public Builder newBuilderForType() {
-      return newBuilder();
-    }
-
-    public static Builder newBuilder() {
-      return DEFAULT_INSTANCE.toBuilder();
-    }
-
-    public static Builder newBuilder(
-        com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema prototype) {
-      return DEFAULT_INSTANCE.toBuilder().mergeFrom(prototype);
-    }
-
-    @java.lang.Override
-    public Builder toBuilder() {
-      return this == DEFAULT_INSTANCE ? new Builder() : new Builder().mergeFrom(this);
-    }
-
-    @java.lang.Override
-    protected Builder newBuilderForType(
-        com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-      Builder builder = new Builder(parent);
-      return builder;
-    }
-    /**
-     *
-     *
-     *
-     * A field in TableSchema
-     * 
-     *
-     * Protobuf type {@code google.cloud.bigquery.storage.v1alpha2.TableFieldSchema}
-     */
-    public static final class Builder
-        extends com.google.protobuf.GeneratedMessageV3.Builder<Builder>
-        implements
-        // @@protoc_insertion_point(builder_implements:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema)
-        com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder {
-      public static final com.google.protobuf.Descriptors.Descriptor getDescriptor() {
-        return com.google.cloud.bigquery.storage.v1alpha2.Table
-            .internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_descriptor;
-      }
-
-      @java.lang.Override
-      protected com.google.protobuf.GeneratedMessageV3.FieldAccessorTable
-          internalGetFieldAccessorTable() {
-        return com.google.cloud.bigquery.storage.v1alpha2.Table
-            .internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_fieldAccessorTable
-            .ensureFieldAccessorsInitialized(
-                com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.class,
-                com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder.class);
-      }
-
-      // Construct using
-      // com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.newBuilder()
-      private Builder() {
-        maybeForceBuilderInitialization();
-      }
-
-      private Builder(com.google.protobuf.GeneratedMessageV3.BuilderParent parent) {
-        super(parent);
-        maybeForceBuilderInitialization();
-      }
-
-      private void maybeForceBuilderInitialization() {
-        if (com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders) {
-          getFieldsFieldBuilder();
-        }
-      }
-
-      @java.lang.Override
-      public Builder clear() {
-        super.clear();
-        name_ = "";
-
-        type_ = 0;
-
-        mode_ = 0;
-
-        if (fieldsBuilder_ == null) {
-          fields_ = java.util.Collections.emptyList();
-          bitField0_ = (bitField0_ & ~0x00000001);
-        } else {
-          fieldsBuilder_.clear();
-        }
-        description_ = "";
-
-        return this;
-      }
-
-      @java.lang.Override
-      public com.google.protobuf.Descriptors.Descriptor getDescriptorForType() {
-        return com.google.cloud.bigquery.storage.v1alpha2.Table
-            .internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_descriptor;
-      }
-
-      @java.lang.Override
-      public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema
-          getDefaultInstanceForType() {
-        return com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema
-            .getDefaultInstance();
-      }
-
-      @java.lang.Override
-      public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema build() {
-        com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema result = buildPartial();
-        if (!result.isInitialized()) {
-          throw newUninitializedMessageException(result);
-        }
-        return result;
-      }
-
-      @java.lang.Override
-      public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema buildPartial() {
-        com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema result =
-            new com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema(this);
-        int from_bitField0_ = bitField0_;
-        result.name_ = name_;
-        result.type_ = type_;
-        result.mode_ = mode_;
-        if (fieldsBuilder_ == null) {
-          if (((bitField0_ & 0x00000001) != 0)) {
-            fields_ = java.util.Collections.unmodifiableList(fields_);
-            bitField0_ = (bitField0_ & ~0x00000001);
-          }
-          result.fields_ = fields_;
-        } else {
-          result.fields_ = fieldsBuilder_.build();
-        }
-        result.description_ = description_;
-        onBuilt();
-        return result;
-      }
-
-      @java.lang.Override
-      public Builder clone() {
-        return super.clone();
-      }
-
-      @java.lang.Override
-      public Builder setField(
-          com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
-        return super.setField(field, value);
-      }
-
-      @java.lang.Override
-      public Builder clearField(com.google.protobuf.Descriptors.FieldDescriptor field) {
-        return super.clearField(field);
-      }
-
-      @java.lang.Override
-      public Builder clearOneof(com.google.protobuf.Descriptors.OneofDescriptor oneof) {
-        return super.clearOneof(oneof);
-      }
-
-      @java.lang.Override
-      public Builder setRepeatedField(
-          com.google.protobuf.Descriptors.FieldDescriptor field,
-          int index,
-          java.lang.Object value) {
-        return super.setRepeatedField(field, index, value);
-      }
-
-      @java.lang.Override
-      public Builder addRepeatedField(
-          com.google.protobuf.Descriptors.FieldDescriptor field, java.lang.Object value) {
-        return super.addRepeatedField(field, value);
-      }
-
-      @java.lang.Override
-      public Builder mergeFrom(com.google.protobuf.Message other) {
-        if (other instanceof com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema) {
-          return mergeFrom(
-              (com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema) other);
-        } else {
-          super.mergeFrom(other);
-          return this;
-        }
-      }
-
-      public Builder mergeFrom(
-          com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema other) {
-        if (other
-            == com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema
-                .getDefaultInstance()) return this;
-        if (!other.getName().isEmpty()) {
-          name_ = other.name_;
-          onChanged();
-        }
-        if (other.type_ != 0) {
-          setTypeValue(other.getTypeValue());
-        }
-        if (other.mode_ != 0) {
-          setModeValue(other.getModeValue());
-        }
-        if (fieldsBuilder_ == null) {
-          if (!other.fields_.isEmpty()) {
-            if (fields_.isEmpty()) {
-              fields_ = other.fields_;
-              bitField0_ = (bitField0_ & ~0x00000001);
-            } else {
-              ensureFieldsIsMutable();
-              fields_.addAll(other.fields_);
-            }
-            onChanged();
-          }
-        } else {
-          if (!other.fields_.isEmpty()) {
-            if (fieldsBuilder_.isEmpty()) {
-              fieldsBuilder_.dispose();
-              fieldsBuilder_ = null;
-              fields_ = other.fields_;
-              bitField0_ = (bitField0_ & ~0x00000001);
-              fieldsBuilder_ =
-                  com.google.protobuf.GeneratedMessageV3.alwaysUseFieldBuilders
-                      ? getFieldsFieldBuilder()
-                      : null;
-            } else {
-              fieldsBuilder_.addAllMessages(other.fields_);
-            }
-          }
-        }
-        if (!other.getDescription().isEmpty()) {
-          description_ = other.description_;
-          onChanged();
-        }
-        this.mergeUnknownFields(other.unknownFields);
-        onChanged();
-        return this;
-      }
-
-      @java.lang.Override
-      public final boolean isInitialized() {
-        return true;
-      }
-
-      @java.lang.Override
-      public Builder mergeFrom(
-          com.google.protobuf.CodedInputStream input,
-          com.google.protobuf.ExtensionRegistryLite extensionRegistry)
-          throws java.io.IOException {
-        com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema parsedMessage = null;
-        try {
-          parsedMessage = PARSER.parsePartialFrom(input, extensionRegistry);
-        } catch (com.google.protobuf.InvalidProtocolBufferException e) {
-          parsedMessage =
-              (com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema)
-                  e.getUnfinishedMessage();
-          throw e.unwrapIOException();
-        } finally {
-          if (parsedMessage != null) {
-            mergeFrom(parsedMessage);
-          }
-        }
-        return this;
-      }
-
-      private int bitField0_;
-
-      private java.lang.Object name_ = "";
-      /**
-       *
-       *
-       *
-       * Required. The field name. The name must contain only letters (a-z, A-Z),
-       * numbers (0-9), or underscores (_), and must start with a letter or
-       * underscore. The maximum length is 128 characters.
-       * 
- * - * string name = 1 [(.google.api.field_behavior) = REQUIRED]; - * - * @return The name. - */ - public java.lang.String getName() { - java.lang.Object ref = name_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - name_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * - * - *
-       * Required. The field name. The name must contain only letters (a-z, A-Z),
-       * numbers (0-9), or underscores (_), and must start with a letter or
-       * underscore. The maximum length is 128 characters.
-       * 
- * - * string name = 1 [(.google.api.field_behavior) = REQUIRED]; - * - * @return The bytes for name. - */ - public com.google.protobuf.ByteString getNameBytes() { - java.lang.Object ref = name_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - name_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * - * - *
-       * Required. The field name. The name must contain only letters (a-z, A-Z),
-       * numbers (0-9), or underscores (_), and must start with a letter or
-       * underscore. The maximum length is 128 characters.
-       * 
- * - * string name = 1 [(.google.api.field_behavior) = REQUIRED]; - * - * @param value The name to set. - * @return This builder for chaining. - */ - public Builder setName(java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - name_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Required. The field name. The name must contain only letters (a-z, A-Z),
-       * numbers (0-9), or underscores (_), and must start with a letter or
-       * underscore. The maximum length is 128 characters.
-       * 
- * - * string name = 1 [(.google.api.field_behavior) = REQUIRED]; - * - * @return This builder for chaining. - */ - public Builder clearName() { - - name_ = getDefaultInstance().getName(); - onChanged(); - return this; - } - /** - * - * - *
-       * Required. The field name. The name must contain only letters (a-z, A-Z),
-       * numbers (0-9), or underscores (_), and must start with a letter or
-       * underscore. The maximum length is 128 characters.
-       * 
- * - * string name = 1 [(.google.api.field_behavior) = REQUIRED]; - * - * @param value The bytes for name to set. - * @return This builder for chaining. - */ - public Builder setNameBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - name_ = value; - onChanged(); - return this; - } - - private int type_ = 0; - /** - * - * - *
-       * Required. The field data type.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return The enum numeric value on the wire for type. - */ - @java.lang.Override - public int getTypeValue() { - return type_; - } - /** - * - * - *
-       * Required. The field data type.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @param value The enum numeric value on the wire for type to set. - * @return This builder for chaining. - */ - public Builder setTypeValue(int value) { - - type_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Required. The field data type.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return The type. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type getType() { - @SuppressWarnings("deprecation") - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type result = - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type.valueOf(type_); - return result == null - ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type.UNRECOGNIZED - : result; - } - /** - * - * - *
-       * Required. The field data type.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @param value The type to set. - * @return This builder for chaining. - */ - public Builder setType( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Type value) { - if (value == null) { - throw new NullPointerException(); - } - - type_ = value.getNumber(); - onChanged(); - return this; - } - /** - * - * - *
-       * Required. The field data type.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Type type = 2 [(.google.api.field_behavior) = REQUIRED]; - * - * - * @return This builder for chaining. - */ - public Builder clearType() { - - type_ = 0; - onChanged(); - return this; - } - - private int mode_ = 0; - /** - * - * - *
-       * Optional. The field mode. The default value is NULLABLE.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return The enum numeric value on the wire for mode. - */ - @java.lang.Override - public int getModeValue() { - return mode_; - } - /** - * - * - *
-       * Optional. The field mode. The default value is NULLABLE.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @param value The enum numeric value on the wire for mode to set. - * @return This builder for chaining. - */ - public Builder setModeValue(int value) { - - mode_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Optional. The field mode. The default value is NULLABLE.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return The mode. - */ - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode getMode() { - @SuppressWarnings("deprecation") - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode result = - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode.valueOf(mode_); - return result == null - ? com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode.UNRECOGNIZED - : result; - } - /** - * - * - *
-       * Optional. The field mode. The default value is NULLABLE.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @param value The mode to set. - * @return This builder for chaining. - */ - public Builder setMode( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Mode value) { - if (value == null) { - throw new NullPointerException(); - } - - mode_ = value.getNumber(); - onChanged(); - return this; - } - /** - * - * - *
-       * Optional. The field mode. The default value is NULLABLE.
-       * 
- * - * - * .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema.Mode mode = 3 [(.google.api.field_behavior) = OPTIONAL]; - * - * - * @return This builder for chaining. - */ - public Builder clearMode() { - - mode_ = 0; - onChanged(); - return this; - } - - private java.util.List - fields_ = java.util.Collections.emptyList(); - - private void ensureFieldsIsMutable() { - if (!((bitField0_ & 0x00000001) != 0)) { - fields_ = - new java.util.ArrayList< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema>(fields_); - bitField0_ |= 0x00000001; - } - } - - private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> - fieldsBuilder_; - - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public java.util.List - getFieldsList() { - if (fieldsBuilder_ == null) { - return java.util.Collections.unmodifiableList(fields_); - } else { - return fieldsBuilder_.getMessageList(); - } - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public int getFieldsCount() { - if (fieldsBuilder_ == null) { - return fields_.size(); - } else { - return fieldsBuilder_.getCount(); - } - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema getFields( - int index) { - if (fieldsBuilder_ == null) { - return fields_.get(index); - } else { - return fieldsBuilder_.getMessage(index); - } - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder setFields( - int index, com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema value) { - if (fieldsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFieldsIsMutable(); - fields_.set(index, value); - onChanged(); - } else { - fieldsBuilder_.setMessage(index, value); - } - return this; - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder setFields( - int index, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder - builderForValue) { - if (fieldsBuilder_ == null) { - ensureFieldsIsMutable(); - fields_.set(index, builderForValue.build()); - onChanged(); - } else { - fieldsBuilder_.setMessage(index, builderForValue.build()); - } - return this; - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder addFields( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema value) { - if (fieldsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFieldsIsMutable(); - fields_.add(value); - onChanged(); - } else { - fieldsBuilder_.addMessage(value); - } - return this; - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder addFields( - int index, com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema value) { - if (fieldsBuilder_ == null) { - if (value == null) { - throw new NullPointerException(); - } - ensureFieldsIsMutable(); - fields_.add(index, value); - onChanged(); - } else { - fieldsBuilder_.addMessage(index, value); - } - return this; - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder addFields( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder - builderForValue) { - if (fieldsBuilder_ == null) { - ensureFieldsIsMutable(); - fields_.add(builderForValue.build()); - onChanged(); - } else { - fieldsBuilder_.addMessage(builderForValue.build()); - } - return this; - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder addFields( - int index, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder - builderForValue) { - if (fieldsBuilder_ == null) { - ensureFieldsIsMutable(); - fields_.add(index, builderForValue.build()); - onChanged(); - } else { - fieldsBuilder_.addMessage(index, builderForValue.build()); - } - return this; - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder addAllFields( - java.lang.Iterable< - ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema> - values) { - if (fieldsBuilder_ == null) { - ensureFieldsIsMutable(); - com.google.protobuf.AbstractMessageLite.Builder.addAll(values, fields_); - onChanged(); - } else { - fieldsBuilder_.addAllMessages(values); - } - return this; - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder clearFields() { - if (fieldsBuilder_ == null) { - fields_ = java.util.Collections.emptyList(); - bitField0_ = (bitField0_ & ~0x00000001); - onChanged(); - } else { - fieldsBuilder_.clear(); - } - return this; - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public Builder removeFields(int index) { - if (fieldsBuilder_ == null) { - ensureFieldsIsMutable(); - fields_.remove(index); - onChanged(); - } else { - fieldsBuilder_.remove(index); - } - return this; - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder - getFieldsBuilder(int index) { - return getFieldsFieldBuilder().getBuilder(index); - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder - getFieldsOrBuilder(int index) { - if (fieldsBuilder_ == null) { - return fields_.get(index); - } else { - return fieldsBuilder_.getMessageOrBuilder(index); - } - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public java.util.List< - ? extends com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> - getFieldsOrBuilderList() { - if (fieldsBuilder_ != null) { - return fieldsBuilder_.getMessageOrBuilderList(); - } else { - return java.util.Collections.unmodifiableList(fields_); - } - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder - addFieldsBuilder() { - return getFieldsFieldBuilder() - .addBuilder( - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema - .getDefaultInstance()); - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder - addFieldsBuilder(int index) { - return getFieldsFieldBuilder() - .addBuilder( - index, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema - .getDefaultInstance()); - } - /** - * - * - *
-       * Optional. Describes the nested schema fields if the type property is set to STRUCT.
-       * 
- * - * - * repeated .google.cloud.bigquery.storage.v1alpha2.TableFieldSchema fields = 4 [(.google.api.field_behavior) = OPTIONAL]; - * - */ - public java.util.List< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder> - getFieldsBuilderList() { - return getFieldsFieldBuilder().getBuilderList(); - } - - private com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder> - getFieldsFieldBuilder() { - if (fieldsBuilder_ == null) { - fieldsBuilder_ = - new com.google.protobuf.RepeatedFieldBuilderV3< - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema.Builder, - com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchemaOrBuilder>( - fields_, ((bitField0_ & 0x00000001) != 0), getParentForChildren(), isClean()); - fields_ = null; - } - return fieldsBuilder_; - } - - private java.lang.Object description_ = ""; - /** - * - * - *
-       * Optional. The field description. The maximum length is 1,024 characters.
-       * 
- * - * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; - * - * @return The description. - */ - public java.lang.String getDescription() { - java.lang.Object ref = description_; - if (!(ref instanceof java.lang.String)) { - com.google.protobuf.ByteString bs = (com.google.protobuf.ByteString) ref; - java.lang.String s = bs.toStringUtf8(); - description_ = s; - return s; - } else { - return (java.lang.String) ref; - } - } - /** - * - * - *
-       * Optional. The field description. The maximum length is 1,024 characters.
-       * 
- * - * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; - * - * @return The bytes for description. - */ - public com.google.protobuf.ByteString getDescriptionBytes() { - java.lang.Object ref = description_; - if (ref instanceof String) { - com.google.protobuf.ByteString b = - com.google.protobuf.ByteString.copyFromUtf8((java.lang.String) ref); - description_ = b; - return b; - } else { - return (com.google.protobuf.ByteString) ref; - } - } - /** - * - * - *
-       * Optional. The field description. The maximum length is 1,024 characters.
-       * 
- * - * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; - * - * @param value The description to set. - * @return This builder for chaining. - */ - public Builder setDescription(java.lang.String value) { - if (value == null) { - throw new NullPointerException(); - } - - description_ = value; - onChanged(); - return this; - } - /** - * - * - *
-       * Optional. The field description. The maximum length is 1,024 characters.
-       * 
- * - * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; - * - * @return This builder for chaining. - */ - public Builder clearDescription() { - - description_ = getDefaultInstance().getDescription(); - onChanged(); - return this; - } - /** - * - * - *
-       * Optional. The field description. The maximum length is 1,024 characters.
-       * 
- * - * string description = 6 [(.google.api.field_behavior) = OPTIONAL]; - * - * @param value The bytes for description to set. - * @return This builder for chaining. - */ - public Builder setDescriptionBytes(com.google.protobuf.ByteString value) { - if (value == null) { - throw new NullPointerException(); - } - checkByteStringIsUtf8(value); - - description_ = value; - onChanged(); - return this; - } - - @java.lang.Override - public final Builder setUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.setUnknownFields(unknownFields); - } - - @java.lang.Override - public final Builder mergeUnknownFields( - final com.google.protobuf.UnknownFieldSet unknownFields) { - return super.mergeUnknownFields(unknownFields); - } - - // @@protoc_insertion_point(builder_scope:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema) - } - - // @@protoc_insertion_point(class_scope:google.cloud.bigquery.storage.v1alpha2.TableFieldSchema) - private static final com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema - DEFAULT_INSTANCE; - - static { - DEFAULT_INSTANCE = new com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema(); - } - - public static com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema - getDefaultInstance() { - return DEFAULT_INSTANCE; - } - - private static final com.google.protobuf.Parser<TableFieldSchema> PARSER = - new com.google.protobuf.AbstractParser<TableFieldSchema>() { - @java.lang.Override - public TableFieldSchema parsePartialFrom( - com.google.protobuf.CodedInputStream input, - com.google.protobuf.ExtensionRegistryLite extensionRegistry) - throws com.google.protobuf.InvalidProtocolBufferException { - return new TableFieldSchema(input, extensionRegistry); - } - }; - - public static com.google.protobuf.Parser<TableFieldSchema> parser() { - return PARSER; - } - - @java.lang.Override - public com.google.protobuf.Parser<TableFieldSchema> getParserForType() { - return PARSER; - } - - @java.lang.Override - public com.google.cloud.bigquery.storage.v1alpha2.Table.TableFieldSchema - getDefaultInstanceForType() { - return DEFAULT_INSTANCE; - } - } - - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_fieldAccessorTable; - private static final com.google.protobuf.Descriptors.Descriptor - internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_descriptor; - private static final com.google.protobuf.GeneratedMessageV3.FieldAccessorTable - internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_fieldAccessorTable; - - public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() { - return descriptor; - } - - private static com.google.protobuf.Descriptors.FileDescriptor descriptor; - - static { - java.lang.String[] descriptorData = { - "\n2google/cloud/bigquery/storage/v1alpha2" - + "/table.proto\022&google.cloud.bigquery.stor" - + "age.v1alpha2\032\037google/api/field_behavior." - + "proto\"W\n\013TableSchema\022H\n\006fields\030\001 \003(\01328.g" - + "oogle.cloud.bigquery.storage.v1alpha2.Ta" - + "bleFieldSchema\"\252\004\n\020TableFieldSchema\022\021\n\004n" - + "ame\030\001 \001(\tB\003\340A\002\022P\n\004type\030\002 \001(\0162=.google.cl" - + "oud.bigquery.storage.v1alpha2.TableField" - + "Schema.TypeB\003\340A\002\022P\n\004mode\030\003 \001(\0162=.google."
- + "cloud.bigquery.storage.v1alpha2.TableFie" - + "ldSchema.ModeB\003\340A\001\022M\n\006fields\030\004 \003(\01328.goo" - + "gle.cloud.bigquery.storage.v1alpha2.Tabl" - + "eFieldSchemaB\003\340A\001\022\030\n\013description\030\006 \001(\tB\003" - + "\340A\001\"\255\001\n\004Type\022\024\n\020TYPE_UNSPECIFIED\020\000\022\n\n\006ST" - + "RING\020\001\022\t\n\005INT64\020\002\022\n\n\006DOUBLE\020\003\022\n\n\006STRUCT\020" - + "\004\022\t\n\005BYTES\020\005\022\010\n\004BOOL\020\006\022\r\n\tTIMESTAMP\020\007\022\010\n" - + "\004DATE\020\010\022\010\n\004TIME\020\t\022\014\n\010DATETIME\020\n\022\r\n\tGEOGR" - + "APHY\020\013\022\013\n\007NUMERIC\020\014\"F\n\004Mode\022\024\n\020MODE_UNSP" - + "ECIFIED\020\000\022\014\n\010NULLABLE\020\001\022\014\n\010REQUIRED\020\002\022\014\n" - + "\010REPEATED\020\003B{\n*com.google.cloud.bigquery" - + ".storage.v1alpha2ZMgoogle.golang.org/gen" - + "proto/googleapis/cloud/bigquery/storage/" - + "v1alpha2;storageb\006proto3" - }; - descriptor = - com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom( - descriptorData, - new com.google.protobuf.Descriptors.FileDescriptor[] { - com.google.api.FieldBehaviorProto.getDescriptor(), - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_descriptor = - getDescriptor().getMessageTypes().get(0); - internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_TableSchema_descriptor, - new java.lang.String[] { - "Fields", - }); - internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_descriptor = - getDescriptor().getMessageTypes().get(1); - internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_fieldAccessorTable = - new com.google.protobuf.GeneratedMessageV3.FieldAccessorTable( - internal_static_google_cloud_bigquery_storage_v1alpha2_TableFieldSchema_descriptor, - new java.lang.String[] { - "Name", "Type", "Mode", "Fields", "Description", - }); - com.google.protobuf.ExtensionRegistry registry = - com.google.protobuf.ExtensionRegistry.newInstance(); - registry.add(com.google.api.FieldBehaviorProto.fieldBehavior); - com.google.protobuf.Descriptors.FileDescriptor.internalUpdateFileDescriptor( - descriptor, registry); - com.google.api.FieldBehaviorProto.getDescriptor(); - } - - // @@protoc_insertion_point(outer_class_scope) -} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/TableName.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/TableName.java deleted file mode 100644 index 432be73699..0000000000 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/TableName.java +++ /dev/null @@ -1,217 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. 
- */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.pathtemplate.PathTemplate; -import com.google.api.resourcenames.ResourceName; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import javax.annotation.Generated; - -// AUTO-GENERATED DOCUMENTATION AND CLASS. -@Generated("by gapic-generator-java") -public class TableName implements ResourceName { - private static final PathTemplate PROJECT_DATASET_TABLE = - PathTemplate.createWithoutUrlEncoding("projects/{project}/datasets/{dataset}/tables/{table}"); - private volatile Map<String, String> fieldValuesMap; - private final String project; - private final String dataset; - private final String table; - - @Deprecated - protected TableName() { - project = null; - dataset = null; - table = null; - } - - private TableName(Builder builder) { - project = Preconditions.checkNotNull(builder.getProject()); - dataset = Preconditions.checkNotNull(builder.getDataset()); - table = Preconditions.checkNotNull(builder.getTable()); - } - - public String getProject() { - return project; - } - - public String getDataset() { - return dataset; - } - - public String getTable() { - return table; - } - - public static Builder newBuilder() { - return new Builder(); - } - - public Builder toBuilder() { - return new Builder(this); - } - - public static TableName of(String project, String dataset, String table) { - return newBuilder().setProject(project).setDataset(dataset).setTable(table).build(); - } - - public static String format(String project, String dataset, String table) { - return newBuilder().setProject(project).setDataset(dataset).setTable(table).build().toString(); - } - - public static TableName parse(String formattedString) { - if (formattedString.isEmpty()) { - return null; - } - Map<String, String> matchMap = - PROJECT_DATASET_TABLE.validatedMatch( - formattedString, "TableName.parse: formattedString not in valid format"); - return of(matchMap.get("project"), matchMap.get("dataset"), matchMap.get("table")); - } - - public static List<TableName> parseList(List<String> formattedStrings) { - List<TableName> list = new ArrayList<>(formattedStrings.size()); - for (String formattedString : formattedStrings) { - list.add(parse(formattedString)); - } - return list; - } - - public static List<String> toStringList(List<TableName> values) { - List<String> list = new ArrayList<>(values.size()); - for (TableName value : values) { - if (value == null) { - list.add(""); - } else { - list.add(value.toString()); - } - } - return list; - } - - public static boolean isParsableFrom(String formattedString) { - return PROJECT_DATASET_TABLE.matches(formattedString); - } - - @Override - public Map<String, String> getFieldValuesMap() { - if (fieldValuesMap == null) { - synchronized (this) { - if (fieldValuesMap == null) { - ImmutableMap.Builder<String, String> fieldMapBuilder = ImmutableMap.builder(); - if (project != null) { - fieldMapBuilder.put("project", project); - } - if (dataset != null) { - fieldMapBuilder.put("dataset", dataset); - } - if (table != null) { - fieldMapBuilder.put("table", table); - } - fieldValuesMap = fieldMapBuilder.build(); - } - } - } - return fieldValuesMap; - } - - public String getFieldValue(String fieldName) { - return getFieldValuesMap().get(fieldName); - } - - @Override - public String toString() { - return PROJECT_DATASET_TABLE.instantiate( - "project", project, "dataset", dataset, "table", table); - } - - @Override - public boolean equals(Object o) { - if (o == this) { - return true;
- } - if (o != null && getClass() == o.getClass()) { - TableName that = ((TableName) o); - return Objects.equals(this.project, that.project) - && Objects.equals(this.dataset, that.dataset) - && Objects.equals(this.table, that.table); - } - return false; - } - - @Override - public int hashCode() { - int h = 1; - h *= 1000003; - h ^= Objects.hashCode(project); - h *= 1000003; - h ^= Objects.hashCode(dataset); - h *= 1000003; - h ^= Objects.hashCode(table); - return h; - } - - /** Builder for projects/{project}/datasets/{dataset}/tables/{table}. */ - public static class Builder { - private String project; - private String dataset; - private String table; - - protected Builder() {} - - public String getProject() { - return project; - } - - public String getDataset() { - return dataset; - } - - public String getTable() { - return table; - } - - public Builder setProject(String project) { - this.project = project; - return this; - } - - public Builder setDataset(String dataset) { - this.dataset = dataset; - return this; - } - - public Builder setTable(String table) { - this.table = table; - return this; - } - - private Builder(TableName tableName) { - project = tableName.project; - dataset = tableName.dataset; - table = tableName.table; - } - - public TableName build() { - return new TableName(this); - } - } -} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/WriteStreamName.java b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/WriteStreamName.java deleted file mode 100644 index c21a1af18e..0000000000 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/WriteStreamName.java +++ /dev/null @@ -1,257 +0,0 @@ -/* - * Copyright 2021 Google LLC - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. - * You may obtain a copy of the License at - * - * https://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - -package com.google.cloud.bigquery.storage.v1alpha2; - -import com.google.api.pathtemplate.PathTemplate; -import com.google.api.resourcenames.ResourceName; -import com.google.common.base.Preconditions; -import com.google.common.collect.ImmutableMap; -import java.util.ArrayList; -import java.util.List; -import java.util.Map; -import java.util.Objects; -import javax.annotation.Generated; - -// AUTO-GENERATED DOCUMENTATION AND CLASS.
-@Generated("by gapic-generator-java") -public class WriteStreamName implements ResourceName { - private static final PathTemplate PROJECT_DATASET_TABLE_STREAM = - PathTemplate.createWithoutUrlEncoding( - "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}"); - private volatile Map fieldValuesMap; - private final String project; - private final String dataset; - private final String table; - private final String stream; - - @Deprecated - protected WriteStreamName() { - project = null; - dataset = null; - table = null; - stream = null; - } - - private WriteStreamName(Builder builder) { - project = Preconditions.checkNotNull(builder.getProject()); - dataset = Preconditions.checkNotNull(builder.getDataset()); - table = Preconditions.checkNotNull(builder.getTable()); - stream = Preconditions.checkNotNull(builder.getStream()); - } - - public String getProject() { - return project; - } - - public String getDataset() { - return dataset; - } - - public String getTable() { - return table; - } - - public String getStream() { - return stream; - } - - public static Builder newBuilder() { - return new Builder(); - } - - public Builder toBuilder() { - return new Builder(this); - } - - public static WriteStreamName of(String project, String dataset, String table, String stream) { - return newBuilder() - .setProject(project) - .setDataset(dataset) - .setTable(table) - .setStream(stream) - .build(); - } - - public static String format(String project, String dataset, String table, String stream) { - return newBuilder() - .setProject(project) - .setDataset(dataset) - .setTable(table) - .setStream(stream) - .build() - .toString(); - } - - public static WriteStreamName parse(String formattedString) { - if (formattedString.isEmpty()) { - return null; - } - Map matchMap = - PROJECT_DATASET_TABLE_STREAM.validatedMatch( - formattedString, "WriteStreamName.parse: formattedString not in valid format"); - return of( - matchMap.get("project"), - matchMap.get("dataset"), - matchMap.get("table"), - matchMap.get("stream")); - } - - public static List parseList(List formattedStrings) { - List list = new ArrayList<>(formattedStrings.size()); - for (String formattedString : formattedStrings) { - list.add(parse(formattedString)); - } - return list; - } - - public static List toStringList(List values) { - List list = new ArrayList<>(values.size()); - for (WriteStreamName value : values) { - if (value == null) { - list.add(""); - } else { - list.add(value.toString()); - } - } - return list; - } - - public static boolean isParsableFrom(String formattedString) { - return PROJECT_DATASET_TABLE_STREAM.matches(formattedString); - } - - @Override - public Map getFieldValuesMap() { - if (fieldValuesMap == null) { - synchronized (this) { - if (fieldValuesMap == null) { - ImmutableMap.Builder fieldMapBuilder = ImmutableMap.builder(); - if (project != null) { - fieldMapBuilder.put("project", project); - } - if (dataset != null) { - fieldMapBuilder.put("dataset", dataset); - } - if (table != null) { - fieldMapBuilder.put("table", table); - } - if (stream != null) { - fieldMapBuilder.put("stream", stream); - } - fieldValuesMap = fieldMapBuilder.build(); - } - } - } - return fieldValuesMap; - } - - public String getFieldValue(String fieldName) { - return getFieldValuesMap().get(fieldName); - } - - @Override - public String toString() { - return PROJECT_DATASET_TABLE_STREAM.instantiate( - "project", project, "dataset", dataset, "table", table, "stream", stream); - } - - @Override - public boolean equals(Object o) { - if 
(o == this) { - return true; - } - if (o != null && getClass() == o.getClass()) { - WriteStreamName that = ((WriteStreamName) o); - return Objects.equals(this.project, that.project) - && Objects.equals(this.dataset, that.dataset) - && Objects.equals(this.table, that.table) - && Objects.equals(this.stream, that.stream); - } - return false; - } - - @Override - public int hashCode() { - int h = 1; - h *= 1000003; - h ^= Objects.hashCode(project); - h *= 1000003; - h ^= Objects.hashCode(dataset); - h *= 1000003; - h ^= Objects.hashCode(table); - h *= 1000003; - h ^= Objects.hashCode(stream); - return h; - } - - /** Builder for projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}. */ - public static class Builder { - private String project; - private String dataset; - private String table; - private String stream; - - protected Builder() {} - - public String getProject() { - return project; - } - - public String getDataset() { - return dataset; - } - - public String getTable() { - return table; - } - - public String getStream() { - return stream; - } - - public Builder setProject(String project) { - this.project = project; - return this; - } - - public Builder setDataset(String dataset) { - this.dataset = dataset; - return this; - } - - public Builder setTable(String table) { - this.table = table; - return this; - } - - public Builder setStream(String stream) { - this.stream = stream; - return this; - } - - private Builder(WriteStreamName writeStreamName) { - project = writeStreamName.project; - dataset = writeStreamName.dataset; - table = writeStreamName.table; - stream = writeStreamName.stream; - } - - public WriteStreamName build() { - return new WriteStreamName(this); - } - } -} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto deleted file mode 100644 index 63bce1fc9a..0000000000 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto +++ /dev/null @@ -1,42 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1alpha2; - -import "google/protobuf/descriptor.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage"; -option java_outer_classname = "ProtoBufProto"; -option java_package = "com.google.cloud.bigquery.storage.v1alpha2"; - -// Protobuf schema is an API presentation of the proto buffer schema. -message ProtoSchema { - // Descriptor for the input message. The descriptor has to be self-contained, - // including all the nested types, except for proto buffer well known types - // (https://developers.google.com/protocol-buffers/docs/reference/google.protobuf) - // and zetasql public protos - // (https://github.com/google/zetasql/tree/master/zetasql/public/proto).
- google.protobuf.DescriptorProto proto_descriptor = 1; -} - -// Protobuf rows. -message ProtoRows { - // A sequence of rows serialized as a Protocol Buffer. - // - // See https://developers.google.com/protocol-buffers/docs/overview for more - // information on deserializing this field. - repeated bytes serialized_rows = 1; -} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto deleted file mode 100644 index 03c846313d..0000000000 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto +++ /dev/null @@ -1,287 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1alpha2; - -import "google/api/annotations.proto"; -import "google/api/client.proto"; -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/cloud/bigquery/storage/v1alpha2/protobuf.proto"; -import "google/cloud/bigquery/storage/v1alpha2/stream.proto"; -import "google/cloud/bigquery/storage/v1alpha2/table.proto"; -import "google/protobuf/empty.proto"; -import "google/protobuf/timestamp.proto"; -import "google/protobuf/wrappers.proto"; -import "google/rpc/status.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage"; -option java_package = "com.google.cloud.bigquery.storage.v1alpha2"; - -// Request message for `CreateWriteStream`. -message CreateWriteStreamRequest { - // Required. Reference to the table to which the stream belongs, in the format - // of `projects/{project}/datasets/{dataset}/tables/{table}`. - string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquerystorage.googleapis.com/Table" - } - ]; - - // Required. Stream to be created. - WriteStream write_stream = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// BigQuery Write API. -// -// The Write API can be used to write data to BigQuery. -service BigQueryWrite { - option deprecated = true; - option (google.api.default_host) = "bigquerystorage.googleapis.com"; - option (google.api.oauth_scopes) = - "https://www.googleapis.com/auth/bigquery," - "https://www.googleapis.com/auth/bigquery.insertdata," - "https://www.googleapis.com/auth/cloud-platform"; - - // Creates a write stream to the given table. - // Additionally, every table has a special COMMITTED stream named '_default' - // to which data can be written. This stream doesn't need to be created using - // CreateWriteStream. It is a stream that can be used simultaneously by any - // number of clients. Data written to this stream is considered committed as - // soon as an acknowledgement is received. 
- rpc CreateWriteStream(CreateWriteStreamRequest) returns (WriteStream) { - option (google.api.http) = { - post: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}" - body: "write_stream" - }; - option (google.api.method_signature) = "parent,write_stream"; - } - - // Appends data to the given stream. - // - // If `offset` is specified, the `offset` is checked against the end of the - // stream. The server returns `OUT_OF_RANGE` in `AppendRowsResponse` if an - // attempt is made to append to an offset beyond the current end of the stream - // or `ALREADY_EXISTS` if the user provides an `offset` that has already been - // written to. The user can retry with an adjusted offset within the same RPC - // stream. If `offset` is not specified, the append happens at the end of the - // stream. - // - // The response contains the offset at which the append happened. Responses - // are received in the same order in which requests are sent. There will be - // one response for each successful request. If the `offset` is not set in the - // response, it means the append did not happen due to some error. If one request - // fails, all the subsequent requests will also fail until a successful request - // is made again. - // - // If the stream is of `PENDING` type, data will only be available for read - // operations after the stream is committed. - rpc AppendRows(stream AppendRowsRequest) returns (stream AppendRowsResponse) { - option (google.api.http) = { - post: "/v1alpha2/{write_stream=projects/*/datasets/*/tables/*/streams/*}" - body: "*" - }; - option (google.api.method_signature) = "write_stream"; - } - - // Gets a write stream. - rpc GetWriteStream(GetWriteStreamRequest) returns (WriteStream) { - option (google.api.http) = { - post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}" - body: "*" - }; - option (google.api.method_signature) = "name"; - } - - // Finalizes a write stream so that no new data can be appended to the - // stream. Finalize is not supported on the '_default' stream. - rpc FinalizeWriteStream(FinalizeWriteStreamRequest) returns (FinalizeWriteStreamResponse) { - option (google.api.http) = { - post: "/v1alpha2/{name=projects/*/datasets/*/tables/*/streams/*}" - body: "*" - }; - option (google.api.method_signature) = "name"; - } - - // Atomically commits a group of `PENDING` streams that belong to the same - // `parent` table. - // Streams must be finalized before commit and cannot be committed multiple - // times. Once a stream is committed, data in the stream becomes available - // for read operations. - rpc BatchCommitWriteStreams(BatchCommitWriteStreamsRequest) returns (BatchCommitWriteStreamsResponse) { - option (google.api.http) = { - get: "/v1alpha2/{parent=projects/*/datasets/*/tables/*}" - }; - option (google.api.method_signature) = "parent"; - } - - // Flushes rows to a BUFFERED stream. - // If users are appending rows to a BUFFERED stream, a flush operation is - // required in order for the rows to become available for reading. A - // flush operation flushes a BUFFERED stream, from any previously flushed - // offset, up to the offset specified in the request. - // Flush is not supported on the _default stream, since it is not BUFFERED. - rpc FlushRows(FlushRowsRequest) returns (FlushRowsResponse) { - option (google.api.http) = { - post: "/v1alpha2/{write_stream=projects/*/datasets/*/tables/*/streams/*}" - body: "*" - }; - option (google.api.method_signature) = "write_stream"; - } -} - -// Request message for `AppendRows`.
-message AppendRowsRequest { - message ProtoData { - // Proto schema used to serialize the data. - ProtoSchema writer_schema = 1; - - // Serialized row data in protobuf message format. - ProtoRows rows = 2; - } - - // Required. The stream that is the target of the append operation. This value must be - // specified for the initial request. If subsequent requests specify the - // stream name, it must be equal to the value provided in the first request. - // To write to the _default stream, populate this field with a string in the - // format `projects/{project}/datasets/{dataset}/tables/{table}/_default`. - string write_stream = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquerystorage.googleapis.com/WriteStream" - } - ]; - - // Optional. If present, the write is only performed if the next append offset is the same - // as the provided value. If not present, the write is performed at the - // current end of stream. Specifying a value for this field is not allowed - // when calling AppendRows for the '_default' stream. - google.protobuf.Int64Value offset = 2 [(google.api.field_behavior) = OPTIONAL]; - - // Input rows. The `writer_schema` field must be specified in the initial - // request and, currently, it will be ignored if specified in following - // requests. Following requests must have data in the same format as the - // initial request. - oneof rows { - ProtoData proto_rows = 4; - } - - // Only the setting in the initial request is respected. If true, drop unknown input - // fields. Otherwise, the extra fields will cause the append to fail. The default - // value is false. - bool ignore_unknown_fields = 5; -} - -// Response message for `AppendRows`. -message AppendRowsResponse { - oneof response { - // The row offset at which the last append occurred. - int64 offset = 1; - - // Error in case of append failure. If set, it means rows are not accepted - // into the system. Users can retry or continue with other requests within - // the same connection. - // ALREADY_EXISTS: happens when an offset is specified; it means the row is - // already appended, and it is safe to ignore this error. - // OUT_OF_RANGE: happens when an offset is specified; it means the specified - // offset is beyond the end of the stream. - // INVALID_ARGUMENT: error caused by a malformed request or data. - // RESOURCE_EXHAUSTED: request rejected due to throttling. Only happens when - // appending without an offset. - // ABORTED: request processing is aborted because of prior failures; the request - // can be retried if the previous failure is fixed. - // INTERNAL: server side errors that can be retried. - google.rpc.Status error = 2; - } - - // If the backend detects a schema update, it is passed to the user so that the - // user can use it to input messages of the new type. It will be empty when there - // are no schema updates. - TableSchema updated_schema = 3; -} - -// Request message for `GetWriteStream`. -message GetWriteStreamRequest { - // Required. Name of the stream to get, in the form of - // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquerystorage.googleapis.com/WriteStream" - } - ]; -} - -// Request message for `BatchCommitWriteStreams`. -message BatchCommitWriteStreamsRequest { - // Required. Parent table that all the streams should belong to, in the form of - // `projects/{project}/datasets/{dataset}/tables/{table}`.
- string parent = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquerystorage.googleapis.com/Table" - } - ]; - - // Required. The group of streams that will be committed atomically. - repeated string write_streams = 2 [(google.api.field_behavior) = REQUIRED]; -} - -// Response message for `BatchCommitWriteStreams`. -message BatchCommitWriteStreamsResponse { - // The time at which streams were committed, with microsecond granularity. - google.protobuf.Timestamp commit_time = 1; -} - -// Request message for invoking `FinalizeWriteStream`. -message FinalizeWriteStreamRequest { - // Required. Name of the stream to finalize, in the form of - // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - string name = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquerystorage.googleapis.com/WriteStream" - } - ]; -} - -// Response message for `FinalizeWriteStream`. -message FinalizeWriteStreamResponse { - // Number of rows in the finalized stream. - int64 row_count = 1; -} - -// Request message for `FlushRows`. -message FlushRowsRequest { - // Required. The stream that is the target of the flush operation. - string write_stream = 1 [ - (google.api.field_behavior) = REQUIRED, - (google.api.resource_reference) = { - type: "bigquerystorage.googleapis.com/WriteStream" - } - ]; - - // Ending offset of the flush operation. Rows before this offset (including - // this offset) will be flushed. - int64 offset = 2; -} - -// Response message for `FlushRows`. -message FlushRowsResponse { - // The rows before this offset (including this offset) are flushed. - int64 offset = 1; -} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto deleted file mode 100644 index 2483e9315b..0000000000 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto +++ /dev/null @@ -1,77 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1alpha2; - -import "google/api/field_behavior.proto"; -import "google/api/resource.proto"; -import "google/cloud/bigquery/storage/v1alpha2/table.proto"; -import "google/protobuf/timestamp.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage"; -option java_package = "com.google.cloud.bigquery.storage.v1alpha2"; -option (google.api.resource_definition) = { - type: "bigquerystorage.googleapis.com/Table" - pattern: "projects/{project}/datasets/{dataset}/tables/{table}" -}; - -// Information about a single stream that gets data inside the storage system.
-message WriteStream { - option (google.api.resource) = { - type: "bigquerystorage.googleapis.com/WriteStream" - pattern: "projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}" - }; - - enum Type { - // Unknown type. - TYPE_UNSPECIFIED = 0; - - // Data will commit automatically and appear as soon as the write is - // acknowledged. - COMMITTED = 1; - - // Data is invisible until the stream is committed. - PENDING = 2; - - // Data is only visible up to the offset to which it was flushed. - BUFFERED = 3; - } - - // Output only. Name of the stream, in the form - // `projects/{project}/datasets/{dataset}/tables/{table}/streams/{stream}`. - string name = 1 [(google.api.field_behavior) = OUTPUT_ONLY]; - - Type type = 2 [(google.api.field_behavior) = IMMUTABLE]; - - // Output only. Create time of the stream. For the _default stream, this is the - // creation_time of the table. - google.protobuf.Timestamp create_time = 3 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. Commit time of the stream. - // If a stream is of `COMMITTED` type, then it will have a commit_time the same as - // `create_time`. If the stream is of `PENDING` type, commit_time being empty - // means it is not committed. - google.protobuf.Timestamp commit_time = 4 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // Output only. The schema of the destination table. It is only returned in the - // `CreateWriteStream` response. The caller should generate data that is - // compatible with this schema to send in the initial `AppendRowsRequest`. - // The table schema could go out of date during the lifetime of the stream. - TableSchema table_schema = 5 [(google.api.field_behavior) = OUTPUT_ONLY]; - - // ID set by the client to annotate its identity. - string external_id = 6; -} diff --git a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto b/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto deleted file mode 100644 index a3e7ad48a0..0000000000 --- a/proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto +++ /dev/null @@ -1,100 +0,0 @@ -// Copyright 2021 Google LLC -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -syntax = "proto3"; - -package google.cloud.bigquery.storage.v1alpha2; - -import "google/api/field_behavior.proto"; - -option go_package = "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1alpha2;storage"; -option java_package = "com.google.cloud.bigquery.storage.v1alpha2"; - -// Schema of a table. -message TableSchema { - // Describes the fields in a table.
- repeated TableFieldSchema fields = 1; -} - -// A field in TableSchema -message TableFieldSchema { - enum Type { - // Illegal value - TYPE_UNSPECIFIED = 0; - - // 64K, UTF8 - STRING = 1; - - // 64-bit signed - INT64 = 2; - - // 64-bit IEEE floating point - DOUBLE = 3; - - // Aggregate type - STRUCT = 4; - - // 64K, Binary - BYTES = 5; - - // 2-valued - BOOL = 6; - - // 64-bit signed usec since UTC epoch - TIMESTAMP = 7; - - // Civil date - Year, Month, Day - DATE = 8; - - // Civil time - Hour, Minute, Second, Microseconds - TIME = 9; - - // Combination of civil date and civil time - DATETIME = 10; - - // Geography object - GEOGRAPHY = 11; - - // Numeric value - NUMERIC = 12; - } - - enum Mode { - // Illegal value - MODE_UNSPECIFIED = 0; - - NULLABLE = 1; - - REQUIRED = 2; - - REPEATED = 3; - } - - // Required. The field name. The name must contain only letters (a-z, A-Z), - // numbers (0-9), or underscores (_), and must start with a letter or - // underscore. The maximum length is 128 characters. - string name = 1 [(google.api.field_behavior) = REQUIRED]; - - // Required. The field data type. - Type type = 2 [(google.api.field_behavior) = REQUIRED]; - - // Optional. The field mode. The default value is NULLABLE. - Mode mode = 3 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. Describes the nested schema fields if the type property is set to STRUCT. - repeated TableFieldSchema fields = 4 [(google.api.field_behavior) = OPTIONAL]; - - // Optional. The field description. The maximum length is 1,024 characters. - string description = 6 [(google.api.field_behavior) = OPTIONAL]; -} diff --git a/synth.metadata b/synth.metadata index 0eda7749cb..32d4bad22c 100644 --- a/synth.metadata +++ b/synth.metadata @@ -73,15 +73,6 @@ "generator": "bazel" } }, - { - "client": { - "source": "googleapis", - "apiName": "bigquery-storage", - "apiVersion": "v1alpha2", - "language": "java", - "generator": "bazel" - } - }, { "client": { "source": "googleapis", @@ -165,14 +156,6 @@ "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/BigQueryReadStubSettings.java", "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadCallableFactory.java", "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1/stub/GrpcBigQueryReadStub.java", - "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClient.java", - "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteSettings.java", - "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/gapic_metadata.json", - "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/package-info.java", - "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStub.java", - "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/BigQueryWriteStubSettings.java", - "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteCallableFactory.java", - "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/stub/GrpcBigQueryWriteStub.java", "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClient.java", "google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageSettings.java", 
"google-cloud-bigquerystorage/src/main/java/com/google/cloud/bigquery/storage/v1beta1/gapic_metadata.json", @@ -198,9 +181,6 @@ "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/BaseBigQueryReadClientTest.java", "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryRead.java", "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1/MockBigQueryReadImpl.java", - "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteClientTest.java", - "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWrite.java", - "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1alpha2/MockBigQueryWriteImpl.java", "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/BaseBigQueryStorageClientTest.java", "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorage.java", "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta1/MockBigQueryStorageImpl.java", @@ -211,7 +191,6 @@ "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWrite.java", "google-cloud-bigquerystorage/src/test/java/com/google/cloud/bigquery/storage/v1beta2/MockBigQueryWriteImpl.java", "grpc-google-cloud-bigquerystorage-v1/src/main/java/com/google/cloud/bigquery/storage/v1/BigQueryReadGrpc.java", - "grpc-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/BigQueryWriteGrpc.java", "grpc-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/BigQueryStorageGrpc.java", "grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryReadGrpc.java", "grpc-google-cloud-bigquerystorage-v1beta2/src/main/java/com/google/cloud/bigquery/storage/v1beta2/BigQueryWriteGrpc.java", @@ -258,16 +237,6 @@ "proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/avro.proto", "proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/storage.proto", "proto-google-cloud-bigquerystorage-v1/src/main/proto/google/cloud/bigquery/storage/v1/stream.proto", - "proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/ProtoBufProto.java", - "proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Storage.java", - "proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Stream.java", - "proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/Table.java", - "proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/TableName.java", - "proto-google-cloud-bigquerystorage-v1alpha2/src/main/java/com/google/cloud/bigquery/storage/v1alpha2/WriteStreamName.java", - "proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/protobuf.proto", - "proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/storage.proto", - "proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/stream.proto", - "proto-google-cloud-bigquerystorage-v1alpha2/src/main/proto/google/cloud/bigquery/storage/v1alpha2/table.proto", 
"proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ArrowProto.java", "proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/AvroProto.java", "proto-google-cloud-bigquerystorage-v1beta1/src/main/java/com/google/cloud/bigquery/storage/v1beta1/ProjectName.java", diff --git a/synth.py b/synth.py index c4853154bf..f02704dc7d 100644 --- a/synth.py +++ b/synth.py @@ -23,7 +23,7 @@ gapic = gcp.GAPICGenerator() service = 'bigquerystorage' -versions = ['v1beta1', 'v1beta2', 'v1alpha2', 'v1'] +versions = ['v1beta1', 'v1beta2', 'v1'] for version in versions: java.bazel_library( diff --git a/versions.txt b/versions.txt index 70b027a273..87cec4a923 100644 --- a/versions.txt +++ b/versions.txt @@ -1,11 +1,9 @@ # Format: # module:released-version:current-version -proto-google-cloud-bigquerystorage-v1alpha2:0.118.1:0.118.2-SNAPSHOT proto-google-cloud-bigquerystorage-v1beta1:0.118.1:0.118.2-SNAPSHOT proto-google-cloud-bigquerystorage-v1beta2:0.118.1:0.118.2-SNAPSHOT proto-google-cloud-bigquerystorage-v1:1.18.1:1.18.2-SNAPSHOT -grpc-google-cloud-bigquerystorage-v1alpha2:0.118.1:0.118.2-SNAPSHOT grpc-google-cloud-bigquerystorage-v1beta1:0.118.1:0.118.2-SNAPSHOT grpc-google-cloud-bigquerystorage-v1beta2:0.118.1:0.118.2-SNAPSHOT grpc-google-cloud-bigquerystorage-v1:1.18.1:1.18.2-SNAPSHOT