Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

docs: Align session length with public documentation; feat: Expose estimated bytes that a session will scan. #1310

Merged
merged 2 commits on Sep 17, 2021
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Jump to
Jump to file
Failed to load files.
Diff view
Diff view
Expand Up @@ -163,7 +163,7 @@ public BigQueryReadStub getStub() {
* limits are enforced based on the number of pre-filtered rows, so some filters can lead to
* lopsided assignments.
*
* <p>Read sessions automatically expire 24 hours after they are created and do not require manual
* <p>Read sessions automatically expire 6 hours after they are created and do not require manual
* clean-up by the caller.
*
* <p>Sample code:
Expand Down Expand Up @@ -216,7 +216,7 @@ public final ReadSession createReadSession(
* limits are enforced based on the number of pre-filtered rows, so some filters can lead to
* lopsided assignments.
*
* <p>Read sessions automatically expire 24 hours after they are created and do not require manual
* <p>Read sessions automatically expire 6 hours after they are created and do not require manual
* clean-up by the caller.
*
* <p>Sample code:
Expand Down Expand Up @@ -269,7 +269,7 @@ public final ReadSession createReadSession(
* limits are enforced based on the number of pre-filtered rows, so some filters can lead to
* lopsided assignments.
*
* <p>Read sessions automatically expire 24 hours after they are created and do not require manual
* <p>Read sessions automatically expire 6 hours after they are created and do not require manual
* clean-up by the caller.
*
* <p>Sample code:
Expand Down Expand Up @@ -309,7 +309,7 @@ public final ReadSession createReadSession(CreateReadSessionRequest request) {
* limits are enforced based on the number of pre-filtered rows, so some filters can lead to
* lopsided assignments.
*
* <p>Read sessions automatically expire 24 hours after they are created and do not require manual
* <p>Read sessions automatically expire 6 hours after they are created and do not require manual
* clean-up by the caller.
*
* <p>Sample code:
Expand Down
Expand Up @@ -92,6 +92,7 @@ public void createReadSessionTest() throws Exception {
.setTableModifiers(ReadSession.TableModifiers.newBuilder().build())
.setReadOptions(ReadSession.TableReadOptions.newBuilder().build())
.addAllStreams(new ArrayList<ReadStream>())
.setEstimatedTotalBytesScanned(452788190)
.build();
mockBigQueryRead.addResponse(expectedResponse);

Expand Down Expand Up @@ -142,6 +143,7 @@ public void createReadSessionTest2() throws Exception {
.setTableModifiers(ReadSession.TableModifiers.newBuilder().build())
.setReadOptions(ReadSession.TableReadOptions.newBuilder().build())
.addAllStreams(new ArrayList<ReadStream>())
.setEstimatedTotalBytesScanned(452788190)
.build();
mockBigQueryRead.addResponse(expectedResponse);

Expand Down
Expand Up @@ -244,7 +244,7 @@ public abstract static class BigQueryReadImplBase implements io.grpc.BindableSer
* each stream will return the same number or rows. Additionally, the
* limits are enforced based on the number of pre-filtered rows, so some
* filters can lead to lopsided assignments.
* Read sessions automatically expire 24 hours after they are created and do
* Read sessions automatically expire 6 hours after they are created and do
* not require manual clean-up by the caller.
* </pre>
*/
Expand Down Expand Up @@ -365,7 +365,7 @@ protected BigQueryReadStub build(io.grpc.Channel channel, io.grpc.CallOptions ca
* each stream will return the same number or rows. Additionally, the
* limits are enforced based on the number of pre-filtered rows, so some
* filters can lead to lopsided assignments.
* Read sessions automatically expire 24 hours after they are created and do
* Read sessions automatically expire 6 hours after they are created and do
* not require manual clean-up by the caller.
* </pre>
*/
Expand Down Expand Up @@ -465,7 +465,7 @@ protected BigQueryReadBlockingStub build(
* each stream will return the same number or rows. Additionally, the
* limits are enforced based on the number of pre-filtered rows, so some
* filters can lead to lopsided assignments.
* Read sessions automatically expire 24 hours after they are created and do
* Read sessions automatically expire 6 hours after they are created and do
* not require manual clean-up by the caller.
* </pre>
*/
Expand Down Expand Up @@ -555,7 +555,7 @@ protected BigQueryReadFutureStub build(
* each stream will return the same number or rows. Additionally, the
* limits are enforced based on the number of pre-filtered rows, so some
* filters can lead to lopsided assignments.
* Read sessions automatically expire 24 hours after they are created and do
* Read sessions automatically expire 6 hours after they are created and do
* not require manual clean-up by the caller.
* </pre>
*/
Expand Down
Expand Up @@ -192,6 +192,11 @@ private ReadSession(
com.google.cloud.bigquery.storage.v1.ReadStream.parser(), extensionRegistry));
break;
}
case 96:
{
estimatedTotalBytesScanned_ = input.readInt64();
break;
}
default:
{
if (!parseUnknownField(input, unknownFields, extensionRegistry, tag)) {
Expand Down Expand Up @@ -3161,6 +3166,27 @@ public com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder getStreamsOrBuil
return streams_.get(index);
}

// Wire field number for estimated_total_bytes_scanned; must match the .proto
// definition (field 12) and the serialized descriptor in StreamProto.
public static final int ESTIMATED_TOTAL_BYTES_SCANNED_FIELD_NUMBER = 12;
// Backing store for the field; proto3 int64 defaults to 0L when unset.
private long estimatedTotalBytesScanned_;
/**
 *
 *
 * <pre>
 * Output only. An estimate on the number of bytes this session will scan when
 * all streams are completely consumed. This estimate is based on
 * metadata from the table which might be incomplete or stale.
 * </pre>
 *
 * <code>int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY];
 * </code>
 *
 * @return The estimatedTotalBytesScanned.
 */
@java.lang.Override
public long getEstimatedTotalBytesScanned() {
  return estimatedTotalBytesScanned_;
}

private byte memoizedIsInitialized = -1;

@java.lang.Override
Expand Down Expand Up @@ -3203,6 +3229,9 @@ public void writeTo(com.google.protobuf.CodedOutputStream output) throws java.io
for (int i = 0; i < streams_.size(); i++) {
output.writeMessage(10, streams_.get(i));
}
if (estimatedTotalBytesScanned_ != 0L) {
output.writeInt64(12, estimatedTotalBytesScanned_);
}
unknownFields.writeTo(output);
}

Expand Down Expand Up @@ -3244,6 +3273,10 @@ public int getSerializedSize() {
for (int i = 0; i < streams_.size(); i++) {
size += com.google.protobuf.CodedOutputStream.computeMessageSize(10, streams_.get(i));
}
if (estimatedTotalBytesScanned_ != 0L) {
size +=
com.google.protobuf.CodedOutputStream.computeInt64Size(12, estimatedTotalBytesScanned_);
}
size += unknownFields.getSerializedSize();
memoizedSize = size;
return size;
Expand Down Expand Up @@ -3276,6 +3309,7 @@ public boolean equals(final java.lang.Object obj) {
if (!getReadOptions().equals(other.getReadOptions())) return false;
}
if (!getStreamsList().equals(other.getStreamsList())) return false;
if (getEstimatedTotalBytesScanned() != other.getEstimatedTotalBytesScanned()) return false;
if (!getSchemaCase().equals(other.getSchemaCase())) return false;
switch (schemaCase_) {
case 4:
Expand Down Expand Up @@ -3320,6 +3354,8 @@ public int hashCode() {
hash = (37 * hash) + STREAMS_FIELD_NUMBER;
hash = (53 * hash) + getStreamsList().hashCode();
}
hash = (37 * hash) + ESTIMATED_TOTAL_BYTES_SCANNED_FIELD_NUMBER;
hash = (53 * hash) + com.google.protobuf.Internal.hashLong(getEstimatedTotalBytesScanned());
switch (schemaCase_) {
case 4:
hash = (37 * hash) + AVRO_SCHEMA_FIELD_NUMBER;
Expand Down Expand Up @@ -3509,6 +3545,8 @@ public Builder clear() {
} else {
streamsBuilder_.clear();
}
estimatedTotalBytesScanned_ = 0L;

schemaCase_ = 0;
schema_ = null;
return this;
Expand Down Expand Up @@ -3580,6 +3618,7 @@ public com.google.cloud.bigquery.storage.v1.ReadSession buildPartial() {
} else {
result.streams_ = streamsBuilder_.build();
}
result.estimatedTotalBytesScanned_ = estimatedTotalBytesScanned_;
result.schemaCase_ = schemaCase_;
onBuilt();
return result;
Expand Down Expand Up @@ -3678,6 +3717,9 @@ public Builder mergeFrom(com.google.cloud.bigquery.storage.v1.ReadSession other)
}
}
}
if (other.getEstimatedTotalBytesScanned() != 0L) {
setEstimatedTotalBytesScanned(other.getEstimatedTotalBytesScanned());
}
switch (other.getSchemaCase()) {
case AVRO_SCHEMA:
{
Expand Down Expand Up @@ -5622,6 +5664,67 @@ public com.google.cloud.bigquery.storage.v1.ReadStream.Builder addStreamsBuilder
return streamsBuilder_;
}

// Builder-side copy of estimated_total_bytes_scanned; transferred to the
// message in buildPartial() and reset to 0L by clear().
private long estimatedTotalBytesScanned_;
/**
 *
 *
 * <pre>
 * Output only. An estimate on the number of bytes this session will scan when
 * all streams are completely consumed. This estimate is based on
 * metadata from the table which might be incomplete or stale.
 * </pre>
 *
 * <code>int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY];
 * </code>
 *
 * @return The estimatedTotalBytesScanned.
 */
@java.lang.Override
public long getEstimatedTotalBytesScanned() {
  return estimatedTotalBytesScanned_;
}
/**
 *
 *
 * <pre>
 * Output only. An estimate on the number of bytes this session will scan when
 * all streams are completely consumed. This estimate is based on
 * metadata from the table which might be incomplete or stale.
 * </pre>
 *
 * <code>int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY];
 * </code>
 *
 * @param value The estimatedTotalBytesScanned to set.
 * @return This builder for chaining.
 */
public Builder setEstimatedTotalBytesScanned(long value) {

  estimatedTotalBytesScanned_ = value;
  onChanged();
  return this;
}
/**
 *
 *
 * <pre>
 * Output only. An estimate on the number of bytes this session will scan when
 * all streams are completely consumed. This estimate is based on
 * metadata from the table which might be incomplete or stale.
 * </pre>
 *
 * <code>int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY];
 * </code>
 *
 * @return This builder for chaining.
 */
public Builder clearEstimatedTotalBytesScanned() {
  // Restores the proto3 default (0L) so the field is treated as unset.
  estimatedTotalBytesScanned_ = 0L;
  onChanged();
  return this;
}

@java.lang.Override
public final Builder setUnknownFields(final com.google.protobuf.UnknownFieldSet unknownFields) {
return super.setUnknownFields(unknownFields);
Expand Down
Expand Up @@ -402,5 +402,21 @@ public interface ReadSessionOrBuilder
*/
com.google.cloud.bigquery.storage.v1.ReadStreamOrBuilder getStreamsOrBuilder(int index);

/**
 *
 *
 * <pre>
 * Output only. An estimate on the number of bytes this session will scan when
 * all streams are completely consumed. This estimate is based on
 * metadata from the table which might be incomplete or stale.
 * </pre>
 *
 * <code>int64 estimated_total_bytes_scanned = 12 [(.google.api.field_behavior) = OUTPUT_ONLY];
 * </code>
 *
 * @return The estimatedTotalBytesScanned.
 */
// Implemented by both ReadSession and ReadSession.Builder; returns 0 when the
// server did not populate the field (proto3 scalar default).
long getEstimatedTotalBytesScanned();

public com.google.cloud.bigquery.storage.v1.ReadSession.SchemaCase getSchemaCase();
}
Expand Up @@ -58,7 +58,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "le/api/resource.proto\032,google/cloud/bigq"
+ "uery/storage/v1/arrow.proto\032+google/clou"
+ "d/bigquery/storage/v1/avro.proto\032\037google"
+ "/protobuf/timestamp.proto\"\370\007\n\013ReadSessio"
+ "/protobuf/timestamp.proto\"\244\010\n\013ReadSessio"
+ "n\022\021\n\004name\030\001 \001(\tB\003\340A\003\0224\n\013expire_time\030\002 \001("
+ "\0132\032.google.protobuf.TimestampB\003\340A\003\022F\n\013da"
+ "ta_format\030\003 \001(\0162,.google.cloud.bigquery."
Expand All @@ -74,27 +74,28 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
+ "bigquery.storage.v1.ReadSession.TableRea"
+ "dOptionsB\003\340A\001\022B\n\007streams\030\n \003(\0132,.google."
+ "cloud.bigquery.storage.v1.ReadStreamB\003\340A"
+ "\003\032C\n\016TableModifiers\0221\n\rsnapshot_time\030\001 \001"
+ "(\0132\032.google.protobuf.Timestamp\032\324\001\n\020Table"
+ "ReadOptions\022\027\n\017selected_fields\030\001 \003(\t\022\027\n\017"
+ "row_restriction\030\002 \001(\t\022g\n\033arrow_serializa"
+ "tion_options\030\003 \001(\0132;.google.cloud.bigque"
+ "ry.storage.v1.ArrowSerializationOptionsB"
+ "\003\340A\001H\000B%\n#output_format_serialization_op"
+ "tions:k\352Ah\n*bigquerystorage.googleapis.c"
+ "om/ReadSession\022:projects/{project}/locat"
+ "ions/{location}/sessions/{session}B\010\n\006sc"
+ "hema\"\234\001\n\nReadStream\022\021\n\004name\030\001 \001(\tB\003\340A\003:{"
+ "\352Ax\n)bigquerystorage.googleapis.com/Read"
+ "Stream\022Kprojects/{project}/locations/{lo"
+ "cation}/sessions/{session}/streams/{stre"
+ "am}*>\n\nDataFormat\022\033\n\027DATA_FORMAT_UNSPECI"
+ "FIED\020\000\022\010\n\004AVRO\020\001\022\t\n\005ARROW\020\002B\304\001\n$com.goog"
+ "le.cloud.bigquery.storage.v1B\013StreamProt"
+ "oP\001ZGgoogle.golang.org/genproto/googleap"
+ "is/cloud/bigquery/storage/v1;storage\252\002 G"
+ "oogle.Cloud.BigQuery.Storage.V1\312\002 Google"
+ "\\Cloud\\BigQuery\\Storage\\V1b\006proto3"
+ "\003\022*\n\035estimated_total_bytes_scanned\030\014 \001(\003"
+ "B\003\340A\003\032C\n\016TableModifiers\0221\n\rsnapshot_time"
+ "\030\001 \001(\0132\032.google.protobuf.Timestamp\032\324\001\n\020T"
+ "ableReadOptions\022\027\n\017selected_fields\030\001 \003(\t"
+ "\022\027\n\017row_restriction\030\002 \001(\t\022g\n\033arrow_seria"
+ "lization_options\030\003 \001(\0132;.google.cloud.bi"
+ "gquery.storage.v1.ArrowSerializationOpti"
+ "onsB\003\340A\001H\000B%\n#output_format_serializatio"
+ "n_options:k\352Ah\n*bigquerystorage.googleap"
+ "is.com/ReadSession\022:projects/{project}/l"
+ "ocations/{location}/sessions/{session}B\010"
+ "\n\006schema\"\234\001\n\nReadStream\022\021\n\004name\030\001 \001(\tB\003\340"
+ "A\003:{\352Ax\n)bigquerystorage.googleapis.com/"
+ "ReadStream\022Kprojects/{project}/locations"
+ "/{location}/sessions/{session}/streams/{"
+ "stream}*>\n\nDataFormat\022\033\n\027DATA_FORMAT_UNS"
+ "PECIFIED\020\000\022\010\n\004AVRO\020\001\022\t\n\005ARROW\020\002B\304\001\n$com."
+ "google.cloud.bigquery.storage.v1B\013Stream"
+ "ProtoP\001ZGgoogle.golang.org/genproto/goog"
+ "leapis/cloud/bigquery/storage/v1;storage"
+ "\252\002 Google.Cloud.BigQuery.Storage.V1\312\002 Go"
+ "ogle\\Cloud\\BigQuery\\Storage\\V1b\006proto3"
};
descriptor =
com.google.protobuf.Descriptors.FileDescriptor.internalBuildGeneratedFileFrom(
Expand All @@ -121,6 +122,7 @@ public static com.google.protobuf.Descriptors.FileDescriptor getDescriptor() {
"TableModifiers",
"ReadOptions",
"Streams",
"EstimatedTotalBytesScanned",
"Schema",
});
internal_static_google_cloud_bigquery_storage_v1_ReadSession_TableModifiers_descriptor =
Expand Down
Expand Up @@ -62,7 +62,7 @@ service BigQueryRead {
// limits are enforced based on the number of pre-filtered rows, so some
// filters can lead to lopsided assignments.
//
// Read sessions automatically expire 24 hours after they are created and do
// Read sessions automatically expire 6 hours after they are created and do
// not require manual clean-up by the caller.
rpc CreateReadSession(CreateReadSessionRequest) returns (ReadSession) {
option (google.api.http) = {
Expand Down
Expand Up @@ -126,6 +126,11 @@ message ReadSession {
// in that case, the user will need to use a List method to get the streams
// instead, which is not yet available.
repeated ReadStream streams = 10 [(google.api.field_behavior) = OUTPUT_ONLY];

// Output only. An estimate on the number of bytes this session will scan when
// all streams are completely consumed. This estimate is based on
// metadata from the table which might be incomplete or stale.
int64 estimated_total_bytes_scanned = 12 [(google.api.field_behavior) = OUTPUT_ONLY];
}

// Information about a single stream that gets data out of the storage system.
Expand Down