/
appendresult.go
132 lines (115 loc) · 3.84 KB
/
appendresult.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
// Copyright 2021 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package managedwriter
import (
"context"
storagepb "google.golang.org/genproto/googleapis/cloud/bigquery/storage/v1beta2"
"google.golang.org/protobuf/proto"
"google.golang.org/protobuf/types/known/wrapperspb"
)
// NoStreamOffset is a sentinel value for signalling we're not tracking
// stream offset (e.g. a default stream which allows simultaneous append streams).
// Offsets of zero and above are real offsets; only this negative value is special.
const NoStreamOffset int64 = -1
// AppendResult tracks the status of a single row of data.
type AppendResult struct {
	// rowData contains the serialized row data.
	rowData []byte

	// ready is closed when the append request completes (successfully or not),
	// at which point err/offset are safe to read.
	ready chan struct{}

	// if the encapsulating append failed, this will retain a reference to the error.
	err error

	// the stream offset assigned to this row; only meaningful when err is nil
	// and the append tracked offsets.
	offset int64
}
// newAppendResult wraps a single serialized row in an AppendResult whose
// ready channel is open, to be closed once the append completes.
func newAppendResult(data []byte) *AppendResult {
	ar := &AppendResult{rowData: data}
	ar.ready = make(chan struct{})
	return ar
}
// Ready returns a channel that is closed once the append request
// carrying this row has completed.
func (ar *AppendResult) Ready() <-chan struct{} {
	return ar.ready
}
// GetResult returns the optional offset of this row, or the associated
// error.  It blocks until the append completes or ctx is cancelled,
// whichever happens first.
func (ar *AppendResult) GetResult(ctx context.Context) (int64, error) {
	select {
	case <-ar.Ready():
		// Append finished; report its recorded outcome.
		return ar.offset, ar.err
	case <-ctx.Done():
		return 0, ctx.Err()
	}
}
// pendingWrite tracks state for a set of rows that are part of a single
// append request.
type pendingWrite struct {
	// request is the proto append request; cleared after markDone so it
	// can be garbage collected.
	request *storagepb.AppendRowsRequest

	// results holds one AppendResult per row in the request, in order.
	results []*AppendResult

	// this is used by the flow controller.
	reqSize int
}
// newPendingWrite constructs the proto request and attaches references
// to the pending results for later consumption. The reason for this is
// that in the future, we may want to allow row batching to be managed by
// the server (e.g. for default/COMMITTED streams). For BUFFERED/PENDING
// streams, this should be managed by the user.
//
// appends holds the serialized rows; offset is the explicit stream offset
// for the first row, or NoStreamOffset when offsets aren't being tracked.
func newPendingWrite(appends [][]byte, offset int64) *pendingWrite {
	results := make([]*AppendResult, len(appends))
	for k, r := range appends {
		results[k] = newAppendResult(r)
	}
	pw := &pendingWrite{
		request: &storagepb.AppendRowsRequest{
			Rows: &storagepb.AppendRowsRequest_ProtoRows{
				ProtoRows: &storagepb.AppendRowsRequest_ProtoData{
					Rows: &storagepb.ProtoRows{
						SerializedRows: appends,
					},
				},
			},
		},
		results: results,
	}
	// Attach the offset to the request whenever one was explicitly given.
	// Zero is a valid offset, so compare against the sentinel rather than
	// testing for positivity (the old `offset > 0` check silently dropped
	// a requested offset of 0).
	if offset != NoStreamOffset {
		pw.request.Offset = &wrapperspb.Int64Value{Value: offset}
	}
	// We compute the size now for flow controller purposes, though
	// the actual request size may be slightly larger (e.g. the first
	// request in a new stream bears schema and stream id).
	pw.reqSize = proto.Size(pw.request)
	return pw
}
// markDone propagates finalization of an append request to associated
// AppendResult references.  On success each result receives a sequential
// offset starting at startOffset; on error every result carries err.
func (pw *pendingWrite) markDone(startOffset int64, err error, fc *flowController) {
	next := startOffset
	for _, ar := range pw.results {
		if err == nil {
			ar.offset = next
			// Advance only when the caller supplied a valid starting offset.
			if startOffset >= 0 {
				next++
			}
		} else {
			ar.err = err
		}
		close(ar.ready)
	}
	// Drop the request reference so it can be collected.
	pw.request = nil
	// if there's a flow controller, signal release. The only time this should be nil is when
	// encountering issues with flow control during enqueuing the initial request.
	if fc != nil {
		fc.release(pw.reqSize)
	}
}