This repository has been archived by the owner on Apr 2, 2024. It is now read-only.

Commit

bug fix
cevian committed Dec 13, 2022
1 parent efdf869 commit 05df804
Showing 1 changed file with 2 additions and 2 deletions.
pkg/pgmodel/ingestor/metric_batcher.go (4 changes: 2 additions & 2 deletions)
@@ -256,7 +256,7 @@ func sendBatches(firstReq *insertDataRequest, input chan *insertDataRequest, con
 
     numSeries := pending.batch.CountSeries()
     numSamples, numExemplars := pending.batch.Count()
-
+   wasFull := pending.IsFull()
     select {
    //try to batch as much as possible before sending
    case req, ok := <-recvCh:
@@ -277,7 +277,7 @@ func sendBatches(firstReq *insertDataRequest, input chan *insertDataRequest, con
    metrics.IngestorFlushSeries.With(prometheus.Labels{"type": "metric", "subsystem": "metric_batcher"}).Observe(float64(numSeries))
    metrics.IngestorFlushInsertables.With(prometheus.Labels{"type": "metric", "subsystem": "metric_batcher"}).Observe(float64(numSamples + numExemplars))
    metrics.IngestorBatchDuration.With(prometheus.Labels{"type": "metric", "subsystem": "metric_batcher"}).Observe(time.Since(pending.Start).Seconds())
-   if pending.IsFull() {
+   if wasFull {
        metrics.IngestorBatchFlushTotal.With(prometheus.Labels{"type": "metric", "subsystem": "metric_batcher", "reason": "size"}).Inc()
    } else {
        metrics.IngestorBatchFlushTotal.With(prometheus.Labels{"type": "metric", "subsystem": "metric_batcher", "reason": "requested"}).Inc()
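Why the change matters (a reading of the diff above, not the author's wording): IsFull() is now captured into wasFull before the select that keeps batching incoming requests and before the batch is sent, so the IngestorBatchFlushTotal "reason" label ("size" vs "requested") records the state that actually triggered the flush, rather than whatever IsFull() happens to return by the time the metrics are observed. A minimal, self-contained sketch of the pattern, using hypothetical stand-in types rather than promscale's own:

// sketch.go: a minimal illustration (hypothetical types, not promscale's own)
// of why the flush reason must be captured up front.
package main

import "fmt"

// pendingBatch stands in for the batcher's pending state.
type pendingBatch struct {
    items    []int
    capacity int
}

// IsFull reports whether the batch has reached its configured capacity.
func (p *pendingBatch) IsFull() bool { return len(p.items) >= p.capacity }

// flush sends and then resets the batch, so IsFull() is false afterwards.
func (p *pendingBatch) flush() { p.items = p.items[:0] }

// flushReason mirrors the "reason" label on the flush counter.
func flushReason(wasFull bool) string {
    if wasFull {
        return "size"
    }
    return "requested"
}

func main() {
    p := &pendingBatch{items: []int{1, 2, 3}, capacity: 3}

    // Capture the state that triggered the flush before anything can mutate it.
    wasFull := p.IsFull()
    p.flush()

    fmt.Println(flushReason(wasFull))    // "size": the state at flush time
    fmt.Println(flushReason(p.IsFull())) // "requested": re-checking after the fact misreports the reason
}

The sketch compresses the window between the two checks into a single flush() call; in sendBatches that window appears to be the select above and the subsequent send, so the answer from IsFull() can change before the counter is incremented.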
