queue.go
package gomol

import (
	"errors"
)
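
// queue buffers log messages on a channel and hands them to a single
// background worker goroutine for delivery to the configured loggers.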
type queue struct {
	base      *Base
	running   bool
	finished  chan struct{}
	queueChan chan *Message
}
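
// newQueue returns a queue bound to base whose channel buffers at most
// maxQueueSize messages.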
func newQueue(base *Base, maxQueueSize uint) *queue {
	return &queue{
		base:      base,
		running:   false,
		finished:  make(chan struct{}),
		queueChan: make(chan *Message, maxQueueSize),
	}
}
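
// startWorker launches the background goroutine that drains the message
// channel. It returns an error if the worker is already running.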
func (queue *queue) startWorker() error {
	if queue.running {
		return errors.New("workers are already running")
	}
	queue.running = true
	go queue.work()
	return nil
}
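
// stopWorker closes the message channel and blocks until the worker has
// written every remaining message. It returns an error if the worker is
// not running.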
func (queue *queue) stopWorker() error {
	if !queue.running {
		return errors.New("workers are not running")
	}
	queue.running = false
	close(queue.queueChan)
	queue.flush()
	return nil
}
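
// work is the worker goroutine's loop. It drains the message channel,
// signalling on the finished channel whenever the queue is momentarily
// empty, and returns once the channel is closed and fully drained.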
func (queue *queue) work() {
	defer close(queue.finished)
	for {
		// First, try to consume _all_ messages currently on the
		// channel. If we hit the default case here, it's because
		// there's no message ready to process.
		select {
		case msg, ok := <-queue.queueChan:
			if !ok {
				return
			}
			queue.write(msg)
			continue
		default:
		}

		// In that case, either process the next message to arrive,
		// or, if another goroutine is waiting for us to finish the
		// queue (a flush sync), send a value on the finished channel
		// to let it know we had a bit of downtime.
		select {
		case msg, ok := <-queue.queueChan:
			if !ok {
				return
			}
			queue.write(msg)
		case queue.finished <- struct{}{}:
		}
	}
}
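
// write delivers msg to every configured logger. If there are no loggers,
// or any health-checkable logger reports itself unhealthy, the message is
// also sent to the fallback logger when one is set and itself healthy.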
func (queue *queue) write(msg *Message) {
	if msg == nil {
		return
	}

	unhealthy := len(msg.base.loggers) == 0
	for _, l := range msg.base.loggers {
		if hcLogger, ok := l.(HealthCheckLogger); ok {
			if !hcLogger.Healthy() {
				unhealthy = true
			}
		}
		l.Logm(msg.Timestamp, msg.Level, msg.Attrs.Attrs(), msg.Msg)
	}

	if unhealthy && msg.base.fallbackLogger != nil {
		logFallback := true
		if hcLogger, ok := msg.base.fallbackLogger.(HealthCheckLogger); ok {
			logFallback = hcLogger.Healthy()
		}
		if logFallback {
			msg.base.fallbackLogger.Logm(msg.Timestamp, msg.Level, msg.Attrs.Attrs(), msg.Msg)
		}
	}
}
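
// flush blocks until the worker signals that the queue is empty or has
// shut down.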
func (queue *queue) flush() {
	<-queue.finished
}
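
// queueMessage appends msg to the channel, dropping the oldest queued
// message (and reporting ErrMessageDropped) whenever the channel is full.
// It returns an error if the worker is not running.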
func (queue *queue) queueMessage(msg *Message) error {
	if !queue.running {
		return errors.New("the logging system is not running - has InitLoggers() been executed?")
	}

loop:
	for {
		// Attempt to queue the message immediately to
		// the channel.
		select {
		case queue.queueChan <- msg:
			break loop
		default:
		}

		// The queue was full. Try to read one message
		// from it (which will be the oldest) to make room
		// for another attempt to append. We do this in a
		// loop in case there's some contention - we'll keep
		// eating from the front until we finally make it in.
		select {
		case <-queue.queueChan:
			queue.base.report(ErrMessageDropped)
		default:
		}
	}
	return nil
}
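
// pressure reports how many messages are currently buffered on the channel.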
func (queue *queue) pressure() int {
	return len(queue.queueChan)
}
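
A minimal sketch of the queue's lifecycle as it is driven from inside the
package; base and msg stand in for a *Base and *Message constructed
elsewhere in gomol, and the error handling shown is illustrative only:

	q := newQueue(base, 100)
	if err := q.startWorker(); err != nil {
		// a worker was already running
	}
	if err := q.queueMessage(msg); err != nil {
		// the worker was never started
	}
	_ = q.pressure()   // number of messages currently buffered
	_ = q.stopWorker() // closes the channel and waits for a full drain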