 package servers
 
 import (
+	"bytes"
 	"io"
 	"sync"
 	"sync/atomic"
 
 	"github.com/jaegertracing/jaeger/pkg/metrics"
 )
 
-// ThriftTransport is a subset of thrift.TTransport methods, for easier mocking.
-type ThriftTransport interface {
+// TBufferedServer is an alias for UDPServer, for backwards compatibility.
+type TBufferedServer = UDPServer
+
+// NewTBufferedServer is an alias for NewUDPServer, for backwards compatibility.
+var NewTBufferedServer = NewUDPServer
+
+// UDPConn is an abstraction of *net.UDPConn, for easier mocking.
+type UDPConn interface {
 	io.Reader
 	io.Closer
 }
 
-// TBufferedServer is a custom thrift server that reads traffic using the transport provided
-// and places messages into a buffered channel to be processed by the processor provided
-type TBufferedServer struct {
+// UDPServer reads packets from a UDP connection into bytes.Buffer and places
+// each buffer into a bounded channel to be consumed by the receiver.
+// After consuming the buffer, the receiver SHOULD call DataRecd() to signal
+// that the buffer is no longer in use and to return it to the pool.
+type UDPServer struct {
 	// NB. queueSize HAS to be at the top of the struct or it will SIGSEGV on certain architectures.
 	// See https://github.com/golang/go/issues/13868
 	queueSize     int64
-	dataChan      chan *ReadBuf
+	dataChan      chan *bytes.Buffer
 	maxPacketSize int
 	maxQueueSize  int
 	serving       uint32
-	transport     ThriftTransport
-	readBufPool   *sync.Pool
+	transport     UDPConn
+	readBufPool   sync.Pool
 	metrics       struct {
 		// Size of the current server queue
 		QueueSize metrics.Gauge `metric:"thrift.udp.server.queue_size"`
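
The UDPServer doc comment above establishes a buffer-ownership contract that is easy to get wrong: every *bytes.Buffer taken from DataChan() must be handed back through DataRecd(), otherwise the queue-size gauge drifts and the pool never gets its buffers back. A minimal consumer sketch (the process function is hypothetical, not part of this change):

	go func() {
		for buf := range srv.DataChan() {
			process(buf.Bytes()) // hypothetical handler; must not retain buf after DataRecd
			srv.DataRecd(buf)    // decrements the queue size and returns buf to the pool
		}
	}()
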
@@ -58,86 +67,117 @@ const (
 	stateInit
 )
 
-// NewTBufferedServer creates a TBufferedServer
-func NewTBufferedServer(
-	transport ThriftTransport,
+// NewUDPServer creates a UDPServer
+func NewUDPServer(
+	transport UDPConn,
 	maxQueueSize int,
 	maxPacketSize int,
 	mFactory metrics.Factory,
-) (*TBufferedServer, error) {
-	dataChan := make(chan *ReadBuf, maxQueueSize)
-
-	readBufPool := &sync.Pool{
-		New: func() any {
-			return &ReadBuf{bytes: make([]byte, maxPacketSize)}
-		},
-	}
-
-	res := &TBufferedServer{
-		dataChan:      dataChan,
+) (*UDPServer, error) {
+	srv := &UDPServer{
+		dataChan:      make(chan *bytes.Buffer, maxQueueSize),
 		transport:     transport,
 		maxQueueSize:  maxQueueSize,
 		maxPacketSize: maxPacketSize,
-		readBufPool:   readBufPool,
 		serving:       stateInit,
+		readBufPool: sync.Pool{
+			New: func() any {
+				return new(bytes.Buffer)
+			},
+		},
+	}
+
+	metrics.MustInit(&srv.metrics, mFactory, nil)
+	return srv, nil
+}
+
+// packetReader is a helper for reading a single packet no larger than maxPacketSize
+// from the underlying reader. Without it, the ReadFrom() method of bytes.Buffer would
+// read multiple packets and would not even stop at maxPacketSize.
+type packetReader struct {
+	maxPacketSize int
+	reader        io.LimitedReader
+	attempt       int
+}
+
+func (r *packetReader) Read(p []byte) (int, error) {
+	if r.attempt > 0 {
+		return 0, io.EOF
 	}
+	r.attempt = 1
+	return r.reader.Read(p)
+}
 
-	metrics.MustInit(&res.metrics, mFactory, nil)
-	return res, nil
+func (r *packetReader) readPacket(buf *bytes.Buffer) (int, error) {
+	// reset the readers since we're reusing them to avoid allocations
+	r.attempt = 0
+	r.reader.N = int64(r.maxPacketSize)
+	// prepare the buffer for the expected packet size
+	buf.Grow(r.maxPacketSize)
+	buf.Reset()
+	// use Buffer's ReadFrom() as otherwise it's hard to get it into the right state
+	n, err := buf.ReadFrom(r)
+	return int(n), err
 }
 
 // Serve initiates the readers and starts serving traffic
-func (s *TBufferedServer) Serve() {
+func (s *UDPServer) Serve() {
 	defer close(s.dataChan)
 	if !atomic.CompareAndSwapUint32(&s.serving, stateInit, stateServing) {
 		return // Stop already called
 	}
 
+	pr := &packetReader{
+		maxPacketSize: s.maxPacketSize,
+		reader: io.LimitedReader{
+			R: s.transport,
+		},
+	}
+
 	for s.IsServing() {
-		readBuf := s.readBufPool.Get().(*ReadBuf)
-		n, err := s.transport.Read(readBuf.bytes)
+		buf := s.readBufPool.Get().(*bytes.Buffer)
+		n, err := pr.readPacket(buf)
 		if err == nil {
-			readBuf.n = n
 			s.metrics.PacketSize.Update(int64(n))
 			select {
-			case s.dataChan <- readBuf:
+			case s.dataChan <- buf:
 				s.metrics.PacketsProcessed.Inc(1)
 				s.updateQueueSize(1)
 			default:
-				s.readBufPool.Put(readBuf)
+				s.readBufPool.Put(buf)
 				s.metrics.PacketsDropped.Inc(1)
 			}
 		} else {
-			s.readBufPool.Put(readBuf)
+			s.readBufPool.Put(buf)
 			s.metrics.ReadError.Inc(1)
 		}
 	}
 }
 
-func (s *TBufferedServer) updateQueueSize(delta int64) {
+func (s *UDPServer) updateQueueSize(delta int64) {
 	atomic.AddInt64(&s.queueSize, delta)
 	s.metrics.QueueSize.Update(atomic.LoadInt64(&s.queueSize))
 }
 
 // IsServing indicates whether the server is currently serving traffic
-func (s *TBufferedServer) IsServing() bool {
+func (s *UDPServer) IsServing() bool {
 	return atomic.LoadUint32(&s.serving) == stateServing
 }
 
 // Stop stops the serving of traffic and waits until the queue is
 // emptied by the readers
-func (s *TBufferedServer) Stop() {
+func (s *UDPServer) Stop() {
 	atomic.StoreUint32(&s.serving, stateStopped)
 	_ = s.transport.Close()
 }
 
 // DataChan returns the data chan of the buffered server
-func (s *TBufferedServer) DataChan() chan *ReadBuf {
+func (s *UDPServer) DataChan() chan *bytes.Buffer {
 	return s.dataChan
 }
 
 // DataRecd is called by the consumers every time they read a data item from DataChan
-func (s *TBufferedServer) DataRecd(buf *ReadBuf) {
+func (s *UDPServer) DataRecd(buf *bytes.Buffer) {
 	s.updateQueueSize(-1)
 	s.readBufPool.Put(buf)
 }
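
A note on why readPacket() goes through the single-attempt packetReader rather than calling buf.ReadFrom(s.transport) directly: bytes.Buffer.ReadFrom keeps issuing Read calls until it sees io.EOF, so pointed at a live UDP socket it would coalesce many datagrams into one buffer and grow well past maxPacketSize. packetReader reports io.EOF after its first Read, and the embedded io.LimitedReader caps that single Read at maxPacketSize bytes. A standalone sketch of the same pattern (conn and handle are placeholders, not part of the diff):

	pr := &packetReader{
		maxPacketSize: 65000,
		reader:        io.LimitedReader{R: conn}, // conn satisfies UDPConn, e.g. *net.UDPConn
	}
	buf := new(bytes.Buffer)
	for {
		if _, err := pr.readPacket(buf); err != nil {
			break // e.g. the connection was closed
		}
		handle(buf.Bytes()) // placeholder; buf holds exactly one datagram (possibly truncated)
	}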
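
For end-to-end context, *net.UDPConn already satisfies the new UDPConn interface (it has Read and Close), so wiring is unchanged apart from the constructor name. A hedged setup sketch, assuming the illustrative port 6831 and the NullFactory no-op factory from the imported metrics package:

	addr, _ := net.ResolveUDPAddr("udp", "localhost:6831") // error elided for brevity
	conn, err := net.ListenUDP("udp", addr)
	if err != nil {
		log.Fatal(err)
	}
	srv, err := NewUDPServer(conn, 1000 /* maxQueueSize */, 65000 /* maxPacketSize */, metrics.NullFactory)
	if err != nil {
		log.Fatal(err)
	}
	go srv.Serve()
	defer srv.Stop()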