Commit 4ee2b50

Change Queue interface to return a callback instead of an index (#12230)
This change will allow significantly simplifying the implementation for the case where the queue is disabled and the caller must block until the batch is constructed and processed downstream.

Updates #8122

It is OK to have this as a breaking change since Queue is still experimental.

Signed-off-by: Bogdan Drutu <[email protected]>
1 parent 0a40b4e commit 4ee2b50
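
For orientation, a minimal sketch of what the new contract looks like from the consumer side: Read now hands back a DoneCallback that must be invoked with the processing result, instead of an index to pass to OnProcessingFinished. The drain helper and consume parameter below are hypothetical illustrations, not code from this commit.

package example

import (
	"context"

	"go.opentelemetry.io/collector/exporter/exporterqueue"
)

// drain is a hypothetical consumer loop written against the new Queue interface.
func drain[T any](q exporterqueue.Queue[T], consume func(context.Context, T) error) {
	for {
		// Read blocks until an item is available or the queue is stopped.
		ctx, req, done, ok := q.Read(context.Background())
		if !ok {
			return // queue stopped
		}
		// Report the outcome through the callback; for the persistent queue this
		// is what cleans the item up in storage.
		done(consume(ctx, req))
	}
}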

10 files changed: +84 −63 lines changed

.chloggen/done-callback.yaml

Lines changed: 25 additions & 0 deletions
@@ -0,0 +1,25 @@
+# Use this changelog template to create an entry for release notes.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: breaking
+
+# The name of the component, or a single word describing the area of concern, (e.g. otlpreceiver)
+component: exporterqueue
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Change Queue interface to return a callback instead of an index
+
+# One or more tracking issues or pull requests related to the change
+issues: [8122]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext:
+
+# Optional: The change log or logs in which this entry should be included.
+# e.g. '[user]' or '[user, api]'
+# Include 'user' if the change is relevant to end users.
+# Include 'api' if there is a change to a library API.
+# Default: '[user]'
+change_logs: [api]

exporter/exporterqueue/bounded_memory_queue.go

Lines changed: 4 additions & 6 deletions
@@ -11,6 +11,8 @@ import (
 	"go.opentelemetry.io/collector/component"
 )

+var noopDone DoneCallback = func(error) {}
+
 // boundedMemoryQueue implements a producer-consumer exchange similar to a ring buffer queue,
 // where the queue is bounded and if it fills up due to slow consumers, the new items written by
 // the producer are dropped.
@@ -34,11 +36,7 @@ func newBoundedMemoryQueue[T any](set memoryQueueSettings[T]) Queue[T] {
 	}
 }

-func (q *boundedMemoryQueue[T]) Read(context.Context) (uint64, context.Context, T, bool) {
+func (q *boundedMemoryQueue[T]) Read(context.Context) (context.Context, T, DoneCallback, bool) {
 	ctx, req, ok := q.sizedQueue.pop()
-	return 0, ctx, req, ok
+	return ctx, req, noopDone, ok
 }
-
-// OnProcessingFinished should be called to remove the item of the given index from the queue once processing is finished.
-// For in memory queue, this function is noop.
-func (q *boundedMemoryQueue[T]) OnProcessingFinished(uint64, error) {}

exporter/exporterqueue/bounded_memory_queue_test.go

Lines changed: 4 additions & 4 deletions
@@ -183,11 +183,11 @@ func TestZeroSizeNoConsumers(t *testing.T) {
 }

 func consume[T any](q Queue[T], consumeFunc func(context.Context, T) error) bool {
-	index, ctx, req, ok := q.Read(context.Background())
+	ctx, req, done, ok := q.Read(context.Background())
 	if !ok {
 		return false
 	}
-	q.OnProcessingFinished(index, consumeFunc(ctx, req))
+	done(consumeFunc(ctx, req))
 	return true
 }

@@ -203,11 +203,11 @@ func newAsyncConsumer[T any](q Queue[T], numConsumers int, consumeFunc func(cont
 		go func() {
 			defer ac.stopWG.Done()
 			for {
-				index, ctx, req, ok := q.Read(context.Background())
+				ctx, req, done, ok := q.Read(context.Background())
 				if !ok {
 					return
 				}
-				q.OnProcessingFinished(index, consumeFunc(ctx, req))
+				done(consumeFunc(ctx, req))
 			}
 		}()
 	}

exporter/exporterqueue/persistent_queue.go

Lines changed: 6 additions & 6 deletions
@@ -273,14 +273,14 @@ func (pq *persistentQueue[T]) putInternal(ctx context.Context, req T) error {
 	return nil
 }

-func (pq *persistentQueue[T]) Read(ctx context.Context) (uint64, context.Context, T, bool) {
+func (pq *persistentQueue[T]) Read(ctx context.Context) (context.Context, T, DoneCallback, bool) {
 	pq.mu.Lock()
 	defer pq.mu.Unlock()

 	for {
 		if pq.stopped {
 			var req T
-			return 0, context.Background(), req, false
+			return context.Background(), req, nil, false
 		}

 		// Read until either a successful retrieved element or no more elements in the storage.
@@ -301,7 +301,7 @@ func (pq *persistentQueue[T]) Read(ctx context.Context
 			}
 			pq.hasMoreSpace.Signal()

-			return index, context.Background(), req, true
+			return context.Background(), req, func(processErr error) { pq.onDone(index, processErr) }, true
 		}
 	}

@@ -312,7 +312,7 @@ func (pq *persistentQueue[T]) Read(ctx context.Context
 }

 // getNextItem pulls the next available item from the persistent storage along with its index. Once processing is
-// finished, the index should be called with OnProcessingFinished to clean up the storage. If no new item is available,
+// finished, the index should be called with onDone to clean up the storage. If no new item is available,
 // returns false.
 func (pq *persistentQueue[T]) getNextItem(ctx context.Context) (uint64, T, bool) {
 	index := pq.readIndex
@@ -347,8 +347,8 @@ func (pq *persistentQueue[T]) getNextItem(ctx context.Context) (uint64, T, bool)
 	return index, request, true
 }

-// OnProcessingFinished should be called to remove the item of the given index from the queue once processing is finished.
-func (pq *persistentQueue[T]) OnProcessingFinished(index uint64, consumeErr error) {
+// onDone should be called to remove the item of the given index from the queue once processing is finished.
+func (pq *persistentQueue[T]) onDone(index uint64, consumeErr error) {
 	// Delete the item from the persistent storage after it was processed.
 	pq.mu.Lock()
 	// Always unref client even if the consumer is shutdown because we always ref it for every valid request.

exporter/exporterqueue/persistent_queue_test.go

Lines changed: 5 additions & 5 deletions
@@ -659,19 +659,19 @@ func TestPersistentQueue_CurrentlyProcessedItems(t *testing.T) {
 	requireCurrentlyDispatchedItemsEqual(t, ps, []uint64{})

 	// Takes index 0 in process.
-	_, _, readReq, found := ps.Read(context.Background())
+	_, readReq, _, found := ps.Read(context.Background())
 	require.True(t, found)
 	assert.Equal(t, req, readReq)
 	requireCurrentlyDispatchedItemsEqual(t, ps, []uint64{0})

 	// This takes item 1 to process.
-	secondIndex, _, secondReadReq, found := ps.Read(context.Background())
+	_, secondReadReq, secondDone, found := ps.Read(context.Background())
 	require.True(t, found)
 	assert.Equal(t, req, secondReadReq)
 	requireCurrentlyDispatchedItemsEqual(t, ps, []uint64{0, 1})

 	// Lets mark item 1 as finished, it will remove it from the currently dispatched items list.
-	ps.OnProcessingFinished(secondIndex, nil)
+	secondDone(nil)
 	requireCurrentlyDispatchedItemsEqual(t, ps, []uint64{0})

 	// Reload the storage. Since items 0 was not finished, this should be re-enqueued at the end.
@@ -910,12 +910,12 @@ func TestPersistentQueue_ShutdownWhileConsuming(t *testing.T) {

 	require.NoError(t, ps.Offer(context.Background(), uint64(50)))

-	index, _, _, ok := ps.Read(context.Background())
+	_, _, done, ok := ps.Read(context.Background())
 	require.True(t, ok)
 	assert.False(t, ps.client.(*storagetest.MockStorageClient).IsClosed())
 	require.NoError(t, ps.Shutdown(context.Background()))
 	assert.False(t, ps.client.(*storagetest.MockStorageClient).IsClosed())
-	ps.OnProcessingFinished(index, nil)
+	done(nil)
 	assert.True(t, ps.client.(*storagetest.MockStorageClient).IsClosed())
 }

exporter/exporterqueue/queue.go

Lines changed: 12 additions & 7 deletions
@@ -12,11 +12,18 @@ import (
 	"go.opentelemetry.io/collector/pipeline"
 )

-// ErrQueueIsFull is the error returned when an item is offered to the Queue and the queue is full.
+// ErrQueueIsFull is the error returned when an item is offered to the Queue and the queue is full and setup to
+// not block.
 // Experimental: This API is at the early stage of development and may change without backward compatibility
 // until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
 var ErrQueueIsFull = errors.New("sending queue is full")

+// DoneCallback represents the callback that will be called when the read request is completely processed by the
+// downstream components.
+// Experimental: This API is at the early stage of development and may change without backward compatibility
+// until https://github.com/open-telemetry/opentelemetry-collector/issues/8122 is resolved.
+type DoneCallback func(processErr error)
+
 // Queue defines a producer-consumer exchange which can be backed by e.g. the memory-based ring buffer queue
 // (boundedMemoryQueue) or via a disk-based queue (persistentQueue)
 // Experimental: This API is at the early stage of development and may change without backward compatibility
@@ -31,13 +38,11 @@ type Queue[T any] interface {
 	Size() int64
 	// Capacity returns the capacity of the queue.
 	Capacity() int64
-	// Read pulls the next available item from the queue along with its index. Once processing is
-	// finished, the index should be called with OnProcessingFinished to clean up the storage.
+	// Read pulls the next available item from the queue along with its done callback. Once processing is
+	// finished, the done callback must be called to clean up the storage.
 	// The function blocks until an item is available or if the queue is stopped.
-	// Returns false if reading failed or if the queue is stopped.
-	Read(context.Context) (uint64, context.Context, T, bool)
-	// OnProcessingFinished should be called to remove the item of the given index from the queue once processing is finished.
-	OnProcessingFinished(index uint64, consumeErr error)
+	// If the queue is stopped returns false, otherwise true.
+	Read(context.Context) (context.Context, T, DoneCallback, bool)
 }

 // Settings defines settings for creating a queue.
exporter/internal/queue/batcher.go

Lines changed: 7 additions & 7 deletions
Original file line numberDiff line numberDiff line change
@@ -14,9 +14,9 @@ import (
1414
)
1515

1616
type batch struct {
17-
ctx context.Context
18-
req internal.Request
19-
idxList []uint64
17+
ctx context.Context
18+
req internal.Request
19+
dones []exporterqueue.DoneCallback
2020
}
2121

2222
// Batcher is in charge of reading items from the queue and send them out asynchronously.
@@ -68,16 +68,16 @@ func newBaseBatcher(batchCfg exporterbatcher.Config,
6868
}
6969

7070
// flush starts a goroutine that calls exportFunc. It blocks until a worker is available if necessary.
71-
func (qb *BaseBatcher) flush(batchToFlush batch) {
71+
func (qb *BaseBatcher) flush(ctx context.Context, req internal.Request, dones []exporterqueue.DoneCallback) {
7272
qb.stopWG.Add(1)
7373
if qb.workerPool != nil {
7474
<-qb.workerPool
7575
}
7676
go func() {
7777
defer qb.stopWG.Done()
78-
err := qb.exportFunc(batchToFlush.ctx, batchToFlush.req)
79-
for _, idx := range batchToFlush.idxList {
80-
qb.queue.OnProcessingFinished(idx, err)
78+
err := qb.exportFunc(ctx, req)
79+
for _, done := range dones {
80+
done(err)
8181
}
8282
if qb.workerPool != nil {
8383
qb.workerPool <- true
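
Because a merged batch can now carry one DoneCallback per queue item that contributed to it, the single export result has to be fanned out to all of them, which is what the loop in flush above does. A stripped-down, hypothetical illustration of that pattern (flushMerged is not the repository's BaseBatcher.flush):

package toyqueue

import (
	"context"

	"go.opentelemetry.io/collector/exporter/exporterqueue"
)

// flushMerged performs one export attempt and completes every callback collected
// while the batch was built, so each contributing queue item observes the same outcome.
func flushMerged(ctx context.Context, export func(context.Context) error, dones []exporterqueue.DoneCallback) {
	err := export(ctx)
	for _, done := range dones {
		done(err)
	}
}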

exporter/internal/queue/consumers.go

Lines changed: 2 additions & 3 deletions
@@ -36,12 +36,11 @@ func (qc *Consumers[T]) Start(_ context.Context, _ component.Host) error {
 			startWG.Done()
 			defer qc.stopWG.Done()
 			for {
-				index, ctx, req, ok := qc.queue.Read(context.Background())
+				ctx, req, done, ok := qc.queue.Read(context.Background())
 				if !ok {
 					return
 				}
-				consumeErr := qc.consumeFunc(ctx, req)
-				qc.queue.OnProcessingFinished(index, consumeErr)
+				done(qc.consumeFunc(ctx, req))
 			}
 		}()
 	}

exporter/internal/queue/default_batcher.go

Lines changed: 16 additions & 19 deletions
@@ -10,6 +10,7 @@ import (
 	"time"

 	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/exporter/exporterqueue"
 	"go.opentelemetry.io/collector/exporter/internal"
 )

@@ -35,7 +36,7 @@ func (qb *DefaultBatcher) startReadingFlushingGoroutine() {
 		defer qb.stopWG.Done()
 		for {
 			// Read() blocks until the queue is non-empty or until the queue is stopped.
-			idx, ctx, req, ok := qb.queue.Read(context.Background())
+			ctx, req, done, ok := qb.queue.Read(context.Background())
 			if !ok {
 				qb.shutdownCh <- true
 				return
@@ -54,7 +55,7 @@
 				}

 				if mergeSplitErr != nil || reqList == nil {
-					qb.queue.OnProcessingFinished(idx, mergeSplitErr)
+					done(mergeSplitErr)
 					qb.currentBatchMu.Unlock()
 					continue
 				}
@@ -64,42 +65,38 @@ func (qb *DefaultBatcher) startReadingFlushingGoroutine() {
 					qb.currentBatch = nil
 					qb.currentBatchMu.Unlock()
 					for i := 0; i < len(reqList); i++ {
-						qb.flush(batch{
-							req:     reqList[i],
-							ctx:     ctx,
-							idxList: []uint64{idx},
-						})
+						qb.flush(ctx, reqList[i], []exporterqueue.DoneCallback{done})
 						// TODO: handle partial failure
 					}
 					qb.resetTimer()
 				} else {
 					qb.currentBatch = &batch{
-						req:     reqList[0],
-						ctx:     ctx,
-						idxList: []uint64{idx},
+						req:   reqList[0],
+						ctx:   ctx,
+						dones: []exporterqueue.DoneCallback{done},
 					}
 					qb.currentBatchMu.Unlock()
 				}
 			} else {
 				if qb.currentBatch == nil || qb.currentBatch.req == nil {
 					qb.resetTimer()
 					qb.currentBatch = &batch{
-						req:     req,
-						ctx:     ctx,
-						idxList: []uint64{idx},
+						req:   req,
+						ctx:   ctx,
+						dones: []exporterqueue.DoneCallback{done},
 					}
 				} else {
 					// TODO: consolidate implementation for the cases where MaxSizeConfig is specified and the case where it is not specified
 					mergedReq, mergeErr := qb.currentBatch.req.MergeSplit(qb.currentBatch.ctx, qb.batchCfg.MaxSizeConfig, req)
 					if mergeErr != nil {
-						qb.queue.OnProcessingFinished(idx, mergeErr)
+						done(mergeErr)
 						qb.currentBatchMu.Unlock()
 						continue
 					}
 					qb.currentBatch = &batch{
-						req:     mergedReq[0],
-						ctx:     qb.currentBatch.ctx,
-						idxList: append(qb.currentBatch.idxList, idx),
+						req:   mergedReq[0],
+						ctx:   qb.currentBatch.ctx,
+						dones: append(qb.currentBatch.dones, done),
 					}
 				}

@@ -109,7 +106,7 @@ func (qb *DefaultBatcher) startReadingFlushingGoroutine() {
 				qb.currentBatchMu.Unlock()

 				// flush() blocks until successfully started a goroutine for flushing.
-				qb.flush(batchToFlush)
+				qb.flush(batchToFlush.ctx, batchToFlush.req, batchToFlush.dones)
 				qb.resetTimer()
 			} else {
 				qb.currentBatchMu.Unlock()
@@ -168,7 +165,7 @@ func (qb *DefaultBatcher) flushCurrentBatchIfNecessary() {
 	qb.currentBatchMu.Unlock()

 	// flush() blocks until successfully started a goroutine for flushing.
-	qb.flush(batchToFlush)
+	qb.flush(batchToFlush.ctx, batchToFlush.req, batchToFlush.dones)
 	qb.resetTimer()
 }

exporter/internal/queue/disabled_batcher.go

Lines changed: 3 additions & 6 deletions
@@ -7,6 +7,7 @@ import (
 	"context"

 	"go.opentelemetry.io/collector/component"
+	"go.opentelemetry.io/collector/exporter/exporterqueue"
 )

 // DisabledBatcher is a special-case of Batcher that has no size limit for sending. Any items read from the queue will
@@ -29,15 +30,11 @@ func (qb *DisabledBatcher) Start(_ context.Context, _ component.Host) error {
 	go func() {
 		defer qb.stopWG.Done()
 		for {
-			idx, _, req, ok := qb.queue.Read(context.Background())
+			_, req, done, ok := qb.queue.Read(context.Background())
 			if !ok {
 				return
 			}
-			qb.flush(batch{
-				req:     req,
-				ctx:     context.Background(),
-				idxList: []uint64{idx},
-			})
+			qb.flush(context.Background(), req, []exporterqueue.DoneCallback{done})
 		}
 	}()
 	return nil
