Commit 6f269eb

[chore] Unify persistentQueue fields into QueueMetadata (#13140)
Embed QueueMetadata directly into persistentQueue, replacing the scattered metadata fields.

1. Add QueueMetadata as a member of persistentQueue.
2. Delete redundant fields (queueSize, readIdx, writeIdx, …).

Relates to #13126
1 parent 30f2707 commit 6f269eb
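
Note: the QueueMetadata type itself lives in the internal persistentqueue package and is not part of this diff. A minimal sketch of what it plausibly looks like, inferred only from the field accesses in the hunks below (field names and types follow the diff; ordering, alignment, and comments are assumptions, not the actual source):

// Sketch only: inferred from the accesses pq.metadata.ReadIndex,
// pq.metadata.WriteIndex, pq.metadata.QueueSize, and
// pq.metadata.CurrentlyDispatchedItems in this commit. The real
// definition in internal/persistentqueue may differ in detail.
package persistentqueue

// QueueMetadata groups the queue bookkeeping state that persistentQueue
// previously kept as separate fields.
type QueueMetadata struct {
    ReadIndex                uint64   // index of the next item to read from storage
    WriteIndex               uint64   // index at which the next item will be written
    QueueSize                int64    // current size, in requests or bytes depending on the sizer
    CurrentlyDispatchedItems []uint64 // indices handed to consumers but not yet completed
}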

File tree

2 files changed: +41 −43 lines


exporter/exporterhelper/internal/queuebatch/persistent_queue.go

Lines changed: 38 additions & 40 deletions
@@ -15,6 +15,7 @@ import (
 
     "go.opentelemetry.io/collector/component"
     "go.opentelemetry.io/collector/exporter/exporterhelper/internal/experr"
+    "go.opentelemetry.io/collector/exporter/exporterhelper/internal/queuebatch/internal/persistentqueue"
     "go.opentelemetry.io/collector/exporter/exporterhelper/internal/request"
     "go.opentelemetry.io/collector/extension/xextension/storage"
     "go.opentelemetry.io/collector/pipeline"
@@ -92,15 +93,12 @@ type persistentQueue[T any] struct {
     isRequestSized bool
 
     // mu guards everything declared below.
-    mu                       sync.Mutex
-    hasMoreElements          *sync.Cond
-    hasMoreSpace             *cond
-    readIndex                uint64
-    writeIndex               uint64
-    currentlyDispatchedItems []uint64
-    queueSize                int64
-    refClient                int64
-    stopped                  bool
+    mu              sync.Mutex
+    hasMoreElements *sync.Cond
+    hasMoreSpace    *cond
+    metadata        persistentqueue.QueueMetadata
+    refClient       int64
+    stopped         bool
 }
 
 // newPersistentQueue creates a new queue backed by file storage; name and signal must be a unique combination that identifies the queue storage
@@ -129,7 +127,7 @@ func (pq *persistentQueue[T]) Start(ctx context.Context, host component.Host) er
 func (pq *persistentQueue[T]) Size() int64 {
     pq.mu.Lock()
     defer pq.mu.Unlock()
-    return pq.queueSize
+    return pq.metadata.QueueSize
 }
 
 func (pq *persistentQueue[T]) Capacity() int64 {
@@ -151,11 +149,11 @@ func (pq *persistentQueue[T]) initPersistentContiguousStorage(ctx context.Contex
 
     err := pq.client.Batch(ctx, riOp, wiOp)
     if err == nil {
-        pq.readIndex, err = bytesToItemIndex(riOp.Value)
+        pq.metadata.ReadIndex, err = bytesToItemIndex(riOp.Value)
     }
 
     if err == nil {
-        pq.writeIndex, err = bytesToItemIndex(wiOp.Value)
+        pq.metadata.WriteIndex, err = bytesToItemIndex(wiOp.Value)
     }
 
     if err != nil {
@@ -164,11 +162,11 @@ func (pq *persistentQueue[T]) initPersistentContiguousStorage(ctx context.Contex
         } else {
             pq.logger.Error("Failed getting read/write index, starting with new ones", zap.Error(err))
         }
-        pq.readIndex = 0
-        pq.writeIndex = 0
+        pq.metadata.ReadIndex = 0
+        pq.metadata.WriteIndex = 0
     }
 
-    queueSize := pq.writeIndex - pq.readIndex
+    queueSize := pq.metadata.WriteIndex - pq.metadata.ReadIndex
 
     // If the queue is sized by the number of requests, no need to read the queue size from storage.
     if queueSize > 0 && !pq.isRequestSized {
@@ -177,7 +175,7 @@ func (pq *persistentQueue[T]) initPersistentContiguousStorage(ctx context.Contex
         }
     }
     //nolint:gosec
-    pq.queueSize = int64(queueSize)
+    pq.metadata.QueueSize = int64(queueSize)
 }
 
 // restoreQueueSizeFromStorage restores the queue size from storage.
@@ -222,7 +220,7 @@ func (pq *persistentQueue[T]) backupQueueSize(ctx context.Context) error {
     }
 
     //nolint:gosec
-    return pq.client.Set(ctx, queueSizeKey, itemIndexToBytes(uint64(pq.queueSize)))
+    return pq.client.Set(ctx, queueSizeKey, itemIndexToBytes(uint64(pq.metadata.QueueSize)))
 }
 
 // unrefClient unrefs the client, and closes if no more references. Callers MUST hold the mutex.
@@ -247,7 +245,7 @@ func (pq *persistentQueue[T]) Offer(ctx context.Context, req T) error {
 // putInternal is the internal version that requires caller to hold the mutex lock.
 func (pq *persistentQueue[T]) putInternal(ctx context.Context, req T) error {
     reqSize := pq.set.sizer.Sizeof(req)
-    for pq.queueSize+reqSize > pq.set.capacity {
+    for pq.metadata.QueueSize+reqSize > pq.set.capacity {
         if !pq.set.blockOnOverflow {
             return ErrQueueIsFull
         }
@@ -263,20 +261,20 @@ func (pq *persistentQueue[T]) putInternal(ctx context.Context, req T) error {
 
     // Carry out a transaction where we both add the item and update the write index
     ops := []*storage.Operation{
-        storage.SetOperation(writeIndexKey, itemIndexToBytes(pq.writeIndex+1)),
-        storage.SetOperation(getItemKey(pq.writeIndex), reqBuf),
+        storage.SetOperation(writeIndexKey, itemIndexToBytes(pq.metadata.WriteIndex+1)),
+        storage.SetOperation(getItemKey(pq.metadata.WriteIndex), reqBuf),
     }
     if err = pq.client.Batch(ctx, ops...); err != nil {
         return err
     }
 
-    pq.writeIndex++
-    pq.queueSize += reqSize
+    pq.metadata.WriteIndex++
+    pq.metadata.QueueSize += reqSize
     pq.hasMoreElements.Signal()
 
     // Back up the queue size to storage every 10 writes. The stored value is used to recover the queue size
     // in case if the collector is killed. The recovered queue size is allowed to be inaccurate.
-    if (pq.writeIndex % 10) == 5 {
+    if (pq.metadata.WriteIndex % 10) == 5 {
         if err := pq.backupQueueSize(ctx); err != nil {
             pq.logger.Error("Error writing queue size to storage", zap.Error(err))
         }
@@ -296,11 +294,11 @@ func (pq *persistentQueue[T]) Read(ctx context.Context) (context.Context, T, Don
     }
 
     // Read until either a successful retrieved element or no more elements in the storage.
-    for pq.readIndex != pq.writeIndex {
+    for pq.metadata.ReadIndex != pq.metadata.WriteIndex {
         index, req, consumed := pq.getNextItem(ctx)
         // Ensure the used size and the channel size are in sync.
-        if pq.readIndex == pq.writeIndex {
-            pq.queueSize = 0
+        if pq.metadata.ReadIndex == pq.metadata.WriteIndex {
+            pq.metadata.QueueSize = 0
             pq.hasMoreSpace.Signal()
         }
         if consumed {
@@ -320,14 +318,14 @@ func (pq *persistentQueue[T]) Read(ctx context.Context) (context.Context, T, Don
 // finished, the index should be called with onDone to clean up the storage. If no new item is available,
 // returns false.
 func (pq *persistentQueue[T]) getNextItem(ctx context.Context) (uint64, T, bool) {
-    index := pq.readIndex
+    index := pq.metadata.ReadIndex
     // Increase here, so even if errors happen below, it always iterates
-    pq.readIndex++
-    pq.currentlyDispatchedItems = append(pq.currentlyDispatchedItems, index)
+    pq.metadata.ReadIndex++
+    pq.metadata.CurrentlyDispatchedItems = append(pq.metadata.CurrentlyDispatchedItems, index)
     getOp := storage.GetOperation(getItemKey(index))
     err := pq.client.Batch(ctx,
-        storage.SetOperation(readIndexKey, itemIndexToBytes(pq.readIndex)),
-        storage.SetOperation(currentlyDispatchedItemsKey, itemIndexArrayToBytes(pq.currentlyDispatchedItems)),
+        storage.SetOperation(readIndexKey, itemIndexToBytes(pq.metadata.ReadIndex)),
+        storage.SetOperation(currentlyDispatchedItemsKey, itemIndexArrayToBytes(pq.metadata.CurrentlyDispatchedItems)),
         getOp)
 
     var request T
@@ -364,12 +362,12 @@ func (pq *persistentQueue[T]) onDone(index uint64, elSize int64, consumeErr erro
         pq.mu.Unlock()
     }()
 
-    pq.queueSize -= elSize
+    pq.metadata.QueueSize -= elSize
     // The size might be not in sync with the queue in case it's restored from the disk
     // because we don't flush the current queue size on the disk on every read/write.
     // In that case we need to make sure it doesn't go below 0.
-    if pq.queueSize < 0 {
-        pq.queueSize = 0
+    if pq.metadata.QueueSize < 0 {
+        pq.metadata.QueueSize = 0
     }
     pq.hasMoreSpace.Signal()
 
@@ -385,7 +383,7 @@ func (pq *persistentQueue[T]) onDone(index uint64, elSize int64, consumeErr erro
 
     // Back up the queue size to storage on every 10 reads. The stored value is used to recover the queue size
     // in case if the collector is killed. The recovered queue size is allowed to be inaccurate.
-    if (pq.readIndex % 10) == 0 {
+    if (pq.metadata.ReadIndex % 10) == 0 {
         if qsErr := pq.backupQueueSize(context.Background()); qsErr != nil {
             pq.logger.Error("Error writing queue size to storage", zap.Error(qsErr))
         }
@@ -463,16 +461,16 @@ func (pq *persistentQueue[T]) retrieveAndEnqueueNotDispatchedReqs(ctx context.Co
 
 // itemDispatchingFinish removes the item from the list of currently dispatched items and deletes it from the persistent queue
 func (pq *persistentQueue[T]) itemDispatchingFinish(ctx context.Context, index uint64) error {
-    lenCDI := len(pq.currentlyDispatchedItems)
+    lenCDI := len(pq.metadata.CurrentlyDispatchedItems)
     for i := 0; i < lenCDI; i++ {
-        if pq.currentlyDispatchedItems[i] == index {
-            pq.currentlyDispatchedItems[i] = pq.currentlyDispatchedItems[lenCDI-1]
-            pq.currentlyDispatchedItems = pq.currentlyDispatchedItems[:lenCDI-1]
+        if pq.metadata.CurrentlyDispatchedItems[i] == index {
+            pq.metadata.CurrentlyDispatchedItems[i] = pq.metadata.CurrentlyDispatchedItems[lenCDI-1]
+            pq.metadata.CurrentlyDispatchedItems = pq.metadata.CurrentlyDispatchedItems[:lenCDI-1]
             break
         }
     }
 
-    setOp := storage.SetOperation(currentlyDispatchedItemsKey, itemIndexArrayToBytes(pq.currentlyDispatchedItems))
+    setOp := storage.SetOperation(currentlyDispatchedItemsKey, itemIndexArrayToBytes(pq.metadata.CurrentlyDispatchedItems))
     deleteOp := storage.DeleteOperation(getItemKey(index))
     if err := pq.client.Batch(ctx, setOp, deleteOp); err != nil {
         // got an error, try to gracefully handle it

exporter/exporterhelper/internal/queuebatch/persistent_queue_test.go

Lines changed: 3 additions & 3 deletions
@@ -686,10 +686,10 @@ func TestPersistentQueue_CurrentlyProcessedItems(t *testing.T) {
     requireCurrentlyDispatchedItemsEqual(t, newPs, []uint64{})
     assert.Equal(t, int64(0), newPs.Size())
     // The writeIndex should be now set accordingly
-    require.EqualValues(t, 6, newPs.writeIndex)
+    require.EqualValues(t, 6, newPs.metadata.WriteIndex)
 
     // There should be no items left in the storage
-    for i := uint64(0); i < newPs.writeIndex; i++ {
+    for i := uint64(0); i < newPs.metadata.WriteIndex; i++ {
         bb, err := newPs.client.Get(context.Background(), getItemKey(i))
         require.NoError(t, err)
         require.Nil(t, bb)
@@ -1203,5 +1203,5 @@ func TestPersistentQueue_RestoredUsedSizeIsCorrectedOnDrain(t *testing.T) {
 func requireCurrentlyDispatchedItemsEqual(t *testing.T, pq *persistentQueue[uint64], compare []uint64) {
     pq.mu.Lock()
     defer pq.mu.Unlock()
-    assert.ElementsMatch(t, compare, pq.currentlyDispatchedItems)
+    assert.ElementsMatch(t, compare, pq.metadata.CurrentlyDispatchedItems)
 }
