
Commit b082e57

Use separate config struct for persistent storage (#4067)
1 parent 6f5d208 commit b082e57


4 files changed, +59 -29 lines changed


exporter/exporterhelper/README.md

Lines changed: 10 additions & 3 deletions
@@ -17,12 +17,10 @@ The following configuration options can be modified:
 - `sending_queue`
   - `enabled` (default = true)
   - `num_consumers` (default = 10): Number of consumers that dequeue batches; ignored if `enabled` is `false`
-  - `queue_size` (default = 5000): Maximum number of batches kept in memory or on disk (for persistent storage) before dropping; ignored if `enabled` is `false`
+  - `queue_size` (default = 5000): Maximum number of batches kept in memory before dropping; ignored if `enabled` is `false`
     User should calculate this as `num_seconds * requests_per_second` where:
     - `num_seconds` is the number of seconds to buffer in case of a backend outage
     - `requests_per_second` is the average number of requests per seconds.
-  - `persistent_storage_enabled` (default = false): When set, enables persistence via a file storage extension
-    (note, `enable_unstable` build tag needs to be enabled first, see below for more details)
 - `resource_to_telemetry_conversion`
   - `enabled` (default = false): If `enabled` is `true`, all the resource attributes will be converted to metric labels by default.
 - `timeout` (default = 5s): Time to wait per individual attempt to send data to a backend.
@@ -36,6 +34,15 @@ The full list of settings exposed for this helper exporter are documented [here]
 > :warning: The capability is under development and currently can be enabled only in OpenTelemetry
 > Collector Contrib with `enable_unstable` build tag set.

+With this build tag set, additional configuration option can be enabled:
+
+- `sending_queue`
+  - `persistent_storage_enabled` (default = false): When set, enables persistence via a file storage extension
+    (note, `enable_unstable` build tag needs to be enabled first, see below for more details)
+
+The maximum number of batches stored to disk can be controlled using `sending_queue.queue_size` parameter (which,
+similarly as for in-memory buffering, defaults to 5000 batches).
+
 When `persistent_storage_enabled` is set to true, the queue is being buffered to disk using
 [file storage extension](https://github.com/open-telemetry/opentelemetry-collector-contrib/tree/main/extension/storage/filestorage).
 If collector instance is killed while having some items in the persistent queue, on restart the items are being picked and
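To make the sizing guidance above concrete, here is a small illustrative Go sketch (not part of this commit) that overrides the default queue size following the `num_seconds * requests_per_second` rule; the standalone program and its use of `fmt` are assumptions made for the example only:

package main

import (
	"fmt"

	"go.opentelemetry.io/collector/exporter/exporterhelper"
)

func main() {
	// Start from the package defaults introduced by this commit:
	// Enabled: true, NumConsumers: 10, QueueSize: 5000.
	qs := exporterhelper.DefaultQueueSettings()

	// Size the queue to survive a 60-second backend outage at roughly
	// 100 requests per second: num_seconds * requests_per_second.
	qs.QueueSize = 60 * 100 // 6000 batches

	fmt.Println("queue_size:", qs.QueueSize)
}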

exporter/exporterhelper/queued_retry.go

Lines changed: 0 additions & 26 deletions
@@ -49,32 +49,6 @@ func init() {
     metricproducer.GlobalManager().AddProducer(r)
 }

-// QueueSettings defines configuration for queueing batches before sending to the consumerSender.
-type QueueSettings struct {
-    // Enabled indicates whether to not enqueue batches before sending to the consumerSender.
-    Enabled bool `mapstructure:"enabled"`
-    // NumConsumers is the number of consumers from the queue.
-    NumConsumers int `mapstructure:"num_consumers"`
-    // QueueSize is the maximum number of batches allowed in queue at a given time.
-    QueueSize int `mapstructure:"queue_size"`
-    // PersistentStorageEnabled describes whether persistence via a file storage extension is enabled
-    PersistentStorageEnabled bool `mapstructure:"persistent_storage_enabled"`
-}
-
-// DefaultQueueSettings returns the default settings for QueueSettings.
-func DefaultQueueSettings() QueueSettings {
-    return QueueSettings{
-        Enabled:      true,
-        NumConsumers: 10,
-        // For 5000 queue elements at 100 requests/sec gives about 50 sec of survival of destination outage.
-        // This is a pretty decent value for production.
-        // User should calculate this from the perspective of how many seconds to buffer in case of a backend outage,
-        // multiply that by the number of requests per seconds.
-        QueueSize:                5000,
-        PersistentStorageEnabled: false,
-    }
-}
-
 // RetrySettings defines configuration for retrying batches in case of export failure.
 // The current supported strategy is exponential backoff.
 type RetrySettings struct {

exporter/exporterhelper/queued_retry_experimental.go

Lines changed: 26 additions & 0 deletions
@@ -37,6 +37,32 @@ import (
 // queued_retry_experimental includes the code for both memory-backed and persistent-storage backed queued retry helpers
 // enabled by setting "enable_unstable" build tag

+// QueueSettings defines configuration for queueing batches before sending to the consumerSender.
+type QueueSettings struct {
+    // Enabled indicates whether to not enqueue batches before sending to the consumerSender.
+    Enabled bool `mapstructure:"enabled"`
+    // NumConsumers is the number of consumers from the queue.
+    NumConsumers int `mapstructure:"num_consumers"`
+    // QueueSize is the maximum number of batches allowed in queue at a given time.
+    QueueSize int `mapstructure:"queue_size"`
+    // PersistentStorageEnabled describes whether persistence via a file storage extension is enabled
+    PersistentStorageEnabled bool `mapstructure:"persistent_storage_enabled"`
+}
+
+// DefaultQueueSettings returns the default settings for QueueSettings.
+func DefaultQueueSettings() QueueSettings {
+    return QueueSettings{
+        Enabled:      true,
+        NumConsumers: 10,
+        // For 5000 queue elements at 100 requests/sec gives about 50 sec of survival of destination outage.
+        // This is a pretty decent value for production.
+        // User should calculate this from the perspective of how many seconds to buffer in case of a backend outage,
+        // multiply that by the number of requests per seconds.
+        QueueSize:                5000,
+        PersistentStorageEnabled: false,
+    }
+}
+
 var (
     currentlyDispatchedBatchesGauge, _ = r.AddInt64DerivedGauge(
         obsmetrics.ExporterKey+"/currently_dispatched_batches",
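As a usage note, the following is a hedged Go sketch (not part of this commit) of turning on the new field when the collector is built with the `enable_unstable` tag; the wrapper function name is invented for illustration, `PersistentStorageEnabled` only compiles in that build, and a file storage extension still has to be configured separately:

package example

import "go.opentelemetry.io/collector/exporter/exporterhelper"

// persistentQueueSettings returns queue settings with disk persistence turned on.
// Illustrative only: PersistentStorageEnabled exists solely in the enable_unstable
// build, and persistence also requires a configured file storage extension at runtime.
func persistentQueueSettings() exporterhelper.QueueSettings {
	qs := exporterhelper.DefaultQueueSettings() // QueueSize (5000) also bounds the batches stored on disk
	qs.PersistentStorageEnabled = true
	return qs
}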

exporter/exporterhelper/queued_retry_inmemory.go

Lines changed: 23 additions & 0 deletions
@@ -34,6 +34,29 @@ import (
 // queued_retry_inmemory includes the code for memory-backed (original) queued retry helper only
 // enabled when "enable_unstable" build tag is not set

+// QueueSettings defines configuration for queueing batches before sending to the consumerSender.
+type QueueSettings struct {
+    // Enabled indicates whether to not enqueue batches before sending to the consumerSender.
+    Enabled bool `mapstructure:"enabled"`
+    // NumConsumers is the number of consumers from the queue.
+    NumConsumers int `mapstructure:"num_consumers"`
+    // QueueSize is the maximum number of batches allowed in queue at a given time.
+    QueueSize int `mapstructure:"queue_size"`
+}
+
+// DefaultQueueSettings returns the default settings for QueueSettings.
+func DefaultQueueSettings() QueueSettings {
+    return QueueSettings{
+        Enabled:      true,
+        NumConsumers: 10,
+        // For 5000 queue elements at 100 requests/sec gives about 50 sec of survival of destination outage.
+        // This is a pretty decent value for production.
+        // User should calculate this from the perspective of how many seconds to buffer in case of a backend outage,
+        // multiply that by the number of requests per seconds.
+        QueueSize: 5000,
+    }
+}
+
 type queuedRetrySender struct {
     fullName string
     cfg      QueueSettings
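For context on how these settings reach an exporter, below is a speculative Go sketch using the package's `WithQueue` option; `WithQueue` and `Option` are assumed from the surrounding exporterhelper code and are not shown in this diff:

package example

import "go.opentelemetry.io/collector/exporter/exporterhelper"

// queueOption wires the in-memory defaults (10 consumers, 5000 batches) into an
// exporter. WithQueue and Option are assumed exporterhelper APIs, not part of
// this commit's diff.
func queueOption() exporterhelper.Option {
	return exporterhelper.WithQueue(exporterhelper.DefaultQueueSettings())
}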
