
Commit 6c71da2

[exporter/splunk_hec] Apply compression if it's enabled in the config (#22969)
The compression used to be enabled only if the payload size was greater than 1.5 KB, which significantly complicated the logic and made it hard to test. This change applies compression to the payload unconditionally whenever it's enabled in the config. Benchmarking shows improvements in throughput and CPU usage for large payloads, and an expected degradation for small payloads, which is acceptable given that small payloads are not a common case.
1 parent b20a252 · commit 6c71da2
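
For context, the old behavior switched from a plain buffer to a gzip writer only once the payload crossed roughly 1.5 KB (an ethernet-MTU heuristic, per the removed comment in the diff below); the new behavior picks the writer once, at construction time, from the config. Here is a minimal sketch of that decision, assuming hypothetical names (`newPayloadWriter` and `nopCloser` are illustrative, not the exporter's actual API):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
)

// newPayloadWriter is a hypothetical helper illustrating the new approach:
// the gzip-vs-plain decision is made once, from config, instead of being
// re-evaluated per write against a 1.5KB threshold as the old code did.
func newPayloadWriter(compressionEnabled bool, buf *bytes.Buffer) io.WriteCloser {
	if compressionEnabled {
		return gzip.NewWriter(buf) // compress every payload when enabled
	}
	return nopCloser{buf} // plain pass-through otherwise
}

// nopCloser adapts a plain writer to io.WriteCloser with a no-op Close.
type nopCloser struct{ io.Writer }

func (nopCloser) Close() error { return nil }

func main() {
	var buf bytes.Buffer
	w := newPayloadWriter(true, &buf)
	_, _ = w.Write([]byte(`{"event":"hello"}`)) // well under 1.5 KB, still compressed
	_ = w.Close()                               // flush the gzip footer before reading buf
	fmt.Printf("compressed payload: %d bytes\n", buf.Len())
}
```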

4 files changed: +169 −126 lines changed
Lines changed: 25 additions & 0 deletions

@@ -0,0 +1,25 @@
+# Use this changelog template to create an entry for release notes.
+# If your change doesn't affect end users, such as a test fix or a tooling change,
+# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
+
+# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
+change_type: enhancement
+
+# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
+component: exporter/splunk_hec
+
+# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
+note: Apply compression to Splunk HEC payload unconditionally if it's enabled in the config.
+
+# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
+issues: [22969, 22018]
+
+# (Optional) One or more lines of additional information to render under the primary note.
+# These lines will be padded with 2 spaces and then inserted directly into the document.
+# Use pipe (|) for multiline entries.
+subtext: |
+  The compression used to be enabled only if the payload size was greater than 1.5KB which significantly
+  complicated the logic and made it hard to test. This change makes the compression unconditionally applied to
+  the payload if it's enabled in the config. The benchmarking shows improvements in the throughput and CPU usage for
+  large payloads and expected degradation for small payloads which is acceptable given that it's not a common case.
+

exporter/splunkhecexporter/buffer.go

Lines changed: 67 additions & 69 deletions
@@ -17,22 +17,24 @@ var (
 	errOverCapacity = errors.New("over capacity")
 )
 
-// Minimum number of bytes to compress. 1500 is the MTU of an ethernet frame.
-const minCompressionLen = 1500
-
 // bufferState encapsulates intermediate buffer state when pushing data
 type bufferState struct {
-	compressionAvailable bool
-	bufferMaxLen         uint
-	maxEventLength       uint
-	writer               io.Writer
-	buf                  *bytes.Buffer
-	jsonStream           *jsoniter.Stream
-	rawLength            int
+	maxEventLength uint
+	buf            buffer
+	jsonStream     *jsoniter.Stream
+	rawLength      int
+}
+
+type buffer interface {
+	io.Writer
+	io.Reader
+	io.Closer
+	Reset()
+	Len() int
 }
 
 func (b *bufferState) compressionEnabled() bool {
-	_, ok := b.writer.(*cancellableGzipWriter)
+	_, ok := b.buf.(*cancellableGzipWriter)
 	return ok
 }
 
@@ -42,9 +44,6 @@ func (b *bufferState) containsData() bool {
 
 func (b *bufferState) reset() {
 	b.buf.Reset()
-	if _, ok := b.writer.(*cancellableBytesWriter); !ok {
-		b.writer = &cancellableBytesWriter{innerWriter: b.buf, maxCapacity: b.bufferMaxLen}
-	}
 	b.rawLength = 0
 }
 
@@ -53,64 +52,23 @@ func (b *bufferState) Read(p []byte) (n int, err error) {
 }
 
 func (b *bufferState) Close() error {
-	if _, ok := b.writer.(*cancellableGzipWriter); ok {
-		return b.writer.(*cancellableGzipWriter).close()
-	}
-	return nil
+	return b.buf.Close()
 }
 
 // accept returns true if data is accepted by the buffer
 func (b *bufferState) accept(data []byte) (bool, error) {
 	if len(data)+b.rawLength > int(b.maxEventLength) {
 		return false, nil
 	}
-	_, err := b.writer.Write(data)
-	overCapacity := errors.Is(err, errOverCapacity)
-	bufLen := b.buf.Len()
-	if overCapacity {
-		bufLen += len(data)
-	}
-	if b.compressionAvailable && !b.compressionEnabled() && bufLen > minCompressionLen {
-		// switch over to a zip buffer.
-		tmpBuf := bytes.NewBuffer(make([]byte, 0, b.bufferMaxLen+bufCapPadding))
-		writer := gzip.NewWriter(tmpBuf)
-		writer.Reset(tmpBuf)
-		zipWriter := &cancellableGzipWriter{
-			innerBuffer: tmpBuf,
-			innerWriter: writer,
-			// 8 bytes required for the zip footer.
-			maxCapacity: b.bufferMaxLen - 8,
-		}
-
-		if b.bufferMaxLen == 0 {
-			zipWriter.maxCapacity = 0
-		}
-
-		// we write the bytes buffer into the zip buffer. Any error from this is I/O, and should stop the process.
-		if _, err2 := zipWriter.Write(b.buf.Bytes()); err2 != nil {
-			return false, err2
-		}
-		b.writer = zipWriter
-		b.buf = tmpBuf
-		// if the byte writer was over capacity, try to write the new entry in the zip writer:
-		if overCapacity {
-			if _, err2 := zipWriter.Write(data); err2 != nil {
-				overCapacity2 := errors.Is(err2, errOverCapacity)
-				if overCapacity2 {
-					return false, nil
-				}
-				return false, err2
-			}
-
-		}
+	_, err := b.buf.Write(data)
+	if err == nil {
 		b.rawLength += len(data)
 		return true, nil
 	}
-	if overCapacity {
+	if errors.Is(err, errOverCapacity) {
 		return false, nil
 	}
-	b.rawLength += len(data)
-	return true, err
+	return false, err
 }
 
 type cancellableBytesWriter struct {
@@ -128,6 +86,22 @@ func (c *cancellableBytesWriter) Write(b []byte) (int, error) {
 	return c.innerWriter.Write(b)
 }
 
+func (c *cancellableBytesWriter) Read(p []byte) (int, error) {
+	return c.innerWriter.Read(p)
+}
+
+func (c *cancellableBytesWriter) Reset() {
+	c.innerWriter.Reset()
+}
+
+func (c *cancellableBytesWriter) Close() error {
+	return nil
+}
+
+func (c *cancellableBytesWriter) Len() int {
+	return c.innerWriter.Len()
+}
+
 type cancellableGzipWriter struct {
 	innerBuffer *bytes.Buffer
 	innerWriter *gzip.Writer
@@ -168,10 +142,24 @@ func (c *cancellableGzipWriter) Write(b []byte) (int, error) {
 	return c.innerWriter.Write(b)
 }
 
-func (c *cancellableGzipWriter) close() error {
+func (c *cancellableGzipWriter) Read(p []byte) (int, error) {
+	return c.innerBuffer.Read(p)
+}
+
+func (c *cancellableGzipWriter) Reset() {
+	c.innerBuffer.Reset()
+	c.innerWriter.Reset(c.innerBuffer)
+	c.len = 0
+}
+
+func (c *cancellableGzipWriter) Close() error {
 	return c.innerWriter.Close()
 }
 
+func (c *cancellableGzipWriter) Len() int {
+	return c.innerBuffer.Len()
+}
+
 // bufferStatePool is a pool of bufferState objects.
 type bufferStatePool struct {
	pool *sync.Pool
@@ -189,18 +177,28 @@ func (p bufferStatePool) put(bf *bufferState) {
 
 const initBufferCap = 512
 
-func newBufferStatePool(bufCap uint, compressionAvailable bool, maxEventLength uint) bufferStatePool {
+func newBufferStatePool(bufCap uint, compressionEnabled bool, maxEventLength uint) bufferStatePool {
 	return bufferStatePool{
 		&sync.Pool{
 			New: func() interface{} {
-				buf := bytes.NewBuffer(make([]byte, 0, initBufferCap))
+				innerBuffer := bytes.NewBuffer(make([]byte, 0, initBufferCap))
+				var buf buffer
+				if compressionEnabled {
+					buf = &cancellableGzipWriter{
+						innerBuffer: innerBuffer,
+						innerWriter: gzip.NewWriter(buf),
+						maxCapacity: bufCap,
+					}
+				} else {
+					buf = &cancellableBytesWriter{
+						innerWriter: innerBuffer,
+						maxCapacity: bufCap,
+					}
+				}
 				return &bufferState{
-					compressionAvailable: compressionAvailable,
-					writer:               &cancellableBytesWriter{innerWriter: buf, maxCapacity: bufCap},
-					buf:                  buf,
-					jsonStream:           jsoniter.NewStream(jsoniter.ConfigDefault, nil, initBufferCap),
-					bufferMaxLen:         bufCap,
-					maxEventLength:       maxEventLength,
+					buf:            buf,
+					jsonStream:     jsoniter.NewStream(jsoniter.ConfigDefault, nil, initBufferCap),
+					maxEventLength: maxEventLength,
 				}
 			},
 		},

exporter/splunkhecexporter/client.go

Lines changed: 4 additions & 4 deletions
@@ -219,7 +219,7 @@ func (c *client) fillLogsBuffer(logs plog.Logs, bs *bufferState, is iterState) (
 			}
 			permanentErrors = append(permanentErrors, consumererror.NewPermanent(
 				fmt.Errorf("dropped log event: error: event size %d bytes larger than configured max"+
-					" content length %d bytes", len(b), bs.bufferMaxLen)))
+					" content length %d bytes", len(b), c.config.MaxContentLengthLogs)))
 			return iterState{i, j, k + 1, false}, permanentErrors
 		}
 	}
@@ -277,7 +277,7 @@ func (c *client) fillMetricsBuffer(metrics pmetric.Metrics, bs *bufferState, is
 			}
 			permanentErrors = append(permanentErrors, consumererror.NewPermanent(
 				fmt.Errorf("dropped metric event: error: event size %d bytes larger than configured max"+
-					" content length %d bytes", len(b), bs.bufferMaxLen)))
+					" content length %d bytes", len(b), c.config.MaxContentLengthMetrics)))
 			return iterState{i, j, k + 1, false}, permanentErrors
 		}
 	}
@@ -322,7 +322,7 @@ func (c *client) fillTracesBuffer(traces ptrace.Traces, bs *bufferState, is iter
 			}
 			permanentErrors = append(permanentErrors, consumererror.NewPermanent(
 				fmt.Errorf("dropped span event: error: event size %d bytes larger than configured max"+
-					" content length %d bytes", len(b), bs.bufferMaxLen)))
+					" content length %d bytes", len(b), c.config.MaxContentLengthTraces)))
 			return iterState{i, j, k + 1, false}, permanentErrors
 		}
 	}
@@ -381,7 +381,7 @@ func (c *client) pushTracesDataInBatches(ctx context.Context, td ptrace.Traces,
 }
 
 func (c *client) postEvents(ctx context.Context, bufState *bufferState, headers map[string]string) error {
-	if err := bufState.Close(); err != nil {
+	if err := bufState.buf.Close(); err != nil {
 		return err
 	}
 	return c.hecWorker.send(ctx, bufState, headers)
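
Note the ordering in `postEvents`: the buffer is closed before it is handed to `hecWorker.send`. For the gzip-backed buffer, `Close()` flushes the final deflate block and the gzip footer; sending before closing would yield a truncated stream. A minimal sketch of that flush-then-send discipline follows (the endpoint URL and `newHECRequest` helper are illustrative, not the exporter's actual worker):

```go
package main

import (
	"bytes"
	"compress/gzip"
	"fmt"
	"io"
	"net/http"
)

// newHECRequest builds a POST request from an already-closed (fully
// flushed) payload buffer. Hypothetical helper for illustration only.
func newHECRequest(url string, body io.Reader, compressed bool) (*http.Request, error) {
	req, err := http.NewRequest(http.MethodPost, url, body)
	if err != nil {
		return nil, err
	}
	if compressed {
		// Signal a gzip-compressed body to the receiving endpoint.
		req.Header.Set("Content-Encoding", "gzip")
	}
	return req, nil
}

func main() {
	var buf bytes.Buffer
	zw := gzip.NewWriter(&buf)
	_, _ = zw.Write([]byte(`{"event":"hello"}`))

	// Close first: this writes the final gzip block and footer.
	if err := zw.Close(); err != nil {
		panic(err)
	}

	// Only now is buf a complete gzip stream, safe to use as a request body.
	req, _ := newHECRequest("https://splunk.example.com:8088/services/collector", &buf, true)
	fmt.Println(req.Header.Get("Content-Encoding"), buf.Len(), "bytes")
}
```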
