[stanza] Use buffer pool for the read buffers to limit allocations #39373

Merged: 1 commit, Apr 14, 2025
27 changes: 27 additions & 0 deletions .chloggen/pool-bufs-factory.yaml
@@ -0,0 +1,27 @@
# Use this changelog template to create an entry for release notes.

# One of 'breaking', 'deprecation', 'new_component', 'enhancement', 'bug_fix'
change_type: enhancement

# The name of the component, or a single word describing the area of concern, (e.g. filelogreceiver)
component: pkg/stanza

# A brief description of the change. Surround your text with quotes ("") if it needs to start with a backtick (`).
note: Use buffer pool for the read buffers to limit allocations

# Mandatory: One or more tracking issues related to the change. You can use the PR number here if no issue exists.
issues: [39373]

# (Optional) One or more lines of additional information to render under the primary note.
# These lines will be padded with 2 spaces and then inserted directly into the document.
# Use pipe (|) for multiline entries.
subtext:

# If your change doesn't affect end users or the exported elements of any package,
# you should instead start your pull request title with [chore] or use the "Skip Changelog" label.
# Optional: The change log or logs in which this entry should be included.
# e.g. '[user]' or '[user, api]'
# Include 'user' if the change is relevant to end users.
# Include 'api' if there is a change to a library API.
# Default: '[user]'
change_logs: []
109 changes: 109 additions & 0 deletions pkg/stanza/fileconsumer/benchmark_test.go
@@ -9,6 +9,7 @@ import (
"os"
"path/filepath"
"sync"
"sync/atomic"
"testing"
"time"

@@ -161,6 +162,7 @@ func BenchmarkFileInput(b *testing.B) {

for _, bench := range cases {
b.Run(bench.name, func(b *testing.B) {
b.ReportAllocs()
rootDir := b.TempDir()

var files []*os.File
@@ -228,3 +230,110 @@ func BenchmarkFileInput(b *testing.B) {
})
}
}

func BenchmarkConsumeFiles(b *testing.B) {
cases := []fileInputBenchmark{
{
name: "Single",
paths: []string{
"file0.log",
},
config: func() *Config {
cfg := NewConfig()
cfg.Include = []string{
"file0.log",
}
cfg.MaxLogSize = 1 * 1024 * 1024
cfg.InitialBufferSize = 1 * 1024 * 1024
cfg.FingerprintSize = fingerprint.DefaultSize / 10
return cfg
},
},
{
name: "Multiple",
paths: func() []string {
paths := make([]string, 100)
for i := range paths {
paths[i] = fmt.Sprintf("file%d.log", i)
}
return paths
}(),
config: func() *Config {
cfg := NewConfig()
cfg.Include = []string{"file*.log"}
cfg.Encoding = ""
cfg.FingerprintSize = fingerprint.DefaultSize / 10
cfg.MaxLogSize = 1 * 1024 * 1024
cfg.InitialBufferSize = 1 * 1024 * 1024
cfg.MaxConcurrentFiles = 10
return cfg
},
},
}

// Pregenerate some lines which we can write to the files
// to avoid measuring the time it takes to generate them
// and to reduce the amount of syscalls in the benchmark.
uniqueLines := 10
severalLines := ""
for i := 0; i < uniqueLines; i++ {
severalLines += string(filetest.TokenWithLength(999)) + "\n"
}

for _, bench := range cases {
b.Run(bench.name, func(b *testing.B) {
rootDir := b.TempDir()

var consumePaths []string
var files []*os.File
for _, path := range bench.paths {
consumePath := filepath.Join(rootDir, path)
consumePaths = append(consumePaths, consumePath)
f := filetest.OpenFile(b, consumePath)
files = append(files, f)
// Initialize the file to ensure a unique fingerprint
_, err := f.WriteString(f.Name() + "\n")
require.NoError(b, err)
for i := 0; i < b.N; i++ {
_, err := f.WriteString(severalLines)
require.NoError(b, err)
}
require.NoError(b, f.Sync())
}

cfg := bench.config()
for i, inc := range cfg.Include {
cfg.Include[i] = filepath.Join(rootDir, inc)
}
cfg.StartAt = "beginning"
// Use a long poll so that we don't trigger it.
cfg.PollInterval = 1 * time.Hour

doneChan := make(chan bool, len(files))
numTokens := &atomic.Int64{}
callback := func(_ context.Context, tokens [][]byte, _ map[string]any, _ int64) error {
if numTokens.Add(int64(len(tokens))) == int64(len(files)*(b.N*uniqueLines+1)) {
close(doneChan)
}
return nil
}
set := componenttest.NewNopTelemetrySettings()
op, err := cfg.Build(set, callback)
require.NoError(b, err)

require.NoError(b, op.Start(testutil.NewUnscopedMockPersister()))
defer func() {
require.NoError(b, op.Stop())
}()

b.ReportAllocs()
b.ResetTimer()
for len(consumePaths) > op.maxBatchFiles {
op.consume(context.Background(), consumePaths[:op.maxBatchFiles])
consumePaths = consumePaths[op.maxBatchFiles:]
}
op.consume(context.Background(), consumePaths)
<-doneChan
})
}
}
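
BenchmarkConsumeFiles drives the manager's consume step directly and uses the atomic token counter to block until every pregenerated line has been emitted. To see the effect of the pooled read buffers, it can be run with allocation reporting, e.g. `go test -bench=BenchmarkConsumeFiles -benchmem ./pkg/stanza/fileconsumer/`; the `-benchmem` flag adds the B/op and allocs/op columns this change is meant to drive down.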
2 changes: 1 addition & 1 deletion pkg/stanza/fileconsumer/config.go
@@ -152,7 +152,7 @@ func (c Config) Build(set component.TelemetrySettings, emit emit.Callback, opts
}

set.Logger = set.Logger.With(zap.String("component", "fileconsumer"))
readerFactory := reader.Factory{
readerFactory := &reader.Factory{
TelemetrySettings: set,
FromBeginning: startAtBeginning,
FingerprintSize: int(c.FingerprintSize),
2 changes: 1 addition & 1 deletion pkg/stanza/fileconsumer/file.go
@@ -27,7 +27,7 @@ type Manager struct {
wg sync.WaitGroup
cancel context.CancelFunc

readerFactory reader.Factory
readerFactory *reader.Factory
fileMatcher *matcher.Matcher
tracker tracker.Tracker
noTracking bool
3 changes: 3 additions & 0 deletions pkg/stanza/fileconsumer/internal/reader/factory.go
@@ -8,6 +8,7 @@ import (
"errors"
"fmt"
"os"
"sync"
"time"

"go.opentelemetry.io/collector/component"
@@ -34,6 +35,7 @@ type Factory struct {
HeaderConfig *header.Config
FromBeginning bool
FingerprintSize int
BufPool sync.Pool
InitialBufferSize int
MaxLogSize int
Encoding encoding.Encoding
@@ -75,6 +77,7 @@ func (f *Factory) NewReaderFromMetadata(file *os.File, m *Metadata) (r *Reader,
file: file,
fileName: file.Name(),
fingerprintSize: f.FingerprintSize,
bufPool: &f.BufPool,
initialBufferSize: f.InitialBufferSize,
maxLogSize: f.MaxLogSize,
decoder: f.Encoding.NewDecoder(),
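Why the factory is now passed by pointer (the reader.Factory → *reader.Factory change in config.go and file.go above): a sync.Pool must not be copied after first use, and go vet's copylocks check flags any by-value copy of a struct that embeds one; worse, each copy would pool its buffers independently, so nothing would actually be reused across readers. A minimal sketch of the ownership pattern, with hypothetical names (holder, newHolder) that are not part of the PR:

package main

import "sync"

// holder stands in for reader.Factory: it owns the pool, so it is only ever
// handed out by pointer. Copying it by value would copy the embedded
// sync.Pool and split the pooled buffers per copy.
type holder struct {
	bufPool sync.Pool
}

// newHolder returns a pointer so every caller shares the same pool.
func newHolder() *holder { return &holder{} }

func main() {
	h := newHolder()
	buf := make([]byte, 0, 1024)
	h.bufPool.Put(&buf) // store *[]byte, matching the reader changes below
	if p, ok := h.bufPool.Get().(*[]byte); ok {
		_ = p // same backing array, reused without a fresh allocation
	}
}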
34 changes: 26 additions & 8 deletions pkg/stanza/fileconsumer/internal/reader/reader.go
@@ -8,8 +8,10 @@ import (
"compress/gzip"
"context"
"errors"
"fmt"
"io"
"os"
"sync"

"go.opentelemetry.io/collector/component"
"go.uber.org/zap"
@@ -42,6 +44,7 @@ type Reader struct {
file *os.File
reader io.Reader
fingerprintSize int
bufPool *sync.Pool
initialBufferSize int
maxLogSize int
headerSplitFunc bufio.SplitFunc
@@ -116,7 +119,9 @@ func (r *Reader) ReadToEnd(ctx context.Context) {
}

func (r *Reader) readHeader(ctx context.Context) (doneReadingFile bool) {
s := scanner.New(r, r.maxLogSize, r.initialBufferSize, r.Offset, r.headerSplitFunc)
bufPtr := r.getBufPtrFromPool()
defer r.bufPool.Put(bufPtr)
s := scanner.New(r, r.maxLogSize, *bufPtr, r.Offset, r.headerSplitFunc)

// Read the tokens from the file until no more header tokens are found or the end of file is reached.
for {
@@ -176,15 +181,19 @@ func (r *Reader) readHeader(ctx context.Context) (doneReadingFile bool) {
}

func (r *Reader) readContents(ctx context.Context) {
// Create the scanner to read the contents of the file.
bufferSize := r.initialBufferSize
if r.TokenLenState.MinimumLength > bufferSize {
var buf []byte
fmt.Println(r.fileName)
Review comment from @andrzej-stencel (Member), Apr 15, 2025:

This should not be there. EDIT: See #39414.

if r.TokenLenState.MinimumLength <= r.initialBufferSize {
bufPtr := r.getBufPtrFromPool()
buf = *bufPtr
defer r.bufPool.Put(bufPtr)
} else {
// If we previously saw a potential token larger than the default buffer,
// size the buffer to be at least one byte larger so we can see if there's more data
bufferSize = r.TokenLenState.MinimumLength + 1
// size the buffer to be at least one byte larger so we can see if there's more data.
// We expect this to be a rare event, so we don't bother pooling this special buffer size.
buf = make([]byte, 0, r.TokenLenState.MinimumLength+1)
}

s := scanner.New(r, r.maxLogSize, bufferSize, r.Offset, r.contentSplitFunc)
s := scanner.New(r, r.maxLogSize, buf, r.Offset, r.contentSplitFunc)

tokenBodies := make([][]byte, r.maxBatchSize)
numTokensBatched := 0
@@ -319,3 +328,12 @@ func (r *Reader) updateFingerprint() {
}
r.Fingerprint = refreshedFingerprint
}

func (r *Reader) getBufPtrFromPool() *[]byte {
bufP := r.bufPool.Get()
if bufP == nil {
buf := make([]byte, 0, r.initialBufferSize)
return &buf
}
return bufP.(*[]byte)
}
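
getBufPtrFromPool pools *[]byte rather than []byte, the usual sync.Pool idiom (see staticcheck SA6002): a slice header is three words and does not fit in an interface value, so Put-ting a bare []byte would heap-allocate a boxed copy of the header on every return to the pool, while a pointer is stored without allocating. A standalone sketch of the same idiom, assuming a fixed initialBufferSize in place of the reader's field:

package main

import (
	"fmt"
	"sync"
)

const initialBufferSize = 16 * 1024 // stand-in for Reader.initialBufferSize

var bufPool sync.Pool

// getBuf mirrors getBufPtrFromPool: reuse a pooled buffer when one is
// available, otherwise allocate a fresh one with the initial capacity.
func getBuf() *[]byte {
	if p, ok := bufPool.Get().(*[]byte); ok {
		*p = (*p)[:0] // keep capacity, drop stale length (Scanner.Buffer reslices to cap anyway)
		return p
	}
	buf := make([]byte, 0, initialBufferSize)
	return &buf
}

func main() {
	p := getBuf()
	*p = append(*p, "a log line"...)
	bufPool.Put(p) // return the pointer, not the slice, so Put does not allocate
	fmt.Println(cap(*getBuf()) >= initialBufferSize) // true, pooled or fresh
}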
4 changes: 2 additions & 2 deletions pkg/stanza/fileconsumer/internal/scanner/scanner.go
@@ -20,9 +20,9 @@ type Scanner struct {
}

// New creates a new positional scanner
func New(r io.Reader, maxLogSize int, bufferSize int, startOffset int64, splitFunc bufio.SplitFunc) *Scanner {
func New(r io.Reader, maxLogSize int, buf []byte, startOffset int64, splitFunc bufio.SplitFunc) *Scanner {
s := &Scanner{Scanner: bufio.NewScanner(r), pos: startOffset}
s.Buffer(make([]byte, 0, bufferSize), maxLogSize)
s.Buffer(buf, maxLogSize)
scanFunc := func(data []byte, atEOF bool) (advance int, token []byte, err error) {
advance, token, err = splitFunc(data, atEOF)
s.pos += int64(advance)
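With the new signature, scanner.New takes a ready-made buffer instead of a size, so the caller decides whether it comes from the pool or a fresh make. A hedged usage sketch in the style of the package's own tests; the package is internal, so this only compiles inside the module, and the input and split function are placeholders:

package scanner_test

import (
	"bufio"
	"fmt"
	"strings"

	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/stanza/fileconsumer/internal/scanner"
)

func Example() {
	// The caller now owns the initial buffer; it could equally be a pooled one.
	buf := make([]byte, 0, 16*1024)
	s := scanner.New(strings.NewReader("one\ntwo\n"), 1024*1024, buf, 0, bufio.ScanLines)
	for s.Scan() {
		fmt.Println(string(s.Bytes()))
	}
	// Output:
	// one
	// two
}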
6 changes: 3 additions & 3 deletions pkg/stanza/fileconsumer/internal/scanner/scanner_test.go
@@ -77,7 +77,7 @@ func TestScanner(t *testing.T) {

for _, tc := range testCases {
t.Run(tc.name, func(t *testing.T) {
scanner := New(bytes.NewReader(tc.stream), tc.maxSize, tc.bufferSize, tc.startOffset, simpleSplit(tc.delimiter))
scanner := New(bytes.NewReader(tc.stream), tc.maxSize, make([]byte, 0, tc.bufferSize), tc.startOffset, simpleSplit(tc.delimiter))
for i, p := 0, 0; scanner.Scan(); i++ {
assert.NoError(t, scanner.Error())

@@ -117,12 +117,12 @@ func (r *errReader) Read([]byte) (n int, err error) {

func TestScannerError(t *testing.T) {
reader := &errReader{err: bufio.ErrTooLong}
scanner := New(reader, 100, 100, 0, simpleSplit([]byte("\n")))
scanner := New(reader, 100, make([]byte, 0, 100), 0, simpleSplit([]byte("\n")))
assert.False(t, scanner.Scan())
assert.EqualError(t, scanner.Error(), "log entry too large")

reader = &errReader{err: errors.New("some err")}
scanner = New(reader, 100, 100, 0, simpleSplit([]byte("\n")))
scanner = New(reader, 100, make([]byte, 0, 100), 0, simpleSplit([]byte("\n")))
assert.False(t, scanner.Scan())
assert.EqualError(t, scanner.Error(), "scanner error: some err")
}