package elasticsearchexporter // import "github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter"

import (
-	"bytes"
	"context"
	"errors"
	"fmt"
@@ -20,7 +19,7 @@ import (
	"go.opentelemetry.io/collector/pdata/ptrace"
	"go.uber.org/zap"

-	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/objmodel"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/pool"
)

type elasticsearchExporter struct {
@@ -36,6 +35,8 @@ type elasticsearchExporter struct {

	wg          sync.WaitGroup // active sessions
	bulkIndexer bulkIndexer
+
+	bufferPool *pool.BufferPool
}

func newExporter(
@@ -69,6 +70,7 @@ func newExporter(
		model:          model,
		logstashFormat: cfg.LogstashFormat,
		otel:           otel,
+		bufferPool:     pool.NewBufferPool(),
	}
}

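Note: the internal/pool package this change depends on is only referenced in the diff, never shown. Below is a minimal sketch of the API implied by the call sites (NewBufferPool, NewPooledBuffer, the exported Buffer field, Recycle), assuming a sync.Pool-backed implementation; the real package may differ in layout and details.

// Sketch only, inferred from usage in this diff; not the actual internal/pool source.
package pool

import (
	"bytes"
	"sync"
)

// BufferPool hands out reusable bytes.Buffers so each encoded document does
// not allocate a fresh buffer.
type BufferPool struct {
	pool *sync.Pool
}

func NewBufferPool() *BufferPool {
	return &BufferPool{pool: &sync.Pool{New: func() any { return &bytes.Buffer{} }}}
}

// PooledBuffer couples a buffer with the pool it must be returned to.
type PooledBuffer struct {
	Buffer *bytes.Buffer
	pool   *sync.Pool
}

func (p *BufferPool) NewPooledBuffer() PooledBuffer {
	return PooledBuffer{Buffer: p.pool.Get().(*bytes.Buffer), pool: p.pool}
}

// Recycle resets the buffer and puts it back into the pool for reuse.
func (p PooledBuffer) Recycle() {
	p.Buffer.Reset()
	p.pool.Put(p.Buffer)
}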
@@ -173,11 +175,14 @@ func (e *elasticsearchExporter) pushLogRecord(
		fIndex = formattedIndex
	}

-	document, err := e.model.encodeLog(resource, resourceSchemaURL, record, scope, scopeSchemaURL)
+	buf := e.bufferPool.NewPooledBuffer()
+	err := e.model.encodeLog(resource, resourceSchemaURL, record, scope, scopeSchemaURL, buf.Buffer)
	if err != nil {
+		buf.Recycle()
		return fmt.Errorf("failed to encode log event: %w", err)
	}
-	return bulkIndexerSession.Add(ctx, fIndex, bytes.NewReader(document), nil)
+	// not recycling after Add returns an error as we don't know if it's already recycled
+	return bulkIndexerSession.Add(ctx, fIndex, buf, nil)
}

func (e *elasticsearchExporter) pushMetricsData(
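The recycling comments in this diff encode an ownership contract: the caller recycles the buffer only on paths where Add was never reached; once Add is called, the pooled buffer belongs to the session, which recycles it itself, even when Add returns an error. A hypothetical consumer makes the hand-off explicit (exampleSession and its flush field are illustrative, not the real bulkIndexerSession, which may recycle asynchronously after the bulk flush):

import (
	"bytes"
	"context"
	"io"

	"github.com/open-telemetry/opentelemetry-collector-contrib/exporter/elasticsearchexporter/internal/pool"
)

// Hypothetical sketch of the consuming side of the contract. The caller must
// not call Recycle once Add has been invoked, because the session recycles
// the buffer on every path from here on.
type exampleSession struct {
	flush func(ctx context.Context, index string, body io.Reader, dynamicTemplates map[string]string) error
}

func (s *exampleSession) Add(ctx context.Context, index string, buf pool.PooledBuffer, dynamicTemplates map[string]string) error {
	defer buf.Recycle() // session-owned from the moment Add is entered
	if err := ctx.Err(); err != nil {
		return err
	}
	return s.flush(ctx, index, bytes.NewReader(buf.Buffer.Bytes()), dynamicTemplates)
}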
@@ -193,21 +198,18 @@ func (e *elasticsearchExporter) pushMetricsData(
	}
	defer session.End()

-	var (
-		validationErrs []error // log instead of returning these so that upstream does not retry
-		errs           []error
-	)
+	var errs []error
	resourceMetrics := metrics.ResourceMetrics()
	for i := 0; i < resourceMetrics.Len(); i++ {
		resourceMetric := resourceMetrics.At(i)
		resource := resourceMetric.Resource()
		scopeMetrics := resourceMetric.ScopeMetrics()

-		resourceDocs := make(map[string]map[uint32]objmodel.Document)
-
		for j := 0; j < scopeMetrics.Len(); j++ {
+			var validationErrs []error // log instead of returning these so that upstream does not retry
			scopeMetrics := scopeMetrics.At(j)
			scope := scopeMetrics.Scope()
+			groupedDataPointsByIndex := make(map[string]map[uint32][]dataPoint)
			for k := 0; k < scopeMetrics.Metrics().Len(); k++ {
				metric := scopeMetrics.Metrics().At(k)

@@ -216,13 +218,17 @@ func (e *elasticsearchExporter) pushMetricsData(
				if err != nil {
					return err
				}
-				if _, ok := resourceDocs[fIndex]; !ok {
-					resourceDocs[fIndex] = make(map[uint32]objmodel.Document)
+				groupedDataPoints, ok := groupedDataPointsByIndex[fIndex]
+				if !ok {
+					groupedDataPoints = make(map[uint32][]dataPoint)
+					groupedDataPointsByIndex[fIndex] = groupedDataPoints
				}
-
-				if err = e.model.upsertMetricDataPointValue(resourceDocs[fIndex], resource,
-					resourceMetric.SchemaUrl(), scope, scopeMetrics.SchemaUrl(), metric, dp); err != nil {
-					return err
+				dpHash := e.model.hashDataPoint(dp)
+				dataPoints, ok := groupedDataPoints[dpHash]
+				if !ok {
+					groupedDataPoints[dpHash] = []dataPoint{dp}
+				} else {
+					groupedDataPoints[dpHash] = append(dataPoints, dp)
				}
				return nil
			}
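Grouping is now two-level: by target index, then by a data-point hash, so points that hash alike are buffered together and later encoded into a single document. The hash function itself is not part of this diff; a hypothetical sketch of the idea, hashing timestamp and attributes with FNV-1a (the real e.model.hashDataPoint may cover more fields, e.g. unit or start timestamp):

import (
	"encoding/binary"
	"hash/fnv"

	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
)

// Hypothetical grouping hash: data points that agree on timestamp and
// attribute set land in the same bucket of groupedDataPoints and are merged
// into one Elasticsearch document when the group is encoded.
func hashNumberDataPoint(dp pmetric.NumberDataPoint) uint32 {
	h := fnv.New32a()
	_ = binary.Write(h, binary.LittleEndian, uint64(dp.Timestamp()))
	dp.Attributes().Range(func(k string, v pcommon.Value) bool {
		h.Write([]byte(k))
		h.Write([]byte(v.AsString()))
		return true
	})
	return h.Sum32()
}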
@@ -232,7 +238,7 @@ func (e *elasticsearchExporter) pushMetricsData(
					dps := metric.Sum().DataPoints()
					for l := 0; l < dps.Len(); l++ {
						dp := dps.At(l)
-						if err := upsertDataPoint(newNumberDataPoint(dp)); err != nil {
+						if err := upsertDataPoint(newNumberDataPoint(metric, dp)); err != nil {
							validationErrs = append(validationErrs, err)
							continue
						}
@@ -241,7 +247,7 @@ func (e *elasticsearchExporter) pushMetricsData(
					dps := metric.Gauge().DataPoints()
					for l := 0; l < dps.Len(); l++ {
						dp := dps.At(l)
-						if err := upsertDataPoint(newNumberDataPoint(dp)); err != nil {
+						if err := upsertDataPoint(newNumberDataPoint(metric, dp)); err != nil {
							validationErrs = append(validationErrs, err)
							continue
						}
@@ -254,7 +260,7 @@ func (e *elasticsearchExporter) pushMetricsData(
					dps := metric.ExponentialHistogram().DataPoints()
					for l := 0; l < dps.Len(); l++ {
						dp := dps.At(l)
-						if err := upsertDataPoint(newExponentialHistogramDataPoint(dp)); err != nil {
+						if err := upsertDataPoint(newExponentialHistogramDataPoint(metric, dp)); err != nil {
							validationErrs = append(validationErrs, err)
							continue
						}
@@ -267,7 +273,7 @@ func (e *elasticsearchExporter) pushMetricsData(
					dps := metric.Histogram().DataPoints()
					for l := 0; l < dps.Len(); l++ {
						dp := dps.At(l)
-						if err := upsertDataPoint(newHistogramDataPoint(dp)); err != nil {
+						if err := upsertDataPoint(newHistogramDataPoint(metric, dp)); err != nil {
							validationErrs = append(validationErrs, err)
							continue
						}
@@ -276,37 +282,35 @@ func (e *elasticsearchExporter) pushMetricsData(
					dps := metric.Summary().DataPoints()
					for l := 0; l < dps.Len(); l++ {
						dp := dps.At(l)
-						if err := upsertDataPoint(newSummaryDataPoint(dp)); err != nil {
+						if err := upsertDataPoint(newSummaryDataPoint(metric, dp)); err != nil {
							validationErrs = append(validationErrs, err)
							continue
						}
					}
				}
			}
-		}

-		if len(validationErrs) > 0 {
-			e.Logger.Warn("validation errors", zap.Error(errors.Join(validationErrs...)))
-		}
-
-		for fIndex, docs := range resourceDocs {
-			for _, doc := range docs {
-				var (
-					docBytes []byte
-					err      error
-				)
-				docBytes, err = e.model.encodeDocument(doc)
-				if err != nil {
-					errs = append(errs, err)
-					continue
-				}
-				if err := session.Add(ctx, fIndex, bytes.NewReader(docBytes), doc.DynamicTemplates()); err != nil {
-					if cerr := ctx.Err(); cerr != nil {
-						return cerr
+			for fIndex, groupedDataPoints := range groupedDataPointsByIndex {
+				for _, dataPoints := range groupedDataPoints {
+					buf := e.bufferPool.NewPooledBuffer()
+					dynamicTemplates, err := e.model.encodeMetrics(resource, resourceMetric.SchemaUrl(), scope, scopeMetrics.SchemaUrl(), dataPoints, &validationErrs, buf.Buffer)
+					if err != nil {
+						buf.Recycle()
+						errs = append(errs, err)
+						continue
+					}
+					if err := session.Add(ctx, fIndex, buf, dynamicTemplates); err != nil {
+						// not recycling after Add returns an error as we don't know if it's already recycled
+						if cerr := ctx.Err(); cerr != nil {
+							return cerr
+						}
+						errs = append(errs, err)
					}
-				errs = append(errs, err)
				}
			}
+			if len(validationErrs) > 0 {
+				e.Logger.Warn("validation errors", zap.Error(errors.Join(validationErrs...)))
+			}
		}
	}

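For reference, the call above implies this shape for the model's new metrics encoder (parameter names are guesses from the call site): it writes one grouped document into the pooled buffer, appends per-data-point problems to *validationErrs rather than failing the whole batch, and returns the dynamic templates that accompany the bulk Add.

import (
	"bytes"

	"go.opentelemetry.io/collector/pdata/pcommon"
)

// Signature implied by the call site; names are illustrative, not the
// exporter's actual interface definition.
type metricsEncoder interface {
	encodeMetrics(
		resource pcommon.Resource, resourceSchemaURL string,
		scope pcommon.InstrumentationScope, scopeSchemaURL string,
		dataPoints []dataPoint, validationErrs *[]error,
		buf *bytes.Buffer,
	) (dynamicTemplates map[string]string, err error)
}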
@@ -411,11 +415,14 @@ func (e *elasticsearchExporter) pushTraceRecord(
		fIndex = formattedIndex
	}

-	document, err := e.model.encodeSpan(resource, resourceSchemaURL, span, scope, scopeSchemaURL)
+	buf := e.bufferPool.NewPooledBuffer()
+	err := e.model.encodeSpan(resource, resourceSchemaURL, span, scope, scopeSchemaURL, buf.Buffer)
	if err != nil {
+		buf.Recycle()
		return fmt.Errorf("failed to encode trace record: %w", err)
	}
-	return bulkIndexerSession.Add(ctx, fIndex, bytes.NewReader(document), nil)
+	// not recycling after Add returns an error as we don't know if it's already recycled
+	return bulkIndexerSession.Add(ctx, fIndex, buf, nil)
}

func (e *elasticsearchExporter) pushSpanEvent(
@@ -440,14 +447,12 @@ func (e *elasticsearchExporter) pushSpanEvent(
		}
		fIndex = formattedIndex
	}
-
-	document := e.model.encodeSpanEvent(resource, resourceSchemaURL, span, spanEvent, scope, scopeSchemaURL)
-	if document == nil {
+	buf := e.bufferPool.NewPooledBuffer()
+	e.model.encodeSpanEvent(resource, resourceSchemaURL, span, spanEvent, scope, scopeSchemaURL, buf.Buffer)
+	if buf.Buffer.Len() == 0 {
+		buf.Recycle()
		return nil
	}
-	docBytes, err := e.model.encodeDocument(*document)
-	if err != nil {
-		return err
-	}
-	return bulkIndexerSession.Add(ctx, fIndex, bytes.NewReader(docBytes), nil)
+	// not recycling after Add returns an error as we don't know if it's already recycled
+	return bulkIndexerSession.Add(ctx, fIndex, buf, nil)
}