2 changes: 1 addition & 1 deletion auditbeat/helper/hasher/cached_hasher.go
@@ -126,7 +126,7 @@ func (ch *CachedHasher) HashFile(path string) (map[HashType]Digest, error) {
entry := hashEntry{hashes: hashes, statx: statx}
if ch.hashLRU.Add(path, entry) {
ch.stats.Evictions++
ch.log.Debugf("evict (%s)")
ch.log.Debugf("evict (%s)", path)
}

ch.log.Debugf("miss (%s) took %v", path, time.Since(x))

2 changes: 1 addition & 1 deletion auditbeat/module/auditd/audit_linux.go
@@ -450,7 +450,7 @@ func (ms *MetricSet) updateKernelLostMetric(lost uint32) {
}
logFn("kernel lost events: %d (total: %d)", delta, lost)
} else {
ms.log.Warnf("kernel lost event counter reset from %d to %d", ms.kernelLost, lost)
ms.log.Warnf("kernel lost event counter reset from %d to %d", ms.kernelLost.counter, lost)
}
ms.kernelLost.counter = lost
}
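
The updated call logs the numeric field rather than the whole struct. Purely for illustration (the real kernelLost type in auditbeat may differ), passing a struct to a %d verb produces fmt's error notation:

```go
package main

import "fmt"

// kernelLostState is an assumed stand-in for the metricset's counter struct.
type kernelLostState struct {
	enabled bool
	counter uint32
}

func main() {
	ms := kernelLostState{enabled: true, counter: 42}

	// Passing the struct where an integer is expected prints roughly:
	// reset from {%!d(bool=true) 42} to 7
	fmt.Printf("reset from %d to %d\n", ms, 7)

	// Passing the counter field prints the intended value:
	// reset from 42 to 7
	fmt.Printf("reset from %d to %d\n", ms.counter, 7)
}
```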

6 changes: 3 additions & 3 deletions filebeat/autodiscover/builder/hints/logs.go
@@ -160,7 +160,7 @@ func (l *logHints) CreateConfig(event bus.Event, options ...ucfg.Option) []*conf
// Merge config template with the configs from the annotations
// AppendValues option is used to append arrays from annotations to existing arrays while merging
if err := config.MergeWithOpts(tempCfg, ucfg.AppendValues); err != nil {
l.log.Debugf("hints.builder", "config merge failed with error: %v", err)
l.log.Debugf("config merge failed with error: %v", err)
continue
}
module := l.getModule(hints)
@@ -191,11 +191,11 @@ func (l *logHints) CreateConfig(event bus.Event, options ...ucfg.Option) []*conf
moduleConf[fileset+".enabled"] = cfg.Enabled
moduleConf[fileset+".input"] = filesetConf

l.log.Debugf("hints.builder", "generated config %+v", moduleConf)
l.log.Debugf("generated config %+v", moduleConf)
}
config, _ = conf.NewConfigFrom(moduleConf)
}
l.log.Debugf("hints.builder", "generated config %+v of logHints %+v", config, l)
l.log.Debugf("generated config %+v of logHints %+v", config, l)
configs = append(configs, config)
}
// Apply information in event to the template to generate the final config
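
These Debugf calls carried a stray leading "hints.builder" argument, which the logger used as the format string, pushing the real message and error into %!(EXTRA ...) output. A small illustration with plain fmt (logp's *f methods follow the same Sprintf rules):

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("boom")

	// The first argument is the format string; everything else becomes EXTRA noise.
	fmt.Println(fmt.Sprintf("hints.builder", "config merge failed with error: %v", err))
	// hints.builder%!(EXTRA string=config merge failed with error: %v, *errors.errorString=boom)

	// Without the stray argument the message formats as intended.
	fmt.Println(fmt.Sprintf("config merge failed with error: %v", err))
	// config merge failed with error: boom
}
```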

2 changes: 1 addition & 1 deletion filebeat/input/filestream/copytruncate_prospector.go
@@ -280,7 +280,7 @@ func (p *copyTruncateFileProspector) onFSEvent(

err := updater.ResetCursor(src, state{Offset: 0})
if err != nil {
log.Errorf("failed to reset file cursor: %w", err)
log.Errorf("failed to reset file cursor: %v", err)
}
group.Restart(ctx, src)
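
This is the first of many %w to %v swaps in the PR. %w only has meaning in fmt.Errorf, where it wraps the error; logger methods such as Errorf format with Sprintf rules, where %w degrades into a formatting error. A short comparison:

```go
package main

import (
	"errors"
	"fmt"
)

func main() {
	cause := errors.New("permission denied")

	// fmt.Errorf with %w wraps the cause so errors.Is/As still work.
	wrapped := fmt.Errorf("failed to reset file cursor: %w", cause)
	fmt.Println(errors.Is(wrapped, cause)) // true

	// Sprintf-style formatting (what logger *f methods use) has no wrapping concept,
	// so %w renders roughly as:
	// failed to reset file cursor: %!w(*errors.errorString=&{permission denied})
	fmt.Println(fmt.Sprintf("failed to reset file cursor: %w", cause))

	// %v prints the error text cleanly, which is all a log line needs.
	fmt.Println(fmt.Sprintf("failed to reset file cursor: %v", cause))
	// failed to reset file cursor: permission denied
}
```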


4 changes: 2 additions & 2 deletions filebeat/input/filestream/filestream.go
@@ -159,7 +159,7 @@ func (f *logFile) startFileMonitoringIfNeeded() {
return nil
})
if err != nil {
f.log.Errorf("failed to start file monitoring: %w", err)
f.log.Errorf("failed to start file monitoring: %v", err)
}
}

@@ -169,7 +169,7 @@ func (f *logFile) startFileMonitoringIfNeeded() {
return nil
})
if err != nil {
f.log.Errorf("failed to schedule a file close: %w", err)
f.log.Errorf("failed to schedule a file close: %v", err)
}
}
}

@@ -138,7 +138,7 @@ func (hg *defaultHarvesterGroup) Start(ctx inputv2.Context, src Source) {

if err := hg.tg.Go(startHarvester(ctx, hg, src, false, hg.metrics)); err != nil {
ctx.Logger.Warnf(
"tried to start harvester with task group already closed",
"tried to start harvester for %s with task group already closed",
ctx.ID)
}
}

@@ -134,7 +134,7 @@ func (op *updateOp) Execute(store *store, n uint) {
} else {
err := typeconv.Convert(&resource.cursor, op.delta)
if err != nil {
store.log.Errorf("failed to perform type conversion: %w", err)
store.log.Errorf("failed to perform type conversion: %v", err)
}
}


@@ -66,7 +66,7 @@ func newUpdateWriter(store *store, ch *updateChan) *updateWriter {
return nil
})
if err != nil {
store.log.Errorf("failed to schedule the update writer routine: %w", err)
store.log.Errorf("failed to schedule the update writer routine: %v", err)
}

return w
@@ -77,7 +77,7 @@ func newUpdateWriter(store *store, ch *updateChan) *updateWriter {
func (w *updateWriter) Close() {
err := w.tg.Stop()
if err != nil {
w.store.log.Errorf("failed to stop the update writer routine: %w", err)
w.store.log.Errorf("failed to stop the update writer routine: %v", err)
}
w.syncStates(w.ch.TryRecv())
}

2 changes: 1 addition & 1 deletion filebeat/input/journald/input.go
@@ -216,7 +216,7 @@ func (inp *journald) Run(
if err := publisher.Publish(event, event.Private); err != nil {
msg := fmt.Sprintf("could not publish event: %s", err)
ctx.UpdateStatus(status.Failed, msg)
logger.Errorf(msg)
logger.Errorf("%s", msg)
return err
}
}
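
Passing a pre-built message as the format string (logger.Errorf(msg)) re-interprets any % inside the message and trips the non-constant-format-string vet check; funnelling it through an explicit "%s" avoids both. For illustration:

```go
package main

import "fmt"

func main() {
	// A message that happens to contain a % sequence, e.g. copied from an upstream error.
	msg := fmt.Sprintf("could not publish event: %s", "invalid escape %s in payload")

	// Used as the format string, the embedded %s is parsed again
	// (go vet also flags the non-constant format string).
	fmt.Println(fmt.Sprintf(msg))
	// could not publish event: invalid escape %!s(MISSING) in payload

	// Passed as data to an explicit "%s" format, it is printed verbatim.
	fmt.Println(fmt.Sprintf("%s", msg))
	// could not publish event: invalid escape %s in payload
}
```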

2 changes: 1 addition & 1 deletion filebeat/input/log/input.go
@@ -107,7 +107,7 @@ func NewInput(
cleanupIfNeeded := func(f func() error) {
if cleanupNeeded {
if err := f(); err != nil {
logger.Named("input.log").Errorf("clean up function returned an error: %w", err)
logger.Named("input.log").Errorf("clean up function returned an error: %v", err)
}
}
}

2 changes: 1 addition & 1 deletion filebeat/input/unix/input.go
@@ -116,7 +116,7 @@ func (s *server) Run(ctx input.Context, publisher stateless.Publisher) error {
return err
}

log.Debugf("%s Input '%v' initialized", s.config.Config.SocketType, ctx.ID)
log.Debugf("%v Input '%v' initialized", s.config.Config.SocketType, ctx.ID)

err = server.Run(ctxtool.FromCanceller(ctx.Cancelation))


4 changes: 2 additions & 2 deletions heartbeat/autodiscover/builder/hints/monitors.go
@@ -124,7 +124,7 @@ func (hb *heartbeatHints) CreateConfig(event bus.Event, options ...ucfg.Option)

h, err := hb.getHostsWithPort(monitor, port, podEvent)
if err != nil {
hb.logger.Warnf("unable to find valid hosts for %+v: %w", monitor, err)
hb.logger.Warnf("unable to find valid hosts for %+v: %v", monitor, err)
continue
}

@@ -135,7 +135,7 @@ func (hb *heartbeatHints) CreateConfig(event bus.Event, options ...ucfg.Option)
hb.logger.Debugf("unable to create config from MapStr %+v", tempCfg)
return []*conf.C{}
}
hb.logger.Debugf("hints.builder", "generated config %+v", config)
hb.logger.Debugf("generated config %+v", config)
configs = append(configs, config)
}


6 changes: 3 additions & 3 deletions heartbeat/beater/heartbeat.go
@@ -79,7 +79,7 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) {
if err == nil {
trace = sockTrace
} else {
logp.L().Warnf("could not connect to socket trace at path %s after %s timeout: %w", stConfig.Path, stConfig.Wait, err)
logp.L().Warnf("could not connect to socket trace at path %s after %s timeout: %v", stConfig.Path, stConfig.Wait, err)
}
}

@@ -94,7 +94,7 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) {
trace.Abort()
return nil, fmt.Errorf("run_once mode fatal error: %w", err)
} else {
logp.L().Warnf("skipping monitor state management: %w", err)
logp.L().Warnf("skipping monitor state management: %v", err)
}
} else {
replaceStateLoader(monitorstate.MakeESLoader(esClient, monitorstate.DefaultDataStreams, parsedConfig.RunFrom))
@@ -277,7 +277,7 @@ func (bt *Heartbeat) RunCentralMgmtMonitors(b *beat.Beat) {
// Backoff panics with 0 duration, set to smallest unit
esClient, err := makeESClient(context.TODO(), outCfg.Config(), 1, 1*time.Nanosecond)
if err != nil {
logp.L().Warnf("skipping monitor state management during managed reload: %w", err)
logp.L().Warnf("skipping monitor state management during managed reload: %v", err)
} else {
bt.replaceStateLoader(monitorstate.MakeESLoader(esClient, monitorstate.DefaultDataStreams, bt.config.RunFrom))
}

2 changes: 1 addition & 1 deletion heartbeat/monitors/active/icmp/stdloop.go
@@ -22,7 +22,7 @@
"encoding/binary"
"errors"
"fmt"
"math/rand"

[GitHub Actions / lint (macos-latest)] Check failure on line 25 in heartbeat/monitors/active/icmp/stdloop.go: import 'math/rand' is not allowed from list 'main': superseded by math/rand/v2 (depguard)
"net"
"os"
"runtime"
@@ -165,7 +165,7 @@
bytes := make([]byte, 512)
err := conn.SetReadDeadline(time.Now().Add(time.Second))
if err != nil {
logp.L().Errorf("could not set read deadline for ICMP: %w", err)
logp.L().Errorf("could not set read deadline for ICMP: %v", err)
return
}
_, addr, err := conn.ReadFrom(bytes)
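
The depguard failure above asks for math/rand/v2, which is auto-seeded and keeps the familiar helpers under a new import path. A sketch of the swap (illustrative only, not the change made in this PR):

```go
package main

import (
	"fmt"
	"math/rand/v2" // replaces "math/rand"; the global generator needs no Seed call
)

func main() {
	id := rand.Uint32()      // e.g. a random echo request identifier
	jitter := rand.IntN(250) // IntN is v2's replacement for Intn
	fmt.Printf("id=%d jitter=%dms\n", id, jitter)
}
```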

2 changes: 1 addition & 1 deletion heartbeat/monitors/factory.go
@@ -291,7 +291,7 @@ func preProcessors(info beat.Info, location *config.LocationWithID, settings pub
geoM, err := util.GeoConfigToMap(location.Geo)
if err != nil {
geoErrOnce.Do(func() {
logp.L().Warnf("could not add heartbeat geo info: %w", err)
logp.L().Warnf("could not add heartbeat geo info: %v", err)
})
}


2 changes: 1 addition & 1 deletion heartbeat/monitors/monitor.go
@@ -266,7 +266,7 @@ func (m *Monitor) Stop() {
if m.close != nil {
err := m.close()
if err != nil {
logp.L().Errorf("error closing monitor %s: %w", m.String(), err)
logp.L().Errorf("error closing monitor %s: %v", m.String(), err)
}
}


2 changes: 1 addition & 1 deletion heartbeat/scheduler/schedjob.go
@@ -65,7 +65,7 @@ func (sj *schedJob) run() (startedAt time.Time) {
if err == nil {
defer sj.jobLimitSem.Release(1)
} else {
logp.L().Errorf("could not acquire semaphore: %w", err)
logp.L().Errorf("could not acquire semaphore: %v", err)
}
}


2 changes: 1 addition & 1 deletion libbeat/autodiscover/appenders/config/config.go
@@ -31,7 +31,7 @@
)

func init() {
autodiscover.Registry.AddAppender("config", NewConfigAppender)

[GitHub Actions / lint (macos-latest)] Check failure on line 34 in libbeat/autodiscover/appenders/config/config.go: Error return value of `autodiscover.Registry.AddAppender` is not checked (errcheck)
}

type config struct {
@@ -47,12 +47,12 @@

// NewConfigAppender creates a configAppender that can append templatized configs into built configs
func NewConfigAppender(cfg *conf.C, logger *logp.Logger) (autodiscover.Appender, error) {
logger.Warnf(cfgwarn.Beta("The config appender is beta"))
logger.Warn(cfgwarn.Beta("The config appender is beta"))

config := config{}
err := cfg.Unpack(&config)
if err != nil {
return nil, fmt.Errorf("unable to unpack config appender due to error: %+v", err)

[GitHub Actions / lint (macos-latest)] Check failure on line 55 in libbeat/autodiscover/appenders/config/config.go: non-wrapping format verb for fmt.Errorf. Use `%w` to format errors (errorlint)
}

var cond conditions.Condition
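
The errcheck failure above is about the discarded return value of AddAppender. A minimal sketch of handling it at registration time (the function below is a stand-in, not the real autodiscover API):

```go
package main

import "fmt"

// addAppender stands in for a registry call such as
// autodiscover.Registry.AddAppender, which returns an error.
func addAppender(name string) error {
	if name == "" {
		return fmt.Errorf("appender name must not be empty")
	}
	return nil
}

func init() {
	// errcheck: consume the error; for init-time registration, failing loudly
	// is one reasonable way to surface a broken setup.
	if err := addAppender("config"); err != nil {
		panic(fmt.Sprintf("registering config appender: %v", err))
	}
}

func main() {
	fmt.Println("config appender registered")
}
```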

2 changes: 1 addition & 1 deletion libbeat/autodiscover/providers/kubernetes/kubernetes.go
@@ -352,7 +352,7 @@ func (p *leaderElectionManager) GenerateHints(event bus.Event) bus.Event {
func (p *leaderElectionManager) startLeaderElectorIndefinitely(ctx context.Context, lec leaderelection.LeaderElectionConfig) {
le, err := leaderelection.NewLeaderElector(lec)
if err != nil {
p.logger.Errorf("error while creating Leader Elector: %w", err)
p.logger.Errorf("error while creating Leader Elector: %v", err)
}
p.logger.Debugf("Starting Leader Elector")


2 changes: 1 addition & 1 deletion libbeat/autodiscover/template/config.go
@@ -157,7 +157,7 @@ func ApplyConfigTemplate(event bus.Event, configs []*conf.C, logger *logp.Logger
var unpacked map[string]interface{}
err = c.Unpack(&unpacked, opts...)
if err != nil {
logger.Debugf("autodiscover", "Configuration template cannot be resolved: %v", err)
logger.Debugf("Configuration template cannot be resolved: %v", err)
continue
}
// Repack again:

2 changes: 1 addition & 1 deletion libbeat/conditions/range.go
@@ -117,7 +117,7 @@ func (c Range) Check(event ValuesMap) bool {

floatValue, err := ExtractFloat(value)
if err != nil {
c.logger.Named(logName).Warnf(err.Error())
c.logger.Named(logName).Warn(err.Error())
return false
}


4 changes: 2 additions & 2 deletions libbeat/processors/add_host_metadata/add_host_metadata.go
@@ -101,7 +101,7 @@ func New(cfg *config.C, log *logp.Logger) (beat.Processor, error) {
cbID, err := uuid.NewV4()
// if we fail, fall back to the processor name, hope for the best.
if err != nil {
p.logger.Errorf("error generating ID for FQDN callback, reverting to processor name: %w", err)
p.logger.Errorf("error generating ID for FQDN callback, reverting to processor name: %v", err)
cbIDStr = processorName
} else {
cbIDStr = cbID.String()
@@ -262,7 +262,7 @@ func (p *addHostMetadata) updateOrExpire(useFQDN bool) {
go func() {
err := p.loadData(false, useFQDN)
if err != nil {
p.logger.Errorf("error updating data for processor: %w")
p.logger.Errorf("error updating data for processor: %v", err)
updateChanSuccess <- false
return
}

12 changes: 6 additions & 6 deletions libbeat/processors/add_kubernetes_metadata/kubernetes.go
@@ -191,7 +191,7 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {
if config.Scope == "node" {
config.Node, err = kubernetes.DiscoverKubernetesNode(k.log, nd)
if err != nil {
k.log.Errorf("Couldn't discover Kubernetes node: %w", err)
k.log.Errorf("Couldn't discover Kubernetes node: %v", err)
return
}
k.log.Debugf("Initializing a new Kubernetes watcher using host: %s", config.Node)
@@ -300,30 +300,30 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {
// be populated before trying to generate metadata for Pods.
if k.nodeWatcher != nil {
if err := k.nodeWatcher.Start(); err != nil {
k.log.Debugf("add_kubernetes_metadata", "Couldn't start node watcher: %v", err)
k.log.Debugf("Couldn't start node watcher: %v", err)
return
}
}
if k.nsWatcher != nil {
if err := k.nsWatcher.Start(); err != nil {
k.log.Debugf("add_kubernetes_metadata", "Couldn't start namespace watcher: %v", err)
k.log.Debugf("Couldn't start namespace watcher: %v", err)
return
}
}
if k.rsWatcher != nil {
if err := k.rsWatcher.Start(); err != nil {
k.log.Debugf("add_kubernetes_metadata", "Couldn't start replicaSet watcher: %v", err)
k.log.Debugf("Couldn't start replicaSet watcher: %v", err)
return
}
}
if k.jobWatcher != nil {
if err := k.jobWatcher.Start(); err != nil {
k.log.Debugf("add_kubernetes_metadata", "Couldn't start job watcher: %v", err)
k.log.Debugf("Couldn't start job watcher: %v", err)
return
}
}
if err := watcher.Start(); err != nil {
k.log.Debugf("add_kubernetes_metadata", "Couldn't start pod watcher: %v", err)
k.log.Debugf("Couldn't start pod watcher: %v", err)
return
}
})

2 changes: 1 addition & 1 deletion libbeat/publisher/queue/diskqueue/segments.go
@@ -174,7 +174,7 @@
for _, dirEntry := range dirEntries {
file, err := dirEntry.Info()
if err != nil {
logger.Errorf("could not get info for file '%s', skipping. Error: %w", dirEntry.Name(), err)
logger.Errorf("could not get info for file '%s', skipping. Error: %v", dirEntry.Name(), err)
continue
}

@@ -201,7 +201,7 @@
id: segmentID(id),
schemaVersion: &header.version,
frameCount: header.frameCount,
byteCount: uint64(file.Size()),

[GitHub Actions / lint (macos-latest)] Check failure on line 204 in libbeat/publisher/queue/diskqueue/segments.go: G115: integer overflow conversion int64 -> uint64 (gosec)
})
}
}
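
The gosec G115 warning above targets uint64(file.Size()): Size() returns int64, and a negative value would wrap to a huge unsigned number. One way to satisfy it is an explicit guard before converting; a sketch under that assumption (not the disk queue's actual handling):

```go
package main

import (
	"fmt"
	"os"
)

// safeByteCount guards the int64 -> uint64 conversion that gosec G115 flags.
func safeByteCount(info os.FileInfo) (uint64, error) {
	size := info.Size() // int64; should never be negative, but guard anyway
	if size < 0 {
		return 0, fmt.Errorf("file %q reports negative size %d", info.Name(), size)
	}
	return uint64(size), nil
}

func main() {
	info, err := os.Stat("segments.go") // example path
	if err != nil {
		fmt.Println("stat:", err)
		return
	}
	n, err := safeByteCount(info)
	if err != nil {
		fmt.Println(err)
		return
	}
	fmt.Println("byteCount:", n)
}
```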

2 changes: 1 addition & 1 deletion libbeat/scripts/cmd/stress_pipeline/main.go
@@ -70,7 +70,7 @@ func run() error {
flag.Parse()

files := flag.Args()
logger.Infof("load config files:", files)
logger.Infof("load config files: %v", files)

cfg, err := common.LoadFiles(files...)
if err != nil {

4 changes: 2 additions & 2 deletions metricbeat/mb/lightmodules.go
@@ -258,12 +258,12 @@
modules := make(map[string]bool)
for _, dir := range s.paths {
if _, err := os.Stat(dir); os.IsNotExist(err) {
s.log.Debugf("Light modules directory '%d' doesn't exist", dir)
s.log.Debugf("Light modules directory '%s' doesn't exist", dir)
continue
}
files, err := ioutil.ReadDir(dir)
if err != nil {
return nil, fmt.Errorf("listing modules on path '%s': %w", dir, err)
return nil, fmt.Errorf("listing modules on path '%s': %v", dir, err)

[GitHub Actions / lint (macos-latest)] Check failure on line 266 in metricbeat/mb/lightmodules.go: non-wrapping format verb for fmt.Errorf. Use `%w` to format errors (errorlint)
}
for _, f := range files {
if !f.IsDir() {
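
Unlike the logger calls elsewhere in this PR, this line builds an error with fmt.Errorf, where errorlint wants %w (hence the failure above): %v flattens the cause to text, while %w keeps it unwrappable. A quick comparison:

```go
package main

import (
	"errors"
	"fmt"
	"io/fs"
)

func main() {
	cause := fs.ErrNotExist
	dir := "/etc/beats/modules.d" // example path

	wrapped := fmt.Errorf("listing modules on path '%s': %w", dir, cause)
	flattened := fmt.Errorf("listing modules on path '%s': %v", dir, cause)

	// %w preserves the error chain; %v reduces it to text.
	fmt.Println(errors.Is(wrapped, fs.ErrNotExist))   // true
	fmt.Println(errors.Is(flattened, fs.ErrNotExist)) // false
}
```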

2 changes: 1 addition & 1 deletion metricbeat/module/kafka/consumergroup/query.go
@@ -54,7 +54,7 @@ func fetchGroupInfo(
return nil
}

logger.Named("kafka").Debugf("known consumer groups: ", groups)
logger.Named("kafka").Debugf("known consumer groups: %s", groups)

assignments, err := fetchGroupAssignments(b, groups)
if err != nil {

2 changes: 1 addition & 1 deletion metricbeat/module/kafka/partition/partition.go
@@ -97,7 +97,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error {
}

for _, topic := range topics {
m.Logger().Named("kafka").Debugf("fetch events for topic: ", topic.Name)
m.Logger().Named("kafka").Debugf("fetch events for topic: %s", topic.Name)
evtTopic := mapstr.M{
"name": topic.Name,
}

6 changes: 3 additions & 3 deletions metricbeat/module/kubernetes/event/event.go
@@ -101,7 +101,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
cfg, _ := conf.NewConfigFrom(&config)
ecsClusterMeta, err := util.GetClusterECSMeta(cfg, client, ms.Logger())
if err != nil {
ms.Logger().Debugf("could not retrieve cluster metadata: %w", err)
ms.Logger().Debugf("could not retrieve cluster metadata: %v", err)
}
if ecsClusterMeta != nil {
ms.clusterMeta = ecsClusterMeta
@@ -127,7 +127,7 @@ func (m *MetricSet) Run(reporter mb.PushReporterV2) {
FilterFunc: func(obj interface{}) bool {
eve, ok := obj.(*kubernetes.Event)
if !ok {
m.Logger().Debugf("Error while casting event: %s", ok)
m.Logger().Debugf("Error while casting event. Got type: %T", obj)
}
// if fields are null they are decoded to `0001-01-01 00:00:00 +0000 UTC`
// so we need to check if they are valid first
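
The updated message logs the concrete type of the unexpected object with %T instead of the boolean from the type assertion, which in this branch would always print false. For illustration:

```go
package main

import "fmt"

// event is a simplified stand-in for kubernetes.Event.
type event struct{ Reason string }

func describe(obj interface{}) string {
	eve, ok := obj.(*event)
	if !ok {
		// %T reports what actually arrived; logging ok here would always say false.
		return fmt.Sprintf("error while casting event, got type: %T", obj)
	}
	return "event: " + eve.Reason
}

func main() {
	fmt.Println(describe(&event{Reason: "Started"})) // event: Started
	fmt.Println(describe("not an event"))            // error while casting event, got type: string
}
```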
@@ -148,7 +148,7 @@ func (m *MetricSet) Run(reporter mb.PushReporterV2) {
// start event watcher
err := m.watcher.Start()
if err != nil {
m.Logger().Debugf("Unable to start watcher: %w", err)
m.Logger().Debugf("Unable to start watcher: %v", err)
}
<-reporter.Done()
m.watcher.Stop()