diff --git a/auditbeat/helper/hasher/cached_hasher.go b/auditbeat/helper/hasher/cached_hasher.go
index 0af7bb01c74d..6543fb20cdaa 100644
--- a/auditbeat/helper/hasher/cached_hasher.go
+++ b/auditbeat/helper/hasher/cached_hasher.go
@@ -126,7 +126,7 @@ func (ch *CachedHasher) HashFile(path string) (map[HashType]Digest, error) {
 	entry := hashEntry{hashes: hashes, statx: statx}
 	if ch.hashLRU.Add(path, entry) {
 		ch.stats.Evictions++
-		ch.log.Debugf("evict (%s)")
+		ch.log.Debugf("evict (%s)", path)
 	}
 
 	ch.log.Debugf("miss (%s) took %v", path, time.Since(x))
diff --git a/auditbeat/module/auditd/audit_linux.go b/auditbeat/module/auditd/audit_linux.go
index 97f755ca4139..e7b5fa7cfe36 100644
--- a/auditbeat/module/auditd/audit_linux.go
+++ b/auditbeat/module/auditd/audit_linux.go
@@ -442,7 +442,7 @@ func (ms *MetricSet) updateKernelLostMetric(lost uint32) {
 		}
 		logFn("kernel lost events: %d (total: %d)", delta, lost)
 	} else {
-		ms.log.Warnf("kernel lost event counter reset from %d to %d", ms.kernelLost, lost)
+		ms.log.Warnf("kernel lost event counter reset from %d to %d", ms.kernelLost.counter, lost)
 	}
 	ms.kernelLost.counter = lost
 }
diff --git a/filebeat/autodiscover/builder/hints/logs.go b/filebeat/autodiscover/builder/hints/logs.go
index 042ddec8a671..1130c4c39131 100644
--- a/filebeat/autodiscover/builder/hints/logs.go
+++ b/filebeat/autodiscover/builder/hints/logs.go
@@ -163,7 +163,7 @@ func (l *logHints) CreateConfig(event bus.Event, options ...ucfg.Option) []*conf
 		// Merge config template with the configs from the annotations
 		// AppendValues option is used to append arrays from annotations to existing arrays while merging
 		if err := config.MergeWithOpts(tempCfg, ucfg.AppendValues); err != nil {
-			l.log.Debugf("hints.builder", "config merge failed with error: %v", err)
+			l.log.Debugf("config merge failed with error: %v", err)
 			continue
 		}
 		module := l.getModule(hints)
@@ -193,11 +193,11 @@ func (l *logHints) CreateConfig(event bus.Event, options ...ucfg.Option) []*conf
 				moduleConf[fileset+".enabled"] = cfg.Enabled
 				moduleConf[fileset+".input"] = filesetConf
 
-				l.log.Debugf("hints.builder", "generated config %+v", moduleConf)
+				l.log.Debugf("generated config %+v", moduleConf)
 			}
 			config, _ = conf.NewConfigFrom(moduleConf)
 		}
-		l.log.Debugf("hints.builder", "generated config %+v of logHints %+v", config, l)
+		l.log.Debugf("generated config %+v of logHints %+v", config, l)
 		configs = append(configs, config)
 	}
 	// Apply information in event to the template to generate the final config
diff --git a/filebeat/input/filestream/copytruncate_prospector.go b/filebeat/input/filestream/copytruncate_prospector.go
index 50ea7df25c0f..e6ee4c908d96 100644
--- a/filebeat/input/filestream/copytruncate_prospector.go
+++ b/filebeat/input/filestream/copytruncate_prospector.go
@@ -280,7 +280,7 @@ func (p *copyTruncateFileProspector) onFSEvent(
 
 	err := updater.ResetCursor(src, state{Offset: 0})
 	if err != nil {
-		log.Errorf("failed to reset file cursor: %w", err)
+		log.Errorf("failed to reset file cursor: %v", err)
 	}
 	group.Restart(ctx, src)
 
diff --git a/filebeat/input/filestream/filestream.go b/filebeat/input/filestream/filestream.go
index 7dec0182f459..ec13d625f405 100644
--- a/filebeat/input/filestream/filestream.go
+++ b/filebeat/input/filestream/filestream.go
@@ -142,7 +142,7 @@ func (f *logFile) startFileMonitoringIfNeeded() {
 		return nil
 	})
 	if err != nil {
-		f.log.Errorf("failed to start file monitoring: %w", err)
+		f.log.Errorf("failed to start file monitoring: %v", err)
 	}
 }
 
@@ -152,7 +152,7 @@ func (f *logFile) startFileMonitoringIfNeeded() {
 			return nil
 		})
 		if err != nil {
-			f.log.Errorf("failed to schedule a file close: %w", err)
+			f.log.Errorf("failed to schedule a file close: %v", err)
 		}
 	}
 }
diff --git a/filebeat/input/filestream/internal/input-logfile/harvester.go b/filebeat/input/filestream/internal/input-logfile/harvester.go
index 39834293cd70..15c7a67deee9 100644
--- a/filebeat/input/filestream/internal/input-logfile/harvester.go
+++ b/filebeat/input/filestream/internal/input-logfile/harvester.go
@@ -140,7 +140,7 @@ func (hg *defaultHarvesterGroup) Start(ctx inputv2.Context, src Source) {
 
 	if err := hg.tg.Go(startHarvester(ctx, hg, src, false, hg.metrics)); err != nil {
 		ctx.Logger.Warnf(
-			"tried to start harvester with task group already closed",
+			"tried to start harvester for %s with task group already closed",
 			ctx.ID)
 	}
 }
diff --git a/filebeat/input/filestream/internal/input-logfile/publish.go b/filebeat/input/filestream/internal/input-logfile/publish.go
index dcf5f0390b5d..2d1f1f3b3aa8 100644
--- a/filebeat/input/filestream/internal/input-logfile/publish.go
+++ b/filebeat/input/filestream/internal/input-logfile/publish.go
@@ -134,7 +134,7 @@ func (op *updateOp) Execute(store *store, n uint) {
 	} else {
 		err := typeconv.Convert(&resource.cursor, op.delta)
 		if err != nil {
-			store.log.Errorf("failed to perform type conversion: %w", err)
+			store.log.Errorf("failed to perform type conversion: %v", err)
 		}
 	}
 
diff --git a/filebeat/input/filestream/internal/input-logfile/update_writer.go b/filebeat/input/filestream/internal/input-logfile/update_writer.go
index 4c550fade4de..b6faf75a0140 100644
--- a/filebeat/input/filestream/internal/input-logfile/update_writer.go
+++ b/filebeat/input/filestream/internal/input-logfile/update_writer.go
@@ -66,7 +66,7 @@ func newUpdateWriter(store *store, ch *updateChan) *updateWriter {
 		return nil
 	})
 	if err != nil {
-		store.log.Errorf("failed to schedule the update writer routine: %w", err)
+		store.log.Errorf("failed to schedule the update writer routine: %v", err)
 	}
 
 	return w
@@ -77,7 +77,7 @@ func newUpdateWriter(store *store, ch *updateChan) *updateWriter {
 func (w *updateWriter) Close() {
 	err := w.tg.Stop()
 	if err != nil {
-		w.store.log.Errorf("failed to stop the update writer routine: %w", err)
+		w.store.log.Errorf("failed to stop the update writer routine: %v", err)
 	}
 	w.syncStates(w.ch.TryRecv())
 }
diff --git a/filebeat/input/filestream/prospector.go b/filebeat/input/filestream/prospector.go
index 1e3b7cb6c692..324ff586d3ec 100644
--- a/filebeat/input/filestream/prospector.go
+++ b/filebeat/input/filestream/prospector.go
@@ -347,7 +347,7 @@ func (p *fileProspector) onRename(log *logp.Logger, ctx input.Context, fe loginp
 	err := s.FindCursorMeta(src, &meta)
 	if err != nil {
 		meta.IdentifierName = p.identifier.Name()
-		log.Warnf("Error while getting cursor meta data of entry '%s': '%w'"+
+		log.Warnf("Error while getting cursor meta data of entry '%s': '%v'"+
 			", using prospector's identifier: '%s'",
 			src.Name(), err, meta.IdentifierName)
 	}
diff --git a/filebeat/input/journald/input.go b/filebeat/input/journald/input.go
index 7dc4cf50966e..1c189444e938 100644
--- a/filebeat/input/journald/input.go
+++ b/filebeat/input/journald/input.go
@@ -215,7 +215,7 @@ func (inp *journald) Run(
 		if err := publisher.Publish(event, event.Private); err != nil {
 			msg := fmt.Sprintf("could not publish event: %s", err)
 			ctx.UpdateStatus(status.Failed, msg)
-			logger.Errorf(msg)
+			logger.Errorf("%s", msg)
 			return err
 		}
 	}
diff --git a/filebeat/input/log/input.go b/filebeat/input/log/input.go
index e2090f6ebb88..655491432082 100644
--- a/filebeat/input/log/input.go
+++ b/filebeat/input/log/input.go
@@ -96,7 +96,7 @@ func NewInput(
 	cleanupIfNeeded := func(f func() error) {
 		if cleanupNeeded {
 			if err := f(); err != nil {
-				logp.L().Named("input.log").Errorf("clean up function returned an error: %w", err)
+				logp.L().Named("input.log").Errorf("clean up function returned an error: %v", err)
 			}
 		}
 	}
diff --git a/filebeat/input/unix/input.go b/filebeat/input/unix/input.go
index 598b6393e112..65414d65de9f 100644
--- a/filebeat/input/unix/input.go
+++ b/filebeat/input/unix/input.go
@@ -119,7 +119,7 @@ func (s *server) Run(ctx input.Context, publisher stateless.Publisher) error {
 		return err
 	}
 
-	log.Debugf("%s Input '%v' initialized", s.config.Config.SocketType, ctx.ID)
+	log.Debugf("%v Input '%v' initialized", s.config.Config.SocketType, ctx.ID)
 
 	err = server.Run(ctxtool.FromCanceller(ctx.Cancelation))
diff --git a/heartbeat/autodiscover/builder/hints/monitors.go b/heartbeat/autodiscover/builder/hints/monitors.go
index 33ebc409eb4e..7b9b96b72acf 100644
--- a/heartbeat/autodiscover/builder/hints/monitors.go
+++ b/heartbeat/autodiscover/builder/hints/monitors.go
@@ -124,7 +124,7 @@ func (hb *heartbeatHints) CreateConfig(event bus.Event, options ...ucfg.Option)
 
 		h, err := hb.getHostsWithPort(monitor, port, podEvent)
 		if err != nil {
-			hb.logger.Warnf("unable to find valid hosts for %+v: %w", monitor, err)
+			hb.logger.Warnf("unable to find valid hosts for %+v: %v", monitor, err)
 			continue
 		}
 
@@ -135,7 +135,7 @@ func (hb *heartbeatHints) CreateConfig(event bus.Event, options ...ucfg.Option)
 			hb.logger.Debugf("unable to create config from MapStr %+v", tempCfg)
 			return []*conf.C{}
 		}
-		hb.logger.Debugf("hints.builder", "generated config %+v", config)
+		hb.logger.Debugf("generated config %+v", config)
 		configs = append(configs, config)
 	}
 
diff --git a/heartbeat/beater/heartbeat.go b/heartbeat/beater/heartbeat.go
index 227b375ee900..bfa51ed61b24 100644
--- a/heartbeat/beater/heartbeat.go
+++ b/heartbeat/beater/heartbeat.go
@@ -79,7 +79,7 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) {
 		if err == nil {
 			trace = sockTrace
 		} else {
-			logp.L().Warnf("could not connect to socket trace at path %s after %s timeout: %w", stConfig.Path, stConfig.Wait, err)
+			logp.L().Warnf("could not connect to socket trace at path %s after %s timeout: %v", stConfig.Path, stConfig.Wait, err)
 		}
 	}
 
@@ -94,7 +94,7 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) {
 			trace.Abort()
 			return nil, fmt.Errorf("run_once mode fatal error: %w", err)
 		} else {
-			logp.L().Warnf("skipping monitor state management: %w", err)
+			logp.L().Warnf("skipping monitor state management: %v", err)
 		}
 	} else {
 		replaceStateLoader(monitorstate.MakeESLoader(esClient, monitorstate.DefaultDataStreams, parsedConfig.RunFrom))
@@ -277,7 +277,7 @@ func (bt *Heartbeat) RunCentralMgmtMonitors(b *beat.Beat) {
 	// Backoff panics with 0 duration, set to smallest unit
 	esClient, err := makeESClient(context.TODO(), outCfg.Config(), 1, 1*time.Nanosecond)
 	if err != nil {
-		logp.L().Warnf("skipping monitor state management during managed reload: %w", err)
+		logp.L().Warnf("skipping monitor state management during managed reload: %v", err)
 	} else {
 		bt.replaceStateLoader(monitorstate.MakeESLoader(esClient, monitorstate.DefaultDataStreams, bt.config.RunFrom))
 	}
diff --git a/heartbeat/monitors/active/icmp/stdloop.go b/heartbeat/monitors/active/icmp/stdloop.go
index 8fa0816bb5b9..80ab3b3cc575 100644
--- a/heartbeat/monitors/active/icmp/stdloop.go
+++ b/heartbeat/monitors/active/icmp/stdloop.go
@@ -165,7 +165,7 @@ func (l *stdICMPLoop) runICMPRecv(conn *icmp.PacketConn, proto int) {
 		bytes := make([]byte, 512)
 		err := conn.SetReadDeadline(time.Now().Add(time.Second))
 		if err != nil {
-			logp.L().Errorf("could not set read deadline for ICMP: %w", err)
+			logp.L().Errorf("could not set read deadline for ICMP: %v", err)
 			return
 		}
 		_, addr, err := conn.ReadFrom(bytes)
diff --git a/heartbeat/monitors/factory.go b/heartbeat/monitors/factory.go
index 9e6d4449d312..de5bdd659b1a 100644
--- a/heartbeat/monitors/factory.go
+++ b/heartbeat/monitors/factory.go
@@ -291,7 +291,7 @@ func preProcessors(info beat.Info, location *config.LocationWithID, settings pub
 	geoM, err := util.GeoConfigToMap(location.Geo)
 	if err != nil {
 		geoErrOnce.Do(func() {
-			logp.L().Warnf("could not add heartbeat geo info: %w", err)
+			logp.L().Warnf("could not add heartbeat geo info: %v", err)
 		})
 	}
 
diff --git a/heartbeat/monitors/monitor.go b/heartbeat/monitors/monitor.go
index 8fa82c10ea14..9636bb2902bb 100644
--- a/heartbeat/monitors/monitor.go
+++ b/heartbeat/monitors/monitor.go
@@ -266,7 +266,7 @@ func (m *Monitor) Stop() {
 	if m.close != nil {
 		err := m.close()
 		if err != nil {
-			logp.L().Errorf("error closing monitor %s: %w", m.String(), err)
+			logp.L().Errorf("error closing monitor %s: %v", m.String(), err)
 		}
 	}
 
diff --git a/heartbeat/scheduler/schedjob.go b/heartbeat/scheduler/schedjob.go
index 50f94a895d0b..0d720b28caa0 100644
--- a/heartbeat/scheduler/schedjob.go
+++ b/heartbeat/scheduler/schedjob.go
@@ -65,7 +65,7 @@ func (sj *schedJob) run() (startedAt time.Time) {
 		if err == nil {
 			defer sj.jobLimitSem.Release(1)
 		} else {
-			logp.L().Errorf("could not acquire semaphore: %w", err)
+			logp.L().Errorf("could not acquire semaphore: %v", err)
 		}
 	}
 
diff --git a/libbeat/autodiscover/providers/kubernetes/kubernetes.go b/libbeat/autodiscover/providers/kubernetes/kubernetes.go
index 823989787fdb..2f4fd1cfead3 100644
--- a/libbeat/autodiscover/providers/kubernetes/kubernetes.go
+++ b/libbeat/autodiscover/providers/kubernetes/kubernetes.go
@@ -350,7 +350,7 @@ func (p *leaderElectionManager) GenerateHints(event bus.Event) bus.Event {
 func (p *leaderElectionManager) startLeaderElectorIndefinitely(ctx context.Context, lec leaderelection.LeaderElectionConfig) {
 	le, err := leaderelection.NewLeaderElector(lec)
 	if err != nil {
-		p.logger.Errorf("error while creating Leader Elector: %w", err)
+		p.logger.Errorf("error while creating Leader Elector: %v", err)
 	}
 
 	p.logger.Debugf("Starting Leader Elector")
diff --git a/libbeat/conditions/range.go b/libbeat/conditions/range.go
index cb5e75603fda..70272a180302 100644
--- a/libbeat/conditions/range.go
+++ b/libbeat/conditions/range.go
@@ -114,7 +114,7 @@ func (c Range) Check(event ValuesMap) bool {
 
 		floatValue, err := ExtractFloat(value)
 		if err != nil {
-			logp.L().Named(logName).Warnf(err.Error())
+			logp.L().Named(logName).Warn(err.Error())
 			return false
 		}
 
diff --git a/libbeat/processors/add_host_metadata/add_host_metadata.go b/libbeat/processors/add_host_metadata/add_host_metadata.go
index 3a7e7b40c55f..8f7fb89b2f8f 100644
--- a/libbeat/processors/add_host_metadata/add_host_metadata.go
+++ b/libbeat/processors/add_host_metadata/add_host_metadata.go
@@ -101,7 +101,7 @@ func New(cfg *config.C) (beat.Processor, error) {
 	cbID, err := uuid.NewV4()
 	// if we fail, fall back to the processor name, hope for the best.
 	if err != nil {
-		p.logger.Errorf("error generating ID for FQDN callback, reverting to processor name: %w", err)
+		p.logger.Errorf("error generating ID for FQDN callback, reverting to processor name: %v", err)
 		cbIDStr = processorName
 	} else {
 		cbIDStr = cbID.String()
@@ -262,7 +262,7 @@ func (p *addHostMetadata) updateOrExpire(useFQDN bool) {
 		go func() {
 			err := p.loadData(false, useFQDN)
 			if err != nil {
-				p.logger.Errorf("error updating data for processor: %w")
+				p.logger.Errorf("error updating data for processor: %v", err)
 				updateChanSuccess <- false
 				return
 			}
diff --git a/libbeat/processors/add_kubernetes_metadata/kubernetes.go b/libbeat/processors/add_kubernetes_metadata/kubernetes.go
index 7bb1ddd0905d..f8965166deec 100644
--- a/libbeat/processors/add_kubernetes_metadata/kubernetes.go
+++ b/libbeat/processors/add_kubernetes_metadata/kubernetes.go
@@ -191,7 +191,7 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {
 		if config.Scope == "node" {
 			config.Node, err = kubernetes.DiscoverKubernetesNode(k.log, nd)
 			if err != nil {
-				k.log.Errorf("Couldn't discover Kubernetes node: %w", err)
+				k.log.Errorf("Couldn't discover Kubernetes node: %v", err)
 				return
 			}
 			k.log.Debugf("Initializing a new Kubernetes watcher using host: %s", config.Node)
@@ -299,30 +299,30 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {
 		// be populated before trying to generate metadata for Pods.
 		if k.nodeWatcher != nil {
 			if err := k.nodeWatcher.Start(); err != nil {
-				k.log.Debugf("add_kubernetes_metadata", "Couldn't start node watcher: %v", err)
+				k.log.Debugf("Couldn't start node watcher: %v", err)
 				return
 			}
 		}
 		if k.nsWatcher != nil {
 			if err := k.nsWatcher.Start(); err != nil {
-				k.log.Debugf("add_kubernetes_metadata", "Couldn't start namespace watcher: %v", err)
+				k.log.Debugf("Couldn't start namespace watcher: %v", err)
 				return
 			}
 		}
 		if k.rsWatcher != nil {
 			if err := k.rsWatcher.Start(); err != nil {
-				k.log.Debugf("add_kubernetes_metadata", "Couldn't start replicaSet watcher: %v", err)
+				k.log.Debugf("Couldn't start replicaSet watcher: %v", err)
 				return
 			}
 		}
 		if k.jobWatcher != nil {
 			if err := k.jobWatcher.Start(); err != nil {
-				k.log.Debugf("add_kubernetes_metadata", "Couldn't start job watcher: %v", err)
+				k.log.Debugf("Couldn't start job watcher: %v", err)
 				return
 			}
 		}
 		if err := watcher.Start(); err != nil {
-			k.log.Debugf("add_kubernetes_metadata", "Couldn't start pod watcher: %v", err)
+			k.log.Debugf("Couldn't start pod watcher: %v", err)
 			return
 		}
 	})
diff --git a/libbeat/publisher/queue/diskqueue/segments.go b/libbeat/publisher/queue/diskqueue/segments.go
index 11eeb9991c6a..8652be768da1 100644
--- a/libbeat/publisher/queue/diskqueue/segments.go
+++ b/libbeat/publisher/queue/diskqueue/segments.go
@@ -174,7 +174,7 @@ func scanExistingSegments(logger *logp.Logger, pathStr string) ([]*queueSegment,
 	for _, dirEntry := range dirEntries {
 		file, err := dirEntry.Info()
 		if err != nil {
-			logger.Errorf("could not get info for file '%s', skipping. Error: %w", dirEntry.Name(), err)
+			logger.Errorf("could not get info for file '%s', skipping. Error: %v", dirEntry.Name(), err)
 			continue
 		}
 
diff --git a/metricbeat/mb/lightmodules.go b/metricbeat/mb/lightmodules.go
index 50293d7f60b6..fb75103ad882 100644
--- a/metricbeat/mb/lightmodules.go
+++ b/metricbeat/mb/lightmodules.go
@@ -257,12 +257,12 @@ func (s *LightModulesSource) moduleNames() ([]string, error) {
 	modules := make(map[string]bool)
 	for _, dir := range s.paths {
 		if _, err := os.Stat(dir); os.IsNotExist(err) {
-			s.log.Debugf("Light modules directory '%d' doesn't exist", dir)
+			s.log.Debugf("Light modules directory '%s' doesn't exist", dir)
 			continue
 		}
 		files, err := ioutil.ReadDir(dir)
 		if err != nil {
-			return nil, fmt.Errorf("listing modules on path '%s': %w", dir, err)
+			return nil, fmt.Errorf("listing modules on path '%s': %v", dir, err)
 		}
 		for _, f := range files {
 			if !f.IsDir() {
diff --git a/metricbeat/module/kubernetes/event/event.go b/metricbeat/module/kubernetes/event/event.go
index 637fa8e034d6..74bda247e38d 100644
--- a/metricbeat/module/kubernetes/event/event.go
+++ b/metricbeat/module/kubernetes/event/event.go
@@ -101,7 +101,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
 		cfg, _ := conf.NewConfigFrom(&config)
 		ecsClusterMeta, err := util.GetClusterECSMeta(cfg, client, ms.Logger())
 		if err != nil {
-			ms.Logger().Debugf("could not retrieve cluster metadata: %w", err)
+			ms.Logger().Debugf("could not retrieve cluster metadata: %v", err)
 		}
 		if ecsClusterMeta != nil {
 			ms.clusterMeta = ecsClusterMeta
@@ -127,7 +127,7 @@ func (m *MetricSet) Run(reporter mb.PushReporterV2) {
 		FilterFunc: func(obj interface{}) bool {
 			eve, ok := obj.(*kubernetes.Event)
 			if !ok {
-				m.Logger().Debugf("Error while casting event: %s", ok)
+				m.Logger().Debugf("Error while casting event. Got type: %T", obj)
 			}
 			// if fields are null they are decoded to `0001-01-01 00:00:00 +0000 UTC`
 			// so we need to check if they are valid first
@@ -148,7 +148,7 @@ func (m *MetricSet) Run(reporter mb.PushReporterV2) {
 	// start event watcher
 	err := m.watcher.Start()
 	if err != nil {
-		m.Logger().Debugf("Unable to start watcher: %w", err)
+		m.Logger().Debugf("Unable to start watcher: %v", err)
 	}
 	<-reporter.Done()
 	m.watcher.Stop()
diff --git a/metricbeat/module/kubernetes/state_container/state_container.go b/metricbeat/module/kubernetes/state_container/state_container.go
index d00515fe081e..085bdb5e285e 100644
--- a/metricbeat/module/kubernetes/state_container/state_container.go
+++ b/metricbeat/module/kubernetes/state_container/state_container.go
@@ -147,7 +147,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error {
 		// empty string
 		cID, ok := (containerID).(string)
 		if !ok {
-			m.Logger().Debugf("Error while casting containerID: %s", ok)
+			m.Logger().Debugf("Error while casting containerID, got %T", containerID)
 		}
 		split := strings.Index(cID, "://")
 		if split != -1 {
@@ -162,7 +162,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error {
 		if containerImage, ok := event["image"]; ok {
 			cImage, ok := (containerImage).(string)
 			if !ok {
-				m.Logger().Debugf("Error while casting containerImage: %s", ok)
+				m.Logger().Debugf("Error while casting containerImage, got %T", containerImage)
 			}
 
 			kubernetes.ShouldPut(containerFields, "image.name", cImage, m.Logger())
diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go
index 72034da67e39..a092dd6b067e 100644
--- a/metricbeat/module/kubernetes/util/kubernetes.go
+++ b/metricbeat/module/kubernetes/util/kubernetes.go
@@ -861,7 +861,7 @@ func NewContainerMetadataEnricher(
 
 			pod, ok := r.(*kubernetes.Pod)
 			if !ok {
-				base.Logger().Debugf("Error while casting event: %s", ok)
+				base.Logger().Debugf("Error while casting event, got %T", r)
 			}
 			pmeta := metaGen.Generate(pod)
 
@@ -900,7 +900,7 @@ func NewContainerMetadataEnricher(
 			ids := make([]string, 0)
 			pod, ok := r.(*kubernetes.Pod)
 			if !ok {
-				base.Logger().Debugf("Error while casting event: %s", ok)
+				base.Logger().Debugf("Error while casting event, got %T", r)
 			}
 			for _, container := range append(pod.Spec.Containers, pod.Spec.InitContainers...) {
diff --git a/metricbeat/module/linux/rapl/rapl.go b/metricbeat/module/linux/rapl/rapl.go
index 5dd823efc540..c8d69247ade4 100644
--- a/metricbeat/module/linux/rapl/rapl.go
+++ b/metricbeat/module/linux/rapl/rapl.go
@@ -176,7 +176,7 @@ func (m *MetricSet) updatePower() map[int]map[rapl.RAPLDomain]energyUsage {
 			continue
 		}
 		if err != nil {
-			logp.L().Infof("Error reading MSR from domain %s: %s skipping.", domain, err)
+			logp.L().Infof("Error reading MSR from domain %s: %s skipping.", domain.Name, err)
 			continue
 		}
 		domainList[domain] = energyTrack{joules: joules, time: time.Now()}
diff --git a/metricbeat/module/mysql/query/query.go b/metricbeat/module/mysql/query/query.go
index e9225c316b70..1e1d31077dc8 100644
--- a/metricbeat/module/mysql/query/query.go
+++ b/metricbeat/module/mysql/query/query.go
@@ -109,7 +109,7 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error {
 	for _, q := range m.Config.Queries {
 		err := m.fetchQuery(ctx, q, reporter)
 		if err != nil {
-			m.Logger().Errorf("error doing query %s", q, err)
+			m.Logger().Errorf("error doing query %v: %v", q, err)
 		}
 	}
 
diff --git a/metricbeat/module/system/diskio/diskio.go b/metricbeat/module/system/diskio/diskio.go
index e57dc19560fc..068581991204 100644
--- a/metricbeat/module/system/diskio/diskio.go
+++ b/metricbeat/module/system/diskio/diskio.go
@@ -80,7 +80,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error {
 	err = m.statistics.OpenSampling()
 	// CPU sampling does not seem to be used by any of the diskio metrics we're using. Mostly used by iostat.
 	if err != nil {
-		m.Logger().Warnf("Error in CPU sampling for diskio: %w", err)
+		m.Logger().Warnf("Error in CPU sampling for diskio: %v", err)
 	}
 
 	// Store the last cpu counter when finished
diff --git a/metricbeat/module/vsphere/cluster/cluster.go b/metricbeat/module/vsphere/cluster/cluster.go
index 82396f9261d2..2849b7ff82d9 100644
--- a/metricbeat/module/vsphere/cluster/cluster.go
+++ b/metricbeat/module/vsphere/cluster/cluster.go
@@ -144,7 +144,7 @@ func (m *ClusterMetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) er
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, clt[i].TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve alerts from cluster %s: %w", clt[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve alerts from cluster %s: %v", clt[i].Name, err)
 		}
 
 		reporter.Event(mb.Event{
diff --git a/metricbeat/module/vsphere/datastore/datastore.go b/metricbeat/module/vsphere/datastore/datastore.go
index c1899048240b..8139188ee737 100644
--- a/metricbeat/module/vsphere/datastore/datastore.go
+++ b/metricbeat/module/vsphere/datastore/datastore.go
@@ -159,7 +159,7 @@ func (m *DataStoreMetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2)
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, dst[i].TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve alerts from datastore %s: %w", dst[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve alerts from datastore %s: %v", dst[i].Name, err)
 		}
 
 		reporter.Event(mb.Event{
diff --git a/metricbeat/module/vsphere/datastorecluster/datastorecluster.go b/metricbeat/module/vsphere/datastorecluster/datastorecluster.go
index 1d57e8dae6cf..51132bd278fe 100644
--- a/metricbeat/module/vsphere/datastorecluster/datastorecluster.go
+++ b/metricbeat/module/vsphere/datastorecluster/datastorecluster.go
@@ -127,12 +127,12 @@ func (m *DatastoreClusterMetricSet) Fetch(ctx context.Context, reporter mb.Repor
 
 		assetNames, err := getAssetNames(ctx, pc, &datastoreCluster[i])
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve object from datastore cluster %s: v", datastoreCluster[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve object from datastore cluster %s: %v", datastoreCluster[i].Name, err)
 		}
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, datastoreCluster[i].TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve alerts from datastore cluster %s: %w", datastoreCluster[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve alerts from datastore cluster %s: %v", datastoreCluster[i].Name, err)
 		}
 
 		reporter.Event(mb.Event{MetricSetFields: m.mapEvent(datastoreCluster[i], &metricData{assetNames: assetNames, triggeredAlarms: triggeredAlarm})})
diff --git a/metricbeat/module/vsphere/host/host.go b/metricbeat/module/vsphere/host/host.go
index e2f3989933ca..7514afe3476d 100644
--- a/metricbeat/module/vsphere/host/host.go
+++ b/metricbeat/module/vsphere/host/host.go
@@ -171,7 +171,7 @@ func (m *HostMetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, hst[i].TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve triggered alarms from host %s: %w", hst[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve triggered alarms from host %s: %v", hst[i].Name, err)
 		}
 
 		reporter.Event(mb.Event{
diff --git a/metricbeat/module/vsphere/network/network.go b/metricbeat/module/vsphere/network/network.go
index a6c860fed031..29395190851f 100644
--- a/metricbeat/module/vsphere/network/network.go
+++ b/metricbeat/module/vsphere/network/network.go
@@ -137,7 +137,7 @@ func (m *NetworkMetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) er
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, networks[i].TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve alerts from network %s: %w", networks[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve alerts from network %s: %v", networks[i].Name, err)
 		}
 
 		reporter.Event(mb.Event{
diff --git a/metricbeat/module/vsphere/resourcepool/resourcepool.go b/metricbeat/module/vsphere/resourcepool/resourcepool.go
index 9a127622a40d..1a754bbae46b 100644
--- a/metricbeat/module/vsphere/resourcepool/resourcepool.go
+++ b/metricbeat/module/vsphere/resourcepool/resourcepool.go
@@ -140,7 +140,7 @@ func (m *ResourcePoolMetricSet) Fetch(ctx context.Context, reporter mb.ReporterV
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, rps[i].TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve alerts from resource pool %s: %w", rps[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve alerts from resource pool %s: %v", rps[i].Name, err)
 		}
 
 		reporter.Event(mb.Event{
diff --git a/metricbeat/module/vsphere/virtualmachine/virtualmachine.go b/metricbeat/module/vsphere/virtualmachine/virtualmachine.go
index dcde2b06e89c..6f2794ccc899 100644
--- a/metricbeat/module/vsphere/virtualmachine/virtualmachine.go
+++ b/metricbeat/module/vsphere/virtualmachine/virtualmachine.go
@@ -244,7 +244,7 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error {
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, vm.TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve alerts from VM %s: %w", vm.Name, err)
+			m.Logger().Errorf("Failed to retrieve alerts from VM %s: %v", vm.Name, err)
 		}
 
 		data := VMData{
diff --git a/packetbeat/protos/dns/dns_tcp.go b/packetbeat/protos/dns/dns_tcp.go
index ac1eacaf88e2..cf5c74af6ecd 100644
--- a/packetbeat/protos/dns/dns_tcp.go
+++ b/packetbeat/protos/dns/dns_tcp.go
@@ -56,7 +56,7 @@ type dnsConnectionData struct {
 func (dns *dnsPlugin) Parse(pkt *protos.Packet, tcpTuple *common.TCPTuple, dir uint8, private protos.ProtocolData) protos.ProtocolData {
 	defer dns.logger.Recover("Dns ParseTcp")
 
-	dns.logger.Debugf("dns", "Parsing packet addressed with %s of length %d.", &pkt.Tuple, len(pkt.Payload))
+	dns.logger.Debugf("Parsing packet addressed with %s of length %d.", &pkt.Tuple, len(pkt.Payload))
 
 	conn := ensureDNSConnection(private, dns.logger)
 
@@ -99,7 +99,7 @@ func (dns *dnsPlugin) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTu
 	stream.rawData = append(stream.rawData, payload...)
 	if len(stream.rawData) > tcp.TCPMaxDataInStream {
-		dns.logger.Debugf("dns", "Stream data too large, dropping DNS stream")
+		dns.logger.Debug("Stream data too large, dropping DNS stream")
 		conn.data[dir] = nil
 		return conn
 	}
 
@@ -107,7 +107,7 @@ func (dns *dnsPlugin) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTu
 	decodedData, err := stream.handleTCPRawData()
 	if err != nil {
 		if err == incompleteMsg { //nolint:errorlint // incompleteMsg is not wrapped.
-			dns.logger.Debugf("dns", "Waiting for more raw data")
+			dns.logger.Debug("Waiting for more raw data")
 			return conn
 		}
 
@@ -115,7 +115,7 @@ func (dns *dnsPlugin) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTu
 			dns.publishResponseError(conn, err)
 		}
 
-		dns.logger.Debugf("dns", "%v addresses %s, length %d", err, tcpTuple, len(stream.rawData))
+		dns.logger.Debugf("%v addresses %s, length %d", err, tcpTuple, len(stream.rawData))
 
 		// This means that malformed requests or responses are being sent...
 		// TODO: publish the situation also if Request
@@ -187,7 +187,7 @@ func (dns *dnsPlugin) ReceivedFin(tcpTuple *common.TCPTuple, dir uint8, private
 		dns.publishResponseError(conn, err)
 	}
 
-	dns.logger.Debugf("dns", "%v addresses %s, length %d", err, tcpTuple, len(stream.rawData))
+	dns.logger.Debugf("%v addresses %s, length %d", err, tcpTuple, len(stream.rawData))
 
 	return conn
 }
@@ -216,8 +216,8 @@ func (dns *dnsPlugin) GapInStream(tcpTuple *common.TCPTuple, dir uint8, nbytes i
 		dns.publishResponseError(conn, err)
 	}
 
-	dns.logger.Debugf("dns", "%v addresses %s, length %d", err, tcpTuple, len(stream.rawData))
-	dns.logger.Debugf("dns", "Dropping the stream %s", tcpTuple)
+	dns.logger.Debugf("%v addresses %s, length %d", err, tcpTuple, len(stream.rawData))
+	dns.logger.Debugf("Dropping the stream %s", tcpTuple)
 
 	// drop the stream because it is binary Data and it would be unexpected to have a decodable message later
 	return private, true
diff --git a/x-pack/auditbeat/module/system/socket/socket_linux.go b/x-pack/auditbeat/module/system/socket/socket_linux.go
index cae8aaa9d80f..276771dafba2 100644
--- a/x-pack/auditbeat/module/system/socket/socket_linux.go
+++ b/x-pack/auditbeat/module/system/socket/socket_linux.go
@@ -295,7 +295,7 @@ func (m *MetricSet) Setup() (err error) {
 			continue
 		}
 		if tracing.IsTraceFSAvailable() != nil {
-			m.log.Warnf("Mounted %s but no kprobes available", mount, err)
+			m.log.Warnf("Mounted %s but no kprobes available: %v", mount, err)
 			mount.unmount()
 			continue
 		}
diff --git a/x-pack/filebeat/input/awscloudwatch/input.go b/x-pack/filebeat/input/awscloudwatch/input.go
index a51d68c0936f..5bc6906396db 100644
--- a/x-pack/filebeat/input/awscloudwatch/input.go
+++ b/x-pack/filebeat/input/awscloudwatch/input.go
@@ -125,9 +125,9 @@ func (in *cloudwatchInput) Run(inputContext v2.Context, pipeline beat.Pipeline)
 
 	cwPoller.metrics.logGroupsTotal.Add(uint64(len(logGroupIDs)))
 	cwPoller.startWorkers(ctx, svc, logProcessor)
-	log.Debugf("Config latency = %f", cwPoller.config.Latency)
-	log.Debugf("Config scan_frequency = %f", cwPoller.config.ScanFrequency)
-	log.Debugf("Config api_sleep = %f", cwPoller.config.APISleep)
+	log.Debugf("Config latency = %s", cwPoller.config.Latency)
+	log.Debugf("Config scan_frequency = %s", cwPoller.config.ScanFrequency)
+	log.Debugf("Config api_sleep = %s", cwPoller.config.APISleep)
 	cwPoller.receive(ctx, logGroupIDs, time.Now)
 	return nil
 }
diff --git a/x-pack/filebeat/input/awss3/sqs_s3_event.go b/x-pack/filebeat/input/awss3/sqs_s3_event.go
index cef0d2dc108a..fd6ee6da20de 100644
--- a/x-pack/filebeat/input/awss3/sqs_s3_event.go
+++ b/x-pack/filebeat/input/awss3/sqs_s3_event.go
@@ -222,7 +222,7 @@ func (r sqsProcessingResult) Done() {
 			return
 		}
 		p.metrics.sqsMessagesDeletedTotal.Inc()
-		p.log.Errorf("failed processing SQS message (message was deleted): %w", processingErr)
+		p.log.Errorf("failed processing SQS message (message was deleted): %v", processingErr)
 		return
 	}
 
@@ -231,7 +231,7 @@ func (r sqsProcessingResult) Done() {
 	// queue is enabled then the message will eventually placed on the DLQ
 	// after maximum receives is reached.
 	p.metrics.sqsMessagesReturnedTotal.Inc()
-	p.log.Errorf("failed processing SQS message (it will return to queue after visibility timeout): %w", processingErr)
+	p.log.Errorf("failed processing SQS message (it will return to queue after visibility timeout): %v", processingErr)
 }
 
 func (p *sqsS3EventProcessor) keepalive(ctx context.Context, log *logp.Logger, msg *types.Message) {
diff --git a/x-pack/filebeat/input/netflow/input.go b/x-pack/filebeat/input/netflow/input.go
index bb4046b74a91..e892c1c9fabe 100644
--- a/x-pack/filebeat/input/netflow/input.go
+++ b/x-pack/filebeat/input/netflow/input.go
@@ -222,7 +222,7 @@ func (n *netflowInput) Run(env v2.Context, connector beat.PipelineConnector) err
 	err = udpServer.Start()
 	if err != nil {
 		errorMsg := fmt.Sprintf("Failed to start udp server: %v", err)
-		n.logger.Errorf(errorMsg)
+		n.logger.Error(errorMsg)
 		env.UpdateStatus(status.Failed, errorMsg)
 		n.stop()
 		return err
diff --git a/x-pack/heartbeat/monitors/browser/sourcejob.go b/x-pack/heartbeat/monitors/browser/sourcejob.go
index e191d5d2131c..5528d62f852f 100644
--- a/x-pack/heartbeat/monitors/browser/sourcejob.go
+++ b/x-pack/heartbeat/monitors/browser/sourcejob.go
@@ -129,7 +129,7 @@ func (sj *SourceJob) extraArgs(uiOrigin bool) []string {
 		s, err := json.Marshal(sj.browserCfg.PlaywrightOpts)
 		if err != nil {
 			// This should never happen, if it was parsed as a config it should be serializable
-			logp.L().Warnf("could not serialize playwright options '%v': %w", sj.browserCfg.PlaywrightOpts, err)
+			logp.L().Warnf("could not serialize playwright options '%v': %v", sj.browserCfg.PlaywrightOpts, err)
 		} else {
 			extraArgs = append(extraArgs, "--playwright-options", string(s))
 		}
diff --git a/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go b/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go
index 0f88e859c59d..939f5181d343 100644
--- a/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go
+++ b/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go
@@ -217,7 +217,7 @@ func runCmd(
 			break
 		}
 		if err != nil {
-			logp.L().Warnf("error decoding json for test json results: %w", err)
+			logp.L().Warnf("error decoding json for test json results: %v", err)
 		}
 
 		mpx.writeSynthEvent(&se)
diff --git a/x-pack/libbeat/common/cloudfoundry/dopplerconsumer.go b/x-pack/libbeat/common/cloudfoundry/dopplerconsumer.go
index f33a2f1f7460..cc6ed89ccb66 100644
--- a/x-pack/libbeat/common/cloudfoundry/dopplerconsumer.go
+++ b/x-pack/libbeat/common/cloudfoundry/dopplerconsumer.go
@@ -131,7 +131,7 @@ func filterLogs(e *events.Envelope) bool {
 
 func (c *DopplerConsumer) reportError(e EventError) {
 	if c.callbacks.Error == nil {
-		c.log.Debugf("No callback for errors, error received: %s", e)
+		c.log.Debugf("No callback for errors, error received: %v", e)
 		return
 	}
 	c.callbacks.Error(e)
diff --git a/x-pack/libbeat/management/managerV2.go b/x-pack/libbeat/management/managerV2.go
index 288f28ae0de5..959b791932fe 100644
--- a/x-pack/libbeat/management/managerV2.go
+++ b/x-pack/libbeat/management/managerV2.go
@@ -991,7 +991,7 @@ func (cm *BeatV2Manager) handleDebugYaml() []byte {
 
 	data, err := yaml.Marshal(beatCfg)
 	if err != nil {
-		cm.logger.Errorf("error generating YAML for input debug callback: %w", err)
+		cm.logger.Errorf("error generating YAML for input debug callback: %v", err)
 		return nil
 	}
 	return data
diff --git a/x-pack/metricbeat/module/aws/awshealth/awshealth.go b/x-pack/metricbeat/module/aws/awshealth/awshealth.go
index ed864c463b8e..7b86b7663dd6 100644
--- a/x-pack/metricbeat/module/aws/awshealth/awshealth.go
+++ b/x-pack/metricbeat/module/aws/awshealth/awshealth.go
@@ -199,7 +199,7 @@ func (m *MetricSet) getEventDetails(
 				m.Logger().Errorf("[AWS Health] DescribeEvents failed with: Operation=%s, UnderlyingError=%v",
 					opErr.Operation(), opErr.Err)
 			} else {
-				m.Logger().Errorf("[AWS Health] DescribeEvents failed with: %w", err)
+				m.Logger().Errorf("[AWS Health] DescribeEvents failed with: %v", err)
 			}
 			break
 		}
@@ -232,7 +232,7 @@ func (m *MetricSet) getEventDetails(
 				m.Logger().Errorf("[AWS Health] DescribeEventDetails failed with: Operation=%s, UnderlyingError=%v",
 					opErr.Operation(), opErr.Err)
 			} else {
-				m.Logger().Errorf("[AWS Health] DescribeEventDetails failed with: %w", err)
+				m.Logger().Errorf("[AWS Health] DescribeEventDetails failed with: %v", err)
 			}
 			break
 		}
@@ -261,7 +261,7 @@ func (m *MetricSet) getEventDetails(
 		// Fetch current page of affected entities
 		affCurrentPage, err := affPage.NextPage(ctx)
 		if err != nil {
-			m.Logger().Errorf("[AWS Health] DescribeAffectedEntitie failed with : %w", err)
+			m.Logger().Errorf("[AWS Health] DescribeAffectedEntitie failed with : %v", err)
 			break
 		}
 		// Extract relevant details of affected entities and match them with event details
diff --git a/x-pack/metricbeat/module/aws/billing/billing.go b/x-pack/metricbeat/module/aws/billing/billing.go
index 378d6564389f..89b270e91c85 100644
--- a/x-pack/metricbeat/module/aws/billing/billing.go
+++ b/x-pack/metricbeat/module/aws/billing/billing.go
@@ -269,7 +269,7 @@ func (m *MetricSet) getCostGroupBy(svcCostExplorer *costexplorer.Client, groupBy
 	groupByOutput, err := svcCostExplorer.GetCostAndUsage(context.Background(), &groupByCostInput)
 	if err != nil {
 		err = fmt.Errorf("costexplorer GetCostAndUsageRequest failed: %w", err)
-		m.Logger().Errorf(err.Error())
+		m.Logger().Error(err.Error())
 		return nil
 	}
 
@@ -328,7 +328,7 @@ func (m *MetricSet) addCostMetrics(metrics map[string]costexplorertypes.MetricVa
 		costFloat, err := strconv.ParseFloat(*cost.Amount, 64)
 		if err != nil {
 			err = fmt.Errorf("strconv ParseFloat failed: %w", err)
-			m.Logger().Errorf(err.Error())
+			m.Logger().Error(err.Error())
 			continue
 		}
 
diff --git a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go
index 355c6710093c..c3de7284a7b4 100644
--- a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go
+++ b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go
@@ -148,8 +148,8 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error {
 	// Get listMetricDetailTotal and namespaceDetailTotal from configuration
 	listMetricDetailTotal, namespaceDetailTotal := m.readCloudwatchConfig()
-	m.logger.Debugf("listMetricDetailTotal = %s", listMetricDetailTotal)
-	m.logger.Debugf("namespaceDetailTotal = %s", namespaceDetailTotal)
+	m.logger.Debugf("listMetricDetailTotal = %v", listMetricDetailTotal)
+	m.logger.Debugf("namespaceDetailTotal = %v", namespaceDetailTotal)
 
 	var config aws.Config
 	err = m.Module().UnpackConfig(&config)
@@ -200,13 +200,13 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error {
 		if len(namespaceDetailTotal) == 0 {
 			listMetricsOutput, err = aws.GetListMetricsOutput("*", regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, APIClients.CloudWatchClient)
 			if err != nil {
-				m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %w", regionName, "*", err)
+				m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %v", regionName, "*", err)
 			}
 		} else {
 			for namespace := range namespaceDetailTotal {
 				listMetricsOutputPerNamespace, err := aws.GetListMetricsOutput(namespace, regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, APIClients.CloudWatchClient)
 				if err != nil {
-					m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %w", regionName, namespace, err)
+					m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %v", regionName, namespace, err)
 				}
 				listMetricsOutput = append(listMetricsOutput, listMetricsOutputPerNamespace...)
 			}
diff --git a/x-pack/metricbeat/module/azure/billing/data.go b/x-pack/metricbeat/module/azure/billing/data.go
index bec33fcf185b..8f1a1542083d 100644
--- a/x-pack/metricbeat/module/azure/billing/data.go
+++ b/x-pack/metricbeat/module/azure/billing/data.go
@@ -229,7 +229,7 @@ func getEventsFromQueryResult(result armcostmanagement.QueryResult, subscription
 				// 20170401 (float64) --> "2017-04-01T00:00:00Z" (time.Time)
 				usageDate, err = time.Parse("20060102", strconv.FormatInt(int64(value), 10))
 				if err != nil {
-					logger.Errorf("unsupported usage date format: not valid date: %w", err)
+					logger.Errorf("unsupported usage date format: not valid date: %v", err)
 					continue
 				}
 			}
diff --git a/x-pack/metricbeat/module/containerd/memory/memory.go b/x-pack/metricbeat/module/containerd/memory/memory.go
index 7af7b6e76400..386982926329 100644
--- a/x-pack/metricbeat/module/containerd/memory/memory.go
+++ b/x-pack/metricbeat/module/containerd/memory/memory.go
@@ -128,17 +128,17 @@ func (m *metricset) Fetch(reporter mb.ReporterV2) error {
 		if m.calcPct {
 			inactiveFiles, err := event.GetValue("inactiveFiles")
 			if err != nil {
-				m.Logger().Debugf("memoryUsagePct calculation skipped. inactiveFiles not present in the event: %w", err)
+				m.Logger().Debugf("memoryUsagePct calculation skipped. inactiveFiles not present in the event: %v", err)
 				continue
 			}
 			usageTotal, err := event.GetValue("usage.total")
 			if err != nil {
-				m.Logger().Debugf("memoryUsagePct calculation skipped. usage.total not present in the event: %w", err)
+				m.Logger().Debugf("memoryUsagePct calculation skipped. usage.total not present in the event: %v", err)
 				continue
 			}
 			memoryLimit, err := event.GetValue("usage.limit")
 			if err != nil {
-				m.Logger().Debugf("memoryUsagePct calculation skipped. usage.limit not present in the event: %w", err)
+				m.Logger().Debugf("memoryUsagePct calculation skipped. usage.limit not present in the event: %v", err)
 				continue
 			}
 			mLfloat, ok := memoryLimit.(float64)
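
Note for reviewers (illustration only, not part of the patch): the pattern behind most hunks above is that %w is consumed only by fmt.Errorf, which uses it to build a wrapped error for errors.Is/errors.As. The logp/zap Errorf/Warnf/Debugf methods format with plain Sprintf semantics, where %w is an unknown verb; similarly, a stray selector string left over from the old logp.Debug("selector", format, args...) convention becomes the format string itself. A minimal standalone Go sketch of both failure modes, using only the standard library:

package main

import (
	"errors"
	"fmt"
)

func main() {
	err := errors.New("boom")

	// %w is meaningful only to fmt.Errorf, which wraps the error.
	wrapped := fmt.Errorf("open failed: %w", err)
	fmt.Println(errors.Is(wrapped, err)) // true

	// Sprintf-style formatting (what the log methods use) treats %w as an
	// unknown verb and prints noise instead of the error message.
	fmt.Println(fmt.Sprintf("open failed: %w", err))
	// -> open failed: %!w(*errors.errorString=&{boom})

	// %v prints err.Error(), which is what a log line should carry.
	fmt.Println(fmt.Sprintf("open failed: %v", err))
	// -> open failed: boom

	// A leftover selector argument becomes the format string, and the real
	// message is dumped as EXTRA noise.
	format := "hints.builder"
	fmt.Println(fmt.Sprintf(format, "generated config %+v", 42))
	// -> hints.builder%!(EXTRA string=generated config %+v, int=42)
}

go vet's printf analyzer flags the Sprintf misuses shown here, which is why the patch keeps %w only in the fmt.Errorf call sites that actually wrap errors.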