diff --git a/auditbeat/helper/hasher/cached_hasher.go b/auditbeat/helper/hasher/cached_hasher.go
index 0af7bb01c74d..6543fb20cdaa 100644
--- a/auditbeat/helper/hasher/cached_hasher.go
+++ b/auditbeat/helper/hasher/cached_hasher.go
@@ -126,7 +126,7 @@ func (ch *CachedHasher) HashFile(path string) (map[HashType]Digest, error) {
 	entry := hashEntry{hashes: hashes, statx: statx}
 	if ch.hashLRU.Add(path, entry) {
 		ch.stats.Evictions++
-		ch.log.Debugf("evict (%s)")
+		ch.log.Debugf("evict (%s)", path)
 	}
 
 	ch.log.Debugf("miss (%s) took %v", path, time.Since(x))
diff --git a/auditbeat/module/auditd/audit_linux.go b/auditbeat/module/auditd/audit_linux.go
index 85cf26c90c58..5854cca4e4cb 100644
--- a/auditbeat/module/auditd/audit_linux.go
+++ b/auditbeat/module/auditd/audit_linux.go
@@ -450,7 +450,7 @@ func (ms *MetricSet) updateKernelLostMetric(lost uint32) {
 		}
 		logFn("kernel lost events: %d (total: %d)", delta, lost)
 	} else {
-		ms.log.Warnf("kernel lost event counter reset from %d to %d", ms.kernelLost, lost)
+		ms.log.Warnf("kernel lost event counter reset from %d to %d", ms.kernelLost.counter, lost)
 	}
 	ms.kernelLost.counter = lost
 }
diff --git a/filebeat/autodiscover/builder/hints/logs.go b/filebeat/autodiscover/builder/hints/logs.go
index 5cc81279edb4..cadef8bd7211 100644
--- a/filebeat/autodiscover/builder/hints/logs.go
+++ b/filebeat/autodiscover/builder/hints/logs.go
@@ -160,7 +160,7 @@ func (l *logHints) CreateConfig(event bus.Event, options ...ucfg.Option) []*conf
 		// Merge config template with the configs from the annotations
 		// AppendValues option is used to append arrays from annotations to existing arrays while merging
 		if err := config.MergeWithOpts(tempCfg, ucfg.AppendValues); err != nil {
-			l.log.Debugf("hints.builder", "config merge failed with error: %v", err)
+			l.log.Debugf("config merge failed with error: %v", err)
 			continue
 		}
 		module := l.getModule(hints)
@@ -191,11 +191,11 @@ func (l *logHints) CreateConfig(event bus.Event, options ...ucfg.Option) []*conf
 				moduleConf[fileset+".enabled"] = cfg.Enabled
 				moduleConf[fileset+".input"] = filesetConf
 
-				l.log.Debugf("hints.builder", "generated config %+v", moduleConf)
+				l.log.Debugf("generated config %+v", moduleConf)
 			}
 			config, _ = conf.NewConfigFrom(moduleConf)
 		}
-		l.log.Debugf("hints.builder", "generated config %+v of logHints %+v", config, l)
+		l.log.Debugf("generated config %+v of logHints %+v", config, l)
 		configs = append(configs, config)
 	}
 
 	// Apply information in event to the template to generate the final config
diff --git a/filebeat/input/filestream/copytruncate_prospector.go b/filebeat/input/filestream/copytruncate_prospector.go
index 50ea7df25c0f..e6ee4c908d96 100644
--- a/filebeat/input/filestream/copytruncate_prospector.go
+++ b/filebeat/input/filestream/copytruncate_prospector.go
@@ -280,7 +280,7 @@ func (p *copyTruncateFileProspector) onFSEvent(
 
 	err := updater.ResetCursor(src, state{Offset: 0})
 	if err != nil {
-		log.Errorf("failed to reset file cursor: %w", err)
+		log.Errorf("failed to reset file cursor: %v", err)
 	}
 
 	group.Restart(ctx, src)
diff --git a/filebeat/input/filestream/filestream.go b/filebeat/input/filestream/filestream.go
index 1d29f47f0ba0..70392d680abd 100644
--- a/filebeat/input/filestream/filestream.go
+++ b/filebeat/input/filestream/filestream.go
@@ -159,7 +159,7 @@ func (f *logFile) startFileMonitoringIfNeeded() {
 		return nil
 	})
 	if err != nil {
-		f.log.Errorf("failed to start file monitoring: %w", err)
+		f.log.Errorf("failed to start file monitoring: %v", err)
 	}
 }
 
@@ -169,7 +169,7 @@ func (f *logFile) startFileMonitoringIfNeeded() {
 		return nil
 	})
 	if err != nil {
-		f.log.Errorf("failed to schedule a file close: %w", err)
+		f.log.Errorf("failed to schedule a file close: %v", err)
 	}
 }
 }
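Note: the %w-to-%v substitutions above, and throughout the rest of this patch, all follow from one rule: %w is only meaningful to fmt.Errorf, which uses it to wrap the error so errors.Is/errors.As can unwrap it later. The printf-style logger methods (logp delegates to zap's SugaredLogger, which formats with fmt.Sprintf) treat %w as an unknown verb and emit a %!w(...) marker instead of the message. A minimal standalone sketch of both behaviors:

	package main

	import (
		"errors"
		"fmt"
	)

	func main() {
		err := errors.New("permission denied")

		// fmt.Errorf is the one place %w is special: it wraps err so that
		// errors.Is / errors.As can still see it later.
		wrapped := fmt.Errorf("failed to reset file cursor: %w", err)
		fmt.Println(errors.Is(wrapped, err)) // true

		// Printf-style log methods format via fmt.Sprintf, which treats %w
		// as an unknown verb:
		fmt.Println(fmt.Sprintf("failed to reset file cursor: %w", err))
		// -> failed to reset file cursor: %!w(*errors.errorString=&{permission denied})

		// %v renders the error message, which is all a log line needs:
		fmt.Println(fmt.Sprintf("failed to reset file cursor: %v", err))
		// -> failed to reset file cursor: permission denied
	}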
diff --git a/filebeat/input/filestream/internal/input-logfile/harvester.go b/filebeat/input/filestream/internal/input-logfile/harvester.go
index 1afe31c292fe..b92fe982d3e1 100644
--- a/filebeat/input/filestream/internal/input-logfile/harvester.go
+++ b/filebeat/input/filestream/internal/input-logfile/harvester.go
@@ -138,7 +138,7 @@ func (hg *defaultHarvesterGroup) Start(ctx inputv2.Context, src Source) {
 
 	if err := hg.tg.Go(startHarvester(ctx, hg, src, false, hg.metrics)); err != nil {
 		ctx.Logger.Warnf(
-			"tried to start harvester with task group already closed",
+			"tried to start harvester for %s with task group already closed",
 			ctx.ID)
 	}
 }
diff --git a/filebeat/input/filestream/internal/input-logfile/publish.go b/filebeat/input/filestream/internal/input-logfile/publish.go
index 650d6672a61a..427e129bcc0f 100644
--- a/filebeat/input/filestream/internal/input-logfile/publish.go
+++ b/filebeat/input/filestream/internal/input-logfile/publish.go
@@ -134,7 +134,7 @@ func (op *updateOp) Execute(store *store, n uint) {
 	} else {
 		err := typeconv.Convert(&resource.cursor, op.delta)
 		if err != nil {
-			store.log.Errorf("failed to perform type conversion: %w", err)
+			store.log.Errorf("failed to perform type conversion: %v", err)
 		}
 	}
diff --git a/filebeat/input/filestream/internal/input-logfile/update_writer.go b/filebeat/input/filestream/internal/input-logfile/update_writer.go
index 4c550fade4de..b6faf75a0140 100644
--- a/filebeat/input/filestream/internal/input-logfile/update_writer.go
+++ b/filebeat/input/filestream/internal/input-logfile/update_writer.go
@@ -66,7 +66,7 @@ func newUpdateWriter(store *store, ch *updateChan) *updateWriter {
 		return nil
 	})
 	if err != nil {
-		store.log.Errorf("failed to schedule the update writer routine: %w", err)
+		store.log.Errorf("failed to schedule the update writer routine: %v", err)
 	}
 
 	return w
@@ -77,7 +77,7 @@ func newUpdateWriter(store *store, ch *updateChan) *updateWriter {
 func (w *updateWriter) Close() {
 	err := w.tg.Stop()
 	if err != nil {
-		w.store.log.Errorf("failed to stop the update writer routine: %w", err)
+		w.store.log.Errorf("failed to stop the update writer routine: %v", err)
 	}
 	w.syncStates(w.ch.TryRecv())
 }
diff --git a/filebeat/input/journald/input.go b/filebeat/input/journald/input.go
index b33dcc8c7ed3..f1ff853e8f67 100644
--- a/filebeat/input/journald/input.go
+++ b/filebeat/input/journald/input.go
@@ -216,7 +216,7 @@ func (inp *journald) Run(
 		if err := publisher.Publish(event, event.Private); err != nil {
 			msg := fmt.Sprintf("could not publish event: %s", err)
 			ctx.UpdateStatus(status.Failed, msg)
-			logger.Errorf(msg)
+			logger.Errorf("%s", msg)
 			return err
 		}
 	}
diff --git a/filebeat/input/log/input.go b/filebeat/input/log/input.go
index bd8e7f358cce..34d1ff8834f0 100644
--- a/filebeat/input/log/input.go
+++ b/filebeat/input/log/input.go
@@ -107,7 +107,7 @@ func NewInput(
 	cleanupIfNeeded := func(f func() error) {
 		if cleanupNeeded {
 			if err := f(); err != nil {
-				logger.Named("input.log").Errorf("clean up function returned an error: %w", err)
+				logger.Named("input.log").Errorf("clean up function returned an error: %v", err)
 			}
 		}
 	}
diff --git a/filebeat/input/unix/input.go b/filebeat/input/unix/input.go
index 598a912daa54..5cb6c16c5f74 100644
--- a/filebeat/input/unix/input.go
+++ b/filebeat/input/unix/input.go
@@ -116,7 +116,7 @@ func (s *server) Run(ctx input.Context, publisher stateless.Publisher) error {
 		return err
 	}
 
-	log.Debugf("%s Input '%v' initialized", s.config.Config.SocketType, ctx.ID)
+	log.Debugf("%v Input '%v' initialized", s.config.Config.SocketType, ctx.ID)
 
 	err = server.Run(ctxtool.FromCanceller(ctx.Cancelation))
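Note on the journald hunk above: logger.Errorf(msg) passes a runtime string as the format, so any percent sign inside the message is reinterpreted as a verb (go vet flags this as a non-constant format string). Errorf("%s", msg) — or a non-formatting call such as logger.Error(msg) — logs the message verbatim. A standalone sketch of the failure mode:

	package main

	import "fmt"

	func main() {
		// An error message that happens to contain a percent sign:
		msg := fmt.Sprintf("could not publish event: %s", "disk at 100% capacity")

		// Used as the format string, its "% c" sequence is re-parsed as a verb:
		fmt.Println(fmt.Sprintf(msg))
		// -> could not publish event: disk at 100%!c(MISSING)apacity

		// Passed as an argument, it survives intact:
		fmt.Println(fmt.Sprintf("%s", msg))
		// -> could not publish event: disk at 100% capacity
	}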
diff --git a/heartbeat/autodiscover/builder/hints/monitors.go b/heartbeat/autodiscover/builder/hints/monitors.go
index 8e9ee45520f8..94faea5fc4ef 100644
--- a/heartbeat/autodiscover/builder/hints/monitors.go
+++ b/heartbeat/autodiscover/builder/hints/monitors.go
@@ -124,7 +124,7 @@ func (hb *heartbeatHints) CreateConfig(event bus.Event, options ...ucfg.Option)
 
 		h, err := hb.getHostsWithPort(monitor, port, podEvent)
 		if err != nil {
-			hb.logger.Warnf("unable to find valid hosts for %+v: %w", monitor, err)
+			hb.logger.Warnf("unable to find valid hosts for %+v: %v", monitor, err)
 			continue
 		}
@@ -135,7 +135,7 @@ func (hb *heartbeatHints) CreateConfig(event bus.Event, options ...ucfg.Option)
 			hb.logger.Debugf("unable to create config from MapStr %+v", tempCfg)
 			return []*conf.C{}
 		}
-		hb.logger.Debugf("hints.builder", "generated config %+v", config)
+		hb.logger.Debugf("generated config %+v", config)
 		configs = append(configs, config)
 	}
diff --git a/heartbeat/beater/heartbeat.go b/heartbeat/beater/heartbeat.go
index e1a43b0b2531..f1df1b3c4b83 100644
--- a/heartbeat/beater/heartbeat.go
+++ b/heartbeat/beater/heartbeat.go
@@ -79,7 +79,7 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) {
 		if err == nil {
 			trace = sockTrace
 		} else {
-			logp.L().Warnf("could not connect to socket trace at path %s after %s timeout: %w", stConfig.Path, stConfig.Wait, err)
+			logp.L().Warnf("could not connect to socket trace at path %s after %s timeout: %v", stConfig.Path, stConfig.Wait, err)
 		}
 	}
@@ -94,7 +94,7 @@ func New(b *beat.Beat, rawConfig *conf.C) (beat.Beater, error) {
 			trace.Abort()
 			return nil, fmt.Errorf("run_once mode fatal error: %w", err)
 		} else {
-			logp.L().Warnf("skipping monitor state management: %w", err)
+			logp.L().Warnf("skipping monitor state management: %v", err)
 		}
 	} else {
 		replaceStateLoader(monitorstate.MakeESLoader(esClient, monitorstate.DefaultDataStreams, parsedConfig.RunFrom))
@@ -277,7 +277,7 @@ func (bt *Heartbeat) RunCentralMgmtMonitors(b *beat.Beat) {
 		// Backoff panics with 0 duration, set to smallest unit
 		esClient, err := makeESClient(context.TODO(), outCfg.Config(), 1, 1*time.Nanosecond)
 		if err != nil {
-			logp.L().Warnf("skipping monitor state management during managed reload: %w", err)
+			logp.L().Warnf("skipping monitor state management during managed reload: %v", err)
 		} else {
 			bt.replaceStateLoader(monitorstate.MakeESLoader(esClient, monitorstate.DefaultDataStreams, bt.config.RunFrom))
 		}
diff --git a/heartbeat/monitors/active/icmp/stdloop.go b/heartbeat/monitors/active/icmp/stdloop.go
index 8fa0816bb5b9..80ab3b3cc575 100644
--- a/heartbeat/monitors/active/icmp/stdloop.go
+++ b/heartbeat/monitors/active/icmp/stdloop.go
@@ -165,7 +165,7 @@ func (l *stdICMPLoop) runICMPRecv(conn *icmp.PacketConn, proto int) {
 		bytes := make([]byte, 512)
 		err := conn.SetReadDeadline(time.Now().Add(time.Second))
 		if err != nil {
-			logp.L().Errorf("could not set read deadline for ICMP: %w", err)
+			logp.L().Errorf("could not set read deadline for ICMP: %v", err)
 			return
 		}
 		_, addr, err := conn.ReadFrom(bytes)
diff --git a/heartbeat/monitors/factory.go b/heartbeat/monitors/factory.go
index aaf0b76535b6..0af168028d1a 100644
--- a/heartbeat/monitors/factory.go
+++ b/heartbeat/monitors/factory.go
@@ -291,7 +291,7 @@ func preProcessors(info beat.Info, location *config.LocationWithID, settings pub
 	geoM, err := util.GeoConfigToMap(location.Geo)
 	if err != nil {
 		geoErrOnce.Do(func() {
-			logp.L().Warnf("could not add heartbeat geo info: %w", err)
+			logp.L().Warnf("could not add heartbeat geo info: %v", err)
 		})
 	}
diff --git a/heartbeat/monitors/monitor.go b/heartbeat/monitors/monitor.go
index 743cb0fd795b..2fdb64a966cf 100644
--- a/heartbeat/monitors/monitor.go
+++ b/heartbeat/monitors/monitor.go
@@ -266,7 +266,7 @@ func (m *Monitor) Stop() {
 	if m.close != nil {
 		err := m.close()
 		if err != nil {
-			logp.L().Errorf("error closing monitor %s: %w", m.String(), err)
+			logp.L().Errorf("error closing monitor %s: %v", m.String(), err)
 		}
 	}
diff --git a/heartbeat/scheduler/schedjob.go b/heartbeat/scheduler/schedjob.go
index 50f94a895d0b..0d720b28caa0 100644
--- a/heartbeat/scheduler/schedjob.go
+++ b/heartbeat/scheduler/schedjob.go
@@ -65,7 +65,7 @@ func (sj *schedJob) run() (startedAt time.Time) {
 		if err == nil {
 			defer sj.jobLimitSem.Release(1)
 		} else {
-			logp.L().Errorf("could not acquire semaphore: %w", err)
+			logp.L().Errorf("could not acquire semaphore: %v", err)
 		}
 	}
diff --git a/libbeat/autodiscover/appenders/config/config.go b/libbeat/autodiscover/appenders/config/config.go
index 7640e2c8e9cb..09e3b89b0bf0 100644
--- a/libbeat/autodiscover/appenders/config/config.go
+++ b/libbeat/autodiscover/appenders/config/config.go
@@ -47,7 +47,7 @@ type configAppender struct {
 
 // NewConfigAppender creates a configAppender that can append templatized configs into built configs
 func NewConfigAppender(cfg *conf.C, logger *logp.Logger) (autodiscover.Appender, error) {
-	logger.Warnf(cfgwarn.Beta("The config appender is beta"))
+	logger.Warn(cfgwarn.Beta("The config appender is beta"))
 
 	config := config{}
 	err := cfg.Unpack(&config)
diff --git a/libbeat/autodiscover/providers/kubernetes/kubernetes.go b/libbeat/autodiscover/providers/kubernetes/kubernetes.go
index 74e6f86627e9..dc290f6e2af1 100644
--- a/libbeat/autodiscover/providers/kubernetes/kubernetes.go
+++ b/libbeat/autodiscover/providers/kubernetes/kubernetes.go
@@ -352,7 +352,7 @@ func (p *leaderElectionManager) GenerateHints(event bus.Event) bus.Event {
 func (p *leaderElectionManager) startLeaderElectorIndefinitely(ctx context.Context, lec leaderelection.LeaderElectionConfig) {
 	le, err := leaderelection.NewLeaderElector(lec)
 	if err != nil {
-		p.logger.Errorf("error while creating Leader Elector: %w", err)
+		p.logger.Errorf("error while creating Leader Elector: %v", err)
 	}
 
 	p.logger.Debugf("Starting Leader Elector")
diff --git a/libbeat/autodiscover/template/config.go b/libbeat/autodiscover/template/config.go
index 86f9aff23808..2955fcc15139 100644
--- a/libbeat/autodiscover/template/config.go
+++ b/libbeat/autodiscover/template/config.go
@@ -157,7 +157,7 @@ func ApplyConfigTemplate(event bus.Event, configs []*conf.C, logger *logp.Logger
 		var unpacked map[string]interface{}
 		err = c.Unpack(&unpacked, opts...)
 		if err != nil {
-			logger.Debugf("autodiscover", "Configuration template cannot be resolved: %v", err)
+			logger.Debugf("Configuration template cannot be resolved: %v", err)
 			continue
 		}
 		// Repack again:
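Note: the dropped "hints.builder" and "autodiscover" first arguments, here and in the hunks above, are leftovers from the old package-level logp API, where logp.Debug took a selector before the format (logp.Debug("autodiscover", "...: %v", err)). The method form has no selector parameter, so the selector string is consumed as the format and everything after it is dumped as EXTRA arguments. A standalone sketch of what the broken calls actually produced:

	package main

	import "fmt"

	func main() {
		// What logger.Debugf("autodiscover", "...: %v", err) really formats:
		out := fmt.Sprintf("autodiscover", "Configuration template cannot be resolved: %v", "EOF")
		fmt.Println(out)
		// -> autodiscover%!(EXTRA string=Configuration template cannot be resolved: %v, string=EOF)

		// The selector belongs in the logger name instead, e.g.
		//   logger.Named("autodiscover").Debugf("Configuration template cannot be resolved: %v", err)
	}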
diff --git a/libbeat/conditions/range.go b/libbeat/conditions/range.go
index ebcf62378a51..5df8e3cd8120 100644
--- a/libbeat/conditions/range.go
+++ b/libbeat/conditions/range.go
@@ -117,7 +117,7 @@ func (c Range) Check(event ValuesMap) bool {
 
 	floatValue, err := ExtractFloat(value)
 	if err != nil {
-		c.logger.Named(logName).Warnf(err.Error())
+		c.logger.Named(logName).Warn(err.Error())
 		return false
 	}
diff --git a/libbeat/processors/add_host_metadata/add_host_metadata.go b/libbeat/processors/add_host_metadata/add_host_metadata.go
index 40c26d3f45b2..647251c3fd0f 100644
--- a/libbeat/processors/add_host_metadata/add_host_metadata.go
+++ b/libbeat/processors/add_host_metadata/add_host_metadata.go
@@ -101,7 +101,7 @@ func New(cfg *config.C, log *logp.Logger) (beat.Processor, error) {
 	cbID, err := uuid.NewV4()
 	// if we fail, fall back to the processor name, hope for the best.
 	if err != nil {
-		p.logger.Errorf("error generating ID for FQDN callback, reverting to processor name: %w", err)
+		p.logger.Errorf("error generating ID for FQDN callback, reverting to processor name: %v", err)
 		cbIDStr = processorName
 	} else {
 		cbIDStr = cbID.String()
@@ -262,7 +262,7 @@ func (p *addHostMetadata) updateOrExpire(useFQDN bool) {
 		go func() {
 			err := p.loadData(false, useFQDN)
 			if err != nil {
-				p.logger.Errorf("error updating data for processor: %w")
+				p.logger.Errorf("error updating data for processor: %v", err)
 				updateChanSuccess <- false
 				return
 			}
diff --git a/libbeat/processors/add_kubernetes_metadata/kubernetes.go b/libbeat/processors/add_kubernetes_metadata/kubernetes.go
index 0f336a70b5aa..a4467ce875d5 100644
--- a/libbeat/processors/add_kubernetes_metadata/kubernetes.go
+++ b/libbeat/processors/add_kubernetes_metadata/kubernetes.go
@@ -191,7 +191,7 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {
 		if config.Scope == "node" {
 			config.Node, err = kubernetes.DiscoverKubernetesNode(k.log, nd)
 			if err != nil {
-				k.log.Errorf("Couldn't discover Kubernetes node: %w", err)
+				k.log.Errorf("Couldn't discover Kubernetes node: %v", err)
 				return
 			}
 			k.log.Debugf("Initializing a new Kubernetes watcher using host: %s", config.Node)
@@ -300,30 +300,30 @@ func (k *kubernetesAnnotator) init(config kubeAnnotatorConfig, cfg *config.C) {
 		// be populated before trying to generate metadata for Pods.
 		if k.nodeWatcher != nil {
 			if err := k.nodeWatcher.Start(); err != nil {
-				k.log.Debugf("add_kubernetes_metadata", "Couldn't start node watcher: %v", err)
+				k.log.Debugf("Couldn't start node watcher: %v", err)
 				return
 			}
 		}
 		if k.nsWatcher != nil {
 			if err := k.nsWatcher.Start(); err != nil {
-				k.log.Debugf("add_kubernetes_metadata", "Couldn't start namespace watcher: %v", err)
+				k.log.Debugf("Couldn't start namespace watcher: %v", err)
 				return
 			}
 		}
 		if k.rsWatcher != nil {
 			if err := k.rsWatcher.Start(); err != nil {
-				k.log.Debugf("add_kubernetes_metadata", "Couldn't start replicaSet watcher: %v", err)
+				k.log.Debugf("Couldn't start replicaSet watcher: %v", err)
 				return
 			}
 		}
 		if k.jobWatcher != nil {
 			if err := k.jobWatcher.Start(); err != nil {
-				k.log.Debugf("add_kubernetes_metadata", "Couldn't start job watcher: %v", err)
+				k.log.Debugf("Couldn't start job watcher: %v", err)
 				return
 			}
 		}
 		if err := watcher.Start(); err != nil {
-			k.log.Debugf("add_kubernetes_metadata", "Couldn't start pod watcher: %v", err)
+			k.log.Debugf("Couldn't start pod watcher: %v", err)
 			return
 		}
 	})
diff --git a/libbeat/publisher/queue/diskqueue/segments.go b/libbeat/publisher/queue/diskqueue/segments.go
index 11eeb9991c6a..8652be768da1 100644
--- a/libbeat/publisher/queue/diskqueue/segments.go
+++ b/libbeat/publisher/queue/diskqueue/segments.go
@@ -174,7 +174,7 @@ func scanExistingSegments(logger *logp.Logger, pathStr string) ([]*queueSegment,
 	for _, dirEntry := range dirEntries {
 		file, err := dirEntry.Info()
 		if err != nil {
-			logger.Errorf("could not get info for file '%s', skipping. Error: %w", dirEntry.Name(), err)
+			logger.Errorf("could not get info for file '%s', skipping. Error: %v", dirEntry.Name(), err)
 			continue
 		}
diff --git a/libbeat/scripts/cmd/stress_pipeline/main.go b/libbeat/scripts/cmd/stress_pipeline/main.go
index 9a2586eb1a71..dd425413efa4 100644
--- a/libbeat/scripts/cmd/stress_pipeline/main.go
+++ b/libbeat/scripts/cmd/stress_pipeline/main.go
@@ -70,7 +70,7 @@ func run() error {
 	flag.Parse()
 	files := flag.Args()
 
-	logger.Infof("load config files:", files)
+	logger.Infof("load config files: %v", files)
 
 	cfg, err := common.LoadFiles(files...)
 	if err != nil {
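Note: the two argument-count bugs in this patch show up in the output rather than at compile time. A verb with no argument prints a MISSING marker (the "evict (%s)" hunk at the top of the patch), and arguments with no matching verbs are appended as EXTRA (the Infof call just above, and the extra err in the mysql/query hunk further down). A standalone sketch:

	package main

	import "fmt"

	func main() {
		// Verb without an argument:
		fmt.Println(fmt.Sprintf("evict (%s)"))
		// -> evict (%!s(MISSING))

		// Arguments without a verb:
		files := []string{"a.yml", "b.yml"}
		fmt.Println(fmt.Sprintf("load config files:", files))
		// -> load config files:%!(EXTRA []string=[a.yml b.yml])
	}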
diff --git a/metricbeat/mb/lightmodules.go b/metricbeat/mb/lightmodules.go
index e41523e3d3c9..3705665b0fc9 100644
--- a/metricbeat/mb/lightmodules.go
+++ b/metricbeat/mb/lightmodules.go
@@ -258,12 +258,12 @@ func (s *LightModulesSource) moduleNames() ([]string, error) {
 	modules := make(map[string]bool)
 	for _, dir := range s.paths {
 		if _, err := os.Stat(dir); os.IsNotExist(err) {
-			s.log.Debugf("Light modules directory '%d' doesn't exist", dir)
+			s.log.Debugf("Light modules directory '%s' doesn't exist", dir)
 			continue
 		}
 		files, err := ioutil.ReadDir(dir)
 		if err != nil {
 			return nil, fmt.Errorf("listing modules on path '%s': %w", dir, err)
 		}
 		for _, f := range files {
 			if !f.IsDir() {
diff --git a/metricbeat/module/kafka/consumergroup/query.go b/metricbeat/module/kafka/consumergroup/query.go
index 5c7f855d3b8e..11022b929672 100644
--- a/metricbeat/module/kafka/consumergroup/query.go
+++ b/metricbeat/module/kafka/consumergroup/query.go
@@ -54,7 +54,7 @@ func fetchGroupInfo(
 		return nil
 	}
 
-	logger.Named("kafka").Debugf("known consumer groups: ", groups)
+	logger.Named("kafka").Debugf("known consumer groups: %s", groups)
 
 	assignments, err := fetchGroupAssignments(b, groups)
 	if err != nil {
diff --git a/metricbeat/module/kafka/partition/partition.go b/metricbeat/module/kafka/partition/partition.go
index ff3727a57958..2a5b03768226 100644
--- a/metricbeat/module/kafka/partition/partition.go
+++ b/metricbeat/module/kafka/partition/partition.go
@@ -97,7 +97,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error {
 	}
 
 	for _, topic := range topics {
-		m.Logger().Named("kafka").Debugf("fetch events for topic: ", topic.Name)
+		m.Logger().Named("kafka").Debugf("fetch events for topic: %s", topic.Name)
 		evtTopic := mapstr.M{
 			"name": topic.Name,
 		}
diff --git a/metricbeat/module/kubernetes/event/event.go b/metricbeat/module/kubernetes/event/event.go
index 1c73ec73a902..97f2deb23f56 100644
--- a/metricbeat/module/kubernetes/event/event.go
+++ b/metricbeat/module/kubernetes/event/event.go
@@ -101,7 +101,7 @@ func New(base mb.BaseMetricSet) (mb.MetricSet, error) {
 		cfg, _ := conf.NewConfigFrom(&config)
 		ecsClusterMeta, err := util.GetClusterECSMeta(cfg, client, ms.Logger())
 		if err != nil {
-			ms.Logger().Debugf("could not retrieve cluster metadata: %w", err)
+			ms.Logger().Debugf("could not retrieve cluster metadata: %v", err)
 		}
 		if ecsClusterMeta != nil {
 			ms.clusterMeta = ecsClusterMeta
@@ -127,7 +127,7 @@ func (m *MetricSet) Run(reporter mb.PushReporterV2) {
 		FilterFunc: func(obj interface{}) bool {
 			eve, ok := obj.(*kubernetes.Event)
 			if !ok {
-				m.Logger().Debugf("Error while casting event: %s", ok)
+				m.Logger().Debugf("Error while casting event. Got type: %T", obj)
 			}
 			// if fields are null they are decoded to `0001-01-01 00:00:00 +0000 UTC`
 			// so we need to check if they are valid first
@@ -148,7 +148,7 @@ func (m *MetricSet) Run(reporter mb.PushReporterV2) {
 	// start event watcher
 	err := m.watcher.Start()
 	if err != nil {
-		m.Logger().Debugf("Unable to start watcher: %w", err)
+		m.Logger().Debugf("Unable to start watcher: %v", err)
 	}
 	<-reporter.Done()
 	m.watcher.Stop()
diff --git a/metricbeat/module/kubernetes/state_container/state_container.go b/metricbeat/module/kubernetes/state_container/state_container.go
index d00515fe081e..085bdb5e285e 100644
--- a/metricbeat/module/kubernetes/state_container/state_container.go
+++ b/metricbeat/module/kubernetes/state_container/state_container.go
@@ -147,7 +147,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error {
 		// empty string
 		cID, ok := (containerID).(string)
 		if !ok {
-			m.Logger().Debugf("Error while casting containerID: %s", ok)
+			m.Logger().Debugf("Error while casting containerID, got %T", containerID)
 		}
 		split := strings.Index(cID, "://")
 		if split != -1 {
@@ -162,7 +162,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error {
 		if containerImage, ok := event["image"]; ok {
 			cImage, ok := (containerImage).(string)
 			if !ok {
-				m.Logger().Debugf("Error while casting containerImage: %s", ok)
+				m.Logger().Debugf("Error while casting containerImage, got %T", containerImage)
 			}
 			kubernetes.ShouldPut(containerFields, "image.name", cImage, m.Logger())
diff --git a/metricbeat/module/kubernetes/util/kubernetes.go b/metricbeat/module/kubernetes/util/kubernetes.go
index 3d39a70aa253..1ce70b180972 100644
--- a/metricbeat/module/kubernetes/util/kubernetes.go
+++ b/metricbeat/module/kubernetes/util/kubernetes.go
@@ -889,7 +889,7 @@ func NewContainerMetadataEnricher(
 
 			pod, ok := r.(*kubernetes.Pod)
 			if !ok {
-				base.Logger().Debugf("Error while casting event: %s", ok)
+				base.Logger().Debugf("Error while casting event, got %T", r)
 			}
 			pmeta := metaGen.Generate(pod)
@@ -928,7 +928,7 @@ func NewContainerMetadataEnricher(
 			ids := make([]string, 0)
 			pod, ok := r.(*kubernetes.Pod)
 			if !ok {
-				log.Debugf("Error while casting event: %s", ok)
+				log.Debugf("Error while casting event, got %T", r)
 			}
 			for _, container := range append(pod.Spec.Containers, pod.Spec.InitContainers...) {
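Note on the casting hunks above: after a failed type assertion, ok is always false on that path, so logging it with %s only ever yields %!s(bool=false). %T reports the dynamic type that actually arrived, which is the useful datum when debugging. A standalone sketch:

	package main

	import "fmt"

	func main() {
		var r interface{} = 42 // arrived as an unexpected type

		_, ok := r.(string)
		if !ok {
			fmt.Println(fmt.Sprintf("Error while casting event: %s", ok))
			// -> Error while casting event: %!s(bool=false)

			fmt.Println(fmt.Sprintf("Error while casting event, got %T", r))
			// -> Error while casting event, got int
		}
	}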
diff --git a/metricbeat/module/linux/rapl/rapl.go b/metricbeat/module/linux/rapl/rapl.go
index 8d89a5dfdd85..594befe945d7 100644
--- a/metricbeat/module/linux/rapl/rapl.go
+++ b/metricbeat/module/linux/rapl/rapl.go
@@ -175,7 +175,7 @@ func (m *MetricSet) updatePower() map[int]map[rapl.RAPLDomain]energyUsage {
 			continue
 		}
 		if err != nil {
-			m.Logger().Infof("Error reading MSR from domain %s: %s skipping.", domain, err)
+			m.Logger().Infof("Error reading MSR from domain %s: %s skipping.", domain.Name, err)
 			continue
 		}
 		domainList[domain] = energyTrack{joules: joules, time: time.Now()}
diff --git a/metricbeat/module/mysql/query/query.go b/metricbeat/module/mysql/query/query.go
index 223aaec52e54..7fea2594c970 100644
--- a/metricbeat/module/mysql/query/query.go
+++ b/metricbeat/module/mysql/query/query.go
@@ -112,7 +112,7 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error {
 	for _, q := range m.Config.Queries {
 		err := m.fetchQuery(ctx, q, reporter)
 		if err != nil {
-			m.Logger().Errorf("error doing query %s", q, err)
+			m.Logger().Errorf("error doing query %v: %v", q, err)
 		}
 	}
diff --git a/metricbeat/module/system/diskio/diskio.go b/metricbeat/module/system/diskio/diskio.go
index e57dc19560fc..068581991204 100644
--- a/metricbeat/module/system/diskio/diskio.go
+++ b/metricbeat/module/system/diskio/diskio.go
@@ -80,7 +80,7 @@ func (m *MetricSet) Fetch(r mb.ReporterV2) error {
 	err = m.statistics.OpenSampling()
 	// CPU sampling does not seem to be used by any of the diskio metrics we're using. Mostly used by iostat.
 	if err != nil {
-		m.Logger().Warnf("Error in CPU sampling for diskio: %w", err)
+		m.Logger().Warnf("Error in CPU sampling for diskio: %v", err)
 	}
 
 	// Store the last cpu counter when finished
diff --git a/metricbeat/module/vsphere/cluster/cluster.go b/metricbeat/module/vsphere/cluster/cluster.go
index 82396f9261d2..2849b7ff82d9 100644
--- a/metricbeat/module/vsphere/cluster/cluster.go
+++ b/metricbeat/module/vsphere/cluster/cluster.go
@@ -144,7 +144,7 @@ func (m *ClusterMetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) er
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, clt[i].TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve alerts from cluster %s: %w", clt[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve alerts from cluster %s: %v", clt[i].Name, err)
 		}
 
 		reporter.Event(mb.Event{
diff --git a/metricbeat/module/vsphere/datastore/datastore.go b/metricbeat/module/vsphere/datastore/datastore.go
index c1899048240b..8139188ee737 100644
--- a/metricbeat/module/vsphere/datastore/datastore.go
+++ b/metricbeat/module/vsphere/datastore/datastore.go
@@ -159,7 +159,7 @@ func (m *DataStoreMetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2)
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, dst[i].TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve alerts from datastore %s: %w", dst[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve alerts from datastore %s: %v", dst[i].Name, err)
 		}
 
 		reporter.Event(mb.Event{
diff --git a/metricbeat/module/vsphere/datastorecluster/datastorecluster.go b/metricbeat/module/vsphere/datastorecluster/datastorecluster.go
index 1d57e8dae6cf..51132bd278fe 100644
--- a/metricbeat/module/vsphere/datastorecluster/datastorecluster.go
+++ b/metricbeat/module/vsphere/datastorecluster/datastorecluster.go
@@ -127,12 +127,12 @@ func (m *DatastoreClusterMetricSet) Fetch(ctx context.Context, reporter mb.Repor
 		assetNames, err := getAssetNames(ctx, pc, &datastoreCluster[i])
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve object from datastore cluster %s: v", datastoreCluster[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve object from datastore cluster %s: %v", datastoreCluster[i].Name, err)
 		}
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, datastoreCluster[i].TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve alerts from datastore cluster %s: %w", datastoreCluster[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve alerts from datastore cluster %s: %v", datastoreCluster[i].Name, err)
 		}
 
 		reporter.Event(mb.Event{MetricSetFields: m.mapEvent(datastoreCluster[i], &metricData{assetNames: assetNames, triggeredAlarms: triggeredAlarm})})
diff --git a/metricbeat/module/vsphere/host/host.go b/metricbeat/module/vsphere/host/host.go
index e2f3989933ca..7514afe3476d 100644
--- a/metricbeat/module/vsphere/host/host.go
+++ b/metricbeat/module/vsphere/host/host.go
@@ -171,7 +171,7 @@ func (m *HostMetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, hst[i].TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve triggered alarms from host %s: %w", hst[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve triggered alarms from host %s: %v", hst[i].Name, err)
 		}
 
 		reporter.Event(mb.Event{
diff --git a/metricbeat/module/vsphere/network/network.go b/metricbeat/module/vsphere/network/network.go
index a6c860fed031..29395190851f 100644
--- a/metricbeat/module/vsphere/network/network.go
+++ b/metricbeat/module/vsphere/network/network.go
@@ -137,7 +137,7 @@ func (m *NetworkMetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) er
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, networks[i].TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve alerts from network %s: %w", networks[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve alerts from network %s: %v", networks[i].Name, err)
 		}
 
 		reporter.Event(mb.Event{
diff --git a/metricbeat/module/vsphere/resourcepool/resourcepool.go b/metricbeat/module/vsphere/resourcepool/resourcepool.go
index 9a127622a40d..1a754bbae46b 100644
--- a/metricbeat/module/vsphere/resourcepool/resourcepool.go
+++ b/metricbeat/module/vsphere/resourcepool/resourcepool.go
@@ -140,7 +140,7 @@ func (m *ResourcePoolMetricSet) Fetch(ctx context.Context, reporter mb.ReporterV
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, rps[i].TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve alerts from resource pool %s: %w", rps[i].Name, err)
+			m.Logger().Errorf("Failed to retrieve alerts from resource pool %s: %v", rps[i].Name, err)
 		}
 
 		reporter.Event(mb.Event{
diff --git a/metricbeat/module/vsphere/virtualmachine/virtualmachine.go b/metricbeat/module/vsphere/virtualmachine/virtualmachine.go
index dcde2b06e89c..6f2794ccc899 100644
--- a/metricbeat/module/vsphere/virtualmachine/virtualmachine.go
+++ b/metricbeat/module/vsphere/virtualmachine/virtualmachine.go
@@ -244,7 +244,7 @@ func (m *MetricSet) Fetch(ctx context.Context, reporter mb.ReporterV2) error {
 
 		triggeredAlarm, err := getTriggeredAlarm(ctx, pc, vm.TriggeredAlarmState)
 		if err != nil {
-			m.Logger().Errorf("Failed to retrieve alerts from VM %s: %w", vm.Name, err)
+			m.Logger().Errorf("Failed to retrieve alerts from VM %s: %v", vm.Name, err)
 		}
 
 		data := VMData{
diff --git a/packetbeat/protos/dns/dns_tcp.go b/packetbeat/protos/dns/dns_tcp.go
index ac1eacaf88e2..cf5c74af6ecd 100644
--- a/packetbeat/protos/dns/dns_tcp.go
+++ b/packetbeat/protos/dns/dns_tcp.go
@@ -56,7 +56,7 @@ type dnsConnectionData struct {
 func (dns *dnsPlugin) Parse(pkt *protos.Packet, tcpTuple *common.TCPTuple, dir uint8, private protos.ProtocolData) protos.ProtocolData {
 	defer dns.logger.Recover("Dns ParseTcp")
 
-	dns.logger.Debugf("dns", "Parsing packet addressed with %s of length %d.", &pkt.Tuple, len(pkt.Payload))
+	dns.logger.Debugf("Parsing packet addressed with %s of length %d.", &pkt.Tuple, len(pkt.Payload))
 
 	conn := ensureDNSConnection(private, dns.logger)
@@ -99,7 +99,7 @@ func (dns *dnsPlugin) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTu
 	stream.rawData = append(stream.rawData, payload...)
 	if len(stream.rawData) > tcp.TCPMaxDataInStream {
-		dns.logger.Debugf("dns", "Stream data too large, dropping DNS stream")
+		dns.logger.Debug("Stream data too large, dropping DNS stream")
 		conn.data[dir] = nil
 		return conn
 	}
@@ -107,7 +107,7 @@ func (dns *dnsPlugin) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTu
 	decodedData, err := stream.handleTCPRawData()
 	if err != nil {
 		if err == incompleteMsg { //nolint:errorlint // incompleteMsg is not wrapped.
-			dns.logger.Debugf("dns", "Waiting for more raw data")
+			dns.logger.Debug("Waiting for more raw data")
 			return conn
 		}
@@ -115,7 +115,7 @@ func (dns *dnsPlugin) doParse(conn *dnsConnectionData, pkt *protos.Packet, tcpTu
 			dns.publishResponseError(conn, err)
 		}
 
-		dns.logger.Debugf("dns", "%v addresses %s, length %d", err, tcpTuple, len(stream.rawData))
+		dns.logger.Debugf("%v addresses %s, length %d", err, tcpTuple, len(stream.rawData))
 
 		// This means that malformed requests or responses are being sent...
 		// TODO: publish the situation also if Request
@@ -187,7 +187,7 @@ func (dns *dnsPlugin) ReceivedFin(tcpTuple *common.TCPTuple, dir uint8, private
 		dns.publishResponseError(conn, err)
 	}
 
-	dns.logger.Debugf("dns", "%v addresses %s, length %d", err, tcpTuple, len(stream.rawData))
+	dns.logger.Debugf("%v addresses %s, length %d", err, tcpTuple, len(stream.rawData))
 
 	return conn
 }
@@ -216,8 +216,8 @@ func (dns *dnsPlugin) GapInStream(tcpTuple *common.TCPTuple, dir uint8, nbytes i
 		dns.publishResponseError(conn, err)
 	}
 
-	dns.logger.Debugf("dns", "%v addresses %s, length %d", err, tcpTuple, len(stream.rawData))
-	dns.logger.Debugf("dns", "Dropping the stream %s", tcpTuple)
+	dns.logger.Debugf("%v addresses %s, length %d", err, tcpTuple, len(stream.rawData))
+	dns.logger.Debugf("Dropping the stream %s", tcpTuple)
 
 	// drop the stream because it is binary Data and it would be unexpected to have a decodable message later
 	return private, true
diff --git a/x-pack/auditbeat/module/system/socket/socket_linux.go b/x-pack/auditbeat/module/system/socket/socket_linux.go
index c8f8cb98f0a9..48378eb4b598 100644
--- a/x-pack/auditbeat/module/system/socket/socket_linux.go
+++ b/x-pack/auditbeat/module/system/socket/socket_linux.go
@@ -295,7 +295,7 @@ func (m *MetricSet) Setup() (err error) {
 			continue
 		}
 		if tracing.IsTraceFSAvailable() != nil {
-			m.log.Warnf("Mounted %s but no kprobes available", mount, err)
+			m.log.Warnf("Mounted %s but no kprobes available: %v", mount, err)
 			mount.unmount()
 			continue
 		}
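Note: where a message has nothing to substitute, the dns hunks above switch from Debugf to Debug, and the netflow and billing hunks later in the patch do the same with Error. The non-formatting variants skip the Sprintf pass entirely, so a literal percent sign in the message can never be mangled. The standard library shows the same split (logp's Debug/Debugf mirror log.Print/log.Printf in this respect):

	package main

	import "log"

	func main() {
		// Printf scans its format for verbs even when no arguments follow:
		log.Printf("CPU at 100%") // -> CPU at 100%!(NOVERB)

		// Print emits the message as-is:
		log.Print("CPU at 100%") // -> CPU at 100%
	}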
diff --git a/x-pack/filebeat/input/awscloudwatch/input.go b/x-pack/filebeat/input/awscloudwatch/input.go
index 06b01c38a69f..9486875925a7 100644
--- a/x-pack/filebeat/input/awscloudwatch/input.go
+++ b/x-pack/filebeat/input/awscloudwatch/input.go
@@ -150,9 +150,9 @@ func (in *cloudwatchInput) Run(inputContext v2.Context, pipeline beat.Pipeline)
 		return err
 	}
 
-	log.Debugf("Config latency = %f", cwPoller.config.Latency)
-	log.Debugf("Config scan_frequency = %f", cwPoller.config.ScanFrequency)
-	log.Debugf("Config api_sleep = %f", cwPoller.config.APISleep)
+	log.Debugf("Config latency = %s", cwPoller.config.Latency)
+	log.Debugf("Config scan_frequency = %s", cwPoller.config.ScanFrequency)
+	log.Debugf("Config api_sleep = %s", cwPoller.config.APISleep)
 	cwPoller.receive(ctx, logGroupIDs, time.Now)
 	return nil
 }
diff --git a/x-pack/filebeat/input/awss3/sqs_s3_event.go b/x-pack/filebeat/input/awss3/sqs_s3_event.go
index ddefd8408ba8..16087f47954b 100644
--- a/x-pack/filebeat/input/awss3/sqs_s3_event.go
+++ b/x-pack/filebeat/input/awss3/sqs_s3_event.go
@@ -263,7 +263,7 @@ func (r sqsProcessingResult) Done() {
 			return
 		}
 		p.metrics.sqsMessagesDeletedTotal.Inc()
-		p.log.Errorf("failed processing SQS message (message was deleted): %w", processingErr)
+		p.log.Errorf("failed processing SQS message (message was deleted): %v", processingErr)
 		r.processor.status.UpdateStatus(status.Degraded, fmt.Sprintf("Failed processing SQS message. Message was deleted. Processing error: %s", processingErr.Error()))
 		return
 	}
@@ -273,7 +273,7 @@ func (r sqsProcessingResult) Done() {
 	// queue is enabled then the message will eventually placed on the DLQ
 	// after maximum receives is reached.
 	p.metrics.sqsMessagesReturnedTotal.Inc()
-	p.log.Errorf("failed processing SQS message (it will return to queue after visibility timeout): %w", processingErr)
+	p.log.Errorf("failed processing SQS message (it will return to queue after visibility timeout): %v", processingErr)
 	r.processor.status.UpdateStatus(status.Degraded, fmt.Sprintf("Failed processing SQS message. Processing will be reattempted: %s", processingErr.Error()))
 }
diff --git a/x-pack/filebeat/input/netflow/input.go b/x-pack/filebeat/input/netflow/input.go
index 5b1ac3232fdd..228e2b4849cb 100644
--- a/x-pack/filebeat/input/netflow/input.go
+++ b/x-pack/filebeat/input/netflow/input.go
@@ -224,7 +224,7 @@ func (n *netflowInput) Run(env v2.Context, connector beat.PipelineConnector) err
 	err = udpServer.Start()
 	if err != nil {
 		errorMsg := fmt.Sprintf("Failed to start udp server: %v", err)
-		n.logger.Errorf(errorMsg)
+		n.logger.Error(errorMsg)
 		env.UpdateStatus(status.Failed, errorMsg)
 		n.stop()
 		return err
diff --git a/x-pack/heartbeat/monitors/browser/sourcejob.go b/x-pack/heartbeat/monitors/browser/sourcejob.go
index e191d5d2131c..5528d62f852f 100644
--- a/x-pack/heartbeat/monitors/browser/sourcejob.go
+++ b/x-pack/heartbeat/monitors/browser/sourcejob.go
@@ -129,7 +129,7 @@ func (sj *SourceJob) extraArgs(uiOrigin bool) []string {
 		s, err := json.Marshal(sj.browserCfg.PlaywrightOpts)
 		if err != nil {
 			// This should never happen, if it was parsed as a config it should be serializable
-			logp.L().Warnf("could not serialize playwright options '%v': %w", sj.browserCfg.PlaywrightOpts, err)
+			logp.L().Warnf("could not serialize playwright options '%v': %v", sj.browserCfg.PlaywrightOpts, err)
 		} else {
 			extraArgs = append(extraArgs, "--playwright-options", string(s))
 		}
diff --git a/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go b/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go
index 0f88e859c59d..939f5181d343 100644
--- a/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go
+++ b/x-pack/heartbeat/monitors/browser/synthexec/synthexec.go
@@ -217,7 +217,7 @@ func runCmd(
 			break
 		}
 		if err != nil {
-			logp.L().Warnf("error decoding json for test json results: %w", err)
+			logp.L().Warnf("error decoding json for test json results: %v", err)
 		}
 
 		mpx.writeSynthEvent(&se)
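Note on the awscloudwatch hunk above: assuming latency, scan_frequency, and api_sleep are time.Duration fields (which the %s fix implies), %f is a bad verb for them — Duration is an integer type with a String method, so %f cannot format it, while %s (or %v) goes through Duration.String. A standalone sketch:

	package main

	import (
		"fmt"
		"time"
	)

	func main() {
		latency := 90 * time.Second

		// %f expects a floating-point value; Duration is int64 underneath:
		fmt.Println(fmt.Sprintf("Config latency = %f", latency))
		// -> Config latency = %!f(time.Duration=90000000000)

		// %s formats via Duration.String:
		fmt.Println(fmt.Sprintf("Config latency = %s", latency))
		// -> Config latency = 1m30s
	}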
diff --git a/x-pack/libbeat/common/cloudfoundry/dopplerconsumer.go b/x-pack/libbeat/common/cloudfoundry/dopplerconsumer.go
index f33a2f1f7460..cc6ed89ccb66 100644
--- a/x-pack/libbeat/common/cloudfoundry/dopplerconsumer.go
+++ b/x-pack/libbeat/common/cloudfoundry/dopplerconsumer.go
@@ -131,7 +131,7 @@ func filterLogs(e *events.Envelope) bool {
 
 func (c *DopplerConsumer) reportError(e EventError) {
 	if c.callbacks.Error == nil {
-		c.log.Debugf("No callback for errors, error received: %s", e)
+		c.log.Debugf("No callback for errors, error received: %v", e)
 		return
 	}
 	c.callbacks.Error(e)
diff --git a/x-pack/libbeat/management/managerV2.go b/x-pack/libbeat/management/managerV2.go
index b60f0327f2e5..e088b948d34f 100644
--- a/x-pack/libbeat/management/managerV2.go
+++ b/x-pack/libbeat/management/managerV2.go
@@ -1052,7 +1052,7 @@ func (cm *BeatV2Manager) handleDebugYaml() []byte {
 	data, err := yaml.Marshal(beatCfg)
 	if err != nil {
-		cm.logger.Errorf("error generating YAML for input debug callback: %w", err)
+		cm.logger.Errorf("error generating YAML for input debug callback: %v", err)
 		return nil
 	}
 	return data
diff --git a/x-pack/metricbeat/module/aws/awshealth/awshealth.go b/x-pack/metricbeat/module/aws/awshealth/awshealth.go
index 1fbd8da9c91d..a936d8978708 100644
--- a/x-pack/metricbeat/module/aws/awshealth/awshealth.go
+++ b/x-pack/metricbeat/module/aws/awshealth/awshealth.go
@@ -207,7 +207,7 @@ func (m *MetricSet) getEventDetails(
 				m.Logger().Errorf("[AWS Health] DescribeEvents failed with: Operation=%s, UnderlyingError=%v", opErr.Operation(), opErr.Err)
 			} else {
-				m.Logger().Errorf("[AWS Health] DescribeEvents failed with: %w", err)
+				m.Logger().Errorf("[AWS Health] DescribeEvents failed with: %v", err)
 			}
 			break
 		}
@@ -240,7 +240,7 @@ func (m *MetricSet) getEventDetails(
 				m.Logger().Errorf("[AWS Health] DescribeEventDetails failed with: Operation=%s, UnderlyingError=%v", opErr.Operation(), opErr.Err)
 			} else {
-				m.Logger().Errorf("[AWS Health] DescribeEventDetails failed with: %w", err)
+				m.Logger().Errorf("[AWS Health] DescribeEventDetails failed with: %v", err)
 			}
 			break
 		}
@@ -269,7 +269,7 @@ func (m *MetricSet) getEventDetails(
 		// Fetch current page of affected entities
 		affCurrentPage, err := affPage.NextPage(ctx)
 		if err != nil {
-			m.Logger().Errorf("[AWS Health] DescribeAffectedEntitie failed with : %w", err)
+			m.Logger().Errorf("[AWS Health] DescribeAffectedEntities failed with: %v", err)
 			break
 		}
 		// Extract relevant details of affected entities and match them with event details
diff --git a/x-pack/metricbeat/module/aws/billing/billing.go b/x-pack/metricbeat/module/aws/billing/billing.go
index da7b86b24834..ce19fb434569 100644
--- a/x-pack/metricbeat/module/aws/billing/billing.go
+++ b/x-pack/metricbeat/module/aws/billing/billing.go
@@ -288,7 +288,7 @@ func (m *MetricSet) getCostGroupBy(svcCostExplorer *costexplorer.Client, groupBy
 	groupByOutput, err := svcCostExplorer.GetCostAndUsage(context.Background(), &groupByCostInput)
 	if err != nil {
 		err = fmt.Errorf("costexplorer GetCostAndUsageRequest failed: %w", err)
-		m.Logger().Errorf(err.Error())
+		m.Logger().Error(err.Error())
 		return nil
 	}
@@ -347,7 +347,7 @@ func (m *MetricSet) addCostMetrics(metrics map[string]costexplorertypes.MetricVa
 		costFloat, err := strconv.ParseFloat(*cost.Amount, 64)
 		if err != nil {
 			err = fmt.Errorf("strconv ParseFloat failed: %w", err)
-			m.Logger().Errorf(err.Error())
+			m.Logger().Error(err.Error())
 			continue
 		}
diff --git a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go
index 230fcc9570d9..8272783d7dfc 100644
--- a/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go
+++ b/x-pack/metricbeat/module/aws/cloudwatch/cloudwatch.go
@@ -149,8 +149,8 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error {
 	// Get listMetricDetailTotal and namespaceDetailTotal from configuration
 	listMetricDetailTotal, namespaceDetailTotal := m.readCloudwatchConfig()
-	m.logger.Debugf("listMetricDetailTotal = %s", listMetricDetailTotal)
-	m.logger.Debugf("namespaceDetailTotal = %s", namespaceDetailTotal)
+	m.logger.Debugf("listMetricDetailTotal = %v", listMetricDetailTotal)
+	m.logger.Debugf("namespaceDetailTotal = %v", namespaceDetailTotal)
 
 	var config aws.Config
 	err = m.Module().UnpackConfig(&config)
@@ -209,13 +209,13 @@ func (m *MetricSet) Fetch(report mb.ReporterV2) error {
 		if len(namespaceDetailTotal) == 0 {
 			listMetricsOutput, err = aws.GetListMetricsOutput("*", regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, APIClients.CloudWatchClient)
 			if err != nil {
-				m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %w", regionName, "*", err)
+				m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %v", regionName, "*", err)
 			}
 		} else {
 			for namespace := range namespaceDetailTotal {
 				listMetricsOutputPerNamespace, err := aws.GetListMetricsOutput(namespace, regionName, m.Period, m.IncludeLinkedAccounts, m.OwningAccount, m.MonitoringAccountID, APIClients.CloudWatchClient)
 				if err != nil {
-					m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %w", regionName, namespace, err)
+					m.Logger().Errorf("Error while retrieving the list of metrics for region %s and namespace %s: %v", regionName, namespace, err)
 				}
 				listMetricsOutput = append(listMetricsOutput, listMetricsOutputPerNamespace...)
 			}
diff --git a/x-pack/metricbeat/module/azure/billing/data.go b/x-pack/metricbeat/module/azure/billing/data.go
index 03f9a56b9738..3c0200727eca 100644
--- a/x-pack/metricbeat/module/azure/billing/data.go
+++ b/x-pack/metricbeat/module/azure/billing/data.go
@@ -231,7 +231,7 @@ func getEventsFromQueryResult(result armcostmanagement.QueryResult, subscription
 			// 20170401 (float64) --> "2017-04-01T00:00:00Z" (time.Time)
 			usageDate, err = time.Parse("20060102", strconv.FormatInt(int64(value), 10))
 			if err != nil {
-				logger.Errorf("unsupported usage date format: not valid date: %w", err)
+				logger.Errorf("unsupported usage date format: not valid date: %v", err)
 				continue
 			}
 		}
diff --git a/x-pack/metricbeat/module/containerd/memory/memory.go b/x-pack/metricbeat/module/containerd/memory/memory.go
index 871bf2bddedb..943a77c900d5 100644
--- a/x-pack/metricbeat/module/containerd/memory/memory.go
+++ b/x-pack/metricbeat/module/containerd/memory/memory.go
@@ -127,17 +127,17 @@ func (m *metricset) Fetch(reporter mb.ReporterV2) error {
 		if m.calcPct {
 			inactiveFiles, err := event.GetValue("inactiveFiles")
 			if err != nil {
-				m.Logger().Debugf("memoryUsagePct calculation skipped. inactiveFiles not present in the event: %w", err)
+				m.Logger().Debugf("memoryUsagePct calculation skipped. inactiveFiles not present in the event: %v", err)
 				continue
 			}
 			usageTotal, err := event.GetValue("usage.total")
 			if err != nil {
-				m.Logger().Debugf("memoryUsagePct calculation skipped. usage.total not present in the event: %w", err)
+				m.Logger().Debugf("memoryUsagePct calculation skipped. usage.total not present in the event: %v", err)
 				continue
 			}
 			memoryLimit, err := event.GetValue("usage.limit")
 			if err != nil {
-				m.Logger().Debugf("memoryUsagePct calculation skipped. usage.limit not present in the event: %w", err)
+				m.Logger().Debugf("memoryUsagePct calculation skipped. usage.limit not present in the event: %v", err)
 				continue
 			}
 			mLfloat, ok := memoryLimit.(float64)
diff --git a/x-pack/metricbeat/module/meraki/device_health/device_health.go b/x-pack/metricbeat/module/meraki/device_health/device_health.go
index de74273370dd..6141bcc18744 100644
--- a/x-pack/metricbeat/module/meraki/device_health/device_health.go
+++ b/x-pack/metricbeat/module/meraki/device_health/device_health.go
@@ -104,7 +104,7 @@ func (m *MetricSet) Fetch(reporter mb.ReporterV2) error {
 		err = getDeviceVPNStatuses(m.client, org, devices, m.logger)
 		if err != nil {
-			m.logger.Errorf("GetVPNStatuses failed; %w", err)
+			m.logger.Errorf("GetVPNStatuses failed; %v", err)
 			// continue so we still report the rest of the device health metrics
 		}
diff --git a/x-pack/metricbeat/module/meraki/http.go b/x-pack/metricbeat/module/meraki/http.go
index 6891f195c5bf..290ca6e0c5a5 100644
--- a/x-pack/metricbeat/module/meraki/http.go
+++ b/x-pack/metricbeat/module/meraki/http.go
@@ -48,7 +48,7 @@ func (p *paginator[T]) GetAllPages() error {
 		val, res, err := p.doRequest()
 		if err != nil {
-			p.logger.Debugf("onError; err: %w, res: %s", err, res)
+			p.logger.Debugf("onError; err: %v, res: %s", err, res)
 			return p.onError(err, res)
 		}
diff --git a/x-pack/metricbeat/module/prometheus/remote_write/data.go b/x-pack/metricbeat/module/prometheus/remote_write/data.go
index d021d148d230..b683faf78103 100644
--- a/x-pack/metricbeat/module/prometheus/remote_write/data.go
+++ b/x-pack/metricbeat/module/prometheus/remote_write/data.go
@@ -91,7 +91,7 @@ func (g *remoteWriteTypedGenerator) Start() {
 }
 
 func (g *remoteWriteTypedGenerator) Stop() {
-	g.logger.Debugf("prometheus.remote_write.cache", "stopping counterCache")
+	g.logger.Debug("stopping counterCache")
 	g.counterCache.Stop()
 }