|
| 1 | +// Copyright Splunk Inc. |
| 2 | +// SPDX-License-Identifier: Apache-2.0 |
| 3 | + |
| 4 | +package k8sevents |
| 5 | + |
| 6 | +import ( |
| 7 | + "fmt" |
| 8 | + "os" |
| 9 | + "path/filepath" |
| 10 | + "testing" |
| 11 | + |
| 12 | + "github.com/stretchr/testify/assert" |
| 13 | + "github.com/stretchr/testify/require" |
| 14 | + "go.opentelemetry.io/collector/consumer/consumertest" |
| 15 | + "go.opentelemetry.io/collector/pdata/pcommon" |
| 16 | + "helm.sh/helm/v3/pkg/action" |
| 17 | + "helm.sh/helm/v3/pkg/chart/loader" |
| 18 | + "helm.sh/helm/v3/pkg/cli" |
| 19 | + "helm.sh/helm/v3/pkg/registry" |
| 20 | + |
| 21 | + "github.com/signalfx/splunk-otel-collector-chart/functional_tests/internal" |
| 22 | +) |
| 23 | + |
const (
	// redisReleaseName is the helm release name used for the test Redis install.
	redisReleaseName = "test-redis"
	// redisChartRepo is the Bitnami chart repository that hosts the redis chart.
	redisChartRepo = "https://charts.bitnami.com/bitnami"
	// redisChart is the name of the chart pulled from redisChartRepo.
	redisChart = "redis"
)
| 29 | + |
| 30 | +// Env vars to control the test behavior: |
| 31 | +// KUBECONFIG (required): the path to the kubeconfig file |
| 32 | +// TEARDOWN_BEFORE_SETUP: if set to true, the test will run teardown before setup |
| 33 | +// SKIP_TEARDOWN: if set to true, the test will skip teardown |
| 34 | +// SKIP_SETUP: if set to true, the test will skip setup |
| 35 | +func Test_Discovery(t *testing.T) { |
| 36 | + testKubeConfig, ok := os.LookupEnv("KUBECONFIG") |
| 37 | + require.True(t, ok, "the environment variable KUBECONFIG must be set") |
| 38 | + if os.Getenv("TEARDOWN_BEFORE_SETUP") == "true" { |
| 39 | + teardown(t, testKubeConfig) |
| 40 | + } |
| 41 | + installRedisChart(t, testKubeConfig) |
| 42 | + t.Cleanup(func() { |
| 43 | + if os.Getenv("SKIP_TEARDOWN") == "true" { |
| 44 | + t.Log("Skipping teardown as SKIP_TEARDOWN is set to true") |
| 45 | + return |
| 46 | + } |
| 47 | + teardown(t, testKubeConfig) |
| 48 | + }) |
| 49 | + |
| 50 | + internal.SetupSignalFxApiServer(t) |
| 51 | + |
| 52 | + tests := []struct { |
| 53 | + name string |
| 54 | + valuesTmpl string |
| 55 | + }{ |
| 56 | + { |
| 57 | + name: "agent_only", |
| 58 | + valuesTmpl: "agent_only_values.tmpl", |
| 59 | + }, |
| 60 | + { |
| 61 | + name: "agent_with_gateway", |
| 62 | + valuesTmpl: "agent_with_gateway_values.tmpl", |
| 63 | + }, |
| 64 | + } |
| 65 | + for _, tt := range tests { |
| 66 | + t.Run(tt.name, func(t *testing.T) { |
| 67 | + metricsSink := internal.SetupSignalfxReceiver(t, internal.SignalFxReceiverPort) |
| 68 | + eventsSink := internal.SetupOTLPLogsSink(t) |
| 69 | + installCollectorChart(t, testKubeConfig, tt.valuesTmpl) |
| 70 | + t.Cleanup(func() { |
| 71 | + if os.Getenv("SKIP_TEARDOWN") == "true" { |
| 72 | + return |
| 73 | + } |
| 74 | + internal.ChartUninstall(t, testKubeConfig) |
| 75 | + }) |
| 76 | + assertRedisEntities(t, eventsSink) |
| 77 | + assertRedisMetrics(t, metricsSink) |
| 78 | + }) |
| 79 | + } |
| 80 | +} |
| 81 | + |
| 82 | +func assertRedisEntities(t *testing.T, sink *consumertest.LogsSink) { |
| 83 | + internal.WaitForLogs(t, 1, sink) |
| 84 | + rl := sink.AllLogs()[len(sink.AllLogs())-1].ResourceLogs().At(0) |
| 85 | + assertAttr(t, rl.Resource().Attributes(), "k8s.cluster.name", "test-cluster") |
| 86 | + assert.Equal(t, 1, rl.ScopeLogs().Len()) |
| 87 | + sl := rl.ScopeLogs().At(0) |
| 88 | + assertAttr(t, sl.Scope().Attributes(), "otel.entity.event_as_log", true) |
| 89 | + assert.Equal(t, 1, sl.LogRecords().Len()) |
| 90 | + lrAttrs := sl.LogRecords().At(0).Attributes() |
| 91 | + assertAttr(t, lrAttrs, "otel.entity.event.type", "entity_state") |
| 92 | + assertAttr(t, lrAttrs, "otel.entity.event.type", "entity_state") |
| 93 | + assertAttr(t, lrAttrs, "otel.entity.type", "service") |
| 94 | + entityAttrsVal, ok := lrAttrs.Get("otel.entity.attributes") |
| 95 | + assert.True(t, ok) |
| 96 | + entityAttrs := entityAttrsVal.Map() |
| 97 | + assertAttr(t, entityAttrs, "k8s.namespace.name", internal.Namespace) |
| 98 | + assertAttr(t, entityAttrs, "service.type", "redis") |
| 99 | + assertAttr(t, entityAttrs, "service.name", "redis") |
| 100 | + assertAttr(t, entityAttrs, "k8s.pod.name", "test-redis-master-0") |
| 101 | + assertAttr(t, entityAttrs, "discovery.status", "successful") |
| 102 | +} |
| 103 | + |
| 104 | +func assertAttr(t *testing.T, attrs pcommon.Map, name string, val any) { |
| 105 | + entityType, ok := attrs.Get(name) |
| 106 | + assert.True(t, ok) |
| 107 | + if ok { |
| 108 | + assert.Equal(t, val, entityType.AsRaw()) |
| 109 | + } |
| 110 | +} |
| 111 | + |
| 112 | +func assertRedisMetrics(t *testing.T, sink *consumertest.MetricsSink) { |
| 113 | + internal.WaitForMetrics(t, 5, sink) |
| 114 | + foundMetrics := make(map[string]bool) |
| 115 | + for _, m := range sink.AllMetrics() { |
| 116 | + for i := 0; i < m.ResourceMetrics().Len(); i++ { |
| 117 | + sm := m.ResourceMetrics().At(i).ScopeMetrics().At(0) |
| 118 | + for j := 0; j < sm.Metrics().Len(); j++ { |
| 119 | + foundMetrics[sm.Metrics().At(j).Name()] = true |
| 120 | + } |
| 121 | + } |
| 122 | + } |
| 123 | + expectedRedisMetrics := []string{ |
| 124 | + "redis.clients.blocked", |
| 125 | + "redis.clients.connected", |
| 126 | + "redis.clients.max_input_buffer", |
| 127 | + "redis.clients.max_output_buffer", |
| 128 | + "redis.commands", |
| 129 | + "redis.commands.processed", |
| 130 | + "redis.connections.received", |
| 131 | + "redis.connections.rejected", |
| 132 | + "redis.cpu.time", |
| 133 | + "redis.keys.evicted", |
| 134 | + "redis.keys.expired", |
| 135 | + "redis.keyspace.hits", |
| 136 | + "redis.keyspace.misses", |
| 137 | + "redis.latest_fork", |
| 138 | + "redis.memory.fragmentation_ratio", |
| 139 | + "redis.memory.lua", |
| 140 | + "redis.memory.peak", |
| 141 | + "redis.memory.rss", |
| 142 | + "redis.memory.used", |
| 143 | + "redis.net.input", |
| 144 | + "redis.net.output", |
| 145 | + "redis.rdb.changes_since_last_save", |
| 146 | + "redis.replication.backlog_first_byte_offset", |
| 147 | + "redis.replication.offset", |
| 148 | + "redis.slaves.connected", |
| 149 | + "redis.uptime", |
| 150 | + } |
| 151 | + for _, rm := range expectedRedisMetrics { |
| 152 | + assert.Contains(t, foundMetrics, rm) |
| 153 | + } |
| 154 | +} |
| 155 | + |
| 156 | +func installCollectorChart(t *testing.T, kubeConfig, valuesTmpl string) { |
| 157 | + t.Helper() |
| 158 | + if os.Getenv("SKIP_SETUP") == "true" { |
| 159 | + t.Log("Skipping collector chart installation as SKIP_SETUP is set to true") |
| 160 | + return |
| 161 | + } |
| 162 | + |
| 163 | + hostEp := internal.HostEndpoint(t) |
| 164 | + valuesFile, err := filepath.Abs(filepath.Join("testdata", valuesTmpl)) |
| 165 | + require.NoError(t, err) |
| 166 | + internal.ChartInstallOrUpgrade(t, kubeConfig, valuesFile, map[string]any{ |
| 167 | + "ApiURL": fmt.Sprintf("http://%s:%d", hostEp, internal.SignalFxAPIPort), |
| 168 | + "IngestURL": fmt.Sprintf("http://%s:%d", hostEp, internal.SignalFxReceiverPort), |
| 169 | + "EventsURL": fmt.Sprintf("http://%s:%d", hostEp, internal.OTLPHTTPReceiverPort), |
| 170 | + }) |
| 171 | +} |
| 172 | + |
| 173 | +// installRedisChart deploys a simple Redis server with official helm chart. |
| 174 | +func installRedisChart(t *testing.T, kubeConfig string) { |
| 175 | + t.Helper() |
| 176 | + if os.Getenv("SKIP_SETUP") == "true" { |
| 177 | + t.Log("Skipping redis chart installation as SKIP_SETUP is set to true") |
| 178 | + return |
| 179 | + } |
| 180 | + |
| 181 | + actionConfig := internal.InitHelmActionConfig(t, kubeConfig) |
| 182 | + rc, err := registry.NewClient() |
| 183 | + require.NoError(t, err) |
| 184 | + actionConfig.RegistryClient = rc |
| 185 | + install := action.NewInstall(actionConfig) |
| 186 | + install.Namespace = internal.Namespace |
| 187 | + install.ReleaseName = redisReleaseName |
| 188 | + install.ChartPathOptions.RepoURL = redisChartRepo |
| 189 | + install.Wait = true |
| 190 | + install.Timeout = internal.HelmActionTimeout |
| 191 | + hCli := cli.New() |
| 192 | + hCli.KubeConfig = kubeConfig |
| 193 | + chartPath, err := install.ChartPathOptions.LocateChart(redisChart, hCli) |
| 194 | + require.NoError(t, err) |
| 195 | + ch, err := loader.Load(chartPath) |
| 196 | + |
| 197 | + // Install the redis chart with no replicas and no auth |
| 198 | + release, err := install.Run(ch, map[string]any{ |
| 199 | + "auth": map[string]any{ |
| 200 | + "enabled": false, |
| 201 | + }, |
| 202 | + "replica": map[string]any{ |
| 203 | + "replicaCount": 0, |
| 204 | + }, |
| 205 | + }) |
| 206 | + require.NoError(t, err) |
| 207 | + t.Logf("Helm chart installed. Release name: %s", release.Name) |
| 208 | +} |
| 209 | + |
| 210 | +func uninstallRedisChart(t *testing.T, kubeConfig string) { |
| 211 | + t.Helper() |
| 212 | + uninstallAction := action.NewUninstall(internal.InitHelmActionConfig(t, kubeConfig)) |
| 213 | + uninstallAction.Wait = true |
| 214 | + uninstallAction.Timeout = internal.HelmActionTimeout |
| 215 | + uninstallAction.IgnoreNotFound = true |
| 216 | + _, err := uninstallAction.Run(redisReleaseName) |
| 217 | + require.NoError(t, err) |
| 218 | + t.Logf("Helm release %q uninstalled", redisReleaseName) |
| 219 | +} |
| 220 | + |
// teardown removes everything the test installed: first the Redis helm
// release, then the collector chart. Both uninstalls are expected to
// tolerate the target being absent.
func teardown(t *testing.T, kubeConfig string) {
	t.Helper()
	uninstallRedisChart(t, kubeConfig)
	internal.ChartUninstall(t, kubeConfig)
}
0 commit comments