package collection // import "github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/collection"

import (
-	"reflect"
	"time"

	quotav1 "github.com/openshift/api/quota/v1"
+	"go.opentelemetry.io/collector/pdata/pcommon"
	"go.opentelemetry.io/collector/pdata/pmetric"
	"go.opentelemetry.io/collector/receiver"
-	"go.uber.org/zap"
	appsv1 "k8s.io/api/apps/v1"
	autoscalingv2 "k8s.io/api/autoscaling/v2"
	autoscalingv2beta2 "k8s.io/api/autoscaling/v2beta2"
	batchv1 "k8s.io/api/batch/v1"
	batchv1beta1 "k8s.io/api/batch/v1beta1"
	corev1 "k8s.io/api/core/v1"
-	"k8s.io/apimachinery/pkg/runtime"
-	"k8s.io/apimachinery/pkg/runtime/schema"
-	"k8s.io/apimachinery/pkg/types"
-	"k8s.io/client-go/tools/cache"

-	"github.com/open-telemetry/opentelemetry-collector-contrib/pkg/experimentalmetricmetadata"
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/clusterresourcequota"
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/cronjob"
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/demonset"
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/deployment"
+	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/gvk"
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/hpa"
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/jobs"
	"github.com/open-telemetry/opentelemetry-collector-contrib/receiver/k8sclusterreceiver/internal/metadata"
@@ -42,137 +37,82 @@ import (
// TODO: Consider moving some of these constants to
// https://go.opentelemetry.io/collector/blob/main/model/semconv/opentelemetry.go.

-// DataCollector wraps around a metricsStore and a metadaStore exposing
-// methods to perform on the underlying stores. DataCollector also provides
-// an interface to interact with refactored code from SignalFx Agent which is
-// confined to the collection package.
+// DataCollector emits metrics with CollectMetricData based on the Kubernetes API objects in the metadata store.
type DataCollector struct {
	settings                 receiver.CreateSettings
-	metricsStore             *metricsStore
	metadataStore            *metadata.Store
	nodeConditionsToReport   []string
	allocatableTypesToReport []string
-	metricsBuilderConfig     metadata.MetricsBuilderConfig
+	metricsBuilder           *metadata.MetricsBuilder
}

// NewDataCollector returns a DataCollector.
-func NewDataCollector(set receiver.CreateSettings, metricsBuilderConfig metadata.MetricsBuilderConfig, nodeConditionsToReport, allocatableTypesToReport []string) *DataCollector {
+func NewDataCollector(set receiver.CreateSettings, ms *metadata.Store,
+	metricsBuilderConfig metadata.MetricsBuilderConfig, nodeConditionsToReport, allocatableTypesToReport []string) *DataCollector {
	return &DataCollector{
-		settings: set,
-		metricsStore: &metricsStore{
-			metricsCache: make(map[types.UID]pmetric.Metrics),
-		},
-		metadataStore:            &metadata.Store{},
+		settings:                 set,
+		metadataStore:            ms,
		nodeConditionsToReport:   nodeConditionsToReport,
		allocatableTypesToReport: allocatableTypesToReport,
-		metricsBuilderConfig:     metricsBuilderConfig,
-	}
-}
-
-// SetupMetadataStore initializes a metadata store for the kubernetes kind.
-func (dc *DataCollector) SetupMetadataStore(gvk schema.GroupVersionKind, store cache.Store) {
-	dc.metadataStore.Setup(gvk, store)
-}
-
-func (dc *DataCollector) RemoveFromMetricsStore(obj interface{}) {
-	if err := dc.metricsStore.remove(obj.(runtime.Object)); err != nil {
-		dc.settings.TelemetrySettings.Logger.Error(
-			"failed to remove from metric cache",
-			zap.String("obj", reflect.TypeOf(obj).String()),
-			zap.Error(err),
-		)
-	}
-}
-
-func (dc *DataCollector) UpdateMetricsStore(obj interface{}, md pmetric.Metrics) {
-	if err := dc.metricsStore.update(obj.(runtime.Object), md); err != nil {
-		dc.settings.TelemetrySettings.Logger.Error(
-			"failed to update metric cache",
-			zap.String("obj", reflect.TypeOf(obj).String()),
-			zap.Error(err),
-		)
+		metricsBuilder:           metadata.NewMetricsBuilder(metricsBuilderConfig, set),
	}
}

func (dc *DataCollector) CollectMetricData(currentTime time.Time) pmetric.Metrics {
-	return dc.metricsStore.getMetricData(currentTime)
-}
-
-// SyncMetrics updates the metric store with latest metrics from the kubernetes object.
-func (dc *DataCollector) SyncMetrics(obj interface{}) {
-	var md pmetric.Metrics
-
-	switch o := obj.(type) {
-	case *corev1.Pod:
-		md = pod.GetMetrics(dc.settings, dc.metricsBuilderConfig, o)
-	case *corev1.Node:
-		md = node.GetMetrics(dc.settings, dc.metricsBuilderConfig, o, dc.nodeConditionsToReport, dc.allocatableTypesToReport)
-	case *corev1.Namespace:
-		md = namespace.GetMetrics(dc.settings, dc.metricsBuilderConfig, o)
-	case *corev1.ReplicationController:
-		md = replicationcontroller.GetMetrics(dc.settings, dc.metricsBuilderConfig, o)
-	case *corev1.ResourceQuota:
-		md = resourcequota.GetMetrics(dc.settings, dc.metricsBuilderConfig, o)
-	case *appsv1.Deployment:
-		md = deployment.GetMetrics(dc.settings, dc.metricsBuilderConfig, o)
-	case *appsv1.ReplicaSet:
-		md = replicaset.GetMetrics(dc.settings, dc.metricsBuilderConfig, o)
-	case *appsv1.DaemonSet:
-		md = demonset.GetMetrics(dc.settings, dc.metricsBuilderConfig, o)
-	case *appsv1.StatefulSet:
-		md = statefulset.GetMetrics(dc.settings, dc.metricsBuilderConfig, o)
-	case *batchv1.Job:
-		md = jobs.GetMetrics(dc.settings, dc.metricsBuilderConfig, o)
-	case *batchv1.CronJob:
-		md = cronjob.GetMetrics(dc.settings, dc.metricsBuilderConfig, o)
-	case *batchv1beta1.CronJob:
-		md = cronjob.GetMetricsBeta(dc.settings, dc.metricsBuilderConfig, o)
-	case *autoscalingv2.HorizontalPodAutoscaler:
-		md = hpa.GetMetrics(dc.settings, dc.metricsBuilderConfig, o)
-	case *autoscalingv2beta2.HorizontalPodAutoscaler:
-		md = hpa.GetMetricsBeta(dc.settings, dc.metricsBuilderConfig, o)
-	case *quotav1.ClusterResourceQuota:
-		md = clusterresourcequota.GetMetrics(dc.settings, dc.metricsBuilderConfig, o)
-	default:
-		return
-	}
-
-	if md.DataPointCount() == 0 {
-		return
-	}
-
-	dc.UpdateMetricsStore(obj, md)
-}
-
-// SyncMetadata updates the metric store with latest metrics from the kubernetes object
-func (dc *DataCollector) SyncMetadata(obj interface{}) map[experimentalmetricmetadata.ResourceID]*metadata.KubernetesMetadata {
-	km := map[experimentalmetricmetadata.ResourceID]*metadata.KubernetesMetadata{}
-	switch o := obj.(type) {
-	case *corev1.Pod:
-		km = pod.GetMetadata(o, dc.metadataStore, dc.settings.TelemetrySettings.Logger)
-	case *corev1.Node:
-		km = node.GetMetadata(o)
-	case *corev1.ReplicationController:
-		km = replicationcontroller.GetMetadata(o)
-	case *appsv1.Deployment:
-		km = deployment.GetMetadata(o)
-	case *appsv1.ReplicaSet:
-		km = replicaset.GetMetadata(o)
-	case *appsv1.DaemonSet:
-		km = demonset.GetMetadata(o)
-	case *appsv1.StatefulSet:
-		km = statefulset.GetMetadata(o)
-	case *batchv1.Job:
-		km = jobs.GetMetadata(o)
-	case *batchv1.CronJob:
-		km = cronjob.GetMetadata(o)
-	case *batchv1beta1.CronJob:
-		km = cronjob.GetMetadataBeta(o)
-	case *autoscalingv2.HorizontalPodAutoscaler:
-		km = hpa.GetMetadata(o)
-	case *autoscalingv2beta2.HorizontalPodAutoscaler:
-		km = hpa.GetMetadataBeta(o)
-	}
-
-	return km
+	ts := pcommon.NewTimestampFromTime(currentTime)
+	customRMs := pmetric.NewResourceMetricsSlice()
+
+	dc.metadataStore.ForEach(gvk.Pod, func(o any) {
+		pod.RecordMetrics(dc.settings.Logger, dc.metricsBuilder, o.(*corev1.Pod), ts)
+	})
+	dc.metadataStore.ForEach(gvk.Node, func(o any) {
+		crm := node.CustomMetrics(dc.settings, dc.metricsBuilder.NewResourceBuilder(), o.(*corev1.Node),
+			dc.nodeConditionsToReport, dc.allocatableTypesToReport, ts)
+		if crm.ScopeMetrics().Len() > 0 {
+			crm.MoveTo(customRMs.AppendEmpty())
+		}
+	})
+	dc.metadataStore.ForEach(gvk.Namespace, func(o any) {
+		namespace.RecordMetrics(dc.metricsBuilder, o.(*corev1.Namespace), ts)
+	})
+	dc.metadataStore.ForEach(gvk.ReplicationController, func(o any) {
+		replicationcontroller.RecordMetrics(dc.metricsBuilder, o.(*corev1.ReplicationController), ts)
+	})
+	dc.metadataStore.ForEach(gvk.ResourceQuota, func(o any) {
+		resourcequota.RecordMetrics(dc.metricsBuilder, o.(*corev1.ResourceQuota), ts)
+	})
+	dc.metadataStore.ForEach(gvk.Deployment, func(o any) {
+		deployment.RecordMetrics(dc.metricsBuilder, o.(*appsv1.Deployment), ts)
+	})
+	dc.metadataStore.ForEach(gvk.ReplicaSet, func(o any) {
+		replicaset.RecordMetrics(dc.metricsBuilder, o.(*appsv1.ReplicaSet), ts)
+	})
+	dc.metadataStore.ForEach(gvk.DaemonSet, func(o any) {
+		demonset.RecordMetrics(dc.metricsBuilder, o.(*appsv1.DaemonSet), ts)
+	})
+	dc.metadataStore.ForEach(gvk.StatefulSet, func(o any) {
+		statefulset.RecordMetrics(dc.metricsBuilder, o.(*appsv1.StatefulSet), ts)
+	})
+	dc.metadataStore.ForEach(gvk.Job, func(o any) {
+		jobs.RecordMetrics(dc.metricsBuilder, o.(*batchv1.Job), ts)
+	})
+	dc.metadataStore.ForEach(gvk.CronJob, func(o any) {
+		cronjob.RecordMetrics(dc.metricsBuilder, o.(*batchv1.CronJob), ts)
+	})
+	dc.metadataStore.ForEach(gvk.CronJobBeta, func(o any) {
+		cronjob.RecordMetricsBeta(dc.metricsBuilder, o.(*batchv1beta1.CronJob), ts)
+	})
+	dc.metadataStore.ForEach(gvk.HorizontalPodAutoscaler, func(o any) {
+		hpa.RecordMetrics(dc.metricsBuilder, o.(*autoscalingv2.HorizontalPodAutoscaler), ts)
+	})
+	dc.metadataStore.ForEach(gvk.HorizontalPodAutoscalerBeta, func(o any) {
+		hpa.RecordMetricsBeta(dc.metricsBuilder, o.(*autoscalingv2beta2.HorizontalPodAutoscaler), ts)
+	})
+	dc.metadataStore.ForEach(gvk.ClusterResourceQuota, func(o any) {
+		clusterresourcequota.RecordMetrics(dc.metricsBuilder, o.(*quotav1.ClusterResourceQuota), ts)
+	})
+
+	m := dc.metricsBuilder.Emit()
+	customRMs.MoveAndAppendTo(m.ResourceMetrics())
+	return m
}
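
For orientation, here is a minimal sketch of how the refactored collector is driven after this change: the caller now owns the metadata.Store (kept in sync elsewhere by the receiver's informers), builds the DataCollector once, and calls CollectMetricData on each scrape. The packages involved are internal to the receiver module, so the snippet is illustrative only; receivertest.NewNopCreateSettings and metadata.DefaultMetricsBuilderConfig are assumed helper constructors, not part of this diff.

// Sketch only: assumes the *metadata.Store is already populated and kept in
// sync by the receiver's informers; the helpers named in the comments are
// assumptions for illustration.
func scrapeOnce(set receiver.CreateSettings, ms *metadata.Store) pmetric.Metrics {
	dc := NewDataCollector(
		set,                                    // e.g. receivertest.NewNopCreateSettings() in tests (assumed helper)
		ms,                                     // caller-owned metadata store
		metadata.DefaultMetricsBuilderConfig(), // mdatagen-generated default config (assumed helper)
		nil,                                    // nodeConditionsToReport
		nil,                                    // allocatableTypesToReport
	)
	// One batch per call: the collector walks the cached objects per GVK and
	// stamps every data point with the supplied collection time.
	return dc.CollectMetricData(time.Now())
}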