From a92a4651cf5d28029574a947cce0ebc6f3ff26d3 Mon Sep 17 00:00:00 2001 From: Amir Blum Date: Sat, 23 Nov 2024 08:35:22 +0200 Subject: [PATCH 1/9] feat: add memory limiter for node collector to drop data --- .../controllers/common/memorylimiter.go | 17 +++++++++ .../controllers/datacollection/configmap.go | 36 ++++++++++++------- autoscaler/controllers/gateway/configmap.go | 6 +--- 3 files changed, 41 insertions(+), 18 deletions(-) create mode 100644 autoscaler/controllers/common/memorylimiter.go diff --git a/autoscaler/controllers/common/memorylimiter.go b/autoscaler/controllers/common/memorylimiter.go new file mode 100644 index 0000000000..636267a2bd --- /dev/null +++ b/autoscaler/controllers/common/memorylimiter.go @@ -0,0 +1,17 @@ +package common + +import ( + odigosv1 "github.com/odigos-io/odigos/api/odigos/v1alpha1" + "github.com/odigos-io/odigos/common/config" +) + +func GetMemoryLimiterConfig(memorySettings odigosv1.CollectorsGroupMemorySettings) config.GenericMap { + // check_interval is currently hardcoded to 1s + // this seems to be a reasonable value for the memory limiter and what the processor uses in docs. + // performing memory checks is expensive, so we trade off performance with fast reaction time to memory pressure. 
+ return config.GenericMap{ + "check_interval": "1s", + "limit_mib": memorySettings.MemoryLimiterLimitMiB, + "spike_limit_mib": memorySettings.MemoryLimiterSpikeLimitMiB, + } +} diff --git a/autoscaler/controllers/datacollection/configmap.go b/autoscaler/controllers/datacollection/configmap.go index 8b7885dcd2..d662f41bb4 100644 --- a/autoscaler/controllers/datacollection/configmap.go +++ b/autoscaler/controllers/datacollection/configmap.go @@ -9,9 +9,10 @@ import ( "github.com/ghodss/yaml" odigosv1 "github.com/odigos-io/odigos/api/odigos/v1alpha1" + "github.com/odigos-io/odigos/autoscaler/controllers/common" commonconf "github.com/odigos-io/odigos/autoscaler/controllers/common" "github.com/odigos-io/odigos/autoscaler/controllers/datacollection/custom" - "github.com/odigos-io/odigos/common" + odigoscommon "github.com/odigos-io/odigos/common" "github.com/odigos-io/odigos/common/config" "github.com/odigos-io/odigos/common/consts" constsK8s "github.com/odigos-io/odigos/k8sutils/pkg/consts" @@ -124,23 +125,26 @@ func getDesiredConfigMap(apps *odigosv1.InstrumentedApplicationList, dests *odig return &desired, nil } -func calculateConfigMapData(collectorsGroup *odigosv1.CollectorsGroup, apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor, +func calculateConfigMapData(nodeCG *odigosv1.CollectorsGroup, apps *odigosv1.InstrumentedApplicationList, dests *odigosv1.DestinationList, processors []*odigosv1.Processor, setTracesLoadBalancer bool, disableNameProcessor bool) (string, error) { - ownMetricsPort := collectorsGroup.Spec.CollectorOwnMetricsPort + ownMetricsPort := nodeCG.Spec.CollectorOwnMetricsPort empty := struct{}{} processorsCfg, tracesProcessors, metricsProcessors, logsProcessors, errs := 
config.GetCrdProcessorsConfigMap(commonconf.ToProcessorConfigurerArray(processors)) for name, err := range errs { - log.Log.V(0).Info(err.Error(), "processor", name) + log.Log.V(0).Error(err, "processor", name) } if !disableNameProcessor { processorsCfg["odigosresourcename"] = empty } + memoryLimiterConfiguration := common.GetMemoryLimiterConfig(nodeCG.Spec.MemorySettings) + processorsCfg["batch"] = empty + processorsCfg["memory_limiter"] = memoryLimiterConfiguration processorsCfg["resource"] = config.GenericMap{ "attributes": []config.GenericMap{{ "key": "k8s.node.name", @@ -266,13 +270,13 @@ func calculateConfigMapData(collectorsGroup *odigosv1.CollectorsGroup, apps *odi collectLogs := false for _, dst := range dests.Items { for _, s := range dst.Spec.Signals { - if s == common.LogsObservabilitySignal && !custom.DestRequiresCustom(dst.Spec.Type) { + if s == odigoscommon.LogsObservabilitySignal && !custom.DestRequiresCustom(dst.Spec.Type) { collectLogs = true } - if s == common.TracesObservabilitySignal || dst.Spec.Type == common.PrometheusDestinationType { + if s == odigoscommon.TracesObservabilitySignal || dst.Spec.Type == odigoscommon.PrometheusDestinationType { collectTraces = true } - if s == common.MetricsObservabilitySignal && !custom.DestRequiresCustom(dst.Spec.Type) { + if s == odigoscommon.MetricsObservabilitySignal && !custom.DestRequiresCustom(dst.Spec.Type) { collectMetrics = true } } @@ -364,7 +368,7 @@ func getConfigMap(ctx context.Context, c client.Client, namespace string) (*v1.C return configMap, nil } -func getSignalsFromOtelcolConfig(otelcolConfigContent string) ([]common.ObservabilitySignal, error) { +func getSignalsFromOtelcolConfig(otelcolConfigContent string) ([]odigoscommon.ObservabilitySignal, error) { config := config.Config{} err := yaml.Unmarshal([]byte(otelcolConfigContent), &config) if err != nil { @@ -389,22 +393,28 @@ func getSignalsFromOtelcolConfig(otelcolConfigContent string) ([]common.Observab } } - signals := 
[]common.ObservabilitySignal{} + signals := []odigoscommon.ObservabilitySignal{} if tracesEnabled { - signals = append(signals, common.TracesObservabilitySignal) + signals = append(signals, odigoscommon.TracesObservabilitySignal) } if metricsEnabled { - signals = append(signals, common.MetricsObservabilitySignal) + signals = append(signals, odigoscommon.MetricsObservabilitySignal) } if logsEnabled { - signals = append(signals, common.LogsObservabilitySignal) + signals = append(signals, odigoscommon.LogsObservabilitySignal) } return signals, nil } func getCommonProcessors(disableNameProcessor bool) []string { - processors := []string{"batch"} + // memory limiter is placed right after batch processor and not the first processor in pipeline + // this is so that instrumented application always succeeds in sending data to the collector + // (on it being added to a batch) and checking the memory limit later after the batch + // where memory rejection would drop the data instead of backpressuring the application. 
+ // Read more about it here: https://github.com/open-telemetry/opentelemetry-collector/issues/11726 + // Also related: https://github.com/open-telemetry/opentelemetry-collector/issues/9591 + processors := []string{"batch", "memory_limiter"} if !disableNameProcessor { processors = append(processors, "odigosresourcename") } diff --git a/autoscaler/controllers/gateway/configmap.go b/autoscaler/controllers/gateway/configmap.go index 1547517e1f..68b96c76c9 100644 --- a/autoscaler/controllers/gateway/configmap.go +++ b/autoscaler/controllers/gateway/configmap.go @@ -114,11 +114,7 @@ func addSelfTelemetryPipeline(c *config.Config, ownTelemetryPort int32) error { func syncConfigMap(dests *odigosv1.DestinationList, allProcessors *odigosv1.ProcessorList, gateway *odigosv1.CollectorsGroup, ctx context.Context, c client.Client, scheme *runtime.Scheme) (string, []odigoscommon.ObservabilitySignal, error) { logger := log.FromContext(ctx) - memoryLimiterConfiguration := config.GenericMap{ - "check_interval": "1s", - "limit_mib": gateway.Spec.MemorySettings.MemoryLimiterLimitMiB, - "spike_limit_mib": gateway.Spec.MemorySettings.MemoryLimiterSpikeLimitMiB, - } + memoryLimiterConfiguration := common.GetMemoryLimiterConfig(gateway.Spec.MemorySettings) processors := common.FilterAndSortProcessorsByOrderHint(allProcessors, odigosv1.CollectorsGroupRoleClusterGateway) From 0b7e1a12ad96881d95f3d74ec42cfa9264444425 Mon Sep 17 00:00:00 2001 From: Amir Blum Date: Sat, 23 Nov 2024 09:05:28 +0200 Subject: [PATCH 2/9] feat: set node collectors memory settings --- .../controllers/nodecollectorsgroup/common.go | 28 +++++++++++++++++++ 1 file changed, 28 insertions(+) diff --git a/scheduler/controllers/nodecollectorsgroup/common.go b/scheduler/controllers/nodecollectorsgroup/common.go index 2e739a5e7e..8bdd4cddb7 100644 --- a/scheduler/controllers/nodecollectorsgroup/common.go +++ b/scheduler/controllers/nodecollectorsgroup/common.go @@ -14,6 +14,33 @@ import ( 
"sigs.k8s.io/controller-runtime/pkg/client" ) +func getMemorySettings(odigosConfig common.OdigosConfiguration) odigosv1.CollectorsGroupMemorySettings { + // TODO: currently using hardcoded values, should be configurable. + // + // memory request is expensive on daemonsets since it will consume this memory + // on each node in the cluster. setting to 256, but allowing memory to spike higher + // to consume more available memory on the node. + // if the node has memory to spare, we can use it to buffer more data before dropping, + // but it also means that if no memory is available, collector might get killed by OOM killer. + // + // we can trade-off the memory request: + // - more memory request: more memory allocated per collector on each node, but more buffer for bursts and transient failures. + // - less memory request: efficient use of cluster resources, but data might be dropped earlier on spikes. + // currently choosing 256MiB as a balance (~200MiB left for heap to handle batches and export queues). + // + // we can trade-off how high the memory limit is set above the request: + // - limit is set to request: collector most stable (no OOM) but smaller buffer for bursts and early data drop. + // - limit is set way above request: in case of memory spike, collector will use extra memory available on the node to buffer data, but might get killed by OOM killer if this memory is not available. + // currently choosing 512MiB as a balance (200MiB guaranteed for heap, and the rest ~300MiB of buffer from node before start dropping). 
+ // + return odigosv1.CollectorsGroupMemorySettings{ + MemoryRequestMiB: 256, + MemoryLimiterLimitMiB: 512, + MemoryLimiterSpikeLimitMiB: 128, // meaning that collector will start dropping data at 512-128=384MiB + GomemlimitMiB: 512 - 128 - 32, // start aggressive GC 32 MiB before soft limit and dropping data + } +} + func newNodeCollectorGroup(odigosConfig common.OdigosConfiguration) *odigosv1.CollectorsGroup { ownMetricsPort := consts.OdigosNodeCollectorOwnTelemetryPortDefault @@ -33,6 +60,7 @@ func newNodeCollectorGroup(odigosConfig common.OdigosConfiguration) *odigosv1.Co Spec: odigosv1.CollectorsGroupSpec{ Role: odigosv1.CollectorsGroupRoleNodeCollector, CollectorOwnMetricsPort: ownMetricsPort, + MemorySettings: getMemorySettings(odigosConfig), }, } } From 0ed0290eaf1b50508579c640b5a1896cd7c601d6 Mon Sep 17 00:00:00 2001 From: Amir Blum Date: Sat, 23 Nov 2024 10:26:42 +0200 Subject: [PATCH 3/9] feat: inlcude memory resource limit and GOMEMLIMIT for node ds --- autoscaler/controllers/datacollection/daemonset.go | 12 ++++++++++++ 1 file changed, 12 insertions(+) diff --git a/autoscaler/controllers/datacollection/daemonset.go b/autoscaler/controllers/datacollection/daemonset.go index 1d93b5d9a6..9c70d86627 100644 --- a/autoscaler/controllers/datacollection/daemonset.go +++ b/autoscaler/controllers/datacollection/daemonset.go @@ -16,6 +16,7 @@ import ( appsv1 "k8s.io/api/apps/v1" corev1 "k8s.io/api/core/v1" apierrors "k8s.io/apimachinery/pkg/api/errors" + "k8s.io/apimachinery/pkg/api/resource" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/apimachinery/pkg/runtime" "k8s.io/apimachinery/pkg/types" @@ -191,6 +192,8 @@ func getDesiredDaemonSet(datacollection *odigosv1.CollectorsGroup, configData st rollingUpdate.MaxSurge = &maxSurge } + requestMemoryQuantity := resource.MustParse(fmt.Sprintf("%dMi", datacollection.Spec.MemorySettings.MemoryRequestMiB)) + desiredDs := &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ Name: 
consts.OdigosNodeCollectorDaemonSetName, @@ -302,6 +305,10 @@ func getDesiredDaemonSet(datacollection *odigosv1.CollectorsGroup, configData st }, }, }, + { + Name: "GOMEMLIMIT", + Value: fmt.Sprintf("%dMiB", datacollection.Spec.MemorySettings.GomemlimitMiB), + }, }, LivenessProbe: &corev1.Probe{ ProbeHandler: corev1.ProbeHandler{ @@ -319,6 +326,11 @@ func getDesiredDaemonSet(datacollection *odigosv1.CollectorsGroup, configData st }, }, }, + Resources: corev1.ResourceRequirements{ + Requests: corev1.ResourceList{ + corev1.ResourceMemory: requestMemoryQuantity, + }, + }, SecurityContext: &corev1.SecurityContext{ Privileged: boolPtr(true), }, From ddd69af5f45f0829d028789eb109d34a374acbee Mon Sep 17 00:00:00 2001 From: Amir Blum Date: Sun, 24 Nov 2024 17:33:29 +0200 Subject: [PATCH 4/9] test: add limits to asserts --- tests/e2e/cli-upgrade/assert-instrumented-and-pipeline.yaml | 2 ++ tests/e2e/helm-chart/assert-instrumented-and-pipeline.yaml | 2 ++ tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml | 2 ++ tests/e2e/workload-lifecycle/01-assert-pipeline.yaml | 2 ++ 4 files changed, 8 insertions(+) diff --git a/tests/e2e/cli-upgrade/assert-instrumented-and-pipeline.yaml b/tests/e2e/cli-upgrade/assert-instrumented-and-pipeline.yaml index e83ab4cede..e2b6f015a8 100644 --- a/tests/e2e/cli-upgrade/assert-instrumented-and-pipeline.yaml +++ b/tests/e2e/cli-upgrade/assert-instrumented-and-pipeline.yaml @@ -59,6 +59,8 @@ spec: resources: requests: (memory != null): true + limits: + (memory != null): true volumeMounts: - mountPath: /conf name: collector-conf diff --git a/tests/e2e/helm-chart/assert-instrumented-and-pipeline.yaml b/tests/e2e/helm-chart/assert-instrumented-and-pipeline.yaml index 49d45bca6f..6355275dba 100644 --- a/tests/e2e/helm-chart/assert-instrumented-and-pipeline.yaml +++ b/tests/e2e/helm-chart/assert-instrumented-and-pipeline.yaml @@ -59,6 +59,8 @@ spec: resources: requests: (memory != null): true + limits: + (memory != null): true volumeMounts: - 
mountPath: /conf name: collector-conf diff --git a/tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml b/tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml index 61fcce3467..faef12fadb 100644 --- a/tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml +++ b/tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml @@ -59,6 +59,8 @@ spec: resources: requests: (memory != null): true + limits: + (memory != null): true volumeMounts: - mountPath: /conf name: collector-conf diff --git a/tests/e2e/workload-lifecycle/01-assert-pipeline.yaml b/tests/e2e/workload-lifecycle/01-assert-pipeline.yaml index ec54b094fa..e4ca446722 100644 --- a/tests/e2e/workload-lifecycle/01-assert-pipeline.yaml +++ b/tests/e2e/workload-lifecycle/01-assert-pipeline.yaml @@ -59,6 +59,8 @@ spec: resources: requests: (memory != null): true + limits: + (memory != null): true volumeMounts: - mountPath: /conf name: collector-conf From afa2644e83088a570867843126c15155b3d4cd37 Mon Sep 17 00:00:00 2001 From: Amir Blum Date: Wed, 27 Nov 2024 07:54:03 +0200 Subject: [PATCH 5/9] feat: node collector memory limits --- autoscaler/controllers/datacollection/daemonset.go | 8 ++++++-- scheduler/controllers/nodecollectorsgroup/common.go | 1 + 2 files changed, 7 insertions(+), 2 deletions(-) diff --git a/autoscaler/controllers/datacollection/daemonset.go b/autoscaler/controllers/datacollection/daemonset.go index 9c70d86627..46a569139a 100644 --- a/autoscaler/controllers/datacollection/daemonset.go +++ b/autoscaler/controllers/datacollection/daemonset.go @@ -192,7 +192,8 @@ func getDesiredDaemonSet(datacollection *odigosv1.CollectorsGroup, configData st rollingUpdate.MaxSurge = &maxSurge } - requestMemoryQuantity := resource.MustParse(fmt.Sprintf("%dMi", datacollection.Spec.MemorySettings.MemoryRequestMiB)) + requestMemoryRequestQuantity := resource.MustParse(fmt.Sprintf("%dMi", datacollection.Spec.MemorySettings.MemoryRequestMiB)) + requestMemoryLimitQuantity := 
resource.MustParse(fmt.Sprintf("%dMi", datacollection.Spec.MemorySettings.MemoryLimitMiB)) desiredDs := &appsv1.DaemonSet{ ObjectMeta: metav1.ObjectMeta{ @@ -328,7 +329,10 @@ func getDesiredDaemonSet(datacollection *odigosv1.CollectorsGroup, configData st }, Resources: corev1.ResourceRequirements{ Requests: corev1.ResourceList{ - corev1.ResourceMemory: requestMemoryQuantity, + corev1.ResourceMemory: requestMemoryRequestQuantity, + }, + Limits: corev1.ResourceList{ + corev1.ResourceMemory: requestMemoryLimitQuantity, }, }, SecurityContext: &corev1.SecurityContext{ diff --git a/scheduler/controllers/nodecollectorsgroup/common.go b/scheduler/controllers/nodecollectorsgroup/common.go index 8bdd4cddb7..041ce82812 100644 --- a/scheduler/controllers/nodecollectorsgroup/common.go +++ b/scheduler/controllers/nodecollectorsgroup/common.go @@ -35,6 +35,7 @@ func getMemorySettings(odigosConfig common.OdigosConfiguration) odigosv1.Collect // return odigosv1.CollectorsGroupMemorySettings{ MemoryRequestMiB: 256, + MemoryLimitMiB: 512 + 64, MemoryLimiterLimitMiB: 512, MemoryLimiterSpikeLimitMiB: 128, // meaning that collector will start dropping data at 512-128=384MiB GomemlimitMiB: 512 - 128 - 32, // start aggressive GC 32 MiB before soft limit and dropping data From 4fe97144a9eca72ea95852e334521aeb44dbe567 Mon Sep 17 00:00:00 2001 From: Amir Blum Date: Wed, 27 Nov 2024 08:12:56 +0200 Subject: [PATCH 6/9] fix: add new fields to e2e tests assertion --- .../e2e/cli-upgrade/assert-instrumented-and-pipeline.yaml | 7 +++++++ tests/e2e/helm-chart/assert-instrumented-and-pipeline.yaml | 7 +++++++ tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml | 7 +++++++ tests/e2e/workload-lifecycle/01-assert-pipeline.yaml | 7 +++++++ 4 files changed, 28 insertions(+) diff --git a/tests/e2e/cli-upgrade/assert-instrumented-and-pipeline.yaml b/tests/e2e/cli-upgrade/assert-instrumented-and-pipeline.yaml index aceb40496c..947d35ac8d 100644 --- 
a/tests/e2e/cli-upgrade/assert-instrumented-and-pipeline.yaml +++ b/tests/e2e/cli-upgrade/assert-instrumented-and-pipeline.yaml @@ -152,6 +152,13 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.name + - name: GOMEMLIMIT + (value != null): true + resources: + limits: + (memory != null): true + requests: + (memory != null): true hostNetwork: true nodeSelector: kubernetes.io/os: linux diff --git a/tests/e2e/helm-chart/assert-instrumented-and-pipeline.yaml b/tests/e2e/helm-chart/assert-instrumented-and-pipeline.yaml index 6355275dba..8e96851cfd 100644 --- a/tests/e2e/helm-chart/assert-instrumented-and-pipeline.yaml +++ b/tests/e2e/helm-chart/assert-instrumented-and-pipeline.yaml @@ -152,6 +152,13 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.name + - name: GOMEMLIMIT + (value != null): true + resources: + requests: + (memory != null): true + limits: + (memory != null): true hostNetwork: true nodeSelector: kubernetes.io/os: linux diff --git a/tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml b/tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml index faef12fadb..f9c8f1113d 100644 --- a/tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml +++ b/tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml @@ -152,9 +152,16 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.name + - name: GOMEMLIMIT + (value != null): true hostNetwork: true nodeSelector: kubernetes.io/os: linux + resources: + requests: + (memory != null): true + limits: + (memory != null): true securityContext: {} serviceAccount: odigos-data-collection serviceAccountName: odigos-data-collection diff --git a/tests/e2e/workload-lifecycle/01-assert-pipeline.yaml b/tests/e2e/workload-lifecycle/01-assert-pipeline.yaml index e4ca446722..374984b7e8 100644 --- a/tests/e2e/workload-lifecycle/01-assert-pipeline.yaml +++ b/tests/e2e/workload-lifecycle/01-assert-pipeline.yaml @@ -152,6 +152,13 @@ spec: fieldRef: apiVersion: v1 fieldPath: metadata.name + - name: GOMEMLIMIT + (value 
!= null): true + resources: + requests: + (memory != null): true + limits: + (memory != null): true hostNetwork: true nodeSelector: kubernetes.io/os: linux From cda722530a4acb5cdd66aa244c16ecb48ebc3148 Mon Sep 17 00:00:00 2001 From: Amir Blum Date: Wed, 27 Nov 2024 09:25:08 +0200 Subject: [PATCH 7/9] test: fix assertion in tests --- .../multi-apps/assert-instrumented-and-pipeline.yaml | 10 +++++----- 1 file changed, 5 insertions(+), 5 deletions(-) diff --git a/tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml b/tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml index f9c8f1113d..c71432397b 100644 --- a/tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml +++ b/tests/e2e/multi-apps/assert-instrumented-and-pipeline.yaml @@ -154,14 +154,14 @@ spec: fieldPath: metadata.name - name: GOMEMLIMIT (value != null): true + resources: + requests: + (memory != null): true + limits: + (memory != null): true hostNetwork: true nodeSelector: kubernetes.io/os: linux - resources: - requests: - (memory != null): true - limits: - (memory != null): true securityContext: {} serviceAccount: odigos-data-collection serviceAccountName: odigos-data-collection From b2d0d76c953b55be986fc11a65090c9e12040325 Mon Sep 17 00:00:00 2001 From: Amir Blum Date: Fri, 29 Nov 2024 09:45:46 +0200 Subject: [PATCH 8/9] chore: make org configurable for make commands --- Makefile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Makefile b/Makefile index 87412c735f..ec22337b96 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,6 @@ TAG ?= $(shell odigos version --cluster) ODIGOS_CLI_VERSION ?= $(shell odigos version --cli) -ORG := keyval +ORG ?= keyval .PHONY: build-odiglet build-odiglet: From 323045a6ba0fc8e808c3e8e0b7de9db4c99aae06 Mon Sep 17 00:00:00 2001 From: Amir Blum Date: Mon, 2 Dec 2024 16:51:32 +0200 Subject: [PATCH 9/9] fix: make file push ui --- Makefile | 5 +++++ 1 file changed, 5 insertions(+) diff --git a/Makefile b/Makefile index ec22337b96..f53b2179ab 100644 
--- a/Makefile +++ b/Makefile @@ -62,6 +62,10 @@ push-scheduler: push-collector: docker buildx build --platform linux/amd64,linux/arm64/v8 --push -t $(ORG)/odigos-collector:$(TAG) collector -f collector/Dockerfile +.PHONY: push-ui +push-ui: + docker buildx build --platform linux/amd64,linux/arm64/v8 --push -t $(ORG)/odigos-ui:$(TAG) . -f frontend/Dockerfile + .PHONY: push-images push-images: make push-autoscaler TAG=$(TAG) @@ -69,6 +73,7 @@ push-images: make push-odiglet TAG=$(TAG) make push-instrumentor TAG=$(TAG) make push-collector TAG=$(TAG) + make push-ui TAG=$(TAG) .PHONY: load-to-kind-odiglet load-to-kind-odiglet: