github.com/verrazzano/verrazzano@v1.7.0/application-operator/mcagent/mcagent_metrics_test.go

// Copyright (c) 2022, Oracle and/or its affiliates.
// Licensed under the Universal Permissive License v 1.0 as shown at https://oss.oracle.com/licenses/upl.

package mcagent

import (
	"context"
	"testing"

	promoperapi "github.com/prometheus-operator/prometheus-operator/pkg/apis/monitoring/v1"
	"github.com/stretchr/testify/assert"
	"go.uber.org/zap"
	v12 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
	k8scheme "k8s.io/client-go/kubernetes/scheme"
	"sigs.k8s.io/controller-runtime/pkg/client"
	"sigs.k8s.io/controller-runtime/pkg/client/fake"
)

// TestSyncer_updatePrometheusMonitorsClusterName tests that updatePrometheusMonitorsClusterName
// updates the cluster name relabel config on ServiceMonitors and PodMonitors in the local cluster
// to the new managed cluster name, both when the name has changed and when it is unchanged.
func TestSyncer_updatePrometheusMonitorsClusterName(t *testing.T) {
	type fields struct {
		OldManagedClusterName string
		NewManagedClusterName string
	}
	tests := []struct {
		name   string
		fields fields
	}{
		{"managed cluster name changed", fields{OldManagedClusterName: "local", NewManagedClusterName: "mgdcluster1"}},
		{"managed cluster name unchanged", fields{OldManagedClusterName: "mgcluster", NewManagedClusterName: "mgcluster"}},
	}
	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			ns1 := "ns1"
			ns2 := "ns2"
			ns3 := "ns3"
			smWithOldClusterName := createTestServiceMonitor(true, tt.fields.OldManagedClusterName, "smold", ns1)
			smWithNewClusterName := createTestServiceMonitor(true, tt.fields.NewManagedClusterName, "smnew", ns2)
			smNoClusterName := createTestServiceMonitor(false, "", "smnone", ns3)
			pmWithOldClusterName := createTestPodMonitor(true, tt.fields.OldManagedClusterName, "pmold", ns1)
			pmWithNewClusterName := createTestPodMonitor(true, tt.fields.NewManagedClusterName, "pmnew", ns3)
			pmNoClusterName := createTestPodMonitor(false, "", "pmnone", ns2)

			scheme := runtime.NewScheme()
			_ = promoperapi.AddToScheme(scheme)
			mgdClient := fake.NewClientBuilder().WithScheme(scheme).WithObjects(
				smWithOldClusterName, smWithNewClusterName, smNoClusterName,
				pmWithOldClusterName, pmWithNewClusterName, pmNoClusterName).Build()
			adminClient := fake.NewClientBuilder().WithScheme(k8scheme.Scheme).Build()

			s := &Syncer{
				AdminClient:        adminClient,
				LocalClient:        mgdClient,
				Log:                zap.S().With(tt.name),
				ManagedClusterName: tt.fields.NewManagedClusterName,
				Context:            context.TODO(),
			}
			err := s.updatePrometheusMonitorsClusterName()
			assert.NoError(t, err)
			assertServiceMonitorLabel(t, mgdClient, smWithOldClusterName, tt.fields.NewManagedClusterName)
			assertServiceMonitorLabel(t, mgdClient, smWithNewClusterName, tt.fields.NewManagedClusterName)
			assertPodMonitorLabel(t, mgdClient, pmWithOldClusterName, tt.fields.NewManagedClusterName)
			assertPodMonitorLabel(t, mgdClient, pmWithNewClusterName, tt.fields.NewManagedClusterName)
		})
	}
}

// assertServiceMonitorLabel retrieves the ServiceMonitor and asserts that its cluster name
// relabel configs were updated to the new managed cluster name.
func assertServiceMonitorLabel(t *testing.T, client client.WithWatch, sm *promoperapi.ServiceMonitor, newClusterName string) {
	retrievedSM := promoperapi.ServiceMonitor{}
	err := client.Get(context.TODO(), types.NamespacedName{Namespace: sm.Namespace, Name: sm.Name}, &retrievedSM)
	assert.NoError(t, err)
	for i, ep := range retrievedSM.Spec.Endpoints {
		assertRCLabels(t, sm.Spec.Endpoints[i].RelabelConfigs, ep.RelabelConfigs, newClusterName)
	}
}

// assertPodMonitorLabel retrieves the PodMonitor and asserts that its cluster name
// relabel configs were updated to the new managed cluster name.
func assertPodMonitorLabel(t *testing.T, client client.WithWatch, pm *promoperapi.PodMonitor, newClusterName string) {
	retrievedPM := promoperapi.PodMonitor{}
	err := client.Get(context.TODO(), types.NamespacedName{Namespace: pm.Namespace, Name: pm.Name}, &retrievedPM)
	assert.NoError(t, err)
	assert.Equal(t, len(pm.Spec.PodMetricsEndpoints), len(retrievedPM.Spec.PodMetricsEndpoints))
	for i, ep := range retrievedPM.Spec.PodMetricsEndpoints {
		assertRCLabels(t, pm.Spec.PodMetricsEndpoints[i].RelabelConfigs, ep.RelabelConfigs, newClusterName)
	}
}

// assertRCLabels asserts that the number of relabel configs is unchanged and that any
// cluster name relabel config has the expected cluster name as its replacement value.
func assertRCLabels(t *testing.T, oldRCs []*promoperapi.RelabelConfig, newRCs []*promoperapi.RelabelConfig, clusterName string) {
	assert.Equal(t, len(oldRCs), len(newRCs))
	for _, rc := range newRCs {
		if rc.TargetLabel == prometheusClusterNameLabel {
			assert.Equal(t, clusterName, rc.Replacement)
		}
	}
}

// createTestServiceMonitor creates a ServiceMonitor with a single endpoint, optionally
// including a cluster name relabel config with the given cluster name.
func createTestServiceMonitor(hasClusterNameRelabelConfig bool, clusterName string, monitorName string, monitorNS string) *promoperapi.ServiceMonitor {
	relabelConfigs := []*promoperapi.RelabelConfig{}
	if hasClusterNameRelabelConfig {
		relabelConfigs = append(relabelConfigs, &promoperapi.RelabelConfig{TargetLabel: prometheusClusterNameLabel, Replacement: clusterName})
	}
	return &promoperapi.ServiceMonitor{
		ObjectMeta: v12.ObjectMeta{Name: monitorName, Namespace: monitorNS},
		Spec: promoperapi.ServiceMonitorSpec{
			Endpoints: []promoperapi.Endpoint{
				{RelabelConfigs: relabelConfigs},
			},
		}}
}

// createTestPodMonitor creates a PodMonitor with a single pod metrics endpoint, optionally
// including a cluster name relabel config with the given cluster name.
func createTestPodMonitor(hasClusterNameRelabelConfig bool, clusterName string, monitorName string, monitorNS string) *promoperapi.PodMonitor {
	relabelConfigs := []*promoperapi.RelabelConfig{}
	if hasClusterNameRelabelConfig {
		relabelConfigs = append(relabelConfigs, &promoperapi.RelabelConfig{TargetLabel: prometheusClusterNameLabel, Replacement: clusterName})
	}
	return &promoperapi.PodMonitor{
		ObjectMeta: v12.ObjectMeta{Name: monitorName, Namespace: monitorNS},
		Spec: promoperapi.PodMonitorSpec{
			PodMetricsEndpoints: []promoperapi.PodMetricsEndpoint{
				{RelabelConfigs: relabelConfigs},
			},
		}}
}