k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e_node/memory_manager_metrics_test.go

//go:build linux

/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2enode

import (
	"context"
	"time"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	"github.com/onsi/gomega/gstruct"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	"k8s.io/kubernetes/test/e2e/feature"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	admissionapi "k8s.io/pod-security-admission/api"
)

var _ = SIGDescribe("Memory Manager Metrics", framework.WithSerial(), feature.MemoryManager, func() {
	f := framework.NewDefaultFramework("memorymanager-metrics")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	ginkgo.Context("when querying /metrics", func() {
		var (
			testPod *v1.Pod
			// oldCfg lives at Context scope so it is fetched only once,
			// on the first BeforeEach, and stays available for DeferCleanup
			// to restore the original kubelet config after every spec.
			oldCfg *kubeletconfig.KubeletConfiguration
		)

		ginkgo.BeforeEach(func(ctx context.Context) {
			var err error
			if oldCfg == nil {
				oldCfg, err = getCurrentKubeletConfig(ctx)
				framework.ExpectNoError(err)
			}

			newCfg := oldCfg.DeepCopy()
			updateKubeletConfigWithMemoryManagerParams(newCfg,
				&memoryManagerKubeletParams{
					policy: staticPolicy,
					systemReservedMemory: []kubeletconfig.MemoryReservation{
						{
							NumaNode: 0,
							Limits: v1.ResourceList{
								resourceMemory: resource.MustParse("1100Mi"),
							},
						},
					},
					systemReserved: map[string]string{resourceMemory: "500Mi"},
					kubeReserved:   map[string]string{resourceMemory: "500Mi"},
					evictionHard:   map[string]string{evictionHardMemory: "100Mi"},
				},
			)
			updateKubeletConfig(ctx, f, newCfg, true)
			ginkgo.DeferCleanup(func(ctx context.Context) {
				if testPod != nil {
					deletePodSyncByName(ctx, f, testPod.Name)
				}
				updateKubeletConfig(ctx, f, oldCfg, true)
			})
		})

		ginkgo.It("should report zero pinning counters after a fresh restart", func(ctx context.Context) {
			// we updated the kubelet config in BeforeEach, so we can assume we start fresh.
			// being [Serial], we can also assume no one else but us is running pods.
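			// a note on the matcher helpers used below, which this file shares
			// with the other e2e_node metrics tests: getKubeletMetrics scrapes
			// the kubelet's /metrics endpoint, timelessSample matches a sample's
			// value while ignoring its timestamp, and nodeID is the element
			// identifier (every sample maps to the "" key, which is why each
			// counter is matched through its "" element).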
			ginkgo.By("Checking the memorymanager metrics right after the kubelet restart, with no pods running")

			matchResourceMetrics := gstruct.MatchKeys(gstruct.IgnoreExtras, gstruct.Keys{
				"kubelet_memory_manager_pinning_requests_total": gstruct.MatchAllElements(nodeID, gstruct.Elements{
					"": timelessSample(0),
				}),
				"kubelet_memory_manager_pinning_errors_total": gstruct.MatchAllElements(nodeID, gstruct.Elements{
					"": timelessSample(0),
				}),
			})

			ginkgo.By("Giving the Kubelet time to start up and produce metrics")
			gomega.Eventually(getKubeletMetrics, 1*time.Minute, 15*time.Second).WithContext(ctx).Should(matchResourceMetrics)
			ginkgo.By("Ensuring the metrics match the expectations a few more times")
			gomega.Consistently(getKubeletMetrics, 1*time.Minute, 15*time.Second).WithContext(ctx).Should(matchResourceMetrics)
		})

		ginkgo.It("should report pinning failures when the memorymanager allocation is known to fail", func(ctx context.Context) {
			ginkgo.By("Creating the test pod, which will be rejected because its memory request is too big")
			testPod = e2epod.NewPodClient(f).Create(ctx, makeMemoryManagerPod("memmngrpod", nil,
				[]memoryManagerCtnAttributes{
					{
						ctnName: "memmngrcnt",
						cpus:    "100m",
						memory:  "1000Gi",
					},
				}))

			// we updated the kubelet config in BeforeEach, so we can assume we start fresh.
			// being [Serial], we can also assume no one else but us is running pods.
			ginkgo.By("Checking the memorymanager metrics right after the kubelet restart, with a pod that failed admission")

			matchResourceMetrics := gstruct.MatchKeys(gstruct.IgnoreExtras, gstruct.Keys{
				"kubelet_memory_manager_pinning_requests_total": gstruct.MatchAllElements(nodeID, gstruct.Elements{
					"": timelessSample(1),
				}),
				"kubelet_memory_manager_pinning_errors_total": gstruct.MatchAllElements(nodeID, gstruct.Elements{
					"": timelessSample(1),
				}),
			})

			ginkgo.By("Giving the Kubelet time to start up and produce metrics")
			gomega.Eventually(getKubeletMetrics, 1*time.Minute, 15*time.Second).WithContext(ctx).Should(matchResourceMetrics)
			ginkgo.By("Ensuring the metrics match the expectations a few more times")
			gomega.Consistently(getKubeletMetrics, 1*time.Minute, 15*time.Second).WithContext(ctx).Should(matchResourceMetrics)
		})

		ginkgo.It("should not report any pinning failures when the memorymanager allocation is expected to succeed", func(ctx context.Context) {
			ginkgo.By("Creating the test pod")
			testPod = e2epod.NewPodClient(f).Create(ctx, makeMemoryManagerPod("memmngrpod", nil,
				[]memoryManagerCtnAttributes{
					{
						ctnName: "memmngrcnt",
						cpus:    "100m",
						memory:  "64Mi",
					},
				}))

			// we updated the kubelet config in BeforeEach, so we can assume we start fresh.
			// being [Serial], we can also assume no one else but us is running pods.
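			// mirror of the failure case above: admitting the pod bumps
			// pinning_requests_total to 1, and since the 64Mi allocation is
			// expected to succeed, pinning_errors_total must stay at 0.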
			ginkgo.By("Checking the memorymanager metrics right after the kubelet restart, with a pod that should be admitted")
			matchResourceMetrics := gstruct.MatchKeys(gstruct.IgnoreExtras, gstruct.Keys{
				"kubelet_memory_manager_pinning_requests_total": gstruct.MatchAllElements(nodeID, gstruct.Elements{
					"": timelessSample(1),
				}),
				"kubelet_memory_manager_pinning_errors_total": gstruct.MatchAllElements(nodeID, gstruct.Elements{
					"": timelessSample(0),
				}),
			})

			ginkgo.By("Giving the Kubelet time to start up and produce metrics")
			gomega.Eventually(getKubeletMetrics, 1*time.Minute, 15*time.Second).WithContext(ctx).Should(matchResourceMetrics)
			ginkgo.By("Ensuring the metrics match the expectations a few more times")
			gomega.Consistently(getKubeletMetrics, 1*time.Minute, 15*time.Second).WithContext(ctx).Should(matchResourceMetrics)
		})
	})
})
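// a sketch of how one might run just this suite locally, assuming a standard
// e2e_node development setup (the exact invocation varies by environment;
// FOCUS is a regexp over spec names):
//
//	make test-e2e-node FOCUS="Memory Manager Metrics"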