k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/integration/volumescheduling/volume_capacity_priority_test.go (about) 1 /* 2 Copyright 2021 The Kubernetes Authors. 3 4 Licensed under the Apache License, Version 2.0 (the "License"); 5 you may not use this file except in compliance with the License. 6 You may obtain a copy of the License at 7 8 http://www.apache.org/licenses/LICENSE-2.0 9 10 Unless required by applicable law or agreed to in writing, software 11 distributed under the License is distributed on an "AS IS" BASIS, 12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 See the License for the specific language governing permissions and 14 limitations under the License. 15 */ 16 17 package volumescheduling 18 19 // This file tests the VolumeCapacityPriority feature. 20 21 import ( 22 "context" 23 "testing" 24 "time" 25 26 v1 "k8s.io/api/core/v1" 27 storagev1 "k8s.io/api/storage/v1" 28 "k8s.io/apimachinery/pkg/api/resource" 29 metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" 30 utilfeature "k8s.io/apiserver/pkg/util/feature" 31 featuregatetesting "k8s.io/component-base/featuregate/testing" 32 "k8s.io/klog/v2" 33 "k8s.io/kubernetes/pkg/features" 34 testutil "k8s.io/kubernetes/test/integration/util" 35 ) 36 37 var ( 38 waitSSDSC = makeStorageClass("ssd", &modeWait) 39 waitHDDSC = makeStorageClass("hdd", &modeWait) 40 ) 41 42 func mergeNodeLabels(node *v1.Node, labels map[string]string) *v1.Node { 43 for k, v := range labels { 44 node.Labels[k] = v 45 } 46 return node 47 } 48 49 func setupClusterForVolumeCapacityPriority(t *testing.T, nsName string, resyncPeriod time.Duration, provisionDelaySeconds int) *testConfig { 50 testCtx := testutil.InitTestSchedulerWithOptions(t, testutil.InitTestAPIServer(t, nsName, nil), resyncPeriod) 51 testutil.SyncSchedulerInformerFactory(testCtx) 52 go testCtx.Scheduler.Run(testCtx.Ctx) 53 54 clientset := testCtx.ClientSet 55 ns := testCtx.NS.Name 56 57 ctrl, informerFactory, err := initPVController(t, testCtx, provisionDelaySeconds) 58 if err != nil { 59 t.Fatalf("Failed to create PV controller: %v", err) 60 } 61 go ctrl.Run(testCtx.Ctx) 62 63 // Start informer factory after all controllers are configured and running. 64 informerFactory.Start(testCtx.Ctx.Done()) 65 informerFactory.WaitForCacheSync(testCtx.Ctx.Done()) 66 67 return &testConfig{ 68 client: clientset, 69 ns: ns, 70 stop: testCtx.Ctx.Done(), 71 teardown: func() { 72 klog.Infof("test cluster %q start to tear down", ns) 73 deleteTestObjects(clientset, ns, metav1.DeleteOptions{}) 74 }, 75 } 76 } 77 78 func TestVolumeCapacityPriority(t *testing.T) { 79 featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.VolumeCapacityPriority, true) 80 81 config := setupClusterForVolumeCapacityPriority(t, "volume-capacity-priority", 0, 0) 82 defer config.teardown() 83 84 tests := []struct { 85 name string 86 pod *v1.Pod 87 nodes []*v1.Node 88 pvs []*v1.PersistentVolume 89 pvcs []*v1.PersistentVolumeClaim 90 wantNodeName string 91 }{ 92 { 93 name: "local volumes with close capacity are preferred", 94 pod: makePod("pod", config.ns, []string{"data"}), 95 nodes: []*v1.Node{ 96 makeNode(0), 97 makeNode(1), 98 makeNode(2), 99 }, 100 pvs: []*v1.PersistentVolume{ 101 setPVNodeAffinity(setPVCapacity(makePV("pv-0", waitSSDSC.Name, "", config.ns, "node-0"), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-0"}}), 102 setPVNodeAffinity(setPVCapacity(makePV("pv-1", waitSSDSC.Name, "", config.ns, "node-0"), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-0"}}), 103 setPVNodeAffinity(setPVCapacity(makePV("pv-2", waitSSDSC.Name, "", config.ns, "node-1"), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-1"}}), 104 setPVNodeAffinity(setPVCapacity(makePV("pv-3", waitSSDSC.Name, "", config.ns, "node-1"), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-1"}}), 105 setPVNodeAffinity(setPVCapacity(makePV("pv-4", waitSSDSC.Name, "", config.ns, "node-2"), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-2"}}), 106 setPVNodeAffinity(setPVCapacity(makePV("pv-5", waitSSDSC.Name, "", config.ns, "node-2"), resource.MustParse("50Gi")), map[string][]string{v1.LabelHostname: {"node-2"}}), 107 }, 108 pvcs: []*v1.PersistentVolumeClaim{ 109 setPVCRequestStorage(makePVC("data", config.ns, &waitSSDSC.Name, ""), resource.MustParse("20Gi")), 110 }, 111 wantNodeName: "node-2", 112 }, 113 { 114 name: "local volumes with close capacity are preferred (multiple pvcs)", 115 pod: makePod("pod", config.ns, []string{"data-0", "data-1"}), 116 nodes: []*v1.Node{ 117 makeNode(0), 118 makeNode(1), 119 makeNode(2), 120 }, 121 pvs: []*v1.PersistentVolume{ 122 setPVNodeAffinity(setPVCapacity(makePV("pv-0", waitSSDSC.Name, "", config.ns, "node-0"), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-0"}}), 123 setPVNodeAffinity(setPVCapacity(makePV("pv-1", waitSSDSC.Name, "", config.ns, "node-0"), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-0"}}), 124 setPVNodeAffinity(setPVCapacity(makePV("pv-2", waitSSDSC.Name, "", config.ns, "node-1"), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-1"}}), 125 setPVNodeAffinity(setPVCapacity(makePV("pv-3", waitSSDSC.Name, "", config.ns, "node-1"), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-1"}}), 126 setPVNodeAffinity(setPVCapacity(makePV("pv-4", waitSSDSC.Name, "", config.ns, "node-2"), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-2"}}), 127 setPVNodeAffinity(setPVCapacity(makePV("pv-5", waitSSDSC.Name, "", config.ns, "node-2"), resource.MustParse("50Gi")), map[string][]string{v1.LabelHostname: {"node-2"}}), 128 }, 129 pvcs: []*v1.PersistentVolumeClaim{ 130 setPVCRequestStorage(makePVC("data-0", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")), 131 setPVCRequestStorage(makePVC("data-1", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")), 132 }, 133 wantNodeName: "node-1", 134 }, 135 { 136 name: "local volumes with close capacity are preferred (multiple pvcs, multiple classes)", 137 pod: makePod("pod", config.ns, []string{"data-0", "data-1"}), 138 nodes: []*v1.Node{ 139 makeNode(0), 140 makeNode(1), 141 makeNode(2), 142 }, 143 pvs: []*v1.PersistentVolume{ 144 setPVNodeAffinity(setPVCapacity(makePV("pv-0", waitSSDSC.Name, "", config.ns, "node-0"), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-0"}}), 145 setPVNodeAffinity(setPVCapacity(makePV("pv-1", waitHDDSC.Name, "", config.ns, "node-0"), resource.MustParse("200Gi")), map[string][]string{v1.LabelHostname: {"node-0"}}), 146 setPVNodeAffinity(setPVCapacity(makePV("pv-2", waitSSDSC.Name, "", config.ns, "node-1"), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-1"}}), 147 setPVNodeAffinity(setPVCapacity(makePV("pv-3", waitHDDSC.Name, "", config.ns, "node-1"), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-1"}}), 148 setPVNodeAffinity(setPVCapacity(makePV("pv-4", waitSSDSC.Name, "", config.ns, "node-2"), resource.MustParse("100Gi")), map[string][]string{v1.LabelHostname: {"node-2"}}), 149 setPVNodeAffinity(setPVCapacity(makePV("pv-5", waitHDDSC.Name, "", config.ns, "node-2"), resource.MustParse("50Gi")), map[string][]string{v1.LabelHostname: {"node-2"}}), 150 }, 151 pvcs: []*v1.PersistentVolumeClaim{ 152 setPVCRequestStorage(makePVC("data-0", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")), 153 setPVCRequestStorage(makePVC("data-1", config.ns, &waitHDDSC.Name, ""), resource.MustParse("80Gi")), 154 }, 155 wantNodeName: "node-1", 156 }, 157 { 158 name: "zonal volumes with close capacity are preferred (multiple pvcs, multiple classes)", 159 pod: makePod("pod", config.ns, []string{"data-0", "data-1"}), 160 nodes: []*v1.Node{ 161 mergeNodeLabels(makeNode(0), map[string]string{ 162 "topology.kubernetes.io/region": "region-a", 163 "topology.kubernetes.io/zone": "zone-a", 164 }), 165 mergeNodeLabels(makeNode(1), map[string]string{ 166 "topology.kubernetes.io/region": "region-b", 167 "topology.kubernetes.io/zone": "zone-b", 168 }), 169 mergeNodeLabels(makeNode(2), map[string]string{ 170 "topology.kubernetes.io/region": "region-c", 171 "topology.kubernetes.io/zone": "zone-c", 172 }), 173 }, 174 pvs: []*v1.PersistentVolume{ 175 setPVNodeAffinity(setPVCapacity(makePV("pv-0", waitSSDSC.Name, "", config.ns, ""), resource.MustParse("200Gi")), map[string][]string{ 176 "topology.kubernetes.io/region": {"region-a"}, 177 "topology.kubernetes.io/zone": {"zone-a"}, 178 }), 179 setPVNodeAffinity(setPVCapacity(makePV("pv-1", waitHDDSC.Name, "", config.ns, ""), resource.MustParse("200Gi")), map[string][]string{ 180 "topology.kubernetes.io/region": {"region-a"}, 181 "topology.kubernetes.io/zone": {"zone-a"}, 182 }), 183 setPVNodeAffinity(setPVCapacity(makePV("pv-2", waitSSDSC.Name, "", config.ns, ""), resource.MustParse("100Gi")), map[string][]string{ 184 "topology.kubernetes.io/region": {"region-b"}, 185 "topology.kubernetes.io/zone": {"zone-b"}, 186 }), 187 setPVNodeAffinity(setPVCapacity(makePV("pv-3", waitHDDSC.Name, "", config.ns, ""), resource.MustParse("100Gi")), map[string][]string{ 188 "topology.kubernetes.io/region": {"region-b"}, 189 "topology.kubernetes.io/zone": {"zone-b"}, 190 }), 191 setPVNodeAffinity(setPVCapacity(makePV("pv-4", waitSSDSC.Name, "", config.ns, ""), resource.MustParse("100Gi")), map[string][]string{ 192 "topology.kubernetes.io/region": {"region-c"}, 193 "topology.kubernetes.io/zone": {"zone-c"}, 194 }), 195 setPVNodeAffinity(setPVCapacity(makePV("pv-5", waitHDDSC.Name, "", config.ns, ""), resource.MustParse("50Gi")), map[string][]string{ 196 "topology.kubernetes.io/region": {"region-c"}, 197 "topology.kubernetes.io/zone": {"zone-c"}, 198 }), 199 }, 200 pvcs: []*v1.PersistentVolumeClaim{ 201 setPVCRequestStorage(makePVC("data-0", config.ns, &waitSSDSC.Name, ""), resource.MustParse("80Gi")), 202 setPVCRequestStorage(makePVC("data-1", config.ns, &waitHDDSC.Name, ""), resource.MustParse("80Gi")), 203 }, 204 wantNodeName: "node-1", 205 }, 206 } 207 208 c := config.client 209 210 t.Log("Creating StorageClasses") 211 classes := map[string]*storagev1.StorageClass{} 212 classes[waitSSDSC.Name] = waitSSDSC 213 classes[waitHDDSC.Name] = waitHDDSC 214 for _, sc := range classes { 215 if _, err := c.StorageV1().StorageClasses().Create(context.TODO(), sc, metav1.CreateOptions{}); err != nil { 216 t.Fatalf("failed to create StorageClass %q: %v", sc.Name, err) 217 } 218 } 219 220 for _, tt := range tests { 221 t.Run(tt.name, func(t *testing.T) { 222 t.Log("Creating Nodes") 223 for _, node := range tt.nodes { 224 if _, err := c.CoreV1().Nodes().Create(context.TODO(), node, metav1.CreateOptions{}); err != nil { 225 t.Fatalf("failed to create Node %q: %v", node.Name, err) 226 } 227 } 228 229 t.Log("Creating PVs") 230 for _, pv := range tt.pvs { 231 if _, err := c.CoreV1().PersistentVolumes().Create(context.TODO(), pv, metav1.CreateOptions{}); err != nil { 232 t.Fatalf("failed to create PersistentVolume %q: %v", pv.Name, err) 233 } 234 } 235 236 // https://github.com/kubernetes/kubernetes/issues/85320 237 t.Log("Waiting for PVs to become available to avoid race condition in PV controller") 238 for _, pv := range tt.pvs { 239 if err := waitForPVPhase(c, pv.Name, v1.VolumeAvailable); err != nil { 240 t.Fatalf("failed to wait for PersistentVolume %q to become available: %v", pv.Name, err) 241 } 242 } 243 244 t.Log("Creating PVCs") 245 for _, pvc := range tt.pvcs { 246 if _, err := c.CoreV1().PersistentVolumeClaims(config.ns).Create(context.TODO(), pvc, metav1.CreateOptions{}); err != nil { 247 t.Fatalf("failed to create PersistentVolumeClaim %q: %v", pvc.Name, err) 248 } 249 } 250 251 t.Log("Create Pod") 252 if _, err := c.CoreV1().Pods(config.ns).Create(context.TODO(), tt.pod, metav1.CreateOptions{}); err != nil { 253 t.Fatalf("failed to create Pod %q: %v", tt.pod.Name, err) 254 } 255 if err := waitForPodToSchedule(c, tt.pod); err != nil { 256 t.Errorf("failed to schedule Pod %q: %v", tt.pod.Name, err) 257 } 258 259 t.Log("Verify the assigned node") 260 pod, err := c.CoreV1().Pods(config.ns).Get(context.TODO(), tt.pod.Name, metav1.GetOptions{}) 261 if err != nil { 262 t.Fatalf("failed to get Pod %q: %v", tt.pod.Name, err) 263 } 264 if pod.Spec.NodeName != tt.wantNodeName { 265 t.Errorf("pod %s assigned node expects %q, got %q", pod.Name, tt.wantNodeName, pod.Spec.NodeName) 266 } 267 268 t.Log("Cleanup test objects") 269 c.CoreV1().Nodes().DeleteCollection(context.TODO(), deleteOption, metav1.ListOptions{}) 270 c.CoreV1().Pods(config.ns).DeleteCollection(context.TODO(), deleteOption, metav1.ListOptions{}) 271 c.CoreV1().PersistentVolumeClaims(config.ns).DeleteCollection(context.TODO(), deleteOption, metav1.ListOptions{}) 272 c.CoreV1().PersistentVolumes().DeleteCollection(context.TODO(), deleteOption, metav1.ListOptions{}) 273 }) 274 } 275 } 276 277 func setPVNodeAffinity(pv *v1.PersistentVolume, keyValues map[string][]string) *v1.PersistentVolume { 278 matchExpressions := make([]v1.NodeSelectorRequirement, 0) 279 for key, values := range keyValues { 280 matchExpressions = append(matchExpressions, v1.NodeSelectorRequirement{ 281 Key: key, 282 Operator: v1.NodeSelectorOpIn, 283 Values: values, 284 }) 285 } 286 pv.Spec.NodeAffinity = &v1.VolumeNodeAffinity{ 287 Required: &v1.NodeSelector{ 288 NodeSelectorTerms: []v1.NodeSelectorTerm{ 289 { 290 MatchExpressions: matchExpressions, 291 }, 292 }, 293 }, 294 } 295 return pv 296 } 297 298 func setPVCapacity(pv *v1.PersistentVolume, capacity resource.Quantity) *v1.PersistentVolume { 299 if pv.Spec.Capacity == nil { 300 pv.Spec.Capacity = make(v1.ResourceList) 301 } 302 pv.Spec.Capacity[v1.ResourceName(v1.ResourceStorage)] = capacity 303 return pv 304 } 305 306 func setPVCRequestStorage(pvc *v1.PersistentVolumeClaim, request resource.Quantity) *v1.PersistentVolumeClaim { 307 pvc.Spec.Resources = v1.VolumeResourceRequirements{ 308 Requests: v1.ResourceList{ 309 v1.ResourceName(v1.ResourceStorage): request, 310 }, 311 } 312 return pvc 313 }