k8s.io/kubernetes@v1.29.3/pkg/kubelet/preemption/preemption_test.go

/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package preemption

import (
	"fmt"
	"testing"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/tools/record"
	kubeapi "k8s.io/kubernetes/pkg/apis/core"
	"k8s.io/kubernetes/pkg/apis/scheduling"
)

const (
	clusterCritical       = "cluster-critical"
	nodeCritical          = "node-critical"
	bestEffort            = "bestEffort"
	burstable             = "burstable"
	highRequestBurstable  = "high-request-burstable"
	guaranteed            = "guaranteed"
	highRequestGuaranteed = "high-request-guaranteed"
	tinyBurstable         = "tiny"
	maxPods               = 110
)

// fakePodKiller records every pod it is asked to kill so tests can assert
// which pods were evicted; when errDuringPodKilling is set it fails instead.
type fakePodKiller struct {
	killedPods          []*v1.Pod
	errDuringPodKilling bool
}

func newFakePodKiller(errPodKilling bool) *fakePodKiller {
	return &fakePodKiller{killedPods: []*v1.Pod{}, errDuringPodKilling: errPodKilling}
}

func (f *fakePodKiller) clear() {
	f.killedPods = []*v1.Pod{}
}

func (f *fakePodKiller) getKilledPods() []*v1.Pod {
	return f.killedPods
}

func (f *fakePodKiller) killPodNow(pod *v1.Pod, evict bool, gracePeriodOverride *int64, fn func(status *v1.PodStatus)) error {
	if f.errDuringPodKilling {
		f.killedPods = []*v1.Pod{}
		return fmt.Errorf("problem killing pod %v", pod)
	}
	f.killedPods = append(f.killedPods, pod)
	return nil
}

// fakePodProvider serves a fixed list of pods, standing in for the kubelet's
// active-pods lookup.
type fakePodProvider struct {
	pods []*v1.Pod
}

func newFakePodProvider() *fakePodProvider {
	return &fakePodProvider{pods: []*v1.Pod{}}
}

func (f *fakePodProvider) setPods(pods []*v1.Pod) {
	f.pods = pods
}

func (f *fakePodProvider) getPods() []*v1.Pod {
	return f.pods
}

// getTestCriticalPodAdmissionHandler wires the fakes into a
// CriticalPodAdmissionHandler with a fake event recorder.
func getTestCriticalPodAdmissionHandler(podProvider *fakePodProvider, podKiller *fakePodKiller) *CriticalPodAdmissionHandler {
	return &CriticalPodAdmissionHandler{
		getPodsFunc: podProvider.getPods,
		killPodFunc: podKiller.killPodNow,
		recorder:    &record.FakeRecorder{},
	}
}

func TestEvictPodsToFreeRequestsWithError(t *testing.T) {
	type testRun struct {
		testName              string
		inputPods             []*v1.Pod
		insufficientResources admissionRequirementList
		expectErr             bool
		expectedOutput        []*v1.Pod
	}
	podProvider := newFakePodProvider()
	podKiller := newFakePodKiller(true)
	criticalPodAdmissionHandler := getTestCriticalPodAdmissionHandler(podProvider, podKiller)
	allPods := getTestPods()
	runs := []testRun{
		{
			testName: "multiple pods eviction error",
			inputPods: []*v1.Pod{
				allPods[clusterCritical], allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable],
				allPods[guaranteed], allPods[highRequestGuaranteed]},
			insufficientResources: getAdmissionRequirementList(0, 550, 0),
			expectErr:             false,
			expectedOutput:        nil,
		},
	}
	for _, r := range runs {
		podProvider.setPods(r.inputPods)
		outErr := criticalPodAdmissionHandler.evictPodsToFreeRequests(allPods[clusterCritical], r.insufficientResources)
		outputPods := podKiller.getKilledPods()
		if !r.expectErr && outErr != nil {
			t.Errorf("evictPodsToFreeRequests returned an unexpected error during the %s test. Err: %v", r.testName, outErr)
		} else if r.expectErr && outErr == nil {
			t.Errorf("evictPodsToFreeRequests expected an error but returned a successful output=%v during the %s test.", outputPods, r.testName)
		} else if !podListEqual(r.expectedOutput, outputPods) {
			t.Errorf("evictPodsToFreeRequests expected %v but got %v during the %s test.", r.expectedOutput, outputPods, r.testName)
		}
		podKiller.clear()
	}
}

func TestEvictPodsToFreeRequests(t *testing.T) {
	type testRun struct {
		testName              string
		inputPods             []*v1.Pod
		insufficientResources admissionRequirementList
		expectErr             bool
		expectedOutput        []*v1.Pod
	}
	podProvider := newFakePodProvider()
	podKiller := newFakePodKiller(false)
	criticalPodAdmissionHandler := getTestCriticalPodAdmissionHandler(podProvider, podKiller)
	allPods := getTestPods()
	runs := []testRun{
		{
			testName:              "critical pods cannot be preempted",
			inputPods:             []*v1.Pod{allPods[clusterCritical]},
			insufficientResources: getAdmissionRequirementList(0, 0, 1),
			expectErr:             true,
			expectedOutput:        nil,
		},
		{
			testName:              "best effort pods are not preempted when attempting to free resources",
			inputPods:             []*v1.Pod{allPods[bestEffort]},
			insufficientResources: getAdmissionRequirementList(0, 1, 0),
			expectErr:             true,
			expectedOutput:        nil,
		},
		{
			testName: "multiple pods evicted",
			inputPods: []*v1.Pod{
				allPods[clusterCritical], allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable],
				allPods[guaranteed], allPods[highRequestGuaranteed]},
			insufficientResources: getAdmissionRequirementList(0, 550, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[highRequestBurstable], allPods[highRequestGuaranteed]},
		},
	}
	for _, r := range runs {
		podProvider.setPods(r.inputPods)
		outErr := criticalPodAdmissionHandler.evictPodsToFreeRequests(allPods[clusterCritical], r.insufficientResources)
		outputPods := podKiller.getKilledPods()
		if !r.expectErr && outErr != nil {
			t.Errorf("evictPodsToFreeRequests returned an unexpected error during the %s test. Err: %v", r.testName, outErr)
		} else if r.expectErr && outErr == nil {
			t.Errorf("evictPodsToFreeRequests expected an error but returned a successful output=%v during the %s test.", outputPods, r.testName)
		} else if !podListEqual(r.expectedOutput, outputPods) {
			t.Errorf("evictPodsToFreeRequests expected %v but got %v during the %s test.", r.expectedOutput, outputPods, r.testName)
		}
		podKiller.clear()
	}
}

func BenchmarkGetPodsToPreempt(t *testing.B) {
	allPods := getTestPods()
	inputPods := []*v1.Pod{}
	for i := 0; i < maxPods; i++ {
		inputPods = append(inputPods, allPods[tinyBurstable])
	}
	for n := 0; n < t.N; n++ {
		getPodsToPreempt(allPods[bestEffort], inputPods, admissionRequirementList([]*admissionRequirement{
			{
				resourceName: v1.ResourceCPU,
				quantity:     parseCPUToInt64("110m"),
			}}))
	}
}

func TestGetPodsToPreempt(t *testing.T) {
	type testRun struct {
		testName              string
		preemptor             *v1.Pod
		inputPods             []*v1.Pod
		insufficientResources admissionRequirementList
		expectErr             bool
		expectedOutput        []*v1.Pod
	}
	allPods := getTestPods()
	runs := []testRun{
		{
			testName:              "no requirements",
			preemptor:             allPods[clusterCritical],
			inputPods:             []*v1.Pod{},
			insufficientResources: getAdmissionRequirementList(0, 0, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{},
		},
		{
			testName:              "no pods",
			preemptor:             allPods[clusterCritical],
			inputPods:             []*v1.Pod{},
			insufficientResources: getAdmissionRequirementList(0, 0, 1),
			expectErr:             true,
			expectedOutput:        nil,
		},
		{
			testName:              "equal pods and resources requirements",
			preemptor:             allPods[clusterCritical],
			inputPods:             []*v1.Pod{allPods[burstable]},
			insufficientResources: getAdmissionRequirementList(100, 100, 1),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[burstable]},
		},
		{
			testName:              "higher requirements than pod requests",
			preemptor:             allPods[clusterCritical],
			inputPods:             []*v1.Pod{allPods[burstable]},
			insufficientResources: getAdmissionRequirementList(200, 200, 2),
			expectErr:             true,
			expectedOutput:        nil,
		},
		{
			testName:              "choose between bestEffort and burstable",
			preemptor:             allPods[clusterCritical],
			inputPods:             []*v1.Pod{allPods[burstable], allPods[bestEffort]},
			insufficientResources: getAdmissionRequirementList(0, 0, 1),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[bestEffort]},
		},
		{
			testName:              "choose between burstable and guaranteed",
			preemptor:             allPods[clusterCritical],
			inputPods:             []*v1.Pod{allPods[burstable], allPods[guaranteed]},
			insufficientResources: getAdmissionRequirementList(0, 0, 1),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[burstable]},
		},
		{
			testName:              "choose lower request burstable if it meets requirements",
			preemptor:             allPods[clusterCritical],
			inputPods:             []*v1.Pod{allPods[bestEffort], allPods[highRequestBurstable], allPods[burstable]},
			insufficientResources: getAdmissionRequirementList(100, 100, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[burstable]},
		},
		{
			testName:              "choose higher request burstable if lower does not meet requirements",
			preemptor:             allPods[clusterCritical],
			inputPods:             []*v1.Pod{allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable]},
			insufficientResources: getAdmissionRequirementList(150, 150, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[highRequestBurstable]},
		},
		{
			testName:              "multiple pods required",
			preemptor:             allPods[clusterCritical],
			inputPods:             []*v1.Pod{allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable], allPods[guaranteed], allPods[highRequestGuaranteed]},
			insufficientResources: getAdmissionRequirementList(350, 350, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[burstable], allPods[highRequestBurstable]},
		},
		{
			testName:              "evict guaranteed when we have to, and dont evict the extra burstable",
			preemptor:             allPods[clusterCritical],
			inputPods:             []*v1.Pod{allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable], allPods[guaranteed], allPods[highRequestGuaranteed]},
			insufficientResources: getAdmissionRequirementList(0, 550, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[highRequestBurstable], allPods[highRequestGuaranteed]},
		},
		{
			testName:              "evict cluster critical pod for node critical pod",
			preemptor:             allPods[nodeCritical],
			inputPods:             []*v1.Pod{allPods[clusterCritical]},
			insufficientResources: getAdmissionRequirementList(100, 0, 0),
			expectErr:             false,
			expectedOutput:        []*v1.Pod{allPods[clusterCritical]},
		},
		{
			testName:              "can not evict node critical pod for cluster critical pod",
			preemptor:             allPods[clusterCritical],
			inputPods:             []*v1.Pod{allPods[nodeCritical]},
			insufficientResources: getAdmissionRequirementList(100, 0, 0),
			expectErr:             true,
			expectedOutput:        nil,
		},
	}

	for _, r := range runs {
		outputPods, outErr := getPodsToPreempt(r.preemptor, r.inputPods, r.insufficientResources)
		if !r.expectErr && outErr != nil {
			t.Errorf("getPodsToPreempt returned an unexpected error during the %s test. Err: %v", r.testName, outErr)
		} else if r.expectErr && outErr == nil {
			t.Errorf("getPodsToPreempt expected an error but returned a successful output=%v during the %s test.", outputPods, r.testName)
		} else if !podListEqual(r.expectedOutput, outputPods) {
			t.Errorf("getPodsToPreempt expected %v but got %v during the %s test.", r.expectedOutput, outputPods, r.testName)
		}
	}
}

func TestAdmissionRequirementsDistance(t *testing.T) {
	type testRun struct {
		testName       string
		requirements   admissionRequirementList
		inputPod       *v1.Pod
		expectedOutput float64
	}
	allPods := getTestPods()
	runs := []testRun{
		{
			testName:       "no requirements",
			requirements:   getAdmissionRequirementList(0, 0, 0),
			inputPod:       allPods[burstable],
			expectedOutput: 0,
		},
		{
			testName:       "no requests, some requirements",
			requirements:   getAdmissionRequirementList(100, 100, 1),
			inputPod:       allPods[bestEffort],
			expectedOutput: 2,
		},
		{
			testName:       "equal requests and requirements",
			requirements:   getAdmissionRequirementList(100, 100, 1),
			inputPod:       allPods[burstable],
			expectedOutput: 0,
		},
		{
			testName:       "higher requests than requirements",
			requirements:   getAdmissionRequirementList(50, 50, 0),
			inputPod:       allPods[burstable],
			expectedOutput: 0,
		},
	}
	for _, run := range runs {
		output := run.requirements.distance(run.inputPod)
		if output != run.expectedOutput {
			t.Errorf("expected: %f, got: %f for %s test", run.expectedOutput, output, run.testName)
		}
	}
}

func TestAdmissionRequirementsSubtract(t *testing.T) {
	type testRun struct {
		testName       string
		initial        admissionRequirementList
		inputPod       *v1.Pod
		expectedOutput admissionRequirementList
	}
	allPods := getTestPods()
	runs := []testRun{
		{
			testName:       "subtract a pod from no requirements",
			initial:        getAdmissionRequirementList(0, 0, 0),
			inputPod:       allPods[burstable],
			expectedOutput: getAdmissionRequirementList(0, 0, 0),
		},
		{
			testName:       "subtract no requests from some requirements",
			initial:        getAdmissionRequirementList(100, 100, 1),
			inputPod:       allPods[bestEffort],
			expectedOutput: getAdmissionRequirementList(100, 100, 0),
		},
		{
			testName:       "equal requests and requirements",
			initial:        getAdmissionRequirementList(100, 100, 1),
			inputPod:       allPods[burstable],
			expectedOutput: getAdmissionRequirementList(0, 0, 0),
		},
		{
			testName:       "subtract higher requests than requirements",
			initial:        getAdmissionRequirementList(50, 50, 0),
			inputPod:       allPods[burstable],
			expectedOutput: getAdmissionRequirementList(0, 0, 0),
		},
		{
			testName:       "subtract lower requests than requirements",
			initial:        getAdmissionRequirementList(200, 200, 1),
			inputPod:       allPods[burstable],
			expectedOutput: getAdmissionRequirementList(100, 100, 0),
		},
	}
	for _, run := range runs {
		output := run.initial.subtract(run.inputPod)
		if !admissionRequirementListEqual(output, run.expectedOutput) {
			t.Errorf("expected: %s, got: %s for %s test", run.expectedOutput.toString(), output.toString(), run.testName)
		}
	}
}

// getTestPods builds the fixture pods used throughout these tests, keyed by
// the name constants declared above.
func getTestPods() map[string]*v1.Pod {
	allPods := map[string]*v1.Pod{
		tinyBurstable: getPodWithResources(tinyBurstable, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("1m"),
				v1.ResourceMemory: resource.MustParse("1Mi"),
			},
		}),
		bestEffort: getPodWithResources(bestEffort, v1.ResourceRequirements{}),
		clusterCritical: getPodWithResources(clusterCritical, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("100Mi"),
			},
		}),
		nodeCritical: getPodWithResources(nodeCritical, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("100Mi"),
			},
		}),
		burstable: getPodWithResources(burstable, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("100Mi"),
			},
		}),
		guaranteed: getPodWithResources(guaranteed, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("100Mi"),
			},
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("100m"),
				v1.ResourceMemory: resource.MustParse("100Mi"),
			},
		}),
		highRequestBurstable: getPodWithResources(highRequestBurstable, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("300m"),
				v1.ResourceMemory: resource.MustParse("300Mi"),
			},
		}),
		highRequestGuaranteed: getPodWithResources(highRequestGuaranteed, v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("300m"),
				v1.ResourceMemory: resource.MustParse("300Mi"),
			},
			Limits: v1.ResourceList{
				v1.ResourceCPU:    resource.MustParse("300m"),
				v1.ResourceMemory: resource.MustParse("300Mi"),
			},
		}),
	}
	allPods[clusterCritical].Namespace = kubeapi.NamespaceSystem
	allPods[clusterCritical].Spec.PriorityClassName = scheduling.SystemClusterCritical
	clusterPriority := scheduling.SystemCriticalPriority
	allPods[clusterCritical].Spec.Priority = &clusterPriority

	allPods[nodeCritical].Namespace = kubeapi.NamespaceSystem
	allPods[nodeCritical].Spec.PriorityClassName = scheduling.SystemNodeCritical
	nodePriority := scheduling.SystemCriticalPriority + 100
	allPods[nodeCritical].Spec.Priority = &nodePriority

	return allPods
}

func getPodWithResources(name string, requests v1.ResourceRequirements) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			GenerateName: name,
			Annotations:  map[string]string{},
		},
		Spec: v1.PodSpec{
			Containers: []v1.Container{
				{
					Name:      fmt.Sprintf("%s-container", name),
					Resources: requests,
				},
			},
		},
	}
}

func parseCPUToInt64(res string) int64 {
	r := resource.MustParse(res)
	return (&r).MilliValue()
}

func parseNonCPUResourceToInt64(res string) int64 {
	r := resource.MustParse(res)
	return (&r).Value()
}

// getAdmissionRequirementList builds an admissionRequirementList from CPU
// (millicores), memory (Mi), and pod-count shortfalls; zero values are omitted.
func getAdmissionRequirementList(cpu, memory, pods int) admissionRequirementList {
	reqs := []*admissionRequirement{}
	if cpu > 0 {
		reqs = append(reqs, &admissionRequirement{
			resourceName: v1.ResourceCPU,
			quantity:     parseCPUToInt64(fmt.Sprintf("%dm", cpu)),
		})
	}
	if memory > 0 {
		reqs = append(reqs, &admissionRequirement{
			resourceName: v1.ResourceMemory,
			quantity:     parseNonCPUResourceToInt64(fmt.Sprintf("%dMi", memory)),
		})
	}
	if pods > 0 {
		reqs = append(reqs, &admissionRequirement{
			resourceName: v1.ResourcePods,
			quantity:     int64(pods),
		})
	}
	return admissionRequirementList(reqs)
}

// admissionRequirementListEqual checks whether the two lists contain the same
// requirements, matched by resource name and quantity.
// This is not correct if a list contains duplicate entries; for example,
// admissionRequirementListEqual([a, a, b], [a, b, b]) will return true.
func admissionRequirementListEqual(list1 admissionRequirementList, list2 admissionRequirementList) bool {
	if len(list1) != len(list2) {
		return false
	}
	for _, a := range list1 {
		contains := false
		for _, b := range list2 {
			if a.resourceName == b.resourceName && a.quantity == b.quantity {
				contains = true
			}
		}
		if !contains {
			return false
		}
	}
	return true
}

// podListEqual checks if the lists' contents contain all of the same elements.
func podListEqual(list1 []*v1.Pod, list2 []*v1.Pod) bool {
	if len(list1) != len(list2) {
		return false
	}

	m := map[*v1.Pod]int{}
	for _, val := range list1 {
		m[val] = m[val] + 1
	}
	for _, val := range list2 {
		m[val] = m[val] - 1
	}
	for _, v := range m {
		if v != 0 {
			return false
		}
	}
	return true
}
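
// Illustrative sketch (editor-added, not part of the upstream file): a compact
// example of how the fixtures above compose. It reuses only helpers defined in
// this file and package (getTestPods, getAdmissionRequirementList,
// getPodsToPreempt, podListEqual); the test name is hypothetical, and the
// expected victim follows the same pattern as the "choose higher request
// burstable if lower does not meet requirements" case above.
func TestGetPodsToPreemptSketch(t *testing.T) {
	allPods := getTestPods()
	// A 200Mi memory shortfall: the 100Mi-request burstable pod alone cannot
	// cover it, so the 300Mi high-request burstable pod is expected to be
	// chosen, and no guaranteed pod should be touched.
	shortfall := getAdmissionRequirementList(0, 200, 0)
	victims, err := getPodsToPreempt(allPods[clusterCritical], []*v1.Pod{
		allPods[bestEffort], allPods[burstable], allPods[highRequestBurstable], allPods[guaranteed],
	}, shortfall)
	if err != nil {
		t.Fatalf("unexpected error from getPodsToPreempt: %v", err)
	}
	if !podListEqual(victims, []*v1.Pod{allPods[highRequestBurstable]}) {
		t.Errorf("expected only the high-request burstable pod, got %v", victims)
	}
}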