k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/pkg/scheduler/framework/plugins/nodevolumelimits/csi_test.go

/*
Copyright 2019 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package nodevolumelimits

import (
	"errors"
	"fmt"
	"reflect"
	"strings"
	"testing"

	"github.com/google/go-cmp/cmp"
	v1 "k8s.io/api/core/v1"
	storagev1 "k8s.io/api/storage/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/rand"
	"k8s.io/apimachinery/pkg/util/sets"
	csitrans "k8s.io/csi-translation-lib"
	csilibplugins "k8s.io/csi-translation-lib/plugins"
	"k8s.io/kubernetes/pkg/scheduler/framework"
	st "k8s.io/kubernetes/pkg/scheduler/testing"
	tf "k8s.io/kubernetes/pkg/scheduler/testing/framework"
	volumeutil "k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/test/utils/ktesting"
	"k8s.io/utils/ptr"
)

const (
	ebsCSIDriverName = csilibplugins.AWSEBSDriverName
	gceCSIDriverName = csilibplugins.GCEPDDriverName

	hostpathInTreePluginName = "kubernetes.io/hostpath"
)

var (
	scName = "csi-sc"
)

// getVolumeLimitKey returns a ResourceName by filter type
func getVolumeLimitKey(filterType string) v1.ResourceName {
	switch filterType {
	case ebsVolumeFilterType:
		return v1.ResourceName(volumeutil.EBSVolumeLimitKey)
	case gcePDVolumeFilterType:
		return v1.ResourceName(volumeutil.GCEVolumeLimitKey)
	case azureDiskVolumeFilterType:
		return v1.ResourceName(volumeutil.AzureVolumeLimitKey)
	case cinderVolumeFilterType:
		return v1.ResourceName(volumeutil.CinderVolumeLimitKey)
	default:
		return v1.ResourceName(volumeutil.GetCSIAttachLimitKey(filterType))
	}
}

func TestCSILimits(t *testing.T) {
	runningPod := st.MakePod().PVC("csi-ebs.csi.aws.com-3").Obj()
	pendingVolumePod := st.MakePod().PVC("csi-4").Obj()

	// Different pod than pendingVolumePod, but using the same unbound PVC
	unboundPVCPod2 := st.MakePod().PVC("csi-4").Obj()

	missingPVPod := st.MakePod().PVC("csi-6").Obj()
	noSCPVCPod := st.MakePod().PVC("csi-5").Obj()

	gceTwoVolPod := st.MakePod().PVC("csi-pd.csi.storage.gke.io-1").PVC("csi-pd.csi.storage.gke.io-2").Obj()

	// In-tree volumes
	inTreeOneVolPod := st.MakePod().PVC("csi-kubernetes.io/aws-ebs-0").Obj()
	inTreeTwoVolPod := st.MakePod().PVC("csi-kubernetes.io/aws-ebs-1").PVC("csi-kubernetes.io/aws-ebs-2").Obj()

	// pods with matching csi driver names
	csiEBSOneVolPod := st.MakePod().PVC("csi-ebs.csi.aws.com-0").Obj()
	csiEBSTwoVolPod := st.MakePod().PVC("csi-ebs.csi.aws.com-1").PVC("csi-ebs.csi.aws.com-2").Obj()

	inTreeNonMigratableOneVolPod := st.MakePod().PVC("csi-kubernetes.io/hostpath-0").Obj()

	ephemeralVolumePod := st.MakePod().Name("abc").Namespace("test").UID("12345").Volume(
		v1.Volume{
			Name: "xyz",
			VolumeSource: v1.VolumeSource{
				Ephemeral: &v1.EphemeralVolumeSource{},
			},
		}).Obj()

	controller := true
	ephemeralClaim := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ephemeralVolumePod.Namespace,
			Name:      ephemeralVolumePod.Name + "-" + ephemeralVolumePod.Spec.Volumes[0].Name,
			OwnerReferences: []metav1.OwnerReference{
				{
					Kind:       "Pod",
					Name:       ephemeralVolumePod.Name,
					UID:        ephemeralVolumePod.UID,
					Controller: &controller,
				},
			},
		},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &scName,
		},
	}
	conflictingClaim := ephemeralClaim.DeepCopy()
	conflictingClaim.OwnerReferences = nil

	ephemeralTwoVolumePod := st.MakePod().Name("abc").Namespace("test").UID("12345II").Volume(v1.Volume{
		Name: "x",
		VolumeSource: v1.VolumeSource{
			Ephemeral: &v1.EphemeralVolumeSource{},
		},
	}).Volume(v1.Volume{
		Name: "y",
		VolumeSource: v1.VolumeSource{
			Ephemeral: &v1.EphemeralVolumeSource{},
		},
	}).Obj()

	ephemeralClaimX := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ephemeralTwoVolumePod.Namespace,
			Name:      ephemeralTwoVolumePod.Name + "-" + ephemeralTwoVolumePod.Spec.Volumes[0].Name,
			OwnerReferences: []metav1.OwnerReference{
				{
					Kind:       "Pod",
					Name:       ephemeralTwoVolumePod.Name,
					UID:        ephemeralTwoVolumePod.UID,
					Controller: &controller,
				},
			},
		},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &scName,
		},
	}
	ephemeralClaimY := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ephemeralTwoVolumePod.Namespace,
			Name:      ephemeralTwoVolumePod.Name + "-" + ephemeralTwoVolumePod.Spec.Volumes[1].Name,
			OwnerReferences: []metav1.OwnerReference{
				{
					Kind:       "Pod",
					Name:       ephemeralTwoVolumePod.Name,
					UID:        ephemeralTwoVolumePod.UID,
					Controller: &controller,
				},
			},
		},
		Spec: v1.PersistentVolumeClaimSpec{
			StorageClassName: &scName,
		},
	}
	inTreeInlineVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
							VolumeID: "aws-inline1",
						},
					},
				},
			},
		},
	}
	inTreeInlineVolPodWithSameCSIVolumeID := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
							VolumeID: "csi-ebs.csi.aws.com-1",
						},
					},
				},
			},
		},
	}
	onlyConfigmapAndSecretVolPod := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						ConfigMap: &v1.ConfigMapVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{},
					},
				},
			},
		},
	}
	pvcPodWithConfigmapAndSecret := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						ConfigMap: &v1.ConfigMapVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{ClaimName: "csi-ebs.csi.aws.com-0"},
					},
				},
			},
		},
	}
	ephemeralPodWithConfigmapAndSecret := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Namespace: ephemeralVolumePod.Namespace,
			Name:      ephemeralVolumePod.Name,
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						ConfigMap: &v1.ConfigMapVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{},
					},
				},
				{
					Name: "xyz",
					VolumeSource: v1.VolumeSource{
						Ephemeral: &v1.EphemeralVolumeSource{},
					},
				},
			},
		},
	}
	inlineMigratablePodWithConfigmapAndSecret := &v1.Pod{
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					VolumeSource: v1.VolumeSource{
						ConfigMap: &v1.ConfigMapVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						Secret: &v1.SecretVolumeSource{},
					},
				},
				{
					VolumeSource: v1.VolumeSource{
						AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
							VolumeID: "aws-inline1",
						},
					},
				},
			},
		},
	}
	tests := []struct {
		newPod              *v1.Pod
		existingPods        []*v1.Pod
		extraClaims         []v1.PersistentVolumeClaim
		filterName          string
		maxVols             int32
		driverNames         []string
		test                string
		migrationEnabled    bool
		ephemeralEnabled    bool
		limitSource         string
		wantStatus          *framework.Status
		wantPreFilterStatus *framework.Status
	}{
		{
			newPod:       csiEBSOneVolPod,
			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
			filterName:   "csi",
			maxVols:      4,
			driverNames:  []string{ebsCSIDriverName},
			test:         "fits when node volume limit >= new pods CSI volume",
			limitSource:  "node",
		},
		{
			newPod:       csiEBSOneVolPod,
			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
			filterName:   "csi",
			maxVols:      2,
			driverNames:  []string{ebsCSIDriverName},
			test:         "doesn't when node volume limit <= pods CSI volume",
			limitSource:  "node",
			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod:       csiEBSOneVolPod,
			existingPods: []*v1.Pod{runningPod, csiEBSTwoVolPod},
			filterName:   "csi",
			maxVols:      2,
			driverNames:  []string{ebsCSIDriverName},
			test:         "should when driver does not support volume limits",
			limitSource:  "csinode-with-no-limit",
		},
		// should count pending PVCs
		{
			newPod:       csiEBSOneVolPod,
			existingPods: []*v1.Pod{pendingVolumePod, csiEBSTwoVolPod},
			filterName:   "csi",
			maxVols:      2,
			driverNames:  []string{ebsCSIDriverName},
			test:         "count pending PVCs towards volume limit <= pods CSI volume",
			limitSource:  "node",
			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		// two same pending PVCs should be counted as 1
		{
			newPod:       csiEBSOneVolPod,
			existingPods: []*v1.Pod{pendingVolumePod, unboundPVCPod2, csiEBSTwoVolPod},
			filterName:   "csi",
			maxVols:      4,
			driverNames:  []string{ebsCSIDriverName},
			test:         "count multiple pending pvcs towards volume limit >= pods CSI volume",
			limitSource:  "node",
		},
		// should count PVCs with invalid PV name but valid SC
		{
			newPod:       csiEBSOneVolPod,
			existingPods: []*v1.Pod{missingPVPod, csiEBSTwoVolPod},
			filterName:   "csi",
			maxVols:      2,
			driverNames:  []string{ebsCSIDriverName},
			test:         "should count PVCs with invalid PV name but valid SC",
			limitSource:  "node",
			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		// don't count a volume which has storageclass missing
		{
			newPod:       csiEBSOneVolPod,
			existingPods: []*v1.Pod{runningPod, noSCPVCPod},
			filterName:   "csi",
			maxVols:      2,
			driverNames:  []string{ebsCSIDriverName},
			test:         "don't count pvcs with missing SC towards volume limit",
			limitSource:  "node",
		},
		// don't count multiple volume types
		{
			newPod:       csiEBSOneVolPod,
			existingPods: []*v1.Pod{gceTwoVolPod, csiEBSTwoVolPod},
			filterName:   "csi",
			maxVols:      2,
			driverNames:  []string{ebsCSIDriverName, gceCSIDriverName},
			test:         "count pvcs with the same type towards volume limit",
			limitSource:  "node",
			wantStatus:   framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod:       gceTwoVolPod,
			existingPods: []*v1.Pod{csiEBSTwoVolPod, runningPod},
			filterName:   "csi",
			maxVols:      2,
			driverNames:  []string{ebsCSIDriverName, gceCSIDriverName},
			test:         "don't count pvcs with different type towards volume limit",
			limitSource:  "node",
		},
		// Tests for in-tree volume migration
		{
			newPod:           inTreeOneVolPod,
			existingPods:     []*v1.Pod{inTreeTwoVolPod},
			filterName:       "csi",
			maxVols:          2,
			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource:      "csinode",
			test:             "should count in-tree volumes if migration is enabled",
			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod:           inTreeInlineVolPod,
			existingPods:     []*v1.Pod{inTreeTwoVolPod},
			filterName:       "csi",
			maxVols:          2,
			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource:      "node",
			test:             "nil csi node",
		},
		{
			newPod:           pendingVolumePod,
			existingPods:     []*v1.Pod{inTreeTwoVolPod},
			filterName:       "csi",
			maxVols:          2,
			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource:      "csinode",
			test:             "should count unbound in-tree volumes if migration is enabled",
			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod:           inTreeOneVolPod,
			existingPods:     []*v1.Pod{inTreeTwoVolPod},
			filterName:       "csi",
			maxVols:          2,
			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource:      "csinode-with-no-limit",
			test:             "should not limit pod if volume used does not report limits",
		},
		{
			newPod:           inTreeNonMigratableOneVolPod,
			existingPods:     []*v1.Pod{csiEBSTwoVolPod},
			filterName:       "csi",
			maxVols:          2,
			driverNames:      []string{hostpathInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource:      "csinode",
			test:             "should not count non-migratable in-tree volumes",
		},
		{
			newPod:           inTreeInlineVolPod,
			existingPods:     []*v1.Pod{inTreeTwoVolPod},
			filterName:       "csi",
			maxVols:          2,
			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource:      "csinode",
			test:             "should count in-tree inline volumes if migration is enabled",
			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		// mixed volumes
		{
			newPod:           inTreeOneVolPod,
			existingPods:     []*v1.Pod{csiEBSTwoVolPod},
			filterName:       "csi",
			maxVols:          2,
			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource:      "csinode",
			test:             "should count in-tree and csi volumes if migration is enabled (when scheduling in-tree volumes)",
			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod:           inTreeInlineVolPod,
			existingPods:     []*v1.Pod{csiEBSTwoVolPod, inTreeOneVolPod},
			filterName:       "csi",
			maxVols:          3,
			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource:      "csinode",
			test:             "should count in-tree, inline and csi volumes if migration is enabled (when scheduling in-tree volumes)",
			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod:           inTreeInlineVolPodWithSameCSIVolumeID,
			existingPods:     []*v1.Pod{csiEBSTwoVolPod, inTreeOneVolPod},
			filterName:       "csi",
			maxVols:          3,
			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource:      "csinode",
			test:             "should not count in-tree, inline and csi volumes if migration is enabled (when scheduling in-tree volumes)",
		},
		{
			newPod:           csiEBSOneVolPod,
			existingPods:     []*v1.Pod{inTreeTwoVolPod},
			filterName:       "csi",
			maxVols:          2,
			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource:      "csinode",
			test:             "should count in-tree and csi volumes if migration is enabled (when scheduling csi volumes)",
			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		// ephemeral volumes
		{
			newPod:           ephemeralVolumePod,
			filterName:       "csi",
			ephemeralEnabled: true,
			driverNames:      []string{ebsCSIDriverName},
			test:             "ephemeral volume missing",
			wantStatus:       framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`),
		},
		{
			newPod:           ephemeralVolumePod,
			filterName:       "csi",
			ephemeralEnabled: true,
			extraClaims:      []v1.PersistentVolumeClaim{*conflictingClaim},
			driverNames:      []string{ebsCSIDriverName},
			test:             "ephemeral volume not owned",
			wantStatus:       framework.AsStatus(errors.New("PVC test/abc-xyz was not created for pod test/abc (pod is not owner)")),
		},
		{
			newPod:           ephemeralVolumePod,
			filterName:       "csi",
			ephemeralEnabled: true,
			extraClaims:      []v1.PersistentVolumeClaim{*ephemeralClaim},
			driverNames:      []string{ebsCSIDriverName},
			test:             "ephemeral volume unbound",
		},
		{
			newPod:           ephemeralVolumePod,
			filterName:       "csi",
			ephemeralEnabled: true,
			extraClaims:      []v1.PersistentVolumeClaim{*ephemeralClaim},
			driverNames:      []string{ebsCSIDriverName},
			existingPods:     []*v1.Pod{runningPod, csiEBSTwoVolPod},
			maxVols:          2,
			limitSource:      "node",
			test:             "ephemeral doesn't when node volume limit <= pods CSI volume",
			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod:           csiEBSOneVolPod,
			filterName:       "csi",
			ephemeralEnabled: true,
			extraClaims:      []v1.PersistentVolumeClaim{*ephemeralClaimX, *ephemeralClaimY},
			driverNames:      []string{ebsCSIDriverName},
			existingPods:     []*v1.Pod{runningPod, ephemeralTwoVolumePod},
			maxVols:          2,
			limitSource:      "node",
			test:             "ephemeral doesn't when node volume limit <= pods ephemeral CSI volume",
			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod:           csiEBSOneVolPod,
			filterName:       "csi",
			ephemeralEnabled: false,
			extraClaims:      []v1.PersistentVolumeClaim{*ephemeralClaim},
			driverNames:      []string{ebsCSIDriverName},
			existingPods:     []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
			maxVols:          3,
			limitSource:      "node",
			test:             "persistent doesn't when node volume limit <= pods ephemeral CSI volume + persistent volume, ephemeral disabled",
			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod:           csiEBSOneVolPod,
			filterName:       "csi",
			ephemeralEnabled: true,
			extraClaims:      []v1.PersistentVolumeClaim{*ephemeralClaim},
			driverNames:      []string{ebsCSIDriverName},
			existingPods:     []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
			maxVols:          3,
			limitSource:      "node",
			test:             "persistent doesn't when node volume limit <= pods ephemeral CSI volume + persistent volume",
			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
		{
			newPod:           csiEBSOneVolPod,
			filterName:       "csi",
			ephemeralEnabled: true,
			extraClaims:      []v1.PersistentVolumeClaim{*ephemeralClaim},
			driverNames:      []string{ebsCSIDriverName},
			existingPods:     []*v1.Pod{runningPod, ephemeralVolumePod, csiEBSTwoVolPod},
			maxVols:          4,
			test:             "persistent okay when node volume limit > pods ephemeral CSI volume + persistent volume",
		},
		{
			newPod:              onlyConfigmapAndSecretVolPod,
			filterName:          "csi",
			maxVols:             2,
			driverNames:         []string{ebsCSIDriverName},
			test:                "skip Filter when the pod only uses secrets and configmaps",
			limitSource:         "node",
			wantPreFilterStatus: framework.NewStatus(framework.Skip),
		},
		{
			newPod:      pvcPodWithConfigmapAndSecret,
			filterName:  "csi",
			maxVols:     2,
			driverNames: []string{ebsCSIDriverName},
			test:        "don't skip Filter when the pod has pvcs",
			limitSource: "node",
		},
		{
			newPod:           ephemeralPodWithConfigmapAndSecret,
			filterName:       "csi",
			ephemeralEnabled: true,
			driverNames:      []string{ebsCSIDriverName},
			test:             "don't skip Filter when the pod has ephemeral volumes",
			wantStatus:       framework.NewStatus(framework.UnschedulableAndUnresolvable, `looking up PVC test/abc-xyz: persistentvolumeclaims "abc-xyz" not found`),
		},
		{
			newPod:           inlineMigratablePodWithConfigmapAndSecret,
			existingPods:     []*v1.Pod{inTreeTwoVolPod},
			filterName:       "csi",
			maxVols:          2,
			driverNames:      []string{csilibplugins.AWSEBSInTreePluginName, ebsCSIDriverName},
			migrationEnabled: true,
			limitSource:      "csinode",
			test:             "don't skip Filter when the pod has inline migratable volumes",
			wantStatus:       framework.NewStatus(framework.Unschedulable, ErrReasonMaxVolumeCountExceeded),
		},
	}

	// running attachable predicate tests with feature gate and limit present on nodes
	for _, test := range tests {
		t.Run(test.test, func(t *testing.T) {
			node, csiNode := getNodeWithPodAndVolumeLimits(test.limitSource, test.existingPods, test.maxVols, test.driverNames...)
			if csiNode != nil {
				enableMigrationOnNode(csiNode, csilibplugins.AWSEBSInTreePluginName)
			}
			csiTranslator := csitrans.New()
			p := &CSILimits{
				csiNodeLister:        getFakeCSINodeLister(csiNode),
				pvLister:             getFakeCSIPVLister(test.filterName, test.driverNames...),
				pvcLister:            append(getFakeCSIPVCLister(test.filterName, scName, test.driverNames...), test.extraClaims...),
				scLister:             getFakeCSIStorageClassLister(scName, test.driverNames[0]),
				randomVolumeIDPrefix: rand.String(32),
				translator:           csiTranslator,
			}
			_, ctx := ktesting.NewTestContext(t)
			_, gotPreFilterStatus := p.PreFilter(ctx, nil, test.newPod)
			if diff := cmp.Diff(test.wantPreFilterStatus, gotPreFilterStatus); diff != "" {
				t.Errorf("PreFilter status does not match (-want, +got): %s", diff)
			}
			if gotPreFilterStatus.Code() != framework.Skip {
				gotStatus := p.Filter(ctx, nil, test.newPod, node)
				if !reflect.DeepEqual(gotStatus, test.wantStatus) {
					t.Errorf("Filter status does not match: %v, want: %v", gotStatus, test.wantStatus)
				}
			}
		})
	}
}

func TestCSILimitsQHint(t *testing.T) {
	podEbs := st.MakePod().PVC("csi-ebs.csi.aws.com-2")

	tests := []struct {
		newPod                 *v1.Pod
		deletedPod             *v1.Pod
		deletedPodNotScheduled bool
		test                   string
		wantQHint              framework.QueueingHint
	}{
		{
			newPod:     podEbs.Obj(),
			deletedPod: st.MakePod().PVC("placeholder").Obj(),
			test:       "return a Queue when a deleted pod has a PVC",
			wantQHint:  framework.Queue,
		},
		{
			newPod:     podEbs.Obj(),
			deletedPod: st.MakePod().Volume(v1.Volume{VolumeSource: v1.VolumeSource{AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{}}}).Obj(),
			test:       "return a Queue when a deleted pod has a inline migratable volume",
			wantQHint:  framework.Queue,
		},
		{
			newPod:     podEbs.Obj(),
			deletedPod: st.MakePod().Obj(),
			test:       "return a QueueSkip when a deleted pod doesn't have any volume",
			wantQHint:  framework.QueueSkip,
		},
		{
			newPod:                 podEbs.Obj(),
			deletedPod:             st.MakePod().PVC("csi-ebs.csi.aws.com-0").Obj(),
			deletedPodNotScheduled: true,
			test:                   "return a QueueSkip when a deleted pod is not scheduled.",
			wantQHint:              framework.QueueSkip,
		},
	}

	for _, test := range tests {
		t.Run(test.test, func(t *testing.T) {
			node, csiNode := getNodeWithPodAndVolumeLimits("csiNode", []*v1.Pod{}, 1, "")
			if csiNode != nil {
				enableMigrationOnNode(csiNode, csilibplugins.AWSEBSDriverName)
			}
			if !test.deletedPodNotScheduled {
				test.deletedPod.Spec.NodeName = node.Node().Name
			} else {
				test.deletedPod.Spec.NodeName = ""
			}

			p := &CSILimits{
				randomVolumeIDPrefix: rand.String(32),
				translator:           csitrans.New(),
			}
			logger, _ := ktesting.NewTestContext(t)
			qhint, err := p.isSchedulableAfterPodDeleted(logger, test.newPod, test.deletedPod, nil)
			if err != nil {
				t.Errorf("isSchedulableAfterPodDeleted failed: %v", err)
			}
			if qhint != test.wantQHint {
				t.Errorf("QHint does not match: %v, want: %v", qhint, test.wantQHint)
			}
		})
	}
}

// getFakeCSIPVLister returns a fake lister with four PVs per driver name; in-tree
// plugin names get the matching in-tree volume source, everything else gets a CSI source.
func getFakeCSIPVLister(volumeName string, driverNames ...string) tf.PersistentVolumeLister {
	pvLister := tf.PersistentVolumeLister{}
	for _, driver := range driverNames {
		for j := 0; j < 4; j++ {
			volumeHandle := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
			pv := v1.PersistentVolume{
				ObjectMeta: metav1.ObjectMeta{Name: volumeHandle},
				Spec: v1.PersistentVolumeSpec{
					PersistentVolumeSource: v1.PersistentVolumeSource{
						CSI: &v1.CSIPersistentVolumeSource{
							Driver:       driver,
							VolumeHandle: volumeHandle,
						},
					},
				},
			}

			switch driver {
			case csilibplugins.AWSEBSInTreePluginName:
				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
					AWSElasticBlockStore: &v1.AWSElasticBlockStoreVolumeSource{
						VolumeID: volumeHandle,
					},
				}
			case hostpathInTreePluginName:
				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
					HostPath: &v1.HostPathVolumeSource{
						Path: "/tmp",
					},
				}
			default:
				pv.Spec.PersistentVolumeSource = v1.PersistentVolumeSource{
					CSI: &v1.CSIPersistentVolumeSource{
						Driver:       driver,
						VolumeHandle: volumeHandle,
					},
				}
			}
			pvLister = append(pvLister, pv)
		}
	}

	return pvLister
}

// getFakeCSIPVCLister returns a fake lister with four bound PVCs per driver name,
// plus one unbound PVC with a storage class, one without any storage class, and
// one that references a missing PV.
func getFakeCSIPVCLister(volumeName, scName string, driverNames ...string) tf.PersistentVolumeClaimLister {
	pvcLister := tf.PersistentVolumeClaimLister{}
	for _, driver := range driverNames {
		for j := 0; j < 4; j++ {
			v := fmt.Sprintf("%s-%s-%d", volumeName, driver, j)
			pvc := v1.PersistentVolumeClaim{
				ObjectMeta: metav1.ObjectMeta{Name: v},
				Spec:       v1.PersistentVolumeClaimSpec{VolumeName: v},
			}
			pvcLister = append(pvcLister, pvc)
		}
	}

	pvcLister = append(pvcLister, v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-4"},
		Spec:       v1.PersistentVolumeClaimSpec{StorageClassName: &scName},
	})
	pvcLister = append(pvcLister, v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-5"},
		Spec:       v1.PersistentVolumeClaimSpec{},
	})
	// a pvc with missing PV but available storageclass.
	pvcLister = append(pvcLister, v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{Name: volumeName + "-6"},
		Spec:       v1.PersistentVolumeClaimSpec{StorageClassName: &scName, VolumeName: "missing-in-action"},
	})
	return pvcLister
}

// enableMigrationOnNode marks the given plugin as migrated in the CSINode's annotations.
func enableMigrationOnNode(csiNode *storagev1.CSINode, pluginName string) {
	nodeInfoAnnotations := csiNode.GetAnnotations()
	if nodeInfoAnnotations == nil {
		nodeInfoAnnotations = map[string]string{}
	}

	newAnnotationSet := sets.New[string]()
	newAnnotationSet.Insert(pluginName)
	nas := strings.Join(sets.List(newAnnotationSet), ",")
	nodeInfoAnnotations[v1.MigratedPluginsAnnotationKey] = nas

	csiNode.Annotations = nodeInfoAnnotations
}

// getFakeCSIStorageClassLister returns a fake lister with a single storage class
// for the given provisioner.
func getFakeCSIStorageClassLister(scName, provisionerName string) tf.StorageClassLister {
	return tf.StorageClassLister{
		{
			ObjectMeta:  metav1.ObjectMeta{Name: scName},
			Provisioner: provisionerName,
		},
	}
}

// getFakeCSINodeLister returns a fake lister containing a copy of the given CSINode, if any.
func getFakeCSINodeLister(csiNode *storagev1.CSINode) tf.CSINodeLister {
	csiNodeLister := tf.CSINodeLister{}
	if csiNode != nil {
		csiNodeLister = append(csiNodeLister, *csiNode.DeepCopy())
	}
	return csiNodeLister
}

// getNodeWithPodAndVolumeLimits builds a NodeInfo with the given pods and, depending on
// limitSource ("node", "csinode", "both", "csinode-with-no-limit", "no-csi-driver"),
// attaches the volume limit to the node allocatable, the CSINode drivers, both, or neither.
func getNodeWithPodAndVolumeLimits(limitSource string, pods []*v1.Pod, limit int32, driverNames ...string) (*framework.NodeInfo, *storagev1.CSINode) {
	nodeInfo := framework.NewNodeInfo(pods...)
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
		Status: v1.NodeStatus{
			Allocatable: v1.ResourceList{},
		},
	}
	var csiNode *storagev1.CSINode

	addLimitToNode := func() {
		for _, driver := range driverNames {
			node.Status.Allocatable[getVolumeLimitKey(driver)] = *resource.NewQuantity(int64(limit), resource.DecimalSI)
		}
	}

	initCSINode := func() {
		csiNode = &storagev1.CSINode{
			ObjectMeta: metav1.ObjectMeta{Name: "node-for-max-pd-test-1"},
			Spec: storagev1.CSINodeSpec{
				Drivers: []storagev1.CSINodeDriver{},
			},
		}
	}

	addDriversCSINode := func(addLimits bool) {
		initCSINode()
		for _, driver := range driverNames {
			driver := storagev1.CSINodeDriver{
				Name:   driver,
				NodeID: "node-for-max-pd-test-1",
			}
			if addLimits {
				driver.Allocatable = &storagev1.VolumeNodeResources{
					Count: ptr.To(limit),
				}
			}
			csiNode.Spec.Drivers = append(csiNode.Spec.Drivers, driver)
		}
	}

	switch limitSource {
	case "node":
		addLimitToNode()
	case "csinode":
		addDriversCSINode(true)
	case "both":
		addLimitToNode()
		addDriversCSINode(true)
	case "csinode-with-no-limit":
		addDriversCSINode(false)
	case "no-csi-driver":
		initCSINode()
	default:
		// Do nothing.
	}

	nodeInfo.SetNode(node)
	return nodeInfo, csiNode
}