sigs.k8s.io/kueue@v0.6.2/test/integration/controller/core/clusterqueue_controller_test.go

/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package core

import (
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
	"sigs.k8s.io/kueue/pkg/features"
	"sigs.k8s.io/kueue/pkg/metrics"
	"sigs.k8s.io/kueue/pkg/util/testing"
	"sigs.k8s.io/kueue/pkg/workload"
	"sigs.k8s.io/kueue/test/integration/framework"
	"sigs.k8s.io/kueue/test/util"
)

// +kubebuilder:docs-gen:collapse=Imports

const (
	resourceGPU corev1.ResourceName = "example.com/gpu"

	flavorOnDemand = "on-demand"
	flavorSpot     = "spot"
	flavorModelA   = "model-a"
	flavorModelB   = "model-b"
	flavorCPUArchA = "arch-a"
	flavorCPUArchB = "arch-b"
)

var ignoreConditionTimestamps = cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime")
var ignoreLastChangeTime = cmpopts.IgnoreFields(kueue.ClusterQueuePendingWorkloadsStatus{}, "LastChangeTime")
var ignorePendingWorkloadsStatus = cmpopts.IgnoreFields(kueue.ClusterQueueStatus{}, "PendingWorkloadsStatus")
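
// The cmp options above keep the status assertions in this file stable across
// reconciles: LastTransitionTime and LastChangeTime change on every update,
// and the pending-workloads snapshot is asserted separately in the
// queue-visibility tests at the bottom of the file.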
var _ = ginkgo.Describe("ClusterQueue controller", ginkgo.Ordered, ginkgo.ContinueOnFailure, func() {
	var (
		ns               *corev1.Namespace
		emptyUsedFlavors = []kueue.FlavorUsage{
			{
				Name: flavorOnDemand,
				Resources: []kueue.ResourceUsage{
					{Name: corev1.ResourceCPU},
				},
			},
			{
				Name: flavorSpot,
				Resources: []kueue.ResourceUsage{
					{Name: corev1.ResourceCPU},
				},
			},
			{
				Name: flavorModelA,
				Resources: []kueue.ResourceUsage{
					{Name: resourceGPU},
				},
			},
			{
				Name: flavorModelB,
				Resources: []kueue.ResourceUsage{
					{Name: resourceGPU},
				},
			},
		}
	)

	ginkgo.BeforeAll(func() {
		fwk = &framework.Framework{CRDPath: crdPath, WebhookPath: webhookPath}
		cfg = fwk.Init()
		ctx, k8sClient = fwk.RunManager(cfg, managerSetup)
	})
	ginkgo.AfterAll(func() {
		fwk.Teardown()
	})

	ginkgo.BeforeEach(func() {
		ns = &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "core-clusterqueue-",
			},
		}
		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
	})

	ginkgo.AfterEach(func() {
		gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
	})

	ginkgo.When("Reconciling clusterQueue usage status", func() {
		var (
			clusterQueue   *kueue.ClusterQueue
			localQueue     *kueue.LocalQueue
			onDemandFlavor *kueue.ResourceFlavor
			spotFlavor     *kueue.ResourceFlavor
			modelAFlavor   *kueue.ResourceFlavor
			modelBFlavor   *kueue.ResourceFlavor
			ac             *kueue.AdmissionCheck
		)

		ginkgo.BeforeEach(func() {
			ac = testing.MakeAdmissionCheck("ac").ControllerName("ac-controller").Obj()
			gomega.Expect(k8sClient.Create(ctx, ac)).To(gomega.Succeed())
			util.SetAdmissionCheckActive(ctx, k8sClient, ac, metav1.ConditionTrue)

			clusterQueue = testing.MakeClusterQueue("cluster-queue").
				ResourceGroup(
					*testing.MakeFlavorQuotas(flavorOnDemand).
						Resource(corev1.ResourceCPU, "5", "5").Obj(),
					*testing.MakeFlavorQuotas(flavorSpot).
						Resource(corev1.ResourceCPU, "5", "5").Obj(),
				).
				ResourceGroup(
					*testing.MakeFlavorQuotas(flavorModelA).
						Resource(resourceGPU, "5", "5").Obj(),
					*testing.MakeFlavorQuotas(flavorModelB).
						Resource(resourceGPU, "5", "5").Obj(),
				).
				Cohort("cohort").
				AdmissionChecks(ac.Name).
				Obj()
			gomega.Expect(k8sClient.Create(ctx, clusterQueue)).To(gomega.Succeed())
			localQueue = testing.MakeLocalQueue("queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, localQueue)).To(gomega.Succeed())
		})

		ginkgo.AfterEach(func() {
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandFlavor, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, spotFlavor, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, modelAFlavor, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, modelBFlavor, true)
			util.ExpectAdmissionCheckToBeDeleted(ctx, k8sClient, ac, true)
		})

		ginkgo.It("Should update status and report metrics when workloads are assigned and finish", func() {
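			// Six workloads: "one" through "four" will be given quota in this
			// ClusterQueue, "five" targets a different queue, and "six" is left
			// pending, which is why the later assertions expect 4 reserving and
			// 1 pending workload.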
			workloads := []*kueue.Workload{
				testing.MakeWorkload("one", ns.Name).Queue(localQueue.Name).
					Request(corev1.ResourceCPU, "2").Request(resourceGPU, "2").Obj(),
				testing.MakeWorkload("two", ns.Name).Queue(localQueue.Name).
					Request(corev1.ResourceCPU, "3").Request(resourceGPU, "3").Obj(),
				testing.MakeWorkload("three", ns.Name).Queue(localQueue.Name).
					Request(corev1.ResourceCPU, "1").Request(resourceGPU, "1").Obj(),
				testing.MakeWorkload("four", ns.Name).Queue(localQueue.Name).
					Request(corev1.ResourceCPU, "1").Request(resourceGPU, "1").Obj(),
				testing.MakeWorkload("five", ns.Name).Queue("other").
					Request(corev1.ResourceCPU, "1").Request(resourceGPU, "1").Obj(),
				testing.MakeWorkload("six", ns.Name).Queue(localQueue.Name).
					Request(corev1.ResourceCPU, "1").Request(resourceGPU, "1").Obj(),
			}

			ginkgo.By("Checking that the resource metrics are published", func() {
				util.ExpectCQResourceNominalQuota(clusterQueue, flavorOnDemand, string(corev1.ResourceCPU), 5)
				util.ExpectCQResourceNominalQuota(clusterQueue, flavorSpot, string(corev1.ResourceCPU), 5)
				util.ExpectCQResourceNominalQuota(clusterQueue, flavorModelA, string(resourceGPU), 5)
				util.ExpectCQResourceNominalQuota(clusterQueue, flavorModelB, string(resourceGPU), 5)

				util.ExpectCQResourceBorrowingQuota(clusterQueue, flavorOnDemand, string(corev1.ResourceCPU), 5)
				util.ExpectCQResourceBorrowingQuota(clusterQueue, flavorSpot, string(corev1.ResourceCPU), 5)
				util.ExpectCQResourceBorrowingQuota(clusterQueue, flavorModelA, string(resourceGPU), 5)
				util.ExpectCQResourceBorrowingQuota(clusterQueue, flavorModelB, string(resourceGPU), 5)

				util.ExpectCQResourceReservations(clusterQueue, flavorOnDemand, string(corev1.ResourceCPU), 0)
				util.ExpectCQResourceReservations(clusterQueue, flavorSpot, string(corev1.ResourceCPU), 0)
				util.ExpectCQResourceReservations(clusterQueue, flavorModelA, string(resourceGPU), 0)
				util.ExpectCQResourceReservations(clusterQueue, flavorModelB, string(resourceGPU), 0)
			})

			ginkgo.By("Creating workloads")
			for _, w := range workloads {
				gomega.Expect(k8sClient.Create(ctx, w)).To(gomega.Succeed())
			}
			gomega.Eventually(func() kueue.ClusterQueueStatus {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
				PendingWorkloads:   5,
				FlavorsReservation: emptyUsedFlavors,
				FlavorsUsage:       emptyUsedFlavors,
				Conditions: []metav1.Condition{
					{
						Type:    kueue.ClusterQueueActive,
						Status:  metav1.ConditionFalse,
						Reason:  "FlavorNotFound",
						Message: "Can't admit new workloads: FlavorNotFound",
					},
				},
			}, ignoreConditionTimestamps, ignorePendingWorkloadsStatus))
			// Workloads are inadmissible because the ResourceFlavors don't exist yet.
			util.ExpectPendingWorkloadsMetric(clusterQueue, 0, 5)
			util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 0)

			ginkgo.By("Creating ResourceFlavors")
			onDemandFlavor = testing.MakeResourceFlavor(flavorOnDemand).Obj()
			gomega.Expect(k8sClient.Create(ctx, onDemandFlavor)).To(gomega.Succeed())
			spotFlavor = testing.MakeResourceFlavor(flavorSpot).Obj()
			gomega.Expect(k8sClient.Create(ctx, spotFlavor)).To(gomega.Succeed())
			modelAFlavor = testing.MakeResourceFlavor(flavorModelA).Label(resourceGPU.String(), flavorModelA).Obj()
			gomega.Expect(k8sClient.Create(ctx, modelAFlavor)).To(gomega.Succeed())
			modelBFlavor = testing.MakeResourceFlavor(flavorModelB).Label(resourceGPU.String(), flavorModelB).Obj()
			gomega.Expect(k8sClient.Create(ctx, modelBFlavor)).To(gomega.Succeed())

			ginkgo.By("Set workloads quota reservation")
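			// One admission per workload, index-aligned with the slice above: the
			// nil entry leaves "six" pending, and the "other" admission books
			// "five" against a different ClusterQueue, so neither counts toward
			// this queue's usage.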
			admissions := []*kueue.Admission{
				testing.MakeAdmission(clusterQueue.Name).
					Assignment(corev1.ResourceCPU, flavorOnDemand, "2").Assignment(resourceGPU, flavorModelA, "2").Obj(),
				testing.MakeAdmission(clusterQueue.Name).
					Assignment(corev1.ResourceCPU, flavorOnDemand, "3").Assignment(resourceGPU, flavorModelA, "3").Obj(),
				testing.MakeAdmission(clusterQueue.Name).
					Assignment(corev1.ResourceCPU, flavorOnDemand, "1").Assignment(resourceGPU, flavorModelB, "1").Obj(),
				testing.MakeAdmission(clusterQueue.Name).
					Assignment(corev1.ResourceCPU, flavorSpot, "1").Assignment(resourceGPU, flavorModelB, "1").Obj(),
				testing.MakeAdmission("other").
					Assignment(corev1.ResourceCPU, flavorSpot, "1").Assignment(resourceGPU, flavorModelB, "1").Obj(),
				nil,
			}
			for i, w := range workloads {
				gomega.Eventually(func() error {
					var newWL kueue.Workload
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(w), &newWL)).To(gomega.Succeed())
					if admissions[i] != nil {
						return util.SetQuotaReservation(ctx, k8sClient, &newWL, admissions[i])
					}
					return nil
				}, util.Timeout, util.Interval).Should(gomega.Succeed())
			}
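
			// Expected reservations: "one" (2 CPU) + "two" (3 CPU) + "three" (1 CPU)
			// put 6 CPU on on-demand, 1 above its nominal quota of 5, hence
			// Borrowed: 1 from the cohort; "four" takes 1 CPU on spot; the GPUs
			// split 2+3=5 on model-a and 1+1=2 on model-b.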
			totalUsage := []kueue.FlavorUsage{
				{
					Name: flavorOnDemand,
					Resources: []kueue.ResourceUsage{{
						Name:     corev1.ResourceCPU,
						Total:    resource.MustParse("6"),
						Borrowed: resource.MustParse("1"),
					}},
				},
				{
					Name: flavorSpot,
					Resources: []kueue.ResourceUsage{{
						Name:  corev1.ResourceCPU,
						Total: resource.MustParse("1"),
					}},
				},
				{
					Name: flavorModelA,
					Resources: []kueue.ResourceUsage{{
						Name:  resourceGPU,
						Total: resource.MustParse("5"),
					}},
				},
				{
					Name: flavorModelB,
					Resources: []kueue.ResourceUsage{{
						Name:  resourceGPU,
						Total: resource.MustParse("2"),
					}},
				},
			}

			gomega.Eventually(func() kueue.ClusterQueueStatus {
				var updatedCQ kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCQ)).To(gomega.Succeed())
				return updatedCQ.Status
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
				PendingWorkloads:   1,
				ReservingWorkloads: 4,
				AdmittedWorkloads:  0,
				FlavorsReservation: totalUsage,
				FlavorsUsage:       emptyUsedFlavors,
				Conditions: []metav1.Condition{
					{
						Type:    kueue.ClusterQueueActive,
						Status:  metav1.ConditionTrue,
						Reason:  "Ready",
						Message: "Can admit new workloads",
					},
				},
			}, ignoreConditionTimestamps, ignorePendingWorkloadsStatus))
			util.ExpectPendingWorkloadsMetric(clusterQueue, 1, 0)
			util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 4)

			ginkgo.By("Checking the resource reservation metrics are updated", func() {
				util.ExpectCQResourceReservations(clusterQueue, flavorOnDemand, string(corev1.ResourceCPU), 6)
				util.ExpectCQResourceReservations(clusterQueue, flavorSpot, string(corev1.ResourceCPU), 1)
				util.ExpectCQResourceReservations(clusterQueue, flavorModelA, string(resourceGPU), 5)
				util.ExpectCQResourceReservations(clusterQueue, flavorModelB, string(resourceGPU), 2)
			})
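
			// Quota reservation alone only fills FlavorsReservation; the
			// workloads count as admitted, and FlavorsUsage is reported, once
			// their admission check passes as well.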
			ginkgo.By("Setting the admission check for the first 4 workloads")
			for _, w := range workloads[:4] {
				util.SetWorkloadsAdmissionCheck(ctx, k8sClient, w, ac.Name, kueue.CheckStateReady, true)
			}

			gomega.Eventually(func() kueue.ClusterQueueStatus {
				var updatedCQ kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCQ)).To(gomega.Succeed())
				return updatedCQ.Status
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
				PendingWorkloads:   1,
				ReservingWorkloads: 4,
				AdmittedWorkloads:  4,
				FlavorsReservation: totalUsage,
				FlavorsUsage:       totalUsage,
				Conditions: []metav1.Condition{
					{
						Type:    kueue.ClusterQueueActive,
						Status:  metav1.ConditionTrue,
						Reason:  "Ready",
						Message: "Can admit new workloads",
					},
				},
			}, ignoreConditionTimestamps, ignorePendingWorkloadsStatus))
			util.ExpectPendingWorkloadsMetric(clusterQueue, 1, 0)
			util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 4)

			ginkgo.By("Checking the resource usage metrics are updated", func() {
				util.ExpectCQResourceReservations(clusterQueue, flavorOnDemand, string(corev1.ResourceCPU), 6)
				util.ExpectCQResourceReservations(clusterQueue, flavorSpot, string(corev1.ResourceCPU), 1)
				util.ExpectCQResourceReservations(clusterQueue, flavorModelA, string(resourceGPU), 5)
				util.ExpectCQResourceReservations(clusterQueue, flavorModelB, string(resourceGPU), 2)
			})

			ginkgo.By("Finishing workloads")
			util.FinishWorkloads(ctx, k8sClient, workloads...)
			gomega.Eventually(func() kueue.ClusterQueueStatus {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
				FlavorsReservation: emptyUsedFlavors,
				FlavorsUsage:       emptyUsedFlavors,
				Conditions: []metav1.Condition{
					{
						Type:    kueue.ClusterQueueActive,
						Status:  metav1.ConditionTrue,
						Reason:  "Ready",
						Message: "Can admit new workloads",
					},
				},
			}, ignoreConditionTimestamps, ignorePendingWorkloadsStatus))
			util.ExpectPendingWorkloadsMetric(clusterQueue, 0, 0)
			util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 0)
		})

		ginkgo.It("Should update status and report metrics when a pending workload is deleted", func() {
			workload := testing.MakeWorkload("one", ns.Name).Queue(localQueue.Name).
				Request(corev1.ResourceCPU, "5").Obj()

			ginkgo.By("Creating a workload", func() {
				gomega.Expect(k8sClient.Create(ctx, workload)).To(gomega.Succeed())
			})

			// The pending workloads count is incremented as the workload is
			// inadmissible because ResourceFlavors don't exist.
			gomega.Eventually(func() kueue.ClusterQueueStatus {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
				PendingWorkloads:   1,
				FlavorsReservation: emptyUsedFlavors,
				FlavorsUsage:       emptyUsedFlavors,
				Conditions: []metav1.Condition{
					{
						Type:    kueue.ClusterQueueActive,
						Status:  metav1.ConditionFalse,
						Reason:  "FlavorNotFound",
						Message: "Can't admit new workloads: FlavorNotFound",
					},
				},
			}, ignoreConditionTimestamps, ignorePendingWorkloadsStatus))

			util.ExpectPendingWorkloadsMetric(clusterQueue, 0, 1)
			util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 0)

			ginkgo.By("Deleting the pending workload", func() {
				gomega.Expect(k8sClient.Delete(ctx, workload)).To(gomega.Succeed())
			})

			// The pending workloads count is decremented as the deleted workload
			// has been removed from the queue.
			gomega.Eventually(func() kueue.ClusterQueueStatus {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
				PendingWorkloads:   0,
				FlavorsReservation: emptyUsedFlavors,
				FlavorsUsage:       emptyUsedFlavors,
				Conditions: []metav1.Condition{
					{
						Type:    kueue.ClusterQueueActive,
						Status:  metav1.ConditionFalse,
						Reason:  "FlavorNotFound",
						Message: "Can't admit new workloads: FlavorNotFound",
					},
				},
			}, ignoreConditionTimestamps, ignorePendingWorkloadsStatus))
			util.ExpectPendingWorkloadsMetric(clusterQueue, 0, 0)
			util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 0)
		})

		ginkgo.It("Should update status when workloads have reclaimable pods", func() {
			ginkgo.By("Creating ResourceFlavors", func() {
				onDemandFlavor = testing.MakeResourceFlavor(flavorOnDemand).Obj()
				gomega.Expect(k8sClient.Create(ctx, onDemandFlavor)).To(gomega.Succeed())
				spotFlavor = testing.MakeResourceFlavor(flavorSpot).Obj()
				gomega.Expect(k8sClient.Create(ctx, spotFlavor)).To(gomega.Succeed())
				modelAFlavor = testing.MakeResourceFlavor(flavorModelA).Label(resourceGPU.String(), flavorModelA).Obj()
				gomega.Expect(k8sClient.Create(ctx, modelAFlavor)).To(gomega.Succeed())
				modelBFlavor = testing.MakeResourceFlavor(flavorModelB).Label(resourceGPU.String(), flavorModelB).Obj()
				gomega.Expect(k8sClient.Create(ctx, modelBFlavor)).To(gomega.Succeed())
			})

			wl := testing.MakeWorkload("one", ns.Name).
				Queue(localQueue.Name).
				PodSets(
					*testing.MakePodSet("driver", 2).
						Request(corev1.ResourceCPU, "1").
						Obj(),
					*testing.MakePodSet("workers", 5).
						Request(resourceGPU, "1").
						Obj(),
				).
				Obj()
			ginkgo.By("Creating the workload", func() {
				gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())
				util.ExpectPendingWorkloadsMetric(clusterQueue, 1, 0)
			})

			ginkgo.By("Admitting the workload", func() {
				admission := testing.MakeAdmission(clusterQueue.Name).PodSets(
					kueue.PodSetAssignment{
						Name: "driver",
						Flavors: map[corev1.ResourceName]kueue.ResourceFlavorReference{
							corev1.ResourceCPU: "on-demand",
						},
						ResourceUsage: corev1.ResourceList{
							corev1.ResourceCPU: resource.MustParse("2"),
						},
						Count: ptr.To[int32](2),
					},
					kueue.PodSetAssignment{
						Name: "workers",
						Flavors: map[corev1.ResourceName]kueue.ResourceFlavorReference{
							resourceGPU: "model-a",
						},
						ResourceUsage: corev1.ResourceList{
							resourceGPU: resource.MustParse("5"),
						},
						Count: ptr.To[int32](5),
					},
				).Obj()

				gomega.Eventually(func() error {
					var newWL kueue.Workload
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &newWL)).To(gomega.Succeed())
					return util.SetQuotaReservation(ctx, k8sClient, &newWL, admission)
				}, util.Timeout, util.Interval).Should(gomega.Succeed())
			})

			util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 1)
			gomega.Eventually(func() []kueue.FlavorUsage {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status.FlavorsReservation
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo([]kueue.FlavorUsage{
				{
					Name: flavorOnDemand,
					Resources: []kueue.ResourceUsage{{
						Name:  corev1.ResourceCPU,
						Total: resource.MustParse("2"),
					}},
				},
				{
					Name: flavorSpot,
					Resources: []kueue.ResourceUsage{{
						Name: corev1.ResourceCPU,
					}},
				},
				{
					Name: flavorModelA,
					Resources: []kueue.ResourceUsage{{
						Name:  resourceGPU,
						Total: resource.MustParse("5"),
					}},
				},
				{
					Name: flavorModelB,
					Resources: []kueue.ResourceUsage{{
						Name: resourceGPU,
					}},
				},
			}, ignoreConditionTimestamps))
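
			// Reclaimable pods shrink the reservation in place: releasing 2 of
			// the 5 workers returns 2 GPUs (5 -> 3), and releasing all workers
			// plus 1 of the 2 drivers leaves just 1 CPU reserved.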
			ginkgo.By("Mark two workers as reclaimable", func() {
				gomega.Expect(workload.UpdateReclaimablePods(ctx, k8sClient, wl, []kueue.ReclaimablePod{{Name: "workers", Count: 2}})).To(gomega.Succeed())

				util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 1)
				gomega.Eventually(func() []kueue.FlavorUsage {
					var updatedCq kueue.ClusterQueue
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCq)).To(gomega.Succeed())
					return updatedCq.Status.FlavorsReservation
				}, util.Timeout, util.Interval).Should(gomega.BeComparableTo([]kueue.FlavorUsage{
					{
						Name: flavorOnDemand,
						Resources: []kueue.ResourceUsage{{
							Name:  corev1.ResourceCPU,
							Total: resource.MustParse("2"),
						}},
					},
					{
						Name: flavorSpot,
						Resources: []kueue.ResourceUsage{{
							Name: corev1.ResourceCPU,
						}},
					},
					{
						Name: flavorModelA,
						Resources: []kueue.ResourceUsage{{
							Name:  resourceGPU,
							Total: resource.MustParse("3"),
						}},
					},
					{
						Name: flavorModelB,
						Resources: []kueue.ResourceUsage{{
							Name: resourceGPU,
						}},
					},
				}, ignoreConditionTimestamps))
			})

			ginkgo.By("Mark all workers and a driver as reclaimable", func() {
				gomega.Expect(workload.UpdateReclaimablePods(ctx, k8sClient, wl, []kueue.ReclaimablePod{{Name: "workers", Count: 5}, {Name: "driver", Count: 1}})).To(gomega.Succeed())

				util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 1)
				gomega.Eventually(func() []kueue.FlavorUsage {
					var updatedCq kueue.ClusterQueue
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCq)).To(gomega.Succeed())
					return updatedCq.Status.FlavorsReservation
				}, util.Timeout, util.Interval).Should(gomega.BeComparableTo([]kueue.FlavorUsage{
					{
						Name: flavorOnDemand,
						Resources: []kueue.ResourceUsage{{
							Name:  corev1.ResourceCPU,
							Total: resource.MustParse("1"),
						}},
					},
					{
						Name: flavorSpot,
						Resources: []kueue.ResourceUsage{{
							Name: corev1.ResourceCPU,
						}},
					},
					{
						Name: flavorModelA,
						Resources: []kueue.ResourceUsage{{
							Name: resourceGPU,
						}},
					},
					{
						Name: flavorModelB,
						Resources: []kueue.ResourceUsage{{
							Name: resourceGPU,
						}},
					},
				}, ignoreConditionTimestamps))
			})

			ginkgo.By("Finishing workload", func() {
				util.FinishWorkloads(ctx, k8sClient, wl)
				util.ExpectPendingWorkloadsMetric(clusterQueue, 0, 0)
				util.ExpectReservingActiveWorkloadsMetric(clusterQueue, 0)
			})
		})
	})

	ginkgo.When("Reconciling clusterQueue status condition", func() {
		var (
			cq             *kueue.ClusterQueue
			lq             *kueue.LocalQueue
			wl             *kueue.Workload
			cpuArchAFlavor *kueue.ResourceFlavor
			cpuArchBFlavor *kueue.ResourceFlavor
			check1         *kueue.AdmissionCheck
			check2         *kueue.AdmissionCheck
		)

		ginkgo.BeforeEach(func() {
			cq = testing.MakeClusterQueue("bar-cq").
				ResourceGroup(
					*testing.MakeFlavorQuotas(flavorCPUArchA).Resource(corev1.ResourceCPU, "5", "5").Obj(),
					*testing.MakeFlavorQuotas(flavorCPUArchB).Resource(corev1.ResourceCPU, "5", "5").Obj(),
				).
				Cohort("bar-cohort").
				AdmissionChecks("check1", "check2").
				Obj()

			gomega.Expect(k8sClient.Create(ctx, cq)).To(gomega.Succeed())
			lq = testing.MakeLocalQueue("bar-lq", ns.Name).ClusterQueue(cq.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, lq)).To(gomega.Succeed())
			wl = testing.MakeWorkload("bar-wl", ns.Name).Queue(lq.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())
		})

		ginkgo.AfterEach(func() {
			gomega.Expect(util.DeleteWorkload(ctx, k8sClient, wl)).To(gomega.Succeed())
			gomega.Expect(util.DeleteLocalQueue(ctx, k8sClient, lq)).To(gomega.Succeed())
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, cq, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, cpuArchAFlavor, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, cpuArchBFlavor, true)
			util.ExpectAdmissionCheckToBeDeleted(ctx, k8sClient, check1, true)
			util.ExpectAdmissionCheckToBeDeleted(ctx, k8sClient, check2, true)
		})

		ginkgo.It("Should update status conditions when flavors are created", func() {
			check1 = testing.MakeAdmissionCheck("check1").ControllerName("ac-controller").Obj()
			gomega.Expect(k8sClient.Create(ctx, check1)).To(gomega.Succeed())
			util.SetAdmissionCheckActive(ctx, k8sClient, check1, metav1.ConditionTrue)

			check2 = testing.MakeAdmissionCheck("check2").ControllerName("ac-controller").Obj()
			gomega.Expect(k8sClient.Create(ctx, check2)).To(gomega.Succeed())
			util.SetAdmissionCheckActive(ctx, k8sClient, check2, metav1.ConditionTrue)

			ginkgo.By("No flavors are found")

			gomega.Eventually(func() []metav1.Condition {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(cq), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status.Conditions
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo([]metav1.Condition{
				{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionFalse,
					Reason:  "FlavorNotFound",
					Message: "Can't admit new workloads: FlavorNotFound",
				},
			}, ignoreConditionTimestamps))

			ginkgo.By("One of the flavors is not found")
			cpuArchAFlavor = testing.MakeResourceFlavor(flavorCPUArchA).Obj()
			gomega.Expect(k8sClient.Create(ctx, cpuArchAFlavor)).To(gomega.Succeed())
			gomega.Eventually(func() []metav1.Condition {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(cq), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status.Conditions
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo([]metav1.Condition{
				{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionFalse,
					Reason:  "FlavorNotFound",
					Message: "Can't admit new workloads: FlavorNotFound",
				},
			}, ignoreConditionTimestamps))

			ginkgo.By("All flavors are created")
			cpuArchBFlavor = testing.MakeResourceFlavor(flavorCPUArchB).Obj()
			gomega.Expect(k8sClient.Create(ctx, cpuArchBFlavor)).To(gomega.Succeed())
			gomega.Eventually(func() []metav1.Condition {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(cq), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status.Conditions
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo([]metav1.Condition{
				{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionTrue,
					Reason:  "Ready",
					Message: "Can admit new workloads",
				},
			}, ignoreConditionTimestamps))
		})

		ginkgo.It("Should update status conditions when admission checks are created", func() {
			cpuArchAFlavor = testing.MakeResourceFlavor(flavorCPUArchA).Obj()
			gomega.Expect(k8sClient.Create(ctx, cpuArchAFlavor)).To(gomega.Succeed())

			cpuArchBFlavor = testing.MakeResourceFlavor(flavorCPUArchB).Obj()
			gomega.Expect(k8sClient.Create(ctx, cpuArchBFlavor)).To(gomega.Succeed())

			ginkgo.By("No checks are found")

			gomega.Eventually(func() []metav1.Condition {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(cq), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status.Conditions
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo([]metav1.Condition{
				{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionFalse,
					Reason:  "CheckNotFoundOrInactive",
					Message: "Can't admit new workloads: CheckNotFoundOrInactive",
				},
			}, ignoreConditionTimestamps))

			ginkgo.By("One of the checks is not found")
			check1 = testing.MakeAdmissionCheck("check1").ControllerName("ac-controller").Active(metav1.ConditionTrue).Obj()
			gomega.Expect(k8sClient.Create(ctx, check1)).To(gomega.Succeed())
			util.SetAdmissionCheckActive(ctx, k8sClient, check1, metav1.ConditionTrue)
			gomega.Eventually(func() []metav1.Condition {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(cq), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status.Conditions
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo([]metav1.Condition{
				{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionFalse,
					Reason:  "CheckNotFoundOrInactive",
					Message: "Can't admit new workloads: CheckNotFoundOrInactive",
				},
			}, ignoreConditionTimestamps))

			ginkgo.By("One check is inactive")
			check2 = testing.MakeAdmissionCheck("check2").ControllerName("ac-controller").Obj()
			gomega.Expect(k8sClient.Create(ctx, check2)).To(gomega.Succeed())
			gomega.Eventually(func() []metav1.Condition {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(cq), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status.Conditions
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo([]metav1.Condition{
				{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionFalse,
					Reason:  "CheckNotFoundOrInactive",
					Message: "Can't admit new workloads: CheckNotFoundOrInactive",
				},
			}, ignoreConditionTimestamps))

			ginkgo.By("All checks are created and active")
			util.SetAdmissionCheckActive(ctx, k8sClient, check2, metav1.ConditionTrue)
			gomega.Eventually(func() []metav1.Condition {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(cq), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status.Conditions
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo([]metav1.Condition{
				{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionTrue,
					Reason:  "Ready",
					Message: "Can admit new workloads",
				},
			}, ignoreConditionTimestamps))
		})
	})

	ginkgo.When("Deleting clusterQueues", func() {
		var (
			cq    *kueue.ClusterQueue
			lq    *kueue.LocalQueue
			check *kueue.AdmissionCheck
		)

		ginkgo.BeforeEach(func() {
			check = testing.MakeAdmissionCheck("check").ControllerName("check-controller").Obj()
			gomega.Expect(k8sClient.Create(ctx, check)).To(gomega.Succeed())

			cq = testing.MakeClusterQueue("foo-cq").AdmissionChecks(check.Name).Obj()
			lq = testing.MakeLocalQueue("queue", ns.Name).ClusterQueue(cq.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, lq)).To(gomega.Succeed())
			gomega.Expect(k8sClient.Create(ctx, cq)).To(gomega.Succeed())
		})

		ginkgo.AfterEach(func() {
			util.ExpectAdmissionCheckToBeDeleted(ctx, k8sClient, check, true)
		})

		ginkgo.It("Should delete clusterQueues successfully when no admitted workloads are running", func() {
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, cq, true)
		})
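
		// While an admitted workload is still running, the ClusterQueue is held
		// by the kueue.ResourceInUseFinalizerName finalizer and only moves to a
		// terminating state; it is removed once the workload finishes.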
		ginkgo.It("Should be stuck in termination until admitted workloads finish running", func() {
			util.SetAdmissionCheckActive(ctx, k8sClient, check, metav1.ConditionTrue)
			util.ExpectClusterQueueStatusMetric(cq, metrics.CQStatusActive)

			ginkgo.By("Admit workload")
			wl := testing.MakeWorkload("workload", ns.Name).Queue(lq.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())
			gomega.Expect(util.SetQuotaReservation(ctx, k8sClient, wl, testing.MakeAdmission(cq.Name).Obj())).To(gomega.Succeed())
			util.SetWorkloadsAdmissionCheck(ctx, k8sClient, wl, check.Name, kueue.CheckStateReady, true)
			gomega.Eventually(func(g gomega.Gomega) {
				key := client.ObjectKeyFromObject(wl)
				updatedWl := &kueue.Workload{}
				g.Expect(k8sClient.Get(ctx, key, updatedWl)).To(gomega.Succeed())
				g.Expect(apimeta.IsStatusConditionTrue(updatedWl.Status.Conditions, kueue.WorkloadAdmitted)).To(gomega.BeTrue())
			}, util.Timeout, util.Interval).Should(gomega.Succeed())

			ginkgo.By("Delete clusterQueue")
			gomega.Expect(util.DeleteClusterQueue(ctx, k8sClient, cq)).To(gomega.Succeed())
			util.ExpectClusterQueueStatusMetric(cq, metrics.CQStatusTerminating)
			var newCQ kueue.ClusterQueue
			gomega.Eventually(func() []string {
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(cq), &newCQ)).To(gomega.Succeed())
				return newCQ.GetFinalizers()
			}, util.Timeout, util.Interval).Should(gomega.Equal([]string{kueue.ResourceInUseFinalizerName}))

			ginkgo.By("Finish workload")
			util.FinishWorkloads(ctx, k8sClient, wl)

			ginkgo.By("The clusterQueue will be deleted")
			gomega.Eventually(func() error {
				var newCQ kueue.ClusterQueue
				return k8sClient.Get(ctx, client.ObjectKeyFromObject(cq), &newCQ)
			}, util.Timeout, util.Interval).Should(testing.BeNotFoundError())
		})
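
		// By contrast, a workload that merely has quota reserved, but whose
		// admission check has not passed, does not block deletion.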
		ginkgo.It("Should delete the clusterQueue without waiting for reserving-only workloads to finish", func() {
			util.SetAdmissionCheckActive(ctx, k8sClient, check, metav1.ConditionTrue)
			util.ExpectClusterQueueStatusMetric(cq, metrics.CQStatusActive)

			ginkgo.By("Setting quota reservation")
			wl := testing.MakeWorkload("workload", ns.Name).Queue(lq.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())
			gomega.Expect(util.SetQuotaReservation(ctx, k8sClient, wl, testing.MakeAdmission(cq.Name).Obj())).To(gomega.Succeed())

			ginkgo.By("Delete clusterQueue")
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, cq, true)
		})
	})
})
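
// The tests below exercise the QueueVisibility feature gate, which exposes a
// snapshot of the heads of the pending-workloads queue in the ClusterQueue
// status. The snapshot is truncated to the configured MaxCount, which appears
// to be 3 in this suite's manager setup, given the assertions below.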
var _ = ginkgo.Describe("ClusterQueue controller with queue visibility is enabled", ginkgo.Ordered, ginkgo.ContinueOnFailure, func() {
	var ns *corev1.Namespace

	ginkgo.BeforeAll(func() {
		ginkgo.By("Enabling queue visibility feature", func() {
			gomega.Expect(features.SetEnable(features.QueueVisibility, true)).To(gomega.Succeed())
		})
		fwk = &framework.Framework{CRDPath: crdPath, WebhookPath: webhookPath}
		cfg = fwk.Init()
		ctx, k8sClient = fwk.RunManager(cfg, managerSetup)
	})
	ginkgo.AfterAll(func() {
		fwk.Teardown()
	})

	ginkgo.BeforeEach(func() {
		ns = &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "core-clusterqueue-",
			},
		}
		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
	})

	ginkgo.AfterEach(func() {
		gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
	})

	ginkgo.When("Reconciling clusterQueue pending workload status", func() {
		var (
			clusterQueue   *kueue.ClusterQueue
			localQueue     *kueue.LocalQueue
			onDemandFlavor *kueue.ResourceFlavor
		)

		ginkgo.BeforeEach(func() {
			onDemandFlavor = testing.MakeResourceFlavor(flavorOnDemand).Obj()
			gomega.Expect(k8sClient.Create(ctx, onDemandFlavor)).To(gomega.Succeed())
			clusterQueue = testing.MakeClusterQueue("cluster-queue").
				ResourceGroup(
					*testing.MakeFlavorQuotas(flavorOnDemand).
						Resource(corev1.ResourceCPU, "5", "5").Obj(),
				).
				Cohort("cohort").
				Obj()
			gomega.Expect(k8sClient.Create(ctx, clusterQueue)).To(gomega.Succeed())
			localQueue = testing.MakeLocalQueue("queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, localQueue)).To(gomega.Succeed())
		})

		ginkgo.AfterEach(func() {
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandFlavor, true)
		})

		ginkgo.It("Should update the pending workloads when a new workload is scheduled", func() {
			const lowPrio, midLowerPrio, midHigherPrio, highPrio = 0, 10, 20, 100
			workloadsFirstBatch := []*kueue.Workload{
				testing.MakeWorkload("one", ns.Name).Queue(localQueue.Name).Priority(highPrio).
					Request(corev1.ResourceCPU, "2").Request(resourceGPU, "2").Obj(),
				testing.MakeWorkload("two", ns.Name).Queue(localQueue.Name).Priority(midHigherPrio).
					Request(corev1.ResourceCPU, "3").Request(resourceGPU, "3").Obj(),
			}

			ginkgo.By("Verify pending workload status before adding workloads")
			gomega.Eventually(func() *kueue.ClusterQueuePendingWorkloadsStatus {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status.PendingWorkloadsStatus
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(&kueue.ClusterQueuePendingWorkloadsStatus{}, ignoreLastChangeTime))

			ginkgo.By("Creating workloads")
			for _, w := range workloadsFirstBatch {
				gomega.Expect(k8sClient.Create(ctx, w)).To(gomega.Succeed())
			}

			ginkgo.By("Waiting for the pending workloads to be updated")
			gomega.Eventually(func() *kueue.ClusterQueuePendingWorkloadsStatus {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status.PendingWorkloadsStatus
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(&kueue.ClusterQueuePendingWorkloadsStatus{
				Head: []kueue.ClusterQueuePendingWorkload{
					{
						Name:      "one",
						Namespace: ns.Name,
					},
					{
						Name:      "two",
						Namespace: ns.Name,
					},
				},
			}, ignoreLastChangeTime))
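
			// Priorities order the queue heads: "one" (100) and "two" (20) sit
			// ahead of the lower-priority second batch, so once four workloads
			// are pending only the top three appear in the status snapshot.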
			ginkgo.By("Creating new workloads so that the number of workloads exceeds the MaxCount")
			workloadsSecondBatch := []*kueue.Workload{
				testing.MakeWorkload("three", ns.Name).Queue(localQueue.Name).Priority(midLowerPrio).
					Request(corev1.ResourceCPU, "2").Request(resourceGPU, "2").Obj(),
				testing.MakeWorkload("four", ns.Name).Queue(localQueue.Name).Priority(lowPrio).
					Request(corev1.ResourceCPU, "3").Request(resourceGPU, "3").Obj(),
			}
			for _, w := range workloadsSecondBatch {
				gomega.Expect(k8sClient.Create(ctx, w)).To(gomega.Succeed())
			}

			ginkgo.By("Verify the head of pending workloads when the number of pending workloads exceeds MaxCount")
			gomega.Eventually(func() *kueue.ClusterQueuePendingWorkloadsStatus {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status.PendingWorkloadsStatus
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(&kueue.ClusterQueuePendingWorkloadsStatus{
				Head: []kueue.ClusterQueuePendingWorkload{
					{
						Name:      "one",
						Namespace: ns.Name,
					},
					{
						Name:      "two",
						Namespace: ns.Name,
					},
					{
						Name:      "three",
						Namespace: ns.Name,
					},
				},
			}, ignoreLastChangeTime))

			ginkgo.By("Admitting workloads")
			for _, w := range workloadsFirstBatch {
				gomega.Eventually(func() error {
					var newWL kueue.Workload
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(w), &newWL)).To(gomega.Succeed())
					return util.SetQuotaReservation(ctx, k8sClient, &newWL, testing.MakeAdmission(clusterQueue.Name).Obj())
				}, util.Timeout, util.Interval).Should(gomega.Succeed())
			}

			ginkgo.By("Waiting for the pending workloads status to be updated after the workloads are admitted")
			gomega.Eventually(func() *kueue.ClusterQueuePendingWorkloadsStatus {
				var updatedCQ kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCQ)).To(gomega.Succeed())
				return updatedCQ.Status.PendingWorkloadsStatus
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(&kueue.ClusterQueuePendingWorkloadsStatus{
				Head: []kueue.ClusterQueuePendingWorkload{
					{
						Name:      "three",
						Namespace: ns.Name,
					},
					{
						Name:      "four",
						Namespace: ns.Name,
					},
				},
			}, ignoreLastChangeTime))

			ginkgo.By("Finishing workloads", func() {
				util.FinishWorkloads(ctx, k8sClient, workloadsFirstBatch...)
				util.FinishWorkloads(ctx, k8sClient, workloadsSecondBatch...)
			})

			ginkgo.By("Waiting for the pending workloads status to be updated after the workloads are finished")
			gomega.Eventually(func() *kueue.ClusterQueuePendingWorkloadsStatus {
				var updatedCq kueue.ClusterQueue
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCq)).To(gomega.Succeed())
				return updatedCq.Status.PendingWorkloadsStatus
			}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(&kueue.ClusterQueuePendingWorkloadsStatus{
				Head: []kueue.ClusterQueuePendingWorkload{},
			}, ignoreLastChangeTime))
		})
	})
})