// sigs.k8s.io/kueue@v0.6.2/pkg/controller/core/clusterqueue_controller_test.go
/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package core

import (
	"context"
	"testing"
	"time"

	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"
	corev1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/utils/ptr"

	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
	"sigs.k8s.io/kueue/pkg/cache"
	"sigs.k8s.io/kueue/pkg/features"
	"sigs.k8s.io/kueue/pkg/metrics"
	"sigs.k8s.io/kueue/pkg/queue"
	utiltesting "sigs.k8s.io/kueue/pkg/util/testing"
	testingmetrics "sigs.k8s.io/kueue/pkg/util/testing/metrics"
)

func TestUpdateCqStatusIfChanged(t *testing.T) {
	cqName := "test-cq"
	lqName := "test-lq"
	defaultWls := &kueue.WorkloadList{
		Items: []kueue.Workload{
			*utiltesting.MakeWorkload("alpha", "").Queue(lqName).Obj(),
			*utiltesting.MakeWorkload("beta", "").Queue(lqName).Obj(),
		},
	}

	testCases := map[string]struct {
		cqStatus           kueue.ClusterQueueStatus
		newConditionStatus metav1.ConditionStatus
		newReason          string
		newMessage         string
		newWl              *kueue.Workload
		wantCqStatus       kueue.ClusterQueueStatus
	}{
		"empty ClusterQueueStatus": {
			cqStatus:           kueue.ClusterQueueStatus{},
			newConditionStatus: metav1.ConditionFalse,
			newReason:          "FlavorNotFound",
			newMessage:         "Can't admit new workloads; some flavors are not found",
			wantCqStatus: kueue.ClusterQueueStatus{
				PendingWorkloads: int32(len(defaultWls.Items)),
				Conditions: []metav1.Condition{{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionFalse,
					Reason:  "FlavorNotFound",
					Message: "Can't admit new workloads; some flavors are not found",
				}},
			},
		},
		"same condition status": {
			cqStatus: kueue.ClusterQueueStatus{
				PendingWorkloads: int32(len(defaultWls.Items)),
				Conditions: []metav1.Condition{{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionTrue,
					Reason:  "Ready",
					Message: "Can admit new workloads",
				}},
			},
			newConditionStatus: metav1.ConditionTrue,
			newReason:          "Ready",
			newMessage:         "Can admit new workloads",
			wantCqStatus: kueue.ClusterQueueStatus{
				PendingWorkloads: int32(len(defaultWls.Items)),
				Conditions: []metav1.Condition{{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionTrue,
					Reason:  "Ready",
					Message: "Can admit new workloads",
				}},
			},
		},
		"same condition status with different reason and message": {
			cqStatus: kueue.ClusterQueueStatus{
				PendingWorkloads: int32(len(defaultWls.Items)),
				Conditions: []metav1.Condition{{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionFalse,
					Reason:  "FlavorNotFound",
					Message: "Can't admit new workloads; Can't admit new workloads; some flavors are not found",
				}},
			},
			newConditionStatus: metav1.ConditionFalse,
			newReason:          "Terminating",
			newMessage:         "Can't admit new workloads; clusterQueue is terminating",
			wantCqStatus: kueue.ClusterQueueStatus{
				PendingWorkloads: int32(len(defaultWls.Items)),
				Conditions: []metav1.Condition{{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionFalse,
					Reason:  "Terminating",
					Message: "Can't admit new workloads; clusterQueue is terminating",
				}},
			},
		},
		"different condition status": {
			cqStatus: kueue.ClusterQueueStatus{
				PendingWorkloads: int32(len(defaultWls.Items)),
				Conditions: []metav1.Condition{{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionFalse,
					Reason:  "FlavorNotFound",
					Message: "Can't admit new workloads; some flavors are not found",
				}},
			},
			newConditionStatus: metav1.ConditionTrue,
			newReason:          "Ready",
			newMessage:         "Can admit new workloads",
			wantCqStatus: kueue.ClusterQueueStatus{
				PendingWorkloads: int32(len(defaultWls.Items)),
				Conditions: []metav1.Condition{{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionTrue,
					Reason:  "Ready",
					Message: "Can admit new workloads",
				}},
			},
		},
		"different pendingWorkloads with same condition status": {
			cqStatus: kueue.ClusterQueueStatus{
				PendingWorkloads: int32(len(defaultWls.Items)),
				Conditions: []metav1.Condition{{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionTrue,
					Reason:  "Ready",
					Message: "Can admit new workloads",
				}},
			},
			newWl:              utiltesting.MakeWorkload("gamma", "").Queue(lqName).Obj(),
			newConditionStatus: metav1.ConditionTrue,
			newReason:          "Ready",
			newMessage:         "Can admit new workloads",
			wantCqStatus: kueue.ClusterQueueStatus{
				PendingWorkloads: int32(len(defaultWls.Items) + 1),
				Conditions: []metav1.Condition{{
					Type:    kueue.ClusterQueueActive,
					Status:  metav1.ConditionTrue,
					Reason:  "Ready",
					Message: "Can admit new workloads",
				}},
			},
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			cq := utiltesting.MakeClusterQueue(cqName).
				QueueingStrategy(kueue.StrictFIFO).Obj()
			cq.Status = tc.cqStatus
			lq := utiltesting.MakeLocalQueue(lqName, "").
				ClusterQueue(cqName).Obj()
			ctx, log := utiltesting.ContextWithLog(t)

			cl := utiltesting.NewClientBuilder().WithLists(defaultWls).WithObjects(lq, cq).WithStatusSubresource(lq, cq).
				Build()
			cqCache := cache.New(cl)
			qManager := queue.NewManager(cl, cqCache)
			if err := cqCache.AddClusterQueue(ctx, cq); err != nil {
				t.Fatalf("Inserting clusterQueue in cache: %v", err)
			}
			if err := qManager.AddClusterQueue(ctx, cq); err != nil {
				t.Fatalf("Inserting clusterQueue in manager: %v", err)
			}
			if err := qManager.AddLocalQueue(ctx, lq); err != nil {
				t.Fatalf("Inserting localQueue in manager: %v", err)
			}
			for _, wl := range defaultWls.Items {
				cqCache.AddOrUpdateWorkload(&wl)
			}
			r := &ClusterQueueReconciler{
				client:   cl,
				log:      log,
				cache:    cqCache,
				qManager: qManager,
			}
			if tc.newWl != nil {
				r.qManager.AddOrUpdateWorkload(tc.newWl)
			}
			err := r.updateCqStatusIfChanged(ctx, cq, tc.newConditionStatus, tc.newReason, tc.newMessage)
			if err != nil {
				t.Errorf("Updating ClusterQueueStatus: %v", err)
			}
			configCmpOpts := []cmp.Option{
				cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime"),
				cmpopts.IgnoreFields(kueue.ClusterQueuePendingWorkloadsStatus{}, "LastChangeTime"),
				cmpopts.EquateEmpty(),
			}
			if diff := cmp.Diff(tc.wantCqStatus, cq.Status, configCmpOpts...); len(diff) != 0 {
				t.Errorf("unexpected ClusterQueueStatus (-want,+got):\n%s", diff)
			}
		})
	}
}

type cqMetrics struct {
	NominalDPs   []testingmetrics.GaugeDataPoint
	BorrowingDPs []testingmetrics.GaugeDataPoint
	UsageDPs     []testingmetrics.GaugeDataPoint
}

func allMetricsForQueue(name string) cqMetrics {
	return cqMetrics{
		NominalDPs:   testingmetrics.CollectFilteredGaugeVec(metrics.ClusterQueueResourceNominalQuota, map[string]string{"cluster_queue": name}),
		BorrowingDPs: testingmetrics.CollectFilteredGaugeVec(metrics.ClusterQueueResourceBorrowingLimit, map[string]string{"cluster_queue": name}),
		UsageDPs:     testingmetrics.CollectFilteredGaugeVec(metrics.ClusterQueueResourceReservations, map[string]string{"cluster_queue": name}),
	}
}

func resourceDataPoint(cohort, name, flavor, res string, v float64) testingmetrics.GaugeDataPoint {
	return testingmetrics.GaugeDataPoint{
		Labels: map[string]string{
			"cohort":        cohort,
			"cluster_queue": name,
			"flavor":        flavor,
			"resource":      res,
		},
		Value: v,
	}
}

func TestRecordResourceMetrics(t *testing.T) {
	baseQueue := &kueue.ClusterQueue{
		ObjectMeta: metav1.ObjectMeta{
			Name: "name",
		},
		Spec: kueue.ClusterQueueSpec{
			Cohort: "cohort",
			ResourceGroups: []kueue.ResourceGroup{
				{
					CoveredResources: []corev1.ResourceName{corev1.ResourceCPU},
					Flavors: []kueue.FlavorQuotas{
						{
							Name: "flavor",
							Resources: []kueue.ResourceQuota{
								{
									Name:           corev1.ResourceCPU,
									NominalQuota:   resource.MustParse("1"),
									BorrowingLimit: ptr.To(resource.MustParse("2")),
								},
							},
						},
					},
				},
			},
		},
		Status: kueue.ClusterQueueStatus{
			FlavorsReservation: []kueue.FlavorUsage{
				{
					Name: "flavor",
					Resources: []kueue.ResourceUsage{
						{
							Name:     corev1.ResourceCPU,
							Total:    resource.MustParse("2"),
							Borrowed: resource.MustParse("1"),
						},
					},
				},
			},
		},
	}

	testCases := map[string]struct {
		queue              *kueue.ClusterQueue
		wantMetrics        cqMetrics
		updatedQueue       *kueue.ClusterQueue
		wantUpdatedMetrics cqMetrics
	}{
		"no change": {
			queue: baseQueue.DeepCopy(),
			wantMetrics: cqMetrics{
				NominalDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 1),
				},
				BorrowingDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
				UsageDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
			},
		},
		"update-in-place": {
			queue: baseQueue.DeepCopy(),
			wantMetrics: cqMetrics{
				NominalDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 1),
				},
				BorrowingDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
				UsageDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
			},
			updatedQueue: func() *kueue.ClusterQueue {
				ret := baseQueue.DeepCopy()
				ret.Spec.ResourceGroups[0].Flavors[0].Resources[0].NominalQuota = resource.MustParse("2")
				ret.Spec.ResourceGroups[0].Flavors[0].Resources[0].BorrowingLimit = ptr.To(resource.MustParse("1"))
				ret.Status.FlavorsReservation[0].Resources[0].Total = resource.MustParse("3")
				return ret
			}(),
			wantUpdatedMetrics: cqMetrics{
				NominalDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
				BorrowingDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 1),
				},
				UsageDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 3),
				},
			},
		},
		"change-cohort": {
			queue: baseQueue.DeepCopy(),
			wantMetrics: cqMetrics{
				NominalDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 1),
				},
				BorrowingDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
				UsageDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
			},
			updatedQueue: func() *kueue.ClusterQueue {
				ret := baseQueue.DeepCopy()
				ret.Spec.Cohort = "cohort2"
				return ret
			}(),
			wantUpdatedMetrics: cqMetrics{
				NominalDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort2", "name", "flavor", string(corev1.ResourceCPU), 1),
				},
				BorrowingDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort2", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
				UsageDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort2", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
			},
		},
		"add-rm-flavor": {
			queue: baseQueue.DeepCopy(),
			wantMetrics: cqMetrics{
				NominalDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 1),
				},
				BorrowingDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
				UsageDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
			},
			updatedQueue: func() *kueue.ClusterQueue {
				ret := baseQueue.DeepCopy()
				ret.Spec.ResourceGroups[0].Flavors[0].Name = "flavor2"
				ret.Status.FlavorsReservation[0].Name = "flavor2"
				return ret
			}(),
			wantUpdatedMetrics: cqMetrics{
				NominalDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor2", string(corev1.ResourceCPU), 1),
				},
				BorrowingDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor2", string(corev1.ResourceCPU), 2),
				},
				UsageDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor2", string(corev1.ResourceCPU), 2),
				},
			},
		},
		"add-rm-resource": {
			queue: baseQueue.DeepCopy(),
			wantMetrics: cqMetrics{
				NominalDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 1),
				},
				BorrowingDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
				UsageDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
			},
			updatedQueue: func() *kueue.ClusterQueue {
				ret := baseQueue.DeepCopy()
				ret.Spec.ResourceGroups[0].Flavors[0].Resources[0].Name = corev1.ResourceMemory
				ret.Status.FlavorsReservation[0].Resources[0].Name = corev1.ResourceMemory
				return ret
			}(),
			wantUpdatedMetrics: cqMetrics{
				NominalDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceMemory), 1),
				},
				BorrowingDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceMemory), 2),
				},
				UsageDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceMemory), 2),
				},
			},
		},
		"drop-usage": {
			queue: baseQueue.DeepCopy(),
			wantMetrics: cqMetrics{
				NominalDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 1),
				},
				BorrowingDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
				UsageDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
			},
			updatedQueue: func() *kueue.ClusterQueue {
				ret := baseQueue.DeepCopy()
				ret.Status.FlavorsReservation = nil
				return ret
			}(),
			wantUpdatedMetrics: cqMetrics{
				NominalDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 1),
				},
				BorrowingDPs: []testingmetrics.GaugeDataPoint{
					resourceDataPoint("cohort", "name", "flavor", string(corev1.ResourceCPU), 2),
				},
			},
		},
	}

	opts := []cmp.Option{
		cmpopts.SortSlices(func(a, b testingmetrics.GaugeDataPoint) bool { return a.Less(&b) }),
		cmpopts.EquateEmpty(),
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			recordResourceMetrics(tc.queue)
			gotMetrics := allMetricsForQueue(tc.queue.Name)
			if diff := cmp.Diff(tc.wantMetrics, gotMetrics, opts...); len(diff) != 0 {
				t.Errorf("Unexpected metrics (-want,+got):\n%s", diff)
			}

			if tc.updatedQueue != nil {
				updateResourceMetrics(tc.queue, tc.updatedQueue)
				gotMetricsAfterUpdate := allMetricsForQueue(tc.queue.Name)
				if diff := cmp.Diff(tc.wantUpdatedMetrics, gotMetricsAfterUpdate, opts...); len(diff) != 0 {
					t.Errorf("Unexpected metrics (-want,+got):\n%s", diff)
				}
			}

			metrics.ClearClusterQueueResourceMetrics(tc.queue.Name)
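			// After clearing the ClusterQueue's resource metrics, no gauge data points should remain registered for it.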
			endMetrics := allMetricsForQueue(tc.queue.Name)
			if len(endMetrics.NominalDPs) != 0 || len(endMetrics.BorrowingDPs) != 0 || len(endMetrics.UsageDPs) != 0 {
				t.Errorf("Unexpected metrics after cleanup:\n%v", endMetrics)
			}
		})
	}
}

func TestClusterQueuePendingWorkloadsStatus(t *testing.T) {
	cqName := "test-cq"
	lqName := "test-lq"
	const lowPrio, highPrio = 0, 100
	defaultWls := &kueue.WorkloadList{
		Items: []kueue.Workload{
			*utiltesting.MakeWorkload("one", "").Queue(lqName).Priority(highPrio).Obj(),
			*utiltesting.MakeWorkload("two", "").Queue(lqName).Priority(lowPrio).Obj(),
		},
	}
	testCases := map[string]struct {
		queueVisibilityUpdateInterval        time.Duration
		queueVisibilityClusterQueuesMaxCount int32
		wantPendingWorkloadsStatus           *kueue.ClusterQueuePendingWorkloadsStatus
		enableQueueVisibility                bool
	}{
		"queue visibility is disabled": {},
		"queue visibility is disabled but maxcount is provided": {
			queueVisibilityClusterQueuesMaxCount: 2,
		},
		"queue visibility is enabled": {
			queueVisibilityClusterQueuesMaxCount: 2,
			queueVisibilityUpdateInterval:        10 * time.Millisecond,
			enableQueueVisibility:                true,
			wantPendingWorkloadsStatus: &kueue.ClusterQueuePendingWorkloadsStatus{
				Head: []kueue.ClusterQueuePendingWorkload{
					{Name: "one"}, {Name: "two"},
				},
			},
		},
		"verify the head of pending workloads when the number of pending workloads exceeds MaxCount": {
			queueVisibilityClusterQueuesMaxCount: 1,
			queueVisibilityUpdateInterval:        10 * time.Millisecond,
			enableQueueVisibility:                true,
			wantPendingWorkloadsStatus: &kueue.ClusterQueuePendingWorkloadsStatus{
				Head: []kueue.ClusterQueuePendingWorkload{
					{Name: "one"},
				},
			},
		},
	}
	for name, tc := range testCases {
		defer features.SetFeatureGateDuringTest(t, features.QueueVisibility, tc.enableQueueVisibility)()
		t.Run(name, func(t *testing.T) {
			cq := utiltesting.MakeClusterQueue(cqName).
				QueueingStrategy(kueue.StrictFIFO).Obj()
			lq := utiltesting.MakeLocalQueue(lqName, "").
				ClusterQueue(cqName).Obj()
			ctx := context.Background()

			cl := utiltesting.NewClientBuilder().WithLists(defaultWls).WithObjects(lq, cq).WithStatusSubresource(lq, cq).
				Build()
			cCache := cache.New(cl)
			qManager := queue.NewManager(cl, cCache)
			if err := qManager.AddClusterQueue(ctx, cq); err != nil {
				t.Fatalf("Inserting clusterQueue in manager: %v", err)
			}
			if err := qManager.AddLocalQueue(ctx, lq); err != nil {
				t.Fatalf("Inserting localQueue in manager: %v", err)
			}

			r := NewClusterQueueReconciler(
				cl,
				qManager,
				cCache,
				WithQueueVisibilityUpdateInterval(tc.queueVisibilityUpdateInterval),
				WithQueueVisibilityClusterQueuesMaxCount(tc.queueVisibilityClusterQueuesMaxCount),
			)

			go func() {
				if err := r.Start(ctx); err != nil {
					t.Errorf("error starting the cluster queue reconciler: %v", err)
				}
			}()

			diff := ""
			if err := wait.PollUntilContextTimeout(ctx, time.Second, 10*time.Second, false, func(ctx context.Context) (done bool, err error) {
				diff = cmp.Diff(tc.wantPendingWorkloadsStatus, r.getWorkloadsStatus(cq), cmpopts.IgnoreFields(kueue.ClusterQueuePendingWorkloadsStatus{}, "LastChangeTime"))
				return diff == "", nil
			}); err != nil {
				t.Fatalf("Failed to get the expected pending workloads status, last diff=%s", diff)
			}
		})
	}
}