sigs.k8s.io/kueue@v0.6.2/test/integration/scheduler/workload_controller_test.go

/*
Copyright 2023 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package scheduler

import (
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	corev1 "k8s.io/api/core/v1"
	nodev1 "k8s.io/api/node/v1"
	"k8s.io/apimachinery/pkg/api/equality"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"sigs.k8s.io/controller-runtime/pkg/client"

	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
	"sigs.k8s.io/kueue/pkg/util/testing"
	"sigs.k8s.io/kueue/pkg/workload"
	"sigs.k8s.io/kueue/test/util"
)

// +kubebuilder:docs-gen:collapse=Imports

var ignoreCqCondition = cmpopts.IgnoreFields(kueue.ClusterQueueStatus{}, "Conditions")
var ignoreInClusterQueueStatus = cmpopts.IgnoreFields(kueue.ClusterQueueStatus{}, "PendingWorkloadsStatus", "FlavorsUsage", "AdmittedWorkloads")
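
// Every spec below waits for a Workload to obtain a quota reservation by
// polling the API server. A minimal sketch of a helper that the repeated
// inline gomega.Eventually blocks could share; expectQuotaReserved is a
// hypothetical name, not a helper that exists in kueue's test utilities.
func expectQuotaReserved(wl *kueue.Workload) {
	gomega.EventuallyWithOffset(1, func() bool {
		// Re-read the workload and check whether quota has been reserved for it.
		read := kueue.Workload{}
		if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &read); err != nil {
			return false
		}
		return workload.HasQuotaReservation(&read)
	}, util.Timeout, util.Interval).Should(gomega.BeTrue())
}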

var _ = ginkgo.Describe("Workload controller with scheduler", func() {
	var (
		ns             *corev1.Namespace
		localQueue     *kueue.LocalQueue
		wl             *kueue.Workload
		onDemandFlavor *kueue.ResourceFlavor
		runtimeClass   *nodev1.RuntimeClass
		clusterQueue   *kueue.ClusterQueue
		updatedCQ      kueue.ClusterQueue
		resources      = corev1.ResourceList{
			corev1.ResourceCPU: resource.MustParse("1"),
		}
	)

	ginkgo.BeforeEach(func() {
		ns = &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "core-workload-",
			},
		}
		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())

		onDemandFlavor = testing.MakeResourceFlavor("on-demand").Obj()
	})

	ginkgo.AfterEach(func() {
		clusterQueue = nil
		localQueue = nil
		updatedCQ = kueue.ClusterQueue{}
	})

	ginkgo.When("Workload with RuntimeClass defined", func() {
		ginkgo.BeforeEach(func() {
			gomega.Expect(k8sClient.Create(ctx, onDemandFlavor)).To(gomega.Succeed())

			runtimeClass = testing.MakeRuntimeClass("kata", "bar-handler").PodOverhead(resources).Obj()
			gomega.Expect(k8sClient.Create(ctx, runtimeClass)).To(gomega.Succeed())
			clusterQueue = testing.MakeClusterQueue("clusterqueue").
				ResourceGroup(*testing.MakeFlavorQuotas(onDemandFlavor.Name).
					Resource(corev1.ResourceCPU, "5", "5").Obj()).
				Cohort("cohort").
				Obj()
			gomega.Expect(k8sClient.Create(ctx, clusterQueue)).To(gomega.Succeed())
			localQueue = testing.MakeLocalQueue("queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, localQueue)).To(gomega.Succeed())
		})
		ginkgo.AfterEach(func() {
			gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
			gomega.Expect(util.DeleteRuntimeClass(ctx, k8sClient, runtimeClass)).To(gomega.Succeed())
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandFlavor, true)
		})

		ginkgo.It("Should accumulate RuntimeClass's overhead", func() {
			ginkgo.By("Create and wait for workload admission", func() {
				wl = testing.MakeWorkload("one", ns.Name).
					Queue(localQueue.Name).
					Request(corev1.ResourceCPU, "1").
					RuntimeClass("kata").
					Obj()
				gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())

				gomega.Eventually(func() bool {
					read := kueue.Workload{}
					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &read); err != nil {
						return false
					}
					return workload.HasQuotaReservation(&read)
				}, util.Timeout, util.Interval).Should(gomega.BeTrue())
			})

			ginkgo.By("Check queue resource consumption", func() {
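				// 1 CPU requested by the pod set plus the 1 CPU pod overhead of
				// the "kata" RuntimeClass: 2 CPU in total should be reserved.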
				gomega.Eventually(func() kueue.ClusterQueueStatus {
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCQ)).To(gomega.Succeed())
					return updatedCQ.Status
				}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
					PendingWorkloads:   0,
					ReservingWorkloads: 1,
					FlavorsReservation: []kueue.FlavorUsage{{
						Name: kueue.ResourceFlavorReference(onDemandFlavor.Name),
						Resources: []kueue.ResourceUsage{{
							Name:  corev1.ResourceCPU,
							Total: resource.MustParse("2"),
						}},
					}},
				}, ignoreCqCondition, ignoreInClusterQueueStatus))
			})
		})
	})

	ginkgo.When("Workload with non-existent RuntimeClass defined", func() {
		ginkgo.BeforeEach(func() {
			gomega.Expect(k8sClient.Create(ctx, onDemandFlavor)).To(gomega.Succeed())

			clusterQueue = testing.MakeClusterQueue("clusterqueue").
				ResourceGroup(*testing.MakeFlavorQuotas(onDemandFlavor.Name).
					Resource(corev1.ResourceCPU, "5", "5").Obj()).
				Cohort("cohort").
				Obj()
			gomega.Expect(k8sClient.Create(ctx, clusterQueue)).To(gomega.Succeed())
			localQueue = testing.MakeLocalQueue("queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, localQueue)).To(gomega.Succeed())
		})
		ginkgo.AfterEach(func() {
			gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandFlavor, true)
		})

		ginkgo.It("Should not accumulate RuntimeClass's overhead", func() {
			ginkgo.By("Create and wait for workload admission", func() {
				wl = testing.MakeWorkload("one", ns.Name).
					Queue(localQueue.Name).
					Request(corev1.ResourceCPU, "1").
					RuntimeClass("kata").
					Obj()
				gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())

				gomega.Eventually(func() bool {
					read := kueue.Workload{}
					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &read); err != nil {
						return false
					}
					return workload.HasQuotaReservation(&read)
				}, util.Timeout, util.Interval).Should(gomega.BeTrue())
			})

			ginkgo.By("Check queue resource consumption", func() {
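				// The referenced "kata" RuntimeClass was never created, so no
				// overhead is added: only the 1 CPU pod set request is reserved.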
				gomega.Eventually(func() kueue.ClusterQueueStatus {
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCQ)).To(gomega.Succeed())
					return updatedCQ.Status
				}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
					PendingWorkloads:   0,
					ReservingWorkloads: 1,
					FlavorsReservation: []kueue.FlavorUsage{{
						Name: kueue.ResourceFlavorReference(onDemandFlavor.Name),
						Resources: []kueue.ResourceUsage{{
							Name:  corev1.ResourceCPU,
							Total: resource.MustParse("1"),
						}},
					}},
				}, ignoreCqCondition, ignoreInClusterQueueStatus))
			})
		})
	})

	ginkgo.When("LimitRanges are defined", func() {
		ginkgo.BeforeEach(func() {
			limitRange := testing.MakeLimitRange("limits", ns.Name).WithValue("DefaultRequest", corev1.ResourceCPU, "3").Obj()
			gomega.Expect(k8sClient.Create(ctx, limitRange)).To(gomega.Succeed())
			gomega.Expect(k8sClient.Create(ctx, onDemandFlavor)).To(gomega.Succeed())
			clusterQueue = testing.MakeClusterQueue("clusterqueue").
				ResourceGroup(*testing.MakeFlavorQuotas(onDemandFlavor.Name).
					Resource(corev1.ResourceCPU, "5", "5").Obj()).
				Cohort("cohort").
				Obj()
			gomega.Expect(k8sClient.Create(ctx, clusterQueue)).To(gomega.Succeed())
			localQueue = testing.MakeLocalQueue("queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, localQueue)).To(gomega.Succeed())
		})
		ginkgo.AfterEach(func() {
			gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandFlavor, true)
		})

		ginkgo.It("Should use the LimitRange-defined default requests, if provided", func() {
			ginkgo.By("Create and wait for workload admission", func() {
				wl = testing.MakeWorkload("one", ns.Name).
					Queue(localQueue.Name).
					Obj()
				gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())

				gomega.Eventually(func() bool {
					read := kueue.Workload{}
					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &read); err != nil {
						return false
					}
					return workload.HasQuotaReservation(&read)
				}, util.Timeout, util.Interval).Should(gomega.BeTrue())
			})

			ginkgo.By("Check queue resource consumption", func() {
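				// The workload set no requests, so the LimitRange's 3 CPU
				// DefaultRequest is applied and reserved.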
				gomega.Eventually(func() kueue.ClusterQueueStatus {
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCQ)).To(gomega.Succeed())
					return updatedCQ.Status
				}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
					PendingWorkloads:   0,
					ReservingWorkloads: 1,
					FlavorsReservation: []kueue.FlavorUsage{{
						Name: kueue.ResourceFlavorReference(onDemandFlavor.Name),
						Resources: []kueue.ResourceUsage{
							{
								Name:  corev1.ResourceCPU,
								Total: resource.MustParse("3"),
							},
						},
					}},
				}, ignoreCqCondition, ignoreInClusterQueueStatus))
			})

			ginkgo.By("Check podSets spec", func() {
				wlRead := kueue.Workload{}
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &wlRead)).To(gomega.Succeed())
				gomega.Expect(equality.Semantic.DeepEqual(wl.Spec.PodSets, wlRead.Spec.PodSets)).To(gomega.BeTrue())
			})
		})
		ginkgo.It("Should not use the LimitRange-defined defaults when the workload provides its own requests", func() {
			ginkgo.By("Create and wait for workload admission", func() {
				wl = testing.MakeWorkload("one", ns.Name).
					Queue(localQueue.Name).
					Request(corev1.ResourceCPU, "1").
					Obj()
				gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())

				gomega.Eventually(func() bool {
					read := kueue.Workload{}
					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &read); err != nil {
						return false
					}
					return workload.HasQuotaReservation(&read)
				}, util.Timeout, util.Interval).Should(gomega.BeTrue())
			})

			ginkgo.By("Check queue resource consumption", func() {
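				// The workload's explicit 1 CPU request takes precedence over
				// the LimitRange's 3 CPU default.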
				gomega.Eventually(func() kueue.ClusterQueueStatus {
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCQ)).To(gomega.Succeed())
					return updatedCQ.Status
				}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
					PendingWorkloads:   0,
					ReservingWorkloads: 1,
					FlavorsReservation: []kueue.FlavorUsage{{
						Name: kueue.ResourceFlavorReference(onDemandFlavor.Name),
						Resources: []kueue.ResourceUsage{
							{
								Name:  corev1.ResourceCPU,
								Total: resource.MustParse("1"),
							},
						},
					}},
				}, ignoreCqCondition, ignoreInClusterQueueStatus))
			})

			ginkgo.By("Check podSets spec", func() {
				wlRead := kueue.Workload{}
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &wlRead)).To(gomega.Succeed())
				gomega.Expect(equality.Semantic.DeepEqual(wl.Spec.PodSets, wlRead.Spec.PodSets)).To(gomega.BeTrue())
			})
		})
	})

	ginkgo.When("the workload defines only resource limits and the LocalQueue is created late", func() {
		ginkgo.BeforeEach(func() {
			gomega.Expect(k8sClient.Create(ctx, onDemandFlavor)).To(gomega.Succeed())
			clusterQueue = testing.MakeClusterQueue("clusterqueue").
				ResourceGroup(*testing.MakeFlavorQuotas(onDemandFlavor.Name).
					Resource(corev1.ResourceCPU, "5", "5").Obj()).
				Cohort("cohort").
				Obj()
			gomega.Expect(k8sClient.Create(ctx, clusterQueue)).To(gomega.Succeed())
			localQueue = testing.MakeLocalQueue("queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
		})
		ginkgo.AfterEach(func() {
			gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandFlavor, true)
		})

		ginkgo.It("The limits should be used as request values", func() {
			ginkgo.By("Create and wait for workload admission", func() {
				wl = testing.MakeWorkload("one", ns.Name).
					Queue(localQueue.Name).
					Limit(corev1.ResourceCPU, "1").
					Obj()
				gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())

				gomega.Expect(k8sClient.Create(ctx, localQueue)).To(gomega.Succeed())

				gomega.Eventually(func() bool {
					read := kueue.Workload{}
					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &read); err != nil {
						return false
					}
					return workload.HasQuotaReservation(&read)
				}, util.Timeout, util.Interval).Should(gomega.BeTrue())
			})

			ginkgo.By("Check queue resource consumption", func() {
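				// Only a CPU limit was set, so its 1 CPU value was adopted as
				// the request and is what gets reserved.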
				gomega.Eventually(func() kueue.ClusterQueueStatus {
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCQ)).To(gomega.Succeed())
					return updatedCQ.Status
				}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
					PendingWorkloads:   0,
					ReservingWorkloads: 1,
					FlavorsReservation: []kueue.FlavorUsage{{
						Name: kueue.ResourceFlavorReference(onDemandFlavor.Name),
						Resources: []kueue.ResourceUsage{{
							Name:  corev1.ResourceCPU,
							Total: resource.MustParse("1"),
						}},
					}},
				}, ignoreCqCondition, ignoreInClusterQueueStatus))
			})

			ginkgo.By("Check podSets spec", func() {
				wlRead := kueue.Workload{}
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &wlRead)).To(gomega.Succeed())
				gomega.Expect(equality.Semantic.DeepEqual(wl.Spec.PodSets, wlRead.Spec.PodSets)).To(gomega.BeTrue())
			})
		})
	})

	ginkgo.When("RuntimeClass is defined and changes", func() {
		ginkgo.BeforeEach(func() {
			runtimeClass = testing.MakeRuntimeClass("kata", "bar-handler").
				PodOverhead(corev1.ResourceList{corev1.ResourceCPU: resource.MustParse("2")}).
				Obj()
			gomega.Expect(k8sClient.Create(ctx, runtimeClass)).To(gomega.Succeed())
			gomega.Expect(k8sClient.Create(ctx, onDemandFlavor)).To(gomega.Succeed())
			clusterQueue = testing.MakeClusterQueue("clusterqueue").
				ResourceGroup(*testing.MakeFlavorQuotas(onDemandFlavor.Name).
					Resource(corev1.ResourceCPU, "5", "5").Obj()).
				Cohort("cohort").
				Obj()
			gomega.Expect(k8sClient.Create(ctx, clusterQueue)).To(gomega.Succeed())
			localQueue = testing.MakeLocalQueue("queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, localQueue)).To(gomega.Succeed())
		})
		ginkgo.AfterEach(func() {
			gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
			gomega.Expect(util.DeleteRuntimeClass(ctx, k8sClient, runtimeClass)).To(gomega.Succeed())
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandFlavor, true)
		})

		ginkgo.It("Should sync the resource requests with the new overhead", func() {
			ginkgo.By("Create and wait for the first workload admission", func() {
				wl = testing.MakeWorkload("one", ns.Name).
					Queue(localQueue.Name).
					Request(corev1.ResourceCPU, "1").
					RuntimeClass("kata").
					Obj()
				gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())

				gomega.Eventually(func() bool {
					read := kueue.Workload{}
					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &read); err != nil {
						return false
					}
					return workload.HasQuotaReservation(&read)
				}, util.Timeout, util.Interval).Should(gomega.BeTrue())
			})

			var wl2 *kueue.Workload
			ginkgo.By("Create a second workload, should stay pending", func() {
				wl2 = testing.MakeWorkload("two", ns.Name).
					Queue(localQueue.Name).
					Request(corev1.ResourceCPU, "1").
					RuntimeClass("kata").
					Obj()
				gomega.Expect(k8sClient.Create(ctx, wl2)).To(gomega.Succeed())

				// Consistently: the workload must remain without a quota
				// reservation for the whole polling window, not just at one poll.
				gomega.Consistently(func() bool {
					read := kueue.Workload{}
					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl2), &read); err != nil {
						return false
					}
					return workload.HasQuotaReservation(&read)
				}, util.ConsistentDuration, util.Interval).Should(gomega.BeFalse())
			})

			ginkgo.By("Decreasing the runtimeClass's overhead", func() {
				updatedRC := nodev1.RuntimeClass{}
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(runtimeClass), &updatedRC)).To(gomega.Succeed())
				updatedRC.Overhead.PodFixed[corev1.ResourceCPU] = resource.MustParse("1")
				gomega.Expect(k8sClient.Update(ctx, &updatedRC)).To(gomega.Succeed())
			})

			ginkgo.By("The second workload now fits and is admitted", func() {
				gomega.Eventually(func() bool {
					read := kueue.Workload{}
					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl2), &read); err != nil {
						return false
					}
					return workload.HasQuotaReservation(&read)
				}, util.Timeout, util.Interval).Should(gomega.BeTrue())
			})

			ginkgo.By("Check queue resource consumption", func() {
				// The total CPU usage in the queue should be 5:
				// first workload: 3 = 1 (pod set request) + 2 (initial class overhead at the time of its admission)
				// second workload: 2 = 1 (pod set request) + 1 (updated class overhead at the time of its admission)
				gomega.Eventually(func() kueue.ClusterQueueStatus {
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCQ)).To(gomega.Succeed())
					return updatedCQ.Status
				}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
					PendingWorkloads:   0,
					ReservingWorkloads: 2,
					FlavorsReservation: []kueue.FlavorUsage{{
						Name: kueue.ResourceFlavorReference(onDemandFlavor.Name),
						Resources: []kueue.ResourceUsage{{
							Name:  corev1.ResourceCPU,
							Total: resource.MustParse("5"),
						}},
					}},
				}, ignoreCqCondition, ignoreInClusterQueueStatus))
			})
		})
	})
	ginkgo.When("LimitRanges are defined and change", func() {
		var limitRange *corev1.LimitRange
		ginkgo.BeforeEach(func() {
			limitRange = testing.MakeLimitRange("limits", ns.Name).WithValue("DefaultRequest", corev1.ResourceCPU, "3").Obj()
			gomega.Expect(k8sClient.Create(ctx, limitRange)).To(gomega.Succeed())
			gomega.Expect(k8sClient.Create(ctx, onDemandFlavor)).To(gomega.Succeed())
			clusterQueue = testing.MakeClusterQueue("clusterqueue").
				ResourceGroup(*testing.MakeFlavorQuotas(onDemandFlavor.Name).
					Resource(corev1.ResourceCPU, "5", "5").Obj()).
				Cohort("cohort").
				Obj()
			gomega.Expect(k8sClient.Create(ctx, clusterQueue)).To(gomega.Succeed())
			localQueue = testing.MakeLocalQueue("queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, localQueue)).To(gomega.Succeed())
		})
		ginkgo.AfterEach(func() {
			gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandFlavor, true)
		})

		ginkgo.It("Should sync the resource requests with the limit", func() {
			ginkgo.By("Create and wait for the first workload admission", func() {
				wl = testing.MakeWorkload("one", ns.Name).
					Queue(localQueue.Name).
					Obj()
				gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())

				gomega.Eventually(func() bool {
					read := kueue.Workload{}
					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), &read); err != nil {
						return false
					}
					return workload.HasQuotaReservation(&read)
				}, util.Timeout, util.Interval).Should(gomega.BeTrue())
			})

			var wl2 *kueue.Workload
			ginkgo.By("Create a second workload, should stay pending", func() {
				wl2 = testing.MakeWorkload("two", ns.Name).
					Queue(localQueue.Name).
					Obj()
				gomega.Expect(k8sClient.Create(ctx, wl2)).To(gomega.Succeed())

				gomega.Consistently(func() bool {
					read := kueue.Workload{}
					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl2), &read); err != nil {
						return false
					}
					return workload.HasQuotaReservation(&read)
				}, util.ConsistentDuration, util.Interval).Should(gomega.BeFalse())
			})

			ginkgo.By("Decreasing the limit's default", func() {
				updatedLr := corev1.LimitRange{}
				gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(limitRange), &updatedLr)).To(gomega.Succeed())
				updatedLr.Spec.Limits[0].DefaultRequest[corev1.ResourceCPU] = resource.MustParse("2")
				gomega.Expect(k8sClient.Update(ctx, &updatedLr)).To(gomega.Succeed())
			})

			ginkgo.By("The second workload now fits and is admitted", func() {
				gomega.Eventually(func() bool {
					read := kueue.Workload{}
					if err := k8sClient.Get(ctx, client.ObjectKeyFromObject(wl2), &read); err != nil {
						return false
					}
					return workload.HasQuotaReservation(&read)
				}, util.Timeout, util.Interval).Should(gomega.BeTrue())
			})

			ginkgo.By("Check queue resource consumption", func() {
				// The total CPU usage in the queue should be 5:
				// first workload: 3, the initial limitRange default at the time of its admission
				// second workload: 2, the updated limitRange default at the time of its admission
				gomega.Eventually(func() kueue.ClusterQueueStatus {
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCQ)).To(gomega.Succeed())
					return updatedCQ.Status
				}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
					PendingWorkloads:   0,
					ReservingWorkloads: 2,
					FlavorsReservation: []kueue.FlavorUsage{{
						Name: kueue.ResourceFlavorReference(onDemandFlavor.Name),
						Resources: []kueue.ResourceUsage{{
							Name:  corev1.ResourceCPU,
							Total: resource.MustParse("5"),
						}},
					}},
				}, ignoreCqCondition, ignoreInClusterQueueStatus))
			})
		})
	})

	ginkgo.When("a LimitRange event occurs near workload deletion time", func() {
		var limitRange *corev1.LimitRange
		ginkgo.BeforeEach(func() {
			limitRange = testing.MakeLimitRange("limits", ns.Name).WithValue("DefaultRequest", corev1.ResourceCPU, "3").Obj()
			gomega.Expect(k8sClient.Create(ctx, limitRange)).To(gomega.Succeed())
			gomega.Expect(k8sClient.Create(ctx, onDemandFlavor)).To(gomega.Succeed())
			clusterQueue = testing.MakeClusterQueue("clusterqueue").
				ResourceGroup(*testing.MakeFlavorQuotas(onDemandFlavor.Name).
					Resource(corev1.ResourceCPU, "5", "5").Obj()).
				Cohort("cohort").
				Obj()
			gomega.Expect(k8sClient.Create(ctx, clusterQueue)).To(gomega.Succeed())
			localQueue = testing.MakeLocalQueue("queue", ns.Name).ClusterQueue(clusterQueue.Name).Obj()
			gomega.Expect(k8sClient.Create(ctx, localQueue)).To(gomega.Succeed())
		})
		ginkgo.AfterEach(func() {
			ginkgo.By("Resource consumption should be 0", func() {
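				// However the LimitRange update and the workload deletion
				// interleave, usage must drop back to zero once the workload is
				// gone; this is the race this block exercises.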
				gomega.Eventually(func() kueue.ClusterQueueStatus {
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(clusterQueue), &updatedCQ)).To(gomega.Succeed())
					return updatedCQ.Status
				}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(kueue.ClusterQueueStatus{
					PendingWorkloads:   0,
					ReservingWorkloads: 0,
					FlavorsReservation: []kueue.FlavorUsage{{
						Name: kueue.ResourceFlavorReference(onDemandFlavor.Name),
						Resources: []kueue.ResourceUsage{{
							Name:  corev1.ResourceCPU,
							Total: resource.MustParse("0"),
						}},
					}},
				}, ignoreCqCondition, ignoreInClusterQueueStatus))
			})
			gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandFlavor, true)
		})

		ginkgo.When("the workload is admissible", func() {
			ginkgo.It("Should not consume resources", func() {
				var wl *kueue.Workload
				ginkgo.By("Create the workload", func() {
					wl = testing.MakeWorkload("one", ns.Name).
						Queue(localQueue.Name).
						Request(corev1.ResourceCPU, "1").
						Obj()
					gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())
				})

				updatedLr := corev1.LimitRange{}
				ginkgo.By("Preparing the updated limitRange", func() {
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(limitRange), &updatedLr)).To(gomega.Succeed())
					updatedLr.Spec.Limits[0].DefaultRequest[corev1.ResourceCPU] = resource.MustParse("2")
				})
				ginkgo.By("Updating the limitRange and deleting the workload", func() {
					gomega.Expect(k8sClient.Update(ctx, &updatedLr)).To(gomega.Succeed())
					gomega.Expect(k8sClient.Delete(ctx, wl)).To(gomega.Succeed())
				})
			})
		})

		ginkgo.When("the workload is not admissible", func() {
			ginkgo.It("Should not consume resources", func() {
				var wl *kueue.Workload
				ginkgo.By("Create the workload", func() {
					wl = testing.MakeWorkload("one", ns.Name).
						Queue(localQueue.Name).
						Request(corev1.ResourceCPU, "7").
						Obj()
					gomega.Expect(k8sClient.Create(ctx, wl)).To(gomega.Succeed())
				})
				updatedLr := corev1.LimitRange{}
				ginkgo.By("Preparing the updated limitRange", func() {
					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(limitRange), &updatedLr)).To(gomega.Succeed())
					updatedLr.Spec.Limits[0].DefaultRequest[corev1.ResourceCPU] = resource.MustParse("2")
				})
				ginkgo.By("Updating the limitRange and deleting the workload", func() {
					gomega.Expect(k8sClient.Update(ctx, &updatedLr)).To(gomega.Succeed())
					gomega.Expect(k8sClient.Delete(ctx, wl)).To(gomega.Succeed())
				})
			})
		})
	})
})