sigs.k8s.io/kueue@v0.6.2/test/e2e/singlecluster/e2e_test.go

/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2e

import (
	"github.com/google/go-cmp/cmp/cmpopts"
	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
	batchv1 "k8s.io/api/batch/v1"
	corev1 "k8s.io/api/core/v1"
	apimeta "k8s.io/apimachinery/pkg/api/meta"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/types"
	"k8s.io/utils/ptr"
	"sigs.k8s.io/controller-runtime/pkg/client"

	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
	"sigs.k8s.io/kueue/pkg/controller/constants"
	workloadjob "sigs.k8s.io/kueue/pkg/controller/jobs/job"
	"sigs.k8s.io/kueue/pkg/util/slices"
	"sigs.k8s.io/kueue/pkg/util/testing"
	testingjob "sigs.k8s.io/kueue/pkg/util/testingjobs/job"
	"sigs.k8s.io/kueue/pkg/workload"
	"sigs.k8s.io/kueue/test/util"
)

// +kubebuilder:docs-gen:collapse=Imports

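// These end-to-end cases exercise Kueue's batch/v1 Job integration on a
// single cluster: suspension when no LocalQueue matches, admission across
// ResourceFlavors, preemption and readmission, partial admission, and
// two-step admission gated by AdmissionChecks.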
var _ = ginkgo.Describe("Kueue", func() {
	var ns *corev1.Namespace
	var sampleJob *batchv1.Job
	var jobKey types.NamespacedName

	ginkgo.BeforeEach(func() {
		ns = &corev1.Namespace{
			ObjectMeta: metav1.ObjectMeta{
				GenerateName: "e2e-",
			},
		}
		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
		sampleJob = testingjob.MakeJob("test-job", ns.Name).
			Queue("main").
			Request("cpu", "1").
			Request("memory", "20Mi").
			Obj()
		jobKey = client.ObjectKeyFromObject(sampleJob)
	})
	ginkgo.AfterEach(func() {
		gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
	})

	ginkgo.When("Creating a Job without a matching LocalQueue", func() {
		ginkgo.It("Should stay suspended", func() {
			gomega.Expect(k8sClient.Create(ctx, sampleJob)).Should(gomega.Succeed())

			createdJob := &batchv1.Job{}
			gomega.Eventually(func() bool {
				if err := k8sClient.Get(ctx, jobKey, createdJob); err != nil {
					return false
				}
				return *createdJob.Spec.Suspend
			}, util.Timeout, util.Interval).Should(gomega.BeTrue())
			wlLookupKey := types.NamespacedName{Name: workloadjob.GetWorkloadNameForJob(jobKey.Name), Namespace: ns.Name}
			createdWorkload := &kueue.Workload{}
			gomega.Eventually(func() bool {
				if err := k8sClient.Get(ctx, wlLookupKey, createdWorkload); err != nil {
					return false
				}
				return workload.HasQuotaReservation(createdWorkload)
			}, util.Timeout, util.Interval).Should(gomega.BeFalse())
			gomega.Expect(k8sClient.Delete(ctx, sampleJob)).Should(gomega.Succeed())
		})
	})

	ginkgo.When("Creating a Job With Queueing", func() {
		var (
			onDemandRF   *kueue.ResourceFlavor
			spotRF       *kueue.ResourceFlavor
			localQueue   *kueue.LocalQueue
			clusterQueue *kueue.ClusterQueue
		)
		ginkgo.BeforeEach(func() {
			onDemandRF = testing.MakeResourceFlavor("on-demand").
				Label("instance-type", "on-demand").Obj()
			gomega.Expect(k8sClient.Create(ctx, onDemandRF)).Should(gomega.Succeed())
			spotRF = testing.MakeResourceFlavor("spot").
				Label("instance-type", "spot").Obj()
			gomega.Expect(k8sClient.Create(ctx, spotRF)).Should(gomega.Succeed())
			clusterQueue = testing.MakeClusterQueue("cluster-queue").
				ResourceGroup(
					*testing.MakeFlavorQuotas("on-demand").
						Resource(corev1.ResourceCPU, "1").
						Resource(corev1.ResourceMemory, "1Gi").
						Obj(),
					*testing.MakeFlavorQuotas("spot").
						Resource(corev1.ResourceCPU, "1").
						Resource(corev1.ResourceMemory, "1Gi").
						Obj(),
				).
				Preemption(kueue.ClusterQueuePreemption{
					WithinClusterQueue: kueue.PreemptionPolicyLowerPriority,
				}).
				Obj()
			gomega.Expect(k8sClient.Create(ctx, clusterQueue)).Should(gomega.Succeed())
			localQueue = testing.MakeLocalQueue("main", ns.Name).ClusterQueue("cluster-queue").Obj()
			gomega.Expect(k8sClient.Create(ctx, localQueue)).Should(gomega.Succeed())
		})
		ginkgo.AfterEach(func() {
			gomega.Expect(util.DeleteLocalQueue(ctx, k8sClient, localQueue)).Should(gomega.Succeed())
			gomega.Expect(util.DeleteAllJobsInNamespace(ctx, k8sClient, ns)).Should(gomega.Succeed())
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandRF, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, spotRF, true)
		})

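		// Flavors within a resource group are tried in the order they are
		// listed, so a job that fits both flavors is expected to land on
		// on-demand before spot.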
		ginkgo.It("Should unsuspend a job and set nodeSelectors", func() {
			// Use a binary that ends.
			sampleJob = (&testingjob.JobWrapper{Job: *sampleJob}).Image("gcr.io/k8s-staging-perf-tests/sleep:v0.1.0", []string{"5s"}).Obj()
			gomega.Expect(k8sClient.Create(ctx, sampleJob)).Should(gomega.Succeed())

			createdWorkload := &kueue.Workload{}
			expectJobUnsuspendedWithNodeSelectors(jobKey, map[string]string{
				"instance-type": "on-demand",
			})
			wlLookupKey := types.NamespacedName{Name: workloadjob.GetWorkloadNameForJob(jobKey.Name), Namespace: ns.Name}
			gomega.Eventually(func() bool {
				if err := k8sClient.Get(ctx, wlLookupKey, createdWorkload); err != nil {
					return false
				}
				return workload.HasQuotaReservation(createdWorkload) &&
					apimeta.IsStatusConditionTrue(createdWorkload.Status.Conditions, kueue.WorkloadFinished)
			}, util.LongTimeout, util.Interval).Should(gomega.BeTrue())
		})

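		// A prebuilt Workload is created ahead of the Job; labeling the Job
		// with constants.PrebuiltWorkloadLabel tells Kueue to adopt that
		// Workload instead of generating one from the Job spec.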
		ginkgo.It("Should run with prebuilt workload", func() {
			var wl *kueue.Workload
			ginkgo.By("Create the prebuilt workload and the job adopting it", func() {
				sampleJob = (&testingjob.JobWrapper{Job: *sampleJob}).
					Label(constants.PrebuiltWorkloadLabel, "prebuilt-wl").
					Image("gcr.io/k8s-staging-perf-tests/sleep:v0.1.0", []string{"5s"}).
					BackoffLimit(0).
					TerminationGracePeriod(1).
					Obj()
				testingjob.SetContainerDefaults(&sampleJob.Spec.Template.Spec.Containers[0])

				wl = testing.MakeWorkload("prebuilt-wl", ns.Name).
					Finalizers(kueue.ResourceInUseFinalizerName).
					Queue(localQueue.Name).
					PodSets(
						*testing.MakePodSet("main", 1).Containers(sampleJob.Spec.Template.Spec.Containers[0]).Obj(),
					).
					Obj()
				gomega.Expect(k8sClient.Create(ctx, wl)).Should(gomega.Succeed())
				gomega.Expect(k8sClient.Create(ctx, sampleJob)).Should(gomega.Succeed())
			})

			createdWorkload := &kueue.Workload{}
			wlLookupKey := client.ObjectKeyFromObject(wl)
			createdJob := &batchv1.Job{}
			jobLookupKey := client.ObjectKeyFromObject(sampleJob)

			ginkgo.By("Verify the prebuilt workload is adopted by the job", func() {
				gomega.Eventually(func(g gomega.Gomega) {
					g.Expect(k8sClient.Get(ctx, jobLookupKey, createdJob)).To(gomega.Succeed())
					g.Expect(k8sClient.Get(ctx, wlLookupKey, createdWorkload)).To(gomega.Succeed())
					g.Expect(wl.Spec.PodSets[0].Template.Spec.Containers).To(gomega.BeComparableTo(createdJob.Spec.Template.Spec.Containers), "Check the way the job and workload are created")
					g.Expect(createdWorkload.OwnerReferences).To(gomega.ContainElement(
						gomega.BeComparableTo(metav1.OwnerReference{
							Name: sampleJob.Name,
							UID:  sampleJob.UID,
						}, cmpopts.IgnoreFields(metav1.OwnerReference{}, "APIVersion", "Kind", "Controller", "BlockOwnerDeletion"))))
				}, util.Timeout, util.Interval).Should(gomega.Succeed())
			})

			ginkgo.By("Verify the job is running", func() {
				expectJobUnsuspendedWithNodeSelectors(jobKey, map[string]string{
					"instance-type": "on-demand",
				})
			})

			ginkgo.By("Delete all pods", func() {
				gomega.Expect(util.DeleteAllPodsInNamespace(ctx, k8sClient, ns)).Should(gomega.Succeed())
			})

			ginkgo.By("Await job completion", func() {
				gomega.Eventually(func(g gomega.Gomega) {
					g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(wl), createdWorkload)).To(gomega.Succeed())
					g.Expect(createdWorkload.Finalizers).NotTo(gomega.ContainElement(kueue.ResourceInUseFinalizerName))
					g.Expect(createdWorkload.Status.Conditions).To(gomega.ContainElement(
						gomega.BeComparableTo(metav1.Condition{
							Type:   kueue.WorkloadFinished,
							Status: metav1.ConditionTrue,
							Reason: "JobFinished",
						}, cmpopts.IgnoreFields(metav1.Condition{}, "LastTransitionTime", "Message"))))
				}, util.LongTimeout, util.Interval).Should(gomega.Succeed())
			})
		})

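		// Both flavors fit a single 1-CPU job. The preemptor pins itself to
		// the on-demand flavor via a nodeSelector, evicting the lower-priority
		// job, which should then be readmitted under the remaining spot quota.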
		ginkgo.It("Should readmit preempted job with priorityClass into a separate flavor", func() {
			gomega.Expect(k8sClient.Create(ctx, sampleJob)).Should(gomega.Succeed())

			highPriorityClass := testing.MakePriorityClass("high").PriorityValue(100).Obj()
			gomega.Expect(k8sClient.Create(ctx, highPriorityClass)).Should(gomega.Succeed())
			ginkgo.DeferCleanup(func() {
				gomega.Expect(k8sClient.Delete(ctx, highPriorityClass)).To(gomega.Succeed())
			})

			ginkgo.By("Job is admitted using the first flavor", func() {
				expectJobUnsuspendedWithNodeSelectors(jobKey, map[string]string{
					"instance-type": "on-demand",
				})
			})

			ginkgo.By("Job is preempted by higher priority job", func() {
				job := testingjob.MakeJob("high", ns.Name).
					Queue("main").
					PriorityClass("high").
					Request(corev1.ResourceCPU, "1").
					NodeSelector("instance-type", "on-demand"). // target the same flavor to cause preemption
					Obj()
				gomega.Expect(k8sClient.Create(ctx, job)).Should(gomega.Succeed())

				expectJobUnsuspendedWithNodeSelectors(client.ObjectKeyFromObject(job), map[string]string{
					"instance-type": "on-demand",
				})
			})

			ginkgo.By("Job is re-admitted using the second flavor", func() {
				expectJobUnsuspendedWithNodeSelectors(jobKey, map[string]string{
					"instance-type": "spot",
				})
			})
		})

		ginkgo.It("Should readmit preempted job with workloadPriorityClass into a separate flavor", func() {
			gomega.Expect(k8sClient.Create(ctx, sampleJob)).Should(gomega.Succeed())

			highWorkloadPriorityClass := testing.MakeWorkloadPriorityClass("high-workload").PriorityValue(300).Obj()
			gomega.Expect(k8sClient.Create(ctx, highWorkloadPriorityClass)).Should(gomega.Succeed())
			ginkgo.DeferCleanup(func() {
				gomega.Expect(k8sClient.Delete(ctx, highWorkloadPriorityClass)).To(gomega.Succeed())
			})

			ginkgo.By("Job is admitted using the first flavor", func() {
				expectJobUnsuspendedWithNodeSelectors(jobKey, map[string]string{
					"instance-type": "on-demand",
				})
			})

			ginkgo.By("Job is preempted by higher priority job", func() {
				job := testingjob.MakeJob("high-with-wpc", ns.Name).
					Queue("main").
					WorkloadPriorityClass("high-workload").
					Request(corev1.ResourceCPU, "1").
					NodeSelector("instance-type", "on-demand"). // target the same flavor to cause preemption
					Obj()
				gomega.Expect(k8sClient.Create(ctx, job)).Should(gomega.Succeed())

				expectJobUnsuspendedWithNodeSelectors(client.ObjectKeyFromObject(job), map[string]string{
					"instance-type": "on-demand",
				})
			})

			ginkgo.By("Job is re-admitted using the second flavor", func() {
				expectJobUnsuspendedWithNodeSelectors(jobKey, map[string]string{
					"instance-type": "spot",
				})
			})
		})
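
		// Partial admission: with JobMinParallelismAnnotation set, Kueue may
		// admit the Job at reduced parallelism when the full request does not
		// fit a single flavor's quota (here 3 x 500m CPU exceeds the 1-CPU
		// flavor, so parallelism is expected to drop to 2).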
		ginkgo.It("Should partially admit the Job if configured and it does not fully fit", func() {
			// Use a binary that ends.
			job := testingjob.MakeJob("job", ns.Name).
				Queue("main").
				Image("gcr.io/k8s-staging-perf-tests/sleep:v0.1.0", []string{"1s"}).
				Request("cpu", "500m").
				Parallelism(3).
				Completions(4).
				SetAnnotation(workloadjob.JobMinParallelismAnnotation, "1").
				Obj()
			gomega.Expect(k8sClient.Create(ctx, job)).Should(gomega.Succeed())

			ginkgo.By("Wait for the job to start and check the updated Parallelism and Completions", func() {
				jobKey := client.ObjectKeyFromObject(job)
				expectJobUnsuspendedWithNodeSelectors(jobKey, map[string]string{
					"instance-type": "on-demand",
				})

				updatedJob := &batchv1.Job{}
				gomega.Eventually(func(g gomega.Gomega) {
					g.Expect(k8sClient.Get(ctx, jobKey, updatedJob)).To(gomega.Succeed())
					g.Expect(ptr.Deref(updatedJob.Spec.Parallelism, 0)).To(gomega.Equal(int32(2)))
					g.Expect(ptr.Deref(updatedJob.Spec.Completions, 0)).To(gomega.Equal(int32(4)))
				}, util.Timeout, util.Interval).Should(gomega.Succeed())
			})

			ginkgo.By("Wait for the job to finish", func() {
				createdWorkload := &kueue.Workload{}
				wlLookupKey := types.NamespacedName{Name: workloadjob.GetWorkloadNameForJob(job.Name), Namespace: ns.Name}
				gomega.Eventually(func() bool {
					if err := k8sClient.Get(ctx, wlLookupKey, createdWorkload); err != nil {
						return false
					}
					return workload.HasQuotaReservation(createdWorkload) &&
						apimeta.IsStatusConditionTrue(createdWorkload.Status.Conditions, kueue.WorkloadFinished)
				}, util.LongTimeout, util.Interval).Should(gomega.BeTrue())
			})
		})
	})

	ginkgo.When("Creating a Job in a Queue with two-step admission", func() {
		var (
			onDemandRF   *kueue.ResourceFlavor
			localQueue   *kueue.LocalQueue
			clusterQueue *kueue.ClusterQueue
			check        *kueue.AdmissionCheck
		)
		ginkgo.BeforeEach(func() {
			check = testing.MakeAdmissionCheck("check1").ControllerName("ac-controller").Obj()
			gomega.Expect(k8sClient.Create(ctx, check)).Should(gomega.Succeed())
			util.SetAdmissionCheckActive(ctx, k8sClient, check, metav1.ConditionTrue)
			onDemandRF = testing.MakeResourceFlavor("on-demand").
				Label("instance-type", "on-demand").Obj()
			gomega.Expect(k8sClient.Create(ctx, onDemandRF)).Should(gomega.Succeed())
			clusterQueue = testing.MakeClusterQueue("cluster-queue").
				ResourceGroup(
					*testing.MakeFlavorQuotas("on-demand").
						Resource(corev1.ResourceCPU, "1").
						Resource(corev1.ResourceMemory, "1Gi").
						Obj(),
				).
				AdmissionChecks("check1").
				Obj()
			gomega.Expect(k8sClient.Create(ctx, clusterQueue)).Should(gomega.Succeed())
			localQueue = testing.MakeLocalQueue("main", ns.Name).ClusterQueue("cluster-queue").Obj()
			gomega.Expect(k8sClient.Create(ctx, localQueue)).Should(gomega.Succeed())
		})
		ginkgo.AfterEach(func() {
			gomega.Expect(util.DeleteLocalQueue(ctx, k8sClient, localQueue)).Should(gomega.Succeed())
			gomega.Expect(util.DeleteAllJobsInNamespace(ctx, k8sClient, ns)).Should(gomega.Succeed())
			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, clusterQueue, true)
			util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandRF, true)
			gomega.Expect(k8sClient.Delete(ctx, check)).Should(gomega.Succeed())
		})

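		// With an AdmissionCheck attached to the ClusterQueue, admission is a
		// two-step process: Kueue first reserves quota, then keeps the job
		// suspended until every check reports Ready.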
		ginkgo.It("Should unsuspend a job only after all checks are cleared", func() {
			// Use a binary that ends.
			sampleJob = (&testingjob.JobWrapper{Job: *sampleJob}).Image("gcr.io/k8s-staging-perf-tests/sleep:v0.1.0", []string{"5s"}).Obj()
			gomega.Expect(k8sClient.Create(ctx, sampleJob)).Should(gomega.Succeed())

			createdWorkload := &kueue.Workload{}
			wlLookupKey := types.NamespacedName{Name: workloadjob.GetWorkloadNameForJob(jobKey.Name), Namespace: ns.Name}

			ginkgo.By("verify the check is added to the workload", func() {
				gomega.Eventually(func() map[string]string {
					if err := k8sClient.Get(ctx, wlLookupKey, createdWorkload); err != nil {
						return nil
					}
					return slices.ToMap(createdWorkload.Status.AdmissionChecks, func(i int) (string, string) { return createdWorkload.Status.AdmissionChecks[i].Name, "" })
				}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(map[string]string{"check1": ""}))
			})

			ginkgo.By("waiting for the workload to be assigned", func() {
				gomega.Eventually(func() bool {
					if err := k8sClient.Get(ctx, wlLookupKey, createdWorkload); err != nil {
						return false
					}
					return apimeta.IsStatusConditionTrue(createdWorkload.Status.Conditions, kueue.WorkloadQuotaReserved)
				}, util.Timeout, util.Interval).Should(gomega.BeTrue())
			})

			ginkgo.By("checking the job remains suspended", func() {
				createdJob := &batchv1.Job{}
				jobKey := client.ObjectKeyFromObject(sampleJob)
				gomega.Consistently(func() bool {
					if err := k8sClient.Get(ctx, jobKey, createdJob); err != nil {
						return false
					}
					return ptr.Deref(createdJob.Spec.Suspend, false)
				}, util.ConsistentDuration, util.Interval).Should(gomega.BeTrue())
			})

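			// Emulate an external admission check controller: report the
			// check state through a server-side apply patch on the workload
			// status, using a dedicated field owner so the patch does not
			// clobber fields managed by other controllers.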
			ginkgo.By("setting the check as successful", func() {
				gomega.Eventually(func() error {
					if err := k8sClient.Get(ctx, wlLookupKey, createdWorkload); err != nil {
						return err
					}
					patch := workload.BaseSSAWorkload(createdWorkload)
					workload.SetAdmissionCheckState(&patch.Status.AdmissionChecks, kueue.AdmissionCheckState{
						Name:  "check1",
						State: kueue.CheckStateReady,
					})
					return k8sClient.Status().Patch(ctx, patch, client.Apply, client.FieldOwner("test-admission-check-controller"), client.ForceOwnership)
				}, util.Timeout, util.Interval).Should(gomega.Succeed())
			})

			expectJobUnsuspendedWithNodeSelectors(jobKey, map[string]string{
				"instance-type": "on-demand",
			})
			gomega.Eventually(func() bool {
				if err := k8sClient.Get(ctx, wlLookupKey, createdWorkload); err != nil {
					return false
				}
				return workload.HasQuotaReservation(createdWorkload) &&
					apimeta.IsStatusConditionTrue(createdWorkload.Status.Conditions, kueue.WorkloadFinished)
			}, util.LongTimeout, util.Interval).Should(gomega.BeTrue())
		})

		ginkgo.It("Should suspend a job when its checks become invalid", func() {
			// Use a binary that ends.
			sampleJob = (&testingjob.JobWrapper{Job: *sampleJob}).Image("gcr.io/k8s-staging-perf-tests/sleep:v0.1.0", []string{"5s"}).Obj()
			gomega.Expect(k8sClient.Create(ctx, sampleJob)).Should(gomega.Succeed())

			createdWorkload := &kueue.Workload{}
			wlLookupKey := types.NamespacedName{Name: workloadjob.GetWorkloadNameForJob(jobKey.Name), Namespace: ns.Name}

			ginkgo.By("verify the check is added to the workload", func() {
				gomega.Eventually(func() map[string]string {
					if err := k8sClient.Get(ctx, wlLookupKey, createdWorkload); err != nil {
						return nil
					}
					return slices.ToMap(createdWorkload.Status.AdmissionChecks, func(i int) (string, string) { return createdWorkload.Status.AdmissionChecks[i].Name, "" })
				}, util.Timeout, util.Interval).Should(gomega.BeComparableTo(map[string]string{"check1": ""}))
			})

			ginkgo.By("setting the check as successful", func() {
				gomega.Eventually(func() error {
					if err := k8sClient.Get(ctx, wlLookupKey, createdWorkload); err != nil {
						return err
					}
					patch := workload.BaseSSAWorkload(createdWorkload)
					workload.SetAdmissionCheckState(&patch.Status.AdmissionChecks, kueue.AdmissionCheckState{
						Name:  "check1",
						State: kueue.CheckStateReady,
					})
					return k8sClient.Status().Patch(ctx, patch, client.Apply, client.FieldOwner("test-admission-check-controller"), client.ForceOwnership)
				}, util.Timeout, util.Interval).Should(gomega.Succeed())
			})

			expectJobUnsuspendedWithNodeSelectors(jobKey, map[string]string{
				"instance-type": "on-demand",
			})

			ginkgo.By("setting the check as failed (Retry)", func() {
				gomega.Eventually(func() error {
					if err := k8sClient.Get(ctx, wlLookupKey, createdWorkload); err != nil {
						return err
					}
					patch := workload.BaseSSAWorkload(createdWorkload)
					workload.SetAdmissionCheckState(&patch.Status.AdmissionChecks, kueue.AdmissionCheckState{
						Name:  "check1",
						State: kueue.CheckStateRetry,
					})
					return k8sClient.Status().Patch(ctx, patch, client.Apply, client.FieldOwner("test-admission-check-controller"), client.ForceOwnership)
				}, util.Timeout, util.Interval).Should(gomega.Succeed())
			})

			ginkgo.By("checking the job gets suspended", func() {
				createdJob := &batchv1.Job{}
				jobKey := client.ObjectKeyFromObject(sampleJob)
				gomega.Eventually(func() bool {
					if err := k8sClient.Get(ctx, jobKey, createdJob); err != nil {
						return false
					}
					return ptr.Deref(createdJob.Spec.Suspend, false)
				}, util.Timeout, util.Interval).Should(gomega.BeTrue())
			})
		})
	})
})

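// expectJobUnsuspended waits until the job identified by key has
// .spec.suspend set to false.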
func expectJobUnsuspended(key types.NamespacedName) {
	job := &batchv1.Job{}
	gomega.EventuallyWithOffset(1, func() *bool {
		gomega.Expect(k8sClient.Get(ctx, key, job)).To(gomega.Succeed())
		return job.Spec.Suspend
	}, util.Timeout, util.Interval).Should(gomega.Equal(ptr.To(false)))
}

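// expectJobUnsuspendedWithNodeSelectors waits until the job identified by key
// is unsuspended and its pod template carries the expected nodeSelector,
// which reveals the ResourceFlavor the workload was admitted under.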
func expectJobUnsuspendedWithNodeSelectors(key types.NamespacedName, ns map[string]string) {
	job := &batchv1.Job{}
	gomega.EventuallyWithOffset(1, func() []any {
		gomega.Expect(k8sClient.Get(ctx, key, job)).To(gomega.Succeed())
		return []any{*job.Spec.Suspend, job.Spec.Template.Spec.NodeSelector}
	}, util.Timeout, util.Interval).Should(gomega.Equal([]any{false, ns}))
}

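// defaultOwnerReferenceForJob returns the owner reference that a Workload
// created for a batch/v1 Job is expected to carry; UID and controller fields
// are left unset for callers that compare only these fields.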
func defaultOwnerReferenceForJob(name string) []metav1.OwnerReference {
	return []metav1.OwnerReference{
		{
			APIVersion: "batch/v1",
			Kind:       "Job",
			Name:       name,
		},
	}
}