sigs.k8s.io/kueue@v0.6.2/test/e2e/singlecluster/pod_test.go (about)

     1  /*
     2  Copyright 2024 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package e2e
    18  
    19  import (
    20  	"github.com/onsi/ginkgo/v2"
    21  	"github.com/onsi/gomega"
    22  	corev1 "k8s.io/api/core/v1"
    23  	v1 "k8s.io/api/core/v1"
    24  	"k8s.io/apimachinery/pkg/api/resource"
    25  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    26  	"k8s.io/apimachinery/pkg/types"
    27  	"k8s.io/apimachinery/pkg/util/sets"
    28  	"k8s.io/apimachinery/pkg/util/version"
    29  	"k8s.io/client-go/discovery"
    30  	"sigs.k8s.io/controller-runtime/pkg/client"
    31  	"sigs.k8s.io/controller-runtime/pkg/client/config"
    32  
    33  	kueue "sigs.k8s.io/kueue/apis/kueue/v1beta1"
    34  	"sigs.k8s.io/kueue/pkg/controller/jobs/pod"
    35  	"sigs.k8s.io/kueue/pkg/util/kubeversion"
    36  	"sigs.k8s.io/kueue/pkg/util/testing"
    37  	podtesting "sigs.k8s.io/kueue/pkg/util/testingjobs/pod"
    38  	"sigs.k8s.io/kueue/test/util"
    39  )
    40  
    41  // +kubebuilder:docs-gen:collapse=Imports
    42  
    43  var _ = ginkgo.Describe("Pod groups", func() {
    44  	var (
    45  		ns         *corev1.Namespace
    46  		onDemandRF *kueue.ResourceFlavor
    47  	)
    48  
    49  	ginkgo.BeforeEach(func() {
    50  		if kubeVersion().LessThan(kubeversion.KubeVersion1_27) {
    51  			ginkgo.Skip("Unsupported in versions older than 1.27")
    52  		}
    53  		ns = &corev1.Namespace{
    54  			ObjectMeta: metav1.ObjectMeta{
    55  				GenerateName: "pod-e2e-",
    56  			},
    57  		}
    58  		gomega.Expect(k8sClient.Create(ctx, ns)).To(gomega.Succeed())
    59  		onDemandRF = testing.MakeResourceFlavor("on-demand").Label("instance-type", "on-demand").Obj()
    60  		gomega.Expect(k8sClient.Create(ctx, onDemandRF)).To(gomega.Succeed())
    61  	})
    62  	ginkgo.AfterEach(func() {
    63  		gomega.Expect(util.DeleteNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
    64  		util.ExpectResourceFlavorToBeDeleted(ctx, k8sClient, onDemandRF, true)
    65  	})
    66  
    67  	ginkgo.When("Single CQ", func() {
    68  		var (
    69  			cq *kueue.ClusterQueue
    70  			lq *kueue.LocalQueue
    71  		)
    72  
    73  		ginkgo.BeforeEach(func() {
    74  			cq = testing.MakeClusterQueue("cq").
    75  				ResourceGroup(
    76  					*testing.MakeFlavorQuotas("on-demand").Resource(corev1.ResourceCPU, "5").Obj(),
    77  				).
    78  				Preemption(kueue.ClusterQueuePreemption{
    79  					WithinClusterQueue: kueue.PreemptionPolicyLowerPriority,
    80  				}).
    81  				Obj()
    82  			gomega.Expect(k8sClient.Create(ctx, cq)).To(gomega.Succeed())
    83  			lq = testing.MakeLocalQueue("queue", ns.Name).ClusterQueue(cq.Name).Obj()
    84  			gomega.Expect(k8sClient.Create(ctx, lq)).To(gomega.Succeed())
    85  		})
    86  		ginkgo.AfterEach(func() {
    87  			gomega.Expect(util.DeleteAllPodsInNamespace(ctx, k8sClient, ns)).To(gomega.Succeed())
    88  			util.ExpectClusterQueueToBeDeleted(ctx, k8sClient, cq, true)
    89  		})
    90  
    91  		ginkgo.It("should admit group that fits", func() {
    92  			group := podtesting.MakePod("group", ns.Name).
    93  				Image("gcr.io/k8s-staging-perf-tests/sleep:v0.1.0", []string{"1ms"}).
    94  				Queue(lq.Name).
    95  				Request(corev1.ResourceCPU, "1").
    96  				MakeGroup(2)
    97  			gKey := client.ObjectKey{Namespace: ns.Name, Name: "group"}
    98  			for _, p := range group {
    99  				gomega.Expect(k8sClient.Create(ctx, p)).To(gomega.Succeed())
   100  				gomega.Expect(p.Spec.SchedulingGates).
   101  					To(gomega.ContainElement(corev1.PodSchedulingGate{
   102  						Name: pod.SchedulingGateName}))
   103  			}
   104  			ginkgo.By("Starting admission", func() {
   105  				// Verify that the Pods start with the appropriate selector.
   106  				gomega.Eventually(func(g gomega.Gomega) {
   107  					for _, origPod := range group {
   108  						var p corev1.Pod
   109  						gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(origPod), &p)).To(gomega.Succeed())
   110  						g.Expect(p.Spec.SchedulingGates).To(gomega.BeEmpty())
   111  						g.Expect(p.Spec.NodeSelector).To(gomega.Equal(map[string]string{
   112  							"instance-type": "on-demand",
   113  						}))
   114  					}
   115  				}).Should(gomega.Succeed())
   116  
   117  				util.ExpectWorkloadToFinish(ctx, k8sClient, gKey)
   118  			})
   119  
   120  			ginkgo.By("Deleting finished Pods", func() {
   121  				for _, p := range group {
   122  					gomega.Expect(k8sClient.Delete(ctx, p)).To(gomega.Succeed())
   123  				}
   124  				gomega.Eventually(func(g gomega.Gomega) {
   125  					for _, p := range group {
   126  						var pCopy corev1.Pod
   127  						g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(p), &pCopy)).To(testing.BeNotFoundError())
   128  					}
   129  					var wl kueue.Workload
   130  					g.Expect(k8sClient.Get(ctx, gKey, &wl)).Should(testing.BeNotFoundError())
   131  				}, util.Timeout, util.Interval)
   132  			})
   133  		})
   134  
   135  		ginkgo.It("Should only admit a complete group", func() {
   136  			group := podtesting.MakePod("group", ns.Name).
   137  				Image("gcr.io/k8s-staging-perf-tests/sleep:v0.1.0", []string{"1ms"}).
   138  				Queue(lq.Name).
   139  				Request(corev1.ResourceCPU, "1").
   140  				MakeGroup(3)
   141  
   142  			ginkgo.By("Incomplete group should not start", func() {
   143  				// Create incomplete group.
   144  				for _, p := range group[:2] {
   145  					gomega.Expect(k8sClient.Create(ctx, p.DeepCopy())).To(gomega.Succeed())
   146  				}
   147  				gomega.Consistently(func(g gomega.Gomega) {
   148  					for _, origPod := range group[:2] {
   149  						var p corev1.Pod
   150  						g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(origPod), &p)).To(gomega.Succeed())
   151  						g.Expect(p.Spec.SchedulingGates).
   152  							To(gomega.ContainElement(corev1.PodSchedulingGate{
   153  								Name: pod.SchedulingGateName}))
   154  					}
   155  				}, util.ConsistentDuration, util.Interval).Should(gomega.Succeed())
   156  			})
   157  			ginkgo.By("Incomplete group can be deleted", func() {
   158  				for _, p := range group[:2] {
   159  					gomega.Expect(k8sClient.Delete(ctx, p)).To(gomega.Succeed())
   160  				}
   161  				gomega.Eventually(func(g gomega.Gomega) {
   162  					for _, origPod := range group[:2] {
   163  						var p corev1.Pod
   164  						err := k8sClient.Get(ctx, client.ObjectKeyFromObject(origPod), &p)
   165  						g.Expect(err).To(testing.BeNotFoundError())
   166  					}
   167  				}, util.Timeout, util.Interval).Should(gomega.Succeed())
   168  			})
   169  			ginkgo.By("Complete group runs successfully", func() {
   170  				for _, p := range group {
   171  					gomega.Expect(k8sClient.Create(ctx, p.DeepCopy())).To(gomega.Succeed())
   172  				}
   173  
   174  				util.ExpectWorkloadToFinish(ctx, k8sClient, client.ObjectKey{Namespace: ns.Name, Name: "group"})
   175  			})
   176  		})
   177  
   178  		ginkgo.It("Failed Pod can be replaced in group", func() {
   179  			eventList := corev1.EventList{}
   180  			eventWatcher, err := k8sClient.Watch(ctx, &eventList, &client.ListOptions{
   181  				Namespace: ns.Name,
   182  			})
   183  			gomega.Expect(err).NotTo(gomega.HaveOccurred())
   184  
   185  			ginkgo.DeferCleanup(func() {
   186  				eventWatcher.Stop()
   187  			})
   188  
   189  			group := podtesting.MakePod("group", ns.Name).
   190  				Image("gcr.io/k8s-staging-perf-tests/sleep:v0.1.0", []string{"1ms"}).
   191  				Queue(lq.Name).
   192  				Request(corev1.ResourceCPU, "1").
   193  				MakeGroup(3)
   194  
   195  			// First pod runs for much longer, so that there is time to terminate it.
   196  			group[0].Spec.Containers[0].Args = []string{"-termination-code=1", "10m"}
   197  
   198  			ginkgo.By("Group starts", func() {
   199  				for _, p := range group {
   200  					gomega.Expect(k8sClient.Create(ctx, p.DeepCopy())).To(gomega.Succeed())
   201  				}
   202  				gomega.Eventually(func(g gomega.Gomega) {
   203  					for _, origPod := range group {
   204  						var p corev1.Pod
   205  						gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(origPod), &p)).To(gomega.Succeed())
   206  						g.Expect(p.Spec.SchedulingGates).To(gomega.BeEmpty())
   207  					}
   208  				}, util.Timeout, util.Interval).Should(gomega.Succeed())
   209  			})
   210  
   211  			ginkgo.By("Fail a pod", func() {
   212  				gomega.Expect(k8sClient.Delete(ctx, group[0])).To(gomega.Succeed())
   213  				gomega.Eventually(func() corev1.PodPhase {
   214  					var p corev1.Pod
   215  					gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(group[0]), &p)).To(gomega.Succeed())
   216  					return p.Status.Phase
   217  				}, util.Timeout, util.Interval).Should(gomega.Equal(corev1.PodFailed))
   218  			})
   219  
   220  			ginkgo.By("Replacement pod starts, and the failed one is deleted", func() {
   221  				// Use a pod template that can succeed fast.
   222  				rep := group[2].DeepCopy()
   223  				rep.Name = "replacement"
   224  				gomega.Expect(k8sClient.Create(ctx, rep)).To(gomega.Succeed())
   225  				gomega.Eventually(func(g gomega.Gomega) {
   226  					var p corev1.Pod
   227  					g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(rep), &p)).To(gomega.Succeed())
   228  					g.Expect(p.Spec.SchedulingGates).To(gomega.BeEmpty())
   229  					g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(group[0]), &p)).To(testing.BeNotFoundError())
   230  				}, util.Timeout, util.Interval).Should(gomega.Succeed())
   231  			})
   232  
   233  			ginkgo.By("Excess pod is deleted", func() {
   234  				excess := group[2].DeepCopy()
   235  				excess.Name = "excess"
   236  				excessPods := sets.New(client.ObjectKeyFromObject(excess))
   237  				ginkgo.By("Create the excess pod", func() {
   238  					gomega.Expect(k8sClient.Create(ctx, excess)).To(gomega.Succeed())
   239  				})
   240  				ginkgo.By("Use events to observe the excess pods are getting stopped", func() {
   241  					preemptedPods := sets.New[types.NamespacedName]()
   242  					gomega.Eventually(func(g gomega.Gomega) sets.Set[types.NamespacedName] {
   243  						select {
   244  						case evt, ok := <-eventWatcher.ResultChan():
   245  							gomega.Expect(ok).To(gomega.BeTrue())
   246  							event, ok := evt.Object.(*v1.Event)
   247  							gomega.Expect(ok).To(gomega.BeTrue())
   248  							if event.InvolvedObject.Namespace == ns.Name && event.Reason == "ExcessPodDeleted" {
   249  								objKey := types.NamespacedName{Namespace: event.InvolvedObject.Namespace, Name: event.InvolvedObject.Name}
   250  								preemptedPods.Insert(objKey)
   251  							}
   252  						default:
   253  						}
   254  						return preemptedPods
   255  					}, util.Timeout, util.Interval).Should(gomega.Equal(excessPods))
   256  				})
   257  				ginkgo.By("Verify the excess pod is deleted", func() {
   258  					gomega.Eventually(func() error {
   259  						return k8sClient.Get(ctx, client.ObjectKeyFromObject(excess), &corev1.Pod{})
   260  					}, util.Timeout, util.Interval).Should(testing.BeNotFoundError())
   261  				})
   262  			})
   263  
   264  			util.ExpectWorkloadToFinish(ctx, k8sClient, client.ObjectKey{Namespace: ns.Name, Name: "group"})
   265  		})
   266  
   267  		ginkgo.It("should allow to schedule a group of diverse pods", func() {
   268  			group := podtesting.MakePod("group", ns.Name).
   269  				Image("gcr.io/k8s-staging-perf-tests/sleep:v0.1.0", []string{"1ms"}).
   270  				Queue(lq.Name).
   271  				Request(corev1.ResourceCPU, "3").
   272  				MakeGroup(2)
   273  			gKey := client.ObjectKey{Namespace: ns.Name, Name: "group"}
   274  
   275  			// make the group of pods diverse using different amount of resources
   276  			group[0].Spec.Containers[0].Resources.Requests[corev1.ResourceCPU] = resource.MustParse("2")
   277  
   278  			ginkgo.By("Group starts", func() {
   279  				for _, p := range group {
   280  					gomega.Expect(k8sClient.Create(ctx, p.DeepCopy())).To(gomega.Succeed())
   281  				}
   282  				gomega.Eventually(func(g gomega.Gomega) {
   283  					for _, origPod := range group {
   284  						var p corev1.Pod
   285  						gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(origPod), &p)).To(gomega.Succeed())
   286  						g.Expect(p.Spec.SchedulingGates).To(gomega.BeEmpty())
   287  					}
   288  				}, util.Timeout, util.Interval).Should(gomega.Succeed())
   289  			})
   290  
   291  			ginkgo.By("Group completes", func() {
   292  				util.ExpectWorkloadToFinish(ctx, k8sClient, client.ObjectKey{Namespace: ns.Name, Name: "group"})
   293  			})
   294  			ginkgo.By("Deleting finished Pods", func() {
   295  				for _, p := range group {
   296  					gomega.Expect(k8sClient.Delete(ctx, p)).To(gomega.Succeed())
   297  				}
   298  				gomega.Eventually(func(g gomega.Gomega) {
   299  					for _, p := range group {
   300  						var pCopy corev1.Pod
   301  						g.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(p), &pCopy)).To(testing.BeNotFoundError())
   302  					}
   303  					var wl kueue.Workload
   304  					g.Expect(k8sClient.Get(ctx, gKey, &wl)).Should(testing.BeNotFoundError())
   305  				}, util.Timeout, util.Interval)
   306  			})
   307  		})
   308  
   309  		ginkgo.It("should allow to preempt the lower priority group", func() {
   310  			eventList := corev1.EventList{}
   311  			eventWatcher, err := k8sClient.Watch(ctx, &eventList, &client.ListOptions{
   312  				Namespace: ns.Name,
   313  			})
   314  			gomega.Expect(err).NotTo(gomega.HaveOccurred())
   315  
   316  			ginkgo.DeferCleanup(func() {
   317  				eventWatcher.Stop()
   318  			})
   319  
   320  			highPriorityClass := testing.MakePriorityClass("high").PriorityValue(100).Obj()
   321  			gomega.Expect(k8sClient.Create(ctx, highPriorityClass))
   322  			ginkgo.DeferCleanup(func() {
   323  				gomega.Expect(k8sClient.Delete(ctx, highPriorityClass)).To(gomega.Succeed())
   324  			})
   325  
   326  			defaultPriorityGroup := podtesting.MakePod("default-priority-group", ns.Name).
   327  				Image("gcr.io/k8s-staging-perf-tests/sleep:v0.1.0", []string{"-termination-code=1", "10min"}).
   328  				Queue(lq.Name).
   329  				Request(corev1.ResourceCPU, "2").
   330  				MakeGroup(2)
   331  			defaultGroupKey := client.ObjectKey{Namespace: ns.Name, Name: "default-priority-group"}
   332  			defaultGroupPods := sets.New[types.NamespacedName](
   333  				client.ObjectKeyFromObject(defaultPriorityGroup[0]),
   334  				client.ObjectKeyFromObject(defaultPriorityGroup[1]),
   335  			)
   336  
   337  			ginkgo.By("Default-priority group starts", func() {
   338  				for _, p := range defaultPriorityGroup {
   339  					gomega.Expect(k8sClient.Create(ctx, p.DeepCopy())).To(gomega.Succeed())
   340  				}
   341  				gomega.Eventually(func(g gomega.Gomega) {
   342  					for _, origPod := range defaultPriorityGroup {
   343  						var p corev1.Pod
   344  						gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(origPod), &p)).To(gomega.Succeed())
   345  						g.Expect(p.Spec.SchedulingGates).To(gomega.BeEmpty())
   346  					}
   347  				}, util.Timeout, util.Interval).Should(gomega.Succeed())
   348  			})
   349  
   350  			highPriorityGroup := podtesting.MakePod("high-priority-group", ns.Name).
   351  				Image("gcr.io/k8s-staging-perf-tests/sleep:v0.1.0", []string{"1ms"}).
   352  				Queue(lq.Name).
   353  				PriorityClass("high").
   354  				Request(corev1.ResourceCPU, "1").
   355  				MakeGroup(2)
   356  			highGroupKey := client.ObjectKey{Namespace: ns.Name, Name: "high-priority-group"}
   357  
   358  			ginkgo.By("Create the high-priority group", func() {
   359  				for _, p := range highPriorityGroup {
   360  					gomega.Expect(k8sClient.Create(ctx, p.DeepCopy())).To(gomega.Succeed())
   361  				}
   362  				gomega.Eventually(func(g gomega.Gomega) {
   363  					for _, origPod := range highPriorityGroup {
   364  						var p corev1.Pod
   365  						gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(origPod), &p)).To(gomega.Succeed())
   366  						g.Expect(p.Spec.SchedulingGates).To(gomega.BeEmpty())
   367  					}
   368  				}, util.Timeout, util.Interval).Should(gomega.Succeed())
   369  			})
   370  
   371  			ginkgo.By("The default priority workload is preempted", func() {
   372  				var updatedWorkload kueue.Workload
   373  				gomega.Expect(k8sClient.Get(ctx, defaultGroupKey, &updatedWorkload)).To(gomega.Succeed())
   374  				util.ExpectWorkloadsToBePreempted(ctx, k8sClient, &updatedWorkload)
   375  			})
   376  
   377  			ginkgo.By("Use events to observe the default-priority pods are getting preempted", func() {
   378  				preemptedPods := sets.New[types.NamespacedName]()
   379  				gomega.Eventually(func(g gomega.Gomega) sets.Set[types.NamespacedName] {
   380  					select {
   381  					case evt, ok := <-eventWatcher.ResultChan():
   382  						gomega.Expect(ok).To(gomega.BeTrue())
   383  						event, ok := evt.Object.(*v1.Event)
   384  						gomega.Expect(ok).To(gomega.BeTrue())
   385  						if event.InvolvedObject.Namespace == ns.Name && event.Reason == "Stopped" {
   386  							objKey := types.NamespacedName{Namespace: event.InvolvedObject.Namespace, Name: event.InvolvedObject.Name}
   387  							preemptedPods.Insert(objKey)
   388  						}
   389  					default:
   390  					}
   391  					return preemptedPods
   392  				}, util.Timeout, util.Interval).Should(gomega.Equal(defaultGroupPods))
   393  			})
   394  
   395  			replacementPods := make(map[types.NamespacedName]types.NamespacedName, len(defaultPriorityGroup))
   396  			ginkgo.By("Create replacement pods as soon as the default-priority pods are Failed", func() {
   397  				gomega.Eventually(func(g gomega.Gomega) int {
   398  					for _, origPod := range defaultPriorityGroup {
   399  						origKey := client.ObjectKeyFromObject(origPod)
   400  						if _, found := replacementPods[origKey]; !found {
   401  							var p corev1.Pod
   402  							gomega.Expect(k8sClient.Get(ctx, origKey, &p)).To(gomega.Succeed())
   403  							if p.Status.Phase == v1.PodFailed {
   404  								rep := origPod.DeepCopy()
   405  								// For replacement pods use args that let it complete fast.
   406  								rep.Name = "replacement-for-" + rep.Name
   407  								rep.Spec.Containers[0].Args = []string{"1ms"}
   408  								g.Expect(k8sClient.Create(ctx, rep)).To(gomega.Succeed())
   409  								replacementPods[origKey] = client.ObjectKeyFromObject(rep)
   410  							}
   411  						}
   412  					}
   413  					return len(replacementPods)
   414  				}, util.Timeout, util.Interval).Should(gomega.Equal(len(defaultPriorityGroup)))
   415  			})
   416  			ginkgo.By("Check that the preempted pods are deleted", func() {
   417  				gomega.Eventually(func(g gomega.Gomega) {
   418  					var p corev1.Pod
   419  					for _, origPod := range defaultPriorityGroup {
   420  						origKey := client.ObjectKeyFromObject(origPod)
   421  						g.Expect(k8sClient.Get(ctx, origKey, &p)).To(testing.BeNotFoundError())
   422  					}
   423  				}, util.Timeout, util.Interval).Should(gomega.Succeed())
   424  			})
   425  
   426  			ginkgo.By("Verify the high-priority pods are scheduled", func() {
   427  				gomega.Eventually(func(g gomega.Gomega) {
   428  					for _, origPod := range highPriorityGroup {
   429  						var p corev1.Pod
   430  						gomega.Expect(k8sClient.Get(ctx, client.ObjectKeyFromObject(origPod), &p)).To(gomega.Succeed())
   431  						g.Expect(p.Spec.SchedulingGates).To(gomega.BeEmpty())
   432  					}
   433  				}, util.Timeout, util.Interval).Should(gomega.Succeed())
   434  			})
   435  
   436  			ginkgo.By("Verify the high priority group completes", func() {
   437  				util.ExpectWorkloadToFinish(ctx, k8sClient, highGroupKey)
   438  			})
   439  
   440  			ginkgo.By("Await for the replacement pods to be ungated", func() {
   441  				for _, replKey := range replacementPods {
   442  					gomega.Eventually(func(g gomega.Gomega) {
   443  						var p corev1.Pod
   444  						g.Expect(k8sClient.Get(ctx, replKey, &p)).To(gomega.Succeed())
   445  						g.Expect(p.Spec.SchedulingGates).To(gomega.BeEmpty())
   446  					}, util.Timeout, util.Interval).Should(gomega.Succeed())
   447  				}
   448  			})
   449  
   450  			ginkgo.By("Verify the replacement pods of the default priority workload complete", func() {
   451  				for _, replKey := range replacementPods {
   452  					gomega.Eventually(func(g gomega.Gomega) {
   453  						var p corev1.Pod
   454  						g.Expect(k8sClient.Get(ctx, replKey, &p)).To(gomega.Succeed())
   455  						g.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded))
   456  					}, util.Timeout, util.Interval).Should(gomega.Succeed())
   457  				}
   458  			})
   459  
   460  			ginkgo.By("Verify the default priority workload is finished", func() {
   461  				util.ExpectWorkloadToFinish(ctx, k8sClient, defaultGroupKey)
   462  			})
   463  		})
   464  	})
   465  })
   466  
   467  func kubeVersion() *version.Version {
   468  	cfg, err := config.GetConfigWithContext("")
   469  	gomega.Expect(err).NotTo(gomega.HaveOccurred())
   470  	discoveryClient, err := discovery.NewDiscoveryClientForConfig(cfg)
   471  	gomega.Expect(err).NotTo(gomega.HaveOccurred())
   472  	v, err := kubeversion.FetchServerVersion(discoveryClient)
   473  	gomega.Expect(err).NotTo(gomega.HaveOccurred())
   474  	return v
   475  }