k8s.io/kubernetes@v1.29.3/test/e2e_node/critical_pod_test.go (about)

     1  /*
     2  Copyright 2016 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package e2enode
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  
    23  	v1 "k8s.io/api/core/v1"
    24  	"k8s.io/apimachinery/pkg/api/resource"
    25  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    26  	"k8s.io/klog/v2"
    27  	kubeapi "k8s.io/kubernetes/pkg/apis/core"
    28  	"k8s.io/kubernetes/pkg/apis/scheduling"
    29  	kubelettypes "k8s.io/kubernetes/pkg/kubelet/types"
    30  	"k8s.io/kubernetes/test/e2e/framework"
    31  	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
    32  	"k8s.io/kubernetes/test/e2e/nodefeature"
    33  	imageutils "k8s.io/kubernetes/test/utils/image"
    34  	admissionapi "k8s.io/pod-security-admission/api"
    35  
    36  	"github.com/onsi/ginkgo/v2"
    37  	"github.com/onsi/gomega"
    38  )
    39  
// Names of the pods created by the CriticalPod e2e tests below.
const (
	criticalPodName   = "static-critical-pod" // critical (system-node-critical) pod created in kube-system
	guaranteedPodName = "guaranteed"          // non-critical pod with Guaranteed QoS (requests == limits)
	burstablePodName  = "burstable"           // non-critical pod with Burstable QoS (requests only)
	bestEffortPodName = "best-effort"         // non-critical pod with BestEffort QoS (no resources)
)
    46  
// CriticalPod verifies that a system-node-critical pod is admitted by the
// kubelet even when the node is fully committed, by preempting lower-priority
// pods. Serial/Disruptive: the critical pod requests the node's entire
// allocatable CPU and memory.
var _ = SIGDescribe("CriticalPod", framework.WithSerial(), framework.WithDisruptive(), nodefeature.CriticalPod, func() {
	f := framework.NewDefaultFramework("critical-pod-test")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
	ginkgo.Context("when we need to admit a critical pod", func() {
		ginkgo.It("should be able to create and delete a critical pod", func(ctx context.Context) {
			// Because the Priority admission plugin is enabled, a pod that
			// references a priority class which does not exist is rejected.
			node := getNodeName(ctx, f)
			// Define test pods: one non-critical pod per QoS class, plus the critical pod.
			nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				},
				Limits: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				},
			}, node)
			nonCriticalBurstable := getTestPod(false, burstablePodName, v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				},
			}, node)
			nonCriticalBestEffort := getTestPod(false, bestEffortPodName, v1.ResourceRequirements{}, node)
			criticalPod := getTestPod(true, criticalPodName, v1.ResourceRequirements{
				// request the entire resource capacity of the node, so that
				// admitting this pod requires the other pod to be preempted
				Requests: getNodeCPUAndMemoryCapacity(ctx, f),
			}, node)

			// Create pods, starting with non-critical so that the critical preempts the other pods.
			e2epod.NewPodClient(f).CreateBatch(ctx, []*v1.Pod{nonCriticalBestEffort, nonCriticalBurstable, nonCriticalGuaranteed})
			e2epod.PodClientNS(f, kubeapi.NamespaceSystem).CreateSync(ctx, criticalPod)

			// Check that non-critical pods other than the besteffort have been evicted.
			// The best-effort pod requested no resources, so it does not need to be
			// preempted to make room for the critical pod.
			updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
			framework.ExpectNoError(err)
			for _, p := range updatedPodList.Items {
				if p.Name == nonCriticalBestEffort.Name {
					gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodRunning), "pod: %v should not be preempted with status: %#v", p.Name, p.Status)
				} else {
					gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded), "pod: %v should be preempted with status: %#v", p.Name, p.Status)
				}
			}
		})

		f.It("should add DisruptionTarget condition to the preempted pod", nodefeature.PodDisruptionConditions, func(ctx context.Context) {
			// Because the Priority admission plugin is enabled, a pod that
			// references a priority class which does not exist is rejected.
			node := getNodeName(ctx, f)
			nonCriticalGuaranteed := getTestPod(false, guaranteedPodName, v1.ResourceRequirements{
				Requests: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				},
				Limits: v1.ResourceList{
					v1.ResourceCPU:    resource.MustParse("100m"),
					v1.ResourceMemory: resource.MustParse("100Mi"),
				},
			}, node)

			criticalPod := getTestPod(true, criticalPodName, v1.ResourceRequirements{
				// request the entire resource capacity of the node, so that
				// admitting this pod requires the other pod to be preempted
				Requests: getNodeCPUAndMemoryCapacity(ctx, f),
			}, node)
			criticalPod.Namespace = kubeapi.NamespaceSystem

			ginkgo.By(fmt.Sprintf("create the non-critical pod %q", klog.KObj(nonCriticalGuaranteed)))
			e2epod.NewPodClient(f).CreateSync(ctx, nonCriticalGuaranteed)

			ginkgo.By(fmt.Sprintf("create the critical pod %q", klog.KObj(criticalPod)))
			e2epod.PodClientNS(f, kubeapi.NamespaceSystem).Create(ctx, criticalPod)

			ginkgo.By(fmt.Sprintf("await for the critical pod %q to be ready", klog.KObj(criticalPod)))
			err := e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, criticalPod.Name, kubeapi.NamespaceSystem)
			framework.ExpectNoError(err, "Failed to await for the pod to be running: %q", klog.KObj(criticalPod))

			// Check that every pod in the test namespace (only the guaranteed pod
			// was created there) has been preempted and carries the
			// DisruptionTarget condition.
			updatedPodList, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).List(ctx, metav1.ListOptions{})
			framework.ExpectNoError(err)
			for _, p := range updatedPodList.Items {
				ginkgo.By(fmt.Sprintf("verify that the non-critical pod %q is preempted and has the DisruptionTarget condition", klog.KObj(&p)))
				gomega.Expect(p.Status.Phase).To(gomega.Equal(v1.PodSucceeded), "pod: %v should be preempted with status: %#v", p.Name, p.Status)
				if condition := e2epod.FindPodConditionByType(&p.Status, v1.DisruptionTarget); condition == nil {
					framework.Failf("pod %q should have the condition: %q, pod status: %v", klog.KObj(&p), v1.DisruptionTarget, p.Status)
				}
			}
		})
		ginkgo.AfterEach(func(ctx context.Context) {
			// Delete Pods created by either test above (DeleteSync tolerates pods
			// that were never created by the other test).
			e2epod.NewPodClient(f).DeleteSync(ctx, guaranteedPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
			e2epod.NewPodClient(f).DeleteSync(ctx, burstablePodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
			e2epod.NewPodClient(f).DeleteSync(ctx, bestEffortPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
			e2epod.PodClientNS(f, kubeapi.NamespaceSystem).DeleteSync(ctx, criticalPodName, metav1.DeleteOptions{}, e2epod.DefaultPodDeletionTimeout)
			// Log Events to aid debugging of failed runs.
			logPodEvents(ctx, f)
			logNodeEvents(ctx, f)

		})
	})
})
   149  
   150  func getNodeCPUAndMemoryCapacity(ctx context.Context, f *framework.Framework) v1.ResourceList {
   151  	nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
   152  	framework.ExpectNoError(err)
   153  	// Assuming that there is only one node, because this is a node e2e test.
   154  	gomega.Expect(nodeList.Items).To(gomega.HaveLen(1))
   155  	capacity := nodeList.Items[0].Status.Allocatable
   156  	return v1.ResourceList{
   157  		v1.ResourceCPU:    capacity[v1.ResourceCPU],
   158  		v1.ResourceMemory: capacity[v1.ResourceMemory],
   159  	}
   160  }
   161  
   162  func getNodeName(ctx context.Context, f *framework.Framework) string {
   163  	nodeList, err := f.ClientSet.CoreV1().Nodes().List(ctx, metav1.ListOptions{})
   164  	framework.ExpectNoError(err)
   165  	// Assuming that there is only one node, because this is a node e2e test.
   166  	gomega.Expect(nodeList.Items).To(gomega.HaveLen(1))
   167  	return nodeList.Items[0].GetName()
   168  }
   169  
   170  func getTestPod(critical bool, name string, resources v1.ResourceRequirements, node string) *v1.Pod {
   171  	pod := &v1.Pod{
   172  		TypeMeta: metav1.TypeMeta{
   173  			Kind:       "Pod",
   174  			APIVersion: "v1",
   175  		},
   176  		ObjectMeta: metav1.ObjectMeta{Name: name},
   177  		Spec: v1.PodSpec{
   178  			Containers: []v1.Container{
   179  				{
   180  					Name:      "container",
   181  					Image:     imageutils.GetPauseImageName(),
   182  					Resources: resources,
   183  				},
   184  			},
   185  			NodeName: node,
   186  		},
   187  	}
   188  	if critical {
   189  		pod.ObjectMeta.Namespace = kubeapi.NamespaceSystem
   190  		pod.ObjectMeta.Annotations = map[string]string{
   191  			kubelettypes.ConfigSourceAnnotationKey: kubelettypes.FileSource,
   192  		}
   193  		pod.Spec.PriorityClassName = scheduling.SystemNodeCritical
   194  
   195  		if !kubelettypes.IsCriticalPod(pod) {
   196  			framework.Failf("pod %q should be a critical pod", pod.Name)
   197  		}
   198  	} else {
   199  		if kubelettypes.IsCriticalPod(pod) {
   200  			framework.Failf("pod %q should not be a critical pod", pod.Name)
   201  		}
   202  	}
   203  	return pod
   204  }