k8s.io/kubernetes@v1.29.3/test/e2e_node/pods_container_manager_test.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2enode

import (
	"context"
	"strings"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/pkg/kubelet/cm"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
	"k8s.io/klog/v2"
)

// getResourceList returns a ResourceList with the
// specified cpu and memory resource values
func getResourceList(cpu, memory string) v1.ResourceList {
	res := v1.ResourceList{}
	if cpu != "" {
		res[v1.ResourceCPU] = resource.MustParse(cpu)
	}
	if memory != "" {
		res[v1.ResourceMemory] = resource.MustParse(memory)
	}
	return res
}
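// For example, getResourceList("100m", "100Mi") yields a list with cpu=100m
// and memory=100Mi, while an empty string omits that resource entirely.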

// getResourceRequirements returns a ResourceRequirements object
func getResourceRequirements(requests, limits v1.ResourceList) v1.ResourceRequirements {
	res := v1.ResourceRequirements{}
	res.Requests = requests
	res.Limits = limits
	return res
}
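// The requests and limits passed to getResourceRequirements determine each
// test pod's QoS class: requests equal to limits for every resource yields
// Guaranteed, no requests or limits yields BestEffort, and anything else
// yields Burstable.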

const (
	// Kubelet internal cgroup name for burstable tier
	burstableCgroup = "burstable"
	// Kubelet internal cgroup name for besteffort tier
	bestEffortCgroup = "besteffort"
)

// makePodToVerifyCgroups returns a pod that verifies the existence of the specified cgroups.
func makePodToVerifyCgroups(cgroupNames []string) *v1.Pod {
	// convert the names to their literal cgroupfs forms...
	cgroupFsNames := []string{}
	rootCgroupName := cm.NewCgroupName(cm.RootCgroupName, defaultNodeAllocatableCgroup)
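	// Expected cgroup names are resolved relative to the node allocatable
	// cgroup (typically "kubepods"), under which the kubelet nests all pod
	// cgroups, before being translated to their on-disk cgroupfs form.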
	for _, baseName := range cgroupNames {
		// Add top level cgroup used to enforce node allocatable.
		cgroupComponents := strings.Split(baseName, "/")
		cgroupName := cm.NewCgroupName(rootCgroupName, cgroupComponents...)
		cgroupFsNames = append(cgroupFsNames, toCgroupFsName(cgroupName))
	}
	klog.Infof("expecting %v cgroups to be found", cgroupFsNames)
	// build the pod command to verify that each of the cgroups exists
	command := ""

	for _, cgroupFsName := range cgroupFsNames {
		localCommand := ""
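		// On cgroup v2 there is a single unified hierarchy to check; on
		// cgroup v1 the cgroup must exist under both the memory and cpu
		// controller hierarchies.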
		if IsCgroup2UnifiedMode() {
			localCommand = "if [ ! -d /tmp/" + cgroupFsName + " ]; then exit 1; fi; "
		} else {
			localCommand = "if [ ! -d /tmp/memory/" + cgroupFsName + " ] || [ ! -d /tmp/cpu/" + cgroupFsName + " ]; then exit 1; fi; "
		}
		command += localCommand
	}

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod" + string(uuid.NewUUID()),
		},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{
				{
					Image:   busyboxImage,
					Name:    "container" + string(uuid.NewUUID()),
					Command: []string{"sh", "-c", command},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "sysfscgroup",
							MountPath: "/tmp",
						},
					},
				},
			},
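			// The host's /sys/fs/cgroup is mounted at /tmp inside the
			// container, so the shell command above can check the node's
			// cgroup directories directly.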
			Volumes: []v1.Volume{
				{
					Name: "sysfscgroup",
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
					},
				},
			},
		},
	}
	return pod
}

// makePodToVerifyCgroupRemoved returns a pod that verifies the specified cgroup does not exist.
func makePodToVerifyCgroupRemoved(baseName string) *v1.Pod {
	components := strings.Split(baseName, "/")
	cgroupName := cm.NewCgroupName(cm.RootCgroupName, components...)
	cgroupFsName := toCgroupFsName(cgroupName)

	command := ""
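	// Poll for up to ~100s (10 attempts, 10s apart) to give the kubelet time
	// to remove the pod cgroup after the pod is deleted; exit 0 as soon as
	// the directory is gone, exit 1 if it never disappears.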
	if IsCgroup2UnifiedMode() {
		command = "for i in `seq 1 10`; do if [ ! -d /tmp/" + cgroupFsName + " ]; then exit 0; else sleep 10; fi; done; exit 1"
	} else {
		command = "for i in `seq 1 10`; do if [ ! -d /tmp/memory/" + cgroupFsName + " ] && [ ! -d /tmp/cpu/" + cgroupFsName + " ]; then exit 0; else sleep 10; fi; done; exit 1"
	}

	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod" + string(uuid.NewUUID()),
		},
		Spec: v1.PodSpec{
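			// Restart on failure so the check is retried if the cgroup has
			// not been removed by the time the in-container polling gives up.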
			RestartPolicy: v1.RestartPolicyOnFailure,
			Containers: []v1.Container{
				{
					Image:   busyboxImage,
					Name:    "container" + string(uuid.NewUUID()),
					Command: []string{"sh", "-c", command},
					VolumeMounts: []v1.VolumeMount{
						{
							Name:      "sysfscgroup",
							MountPath: "/tmp",
						},
					},
				},
			},
			Volumes: []v1.Volume{
				{
					Name: "sysfscgroup",
					VolumeSource: v1.VolumeSource{
						HostPath: &v1.HostPathVolumeSource{Path: "/sys/fs/cgroup"},
					},
				},
			},
		},
	}
	return pod
}

var _ = SIGDescribe("Kubelet Cgroup Manager", func() {
	f := framework.NewDefaultFramework("kubelet-cgroup-manager")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	ginkgo.Describe("QOS containers", func() {
		ginkgo.Context("On enabling QOS cgroup hierarchy", func() {
			f.It("Top level QoS containers should have been created", f.WithNodeConformance(), func(ctx context.Context) {
				if !kubeletCfg.CgroupsPerQOS {
					return
				}
				cgroupsToVerify := []string{burstableCgroup, bestEffortCgroup}
				pod := makePodToVerifyCgroups(cgroupsToVerify)
				e2epod.NewPodClient(f).Create(ctx, pod)
				err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
				framework.ExpectNoError(err)
			})
		})
	})

	f.Describe("Pod containers", f.WithNodeConformance(), func() {
		ginkgo.Context("On scheduling a Guaranteed Pod", func() {
			ginkgo.It("Pod containers should have been created under the cgroup-root", func(ctx context.Context) {
				if !kubeletCfg.CgroupsPerQOS {
					return
				}
				var (
					guaranteedPod *v1.Pod
					podUID        string
				)
				ginkgo.By("Creating a Guaranteed pod in Namespace", func() {
					guaranteedPod = e2epod.NewPodClient(f).Create(ctx, &v1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "pod" + string(uuid.NewUUID()),
							Namespace: f.Namespace.Name,
						},
						Spec: v1.PodSpec{
							Containers: []v1.Container{
								{
									Image:     imageutils.GetPauseImageName(),
									Name:      "container" + string(uuid.NewUUID()),
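									// Requests equal limits for both cpu and memory, so this pod is
									// classified as Guaranteed and its cgroup lives directly under the
									// node allocatable cgroup rather than a QoS tier.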
									Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("100m", "100Mi")),
								},
							},
						},
					})
					podUID = string(guaranteedPod.UID)
				})
				ginkgo.By("Checking if the pod cgroup was created", func() {
					cgroupsToVerify := []string{"pod" + podUID}
					pod := makePodToVerifyCgroups(cgroupsToVerify)
					e2epod.NewPodClient(f).Create(ctx, pod)
					err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
					framework.ExpectNoError(err)
				})
				ginkgo.By("Checking if the pod cgroup was deleted", func() {
					gp := int64(1)
					err := e2epod.NewPodClient(f).Delete(ctx, guaranteedPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
					framework.ExpectNoError(err)
					pod := makePodToVerifyCgroupRemoved("pod" + podUID)
					e2epod.NewPodClient(f).Create(ctx, pod)
					err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
					framework.ExpectNoError(err)
				})
			})
		})
		ginkgo.Context("On scheduling a BestEffort Pod", func() {
			ginkgo.It("Pod containers should have been created under the BestEffort cgroup", func(ctx context.Context) {
				if !kubeletCfg.CgroupsPerQOS {
					return
				}
				var (
					podUID        string
					bestEffortPod *v1.Pod
				)
				ginkgo.By("Creating a BestEffort pod in Namespace", func() {
					bestEffortPod = e2epod.NewPodClient(f).Create(ctx, &v1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "pod" + string(uuid.NewUUID()),
							Namespace: f.Namespace.Name,
						},
						Spec: v1.PodSpec{
							Containers: []v1.Container{
								{
									Image:     imageutils.GetPauseImageName(),
									Name:      "container" + string(uuid.NewUUID()),
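									// No requests or limits are set, so this pod is classified as
									// BestEffort and its cgroup lives under the besteffort QoS tier.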
									Resources: getResourceRequirements(getResourceList("", ""), getResourceList("", "")),
								},
							},
						},
					})
					podUID = string(bestEffortPod.UID)
				})
				ginkgo.By("Checking if the pod cgroup was created", func() {
					cgroupsToVerify := []string{"besteffort/pod" + podUID}
					pod := makePodToVerifyCgroups(cgroupsToVerify)
					e2epod.NewPodClient(f).Create(ctx, pod)
					err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
					framework.ExpectNoError(err)
				})
				ginkgo.By("Checking if the pod cgroup was deleted", func() {
					gp := int64(1)
					err := e2epod.NewPodClient(f).Delete(ctx, bestEffortPod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
					framework.ExpectNoError(err)
					pod := makePodToVerifyCgroupRemoved("besteffort/pod" + podUID)
					e2epod.NewPodClient(f).Create(ctx, pod)
					err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
					framework.ExpectNoError(err)
				})
			})
		})
		ginkgo.Context("On scheduling a Burstable Pod", func() {
			ginkgo.It("Pod containers should have been created under the Burstable cgroup", func(ctx context.Context) {
				if !kubeletCfg.CgroupsPerQOS {
					return
				}
				var (
					podUID       string
					burstablePod *v1.Pod
				)
				ginkgo.By("Creating a Burstable pod in Namespace", func() {
					burstablePod = e2epod.NewPodClient(f).Create(ctx, &v1.Pod{
						ObjectMeta: metav1.ObjectMeta{
							Name:      "pod" + string(uuid.NewUUID()),
							Namespace: f.Namespace.Name,
						},
						Spec: v1.PodSpec{
							Containers: []v1.Container{
								{
									Image:     imageutils.GetPauseImageName(),
									Name:      "container" + string(uuid.NewUUID()),
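									// Requests are set but lower than limits, so this pod is classified
									// as Burstable and its cgroup lives under the burstable QoS tier.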
									Resources: getResourceRequirements(getResourceList("100m", "100Mi"), getResourceList("200m", "200Mi")),
								},
							},
						},
					})
					podUID = string(burstablePod.UID)
				})
				ginkgo.By("Checking if the pod cgroup was created", func() {
					cgroupsToVerify := []string{"burstable/pod" + podUID}
					pod := makePodToVerifyCgroups(cgroupsToVerify)
					e2epod.NewPodClient(f).Create(ctx, pod)
					err := e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
					framework.ExpectNoError(err)
				})
				ginkgo.By("Checking if the pod cgroup was deleted", func() {
					gp := int64(1)
					err := e2epod.NewPodClient(f).Delete(ctx, burstablePod.Name, metav1.DeleteOptions{GracePeriodSeconds: &gp})
					framework.ExpectNoError(err)
					pod := makePodToVerifyCgroupRemoved("burstable/pod" + podUID)
					e2epod.NewPodClient(f).Create(ctx, pod)
					err = e2epod.WaitForPodSuccessInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
					framework.ExpectNoError(err)
				})
			})
		})
	})
})