k8s.io/kubernetes@v1.29.3/test/e2e_node/oomkiller_linux_test.go

/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package e2enode

import (
	"context"
	"fmt"

	"github.com/onsi/gomega"
	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	"k8s.io/kubernetes/pkg/features"
	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
	"k8s.io/kubernetes/test/e2e/framework"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
	libcontainercgroups "github.com/opencontainers/runc/libcontainer/cgroups"
)

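// testCase describes one OOMKiller scenario: the pod to create and the
// container within it that is expected to be OOM-killed.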
type testCase struct {
	name                   string
	podSpec                *v1.Pod
	oomTargetContainerName string
}

// KubeReservedMemory is the default fraction of node memory capacity to be
// reserved for K8s components.
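// For example, on a node with 8Gi of memory capacity this reserves roughly
// 2.8Gi, leaving about 5.2Gi allocatable for pods.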
const KubeReservedMemory = 0.35

var _ = SIGDescribe("OOMKiller for pod using more memory than node allocatable [LinuxOnly]", framework.WithSerial(), func() {
	f := framework.NewDefaultFramework("nodeallocatable-oomkiller-test")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	testCases := []testCase{
		{
			name:                   "single process container without resource limits",
			oomTargetContainerName: "oomkill-nodeallocatable-container",
			podSpec: getOOMTargetPod("oomkill-nodeallocatable-pod", "oomkill-nodeallocatable-container",
				getOOMTargetContainerWithoutLimit),
		},
	}

	for _, testCase := range testCases {
		runOomKillerTest(f, testCase, KubeReservedMemory)
	}
})

var _ = SIGDescribe("OOMKiller [LinuxOnly]", framework.WithNodeConformance(), func() {
	f := framework.NewDefaultFramework("oomkiller-test")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	testCases := []testCase{
		{
			name:                   "single process container",
			oomTargetContainerName: "oomkill-single-target-container",
			podSpec: getOOMTargetPod("oomkill-target-pod", "oomkill-single-target-container",
				getOOMTargetContainer),
		},
		{
			name:                   "init container",
			oomTargetContainerName: "oomkill-target-init-container",
			podSpec: getInitContainerOOMTargetPod("initcontainer-oomkill-target-pod", "oomkill-target-init-container",
				getOOMTargetContainer),
		},
	}

	// On cgroup v2, memory.oom.group is set to 1 for the container cgroup, so
	// that when any process in the container is OOM killed, every process in
	// the container is killed with it.
	if libcontainercgroups.IsCgroup2UnifiedMode() {
		testCases = append(testCases, testCase{
			name:                   "multi process container",
			oomTargetContainerName: "oomkill-multi-target-container",
			podSpec: getOOMTargetPod("oomkill-target-pod", "oomkill-multi-target-container",
				getOOMTargetContainerMultiProcess),
		})
	}
	for _, tc := range testCases {
		runOomKillerTest(f, tc, 0)
	}
})

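// runOomKillerTest registers a Ginkgo context for the given test case. When
// kubeReservedMemory is non-zero, the kubelet is reconfigured to reserve that
// fraction of node memory capacity before the OOM target pod is created.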
func runOomKillerTest(f *framework.Framework, testCase testCase, kubeReservedMemory float64) {
	ginkgo.Context(testCase.name, func() {
		// Update KubeReservedMemory in KubeletConfig.
		if kubeReservedMemory > 0 {
			tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
				if initialConfig.KubeReserved == nil {
					initialConfig.KubeReserved = map[string]string{}
				}
				// A race between system OOM kills and cgroup OOM kills has been
				// observed when node allocatable memory equals node capacity.
				// Reserve a fraction of the node's memory capacity for K8s
				// components so that allocatable stays below capacity and OOM
				// kills happen at the cgroup level instead of the system level.
				initialConfig.KubeReserved["memory"] = fmt.Sprintf("%d", int(kubeReservedMemory*getLocalNode(context.TODO(), f).Status.Capacity.Memory().AsApproximateFloat64()))
			})
		}

		ginkgo.BeforeEach(func() {
			ginkgo.By("setting up the pod to be used in the test")
			e2epod.NewPodClient(f).Create(context.TODO(), testCase.podSpec)
		})

		ginkgo.It("The containers terminated by OOM killer should have the reason set to OOMKilled", func() {
			cfg, configErr := getCurrentKubeletConfig(context.TODO())
			framework.ExpectNoError(configErr)
			if utilfeature.DefaultFeatureGate.Enabled(features.NodeSwap) {
				// If swap is enabled, OOM behavior is only testable with
				// LimitedSwap: UnlimitedSwap lets workloads use unbounded swap,
				// which makes triggering an OOM kill unreliable. Since these
				// conformance tests cannot change the node's default swap
				// behavior, skip when the node runs with UnlimitedSwap.
				if cfg.MemorySwap.SwapBehavior == "" || cfg.MemorySwap.SwapBehavior == "UnlimitedSwap" {
					ginkgo.Skip("OOMKiller should not run with UnlimitedSwap")
				}
			}
			ginkgo.By("Waiting for the pod to fail")
			err := e2epod.WaitForPodTerminatedInNamespace(context.TODO(), f.ClientSet, testCase.podSpec.Name, "", f.Namespace.Name)
			framework.ExpectNoError(err, "Failed waiting for pod to terminate, %s/%s", f.Namespace.Name, testCase.podSpec.Name)

			ginkgo.By("Fetching the latest pod status")
			pod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(context.TODO(), testCase.podSpec.Name, metav1.GetOptions{})
			framework.ExpectNoError(err, "Failed to get the latest pod object for name: %q", testCase.podSpec.Name)

			ginkgo.By("Verifying the OOM target container has the expected reason")
			verifyReasonForOOMKilledContainer(pod, testCase.oomTargetContainerName)
		})

		ginkgo.AfterEach(func() {
			ginkgo.By(fmt.Sprintf("deleting pod: %s", testCase.podSpec.Name))
			e2epod.NewPodClient(f).DeleteSync(context.TODO(), testCase.podSpec.Name, metav1.DeleteOptions{}, framework.PodDeleteTimeout)
		})
	})
}

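// verifyReasonForOOMKilledContainer asserts that the named container reached a
// terminated state with exit code 137 (128 + SIGKILL(9), i.e. killed by the
// kernel OOM killer) and, where the runtime reports it reliably, the
// "OOMKilled" reason.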
func verifyReasonForOOMKilledContainer(pod *v1.Pod, oomTargetContainerName string) {
	container := e2epod.FindContainerStatusInPod(pod, oomTargetContainerName)
	if container == nil {
		framework.Failf("OOM target pod %q has no container status for container %q", pod.Name, oomTargetContainerName)
	}
	if container.State.Terminated == nil {
		framework.Failf("OOM target pod %q, container %q is not in the terminated state", pod.Name, container.Name)
	}
	gomega.Expect(container.State.Terminated.ExitCode).To(gomega.Equal(int32(137)),
		"pod: %q, container: %q has unexpected exitCode: %d", pod.Name, container.Name, container.State.Terminated.ExitCode)

	// This check is currently causing tests to flake on containerd & CRI-O,
	// https://github.com/kubernetes/kubernetes/issues/119600, so we'll skip
	// the reason check if we know it's going to fail.
	// TODO: Remove this once https://github.com/containerd/containerd/issues/8893 is resolved
	if container.State.Terminated.Reason == "OOMKilled" {
		gomega.Expect(container.State.Terminated.Reason).To(gomega.Equal("OOMKilled"),
			"pod: %q, container: %q has unexpected reason: %q", pod.Name, container.Name, container.State.Terminated.Reason)
	}
}

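// getOOMTargetPod wraps a single OOM target container in a pod with
// RestartPolicyNever, so the terminated container state survives for
// inspection.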
func getOOMTargetPod(podName string, ctnName string, createContainer func(name string) v1.Container) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			Containers: []v1.Container{
				createContainer(ctnName),
			},
		},
	}
}

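// getInitContainerOOMTargetPod places the OOM target in an init container; the
// busybox app container never runs because the init container fails.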
func getInitContainerOOMTargetPod(podName string, ctnName string, createContainer func(name string) v1.Container) *v1.Pod {
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
		},
		Spec: v1.PodSpec{
			RestartPolicy: v1.RestartPolicyNever,
			InitContainers: []v1.Container{
				createContainer(ctnName),
			},
			Containers: []v1.Container{
				{
					Name:  "busybox",
					Image: busyboxImage,
				},
			},
		},
	}
}

// getOOMTargetContainer returns a container with a single process, which attempts to allocate more memory than is
// allowed by the container memory limit.
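// The 20M dd buffer exceeds the container's 15Mi memory limit, so the kernel
// OOM-kills dd shortly after the initial sleep.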
func getOOMTargetContainer(name string) v1.Container {
	return v1.Container{
		Name:  name,
		Image: busyboxImage,
		Command: []string{
			"sh",
			"-c",
			// use the dd tool to attempt to allocate 20M in a block which exceeds the limit
			"sleep 5 && dd if=/dev/zero of=/dev/null bs=20M",
		},
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceMemory: resource.MustParse("15Mi"),
			},
			Limits: v1.ResourceList{
				v1.ResourceMemory: resource.MustParse("15Mi"),
			},
		},
	}
}

// getOOMTargetContainerMultiProcess returns a container with two processes, one of which attempts to allocate more
// memory than is allowed by the container memory limit, and a second process which just sleeps.
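// Under cgroup v2 the container cgroup has memory.oom.group=1, so the OOM kill
// of the dd process takes the sleeping sibling down with it and the whole
// container terminates.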
func getOOMTargetContainerMultiProcess(name string) v1.Container {
	return v1.Container{
		Name:  name,
		Image: busyboxImage,
		Command: []string{
			"sh",
			"-c",
			// use the dd tool to attempt to allocate 20M in a block which exceeds the limit
			"sleep 5 && dd if=/dev/zero of=/dev/null bs=20M & sleep 86400",
		},
		Resources: v1.ResourceRequirements{
			Requests: v1.ResourceList{
				v1.ResourceMemory: resource.MustParse("15Mi"),
			},
			Limits: v1.ResourceList{
				v1.ResourceMemory: resource.MustParse("15Mi"),
			},
		},
	}
}

// getOOMTargetContainerWithoutLimit returns a container with a single process which attempts to allocate more memory
// than node allocatable and doesn't have resource limits set.
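// The bs=10G block size makes dd allocate a buffer of roughly 10Gi up front,
// which exceeds node allocatable once kube-reserved memory is set aside.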
func getOOMTargetContainerWithoutLimit(name string) v1.Container {
	return v1.Container{
		Name:  name,
		Image: busyboxImage,
		Command: []string{
			"sh",
			"-c",
			// use the dd tool to attempt to allocate a huge block of memory which exceeds the node allocatable
			"sleep 5 && dd if=/dev/zero of=/dev/null iflag=fullblock count=10 bs=10G",
		},
	}
}