k8s.io/kubernetes@v1.29.3/test/e2e_node/quota_lsci_test.go (about)

     1  /*
     2  Copyright 2019 The Kubernetes Authors.
     3  
     4  Licensed under the Apache License, Version 2.0 (the "License");
     5  you may not use this file except in compliance with the License.
     6  You may obtain a copy of the License at
     7  
     8      http://www.apache.org/licenses/LICENSE-2.0
     9  
    10  Unless required by applicable law or agreed to in writing, software
    11  distributed under the License is distributed on an "AS IS" BASIS,
    12  WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
    13  See the License for the specific language governing permissions and
    14  limitations under the License.
    15  */
    16  
    17  package e2enode
    18  
    19  import (
    20  	"context"
    21  	"fmt"
    22  	"path/filepath"
    23  	"time"
    24  
    25  	v1 "k8s.io/api/core/v1"
    26  	"k8s.io/apimachinery/pkg/api/resource"
    27  	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    28  	"k8s.io/kubernetes/pkg/features"
    29  	kubeletconfig "k8s.io/kubernetes/pkg/kubelet/apis/config"
    30  	"k8s.io/kubernetes/pkg/volume/util/fsquota"
    31  	"k8s.io/kubernetes/test/e2e/feature"
    32  	"k8s.io/kubernetes/test/e2e/framework"
    33  	e2eskipper "k8s.io/kubernetes/test/e2e/framework/skipper"
    34  	"k8s.io/kubernetes/test/e2e/nodefeature"
    35  	imageutils "k8s.io/kubernetes/test/utils/image"
    36  	"k8s.io/mount-utils"
    37  	admissionapi "k8s.io/pod-security-admission/api"
    38  
    39  	"github.com/onsi/ginkgo/v2"
    40  )
    41  
const (
	// LSCIQuotaFeature is the kubelet feature gate exercised by this test:
	// quota-based monitoring for local storage capacity isolation. The tests
	// below toggle it on and off to compare eviction behavior.
	LSCIQuotaFeature = features.LocalStorageCapacityIsolationFSQuotaMonitoring
)
    45  
    46  func runOneQuotaTest(f *framework.Framework, quotasRequested bool) {
    47  	evictionTestTimeout := 10 * time.Minute
    48  	sizeLimit := resource.MustParse("100Mi")
    49  	useOverLimit := 101 /* Mb */
    50  	useUnderLimit := 99 /* Mb */
    51  	// TODO: remove hardcoded kubelet volume directory path
    52  	// framework.TestContext.KubeVolumeDir is currently not populated for node e2e
    53  	// As for why we do this: see comment below at isXfs.
    54  	if isXfs("/var/lib/kubelet") {
    55  		useUnderLimit = 50 /* Mb */
    56  	}
    57  	priority := 0
    58  	if quotasRequested {
    59  		priority = 1
    60  	}
    61  	ginkgo.Context(fmt.Sprintf(testContextFmt, fmt.Sprintf("use quotas for LSCI monitoring (quotas enabled: %v)", quotasRequested)), func() {
    62  		tempSetCurrentKubeletConfig(f, func(ctx context.Context, initialConfig *kubeletconfig.KubeletConfiguration) {
    63  			defer withFeatureGate(LSCIQuotaFeature, quotasRequested)()
    64  			// TODO: remove hardcoded kubelet volume directory path
    65  			// framework.TestContext.KubeVolumeDir is currently not populated for node e2e
    66  			if quotasRequested && !supportsQuotas("/var/lib/kubelet") {
    67  				// No point in running this as a positive test if quotas are not
    68  				// enabled on the underlying filesystem.
    69  				e2eskipper.Skipf("Cannot run LocalStorageCapacityIsolationFSQuotaMonitoring on filesystem without project quota enabled")
    70  			}
    71  			// setting a threshold to 0% disables; non-empty map overrides default value (necessary due to omitempty)
    72  			initialConfig.EvictionHard = map[string]string{"memory.available": "0%"}
    73  
    74  			if initialConfig.FeatureGates == nil {
    75  				initialConfig.FeatureGates = map[string]bool{}
    76  			}
    77  			initialConfig.FeatureGates[string(LSCIQuotaFeature)] = quotasRequested
    78  		})
    79  		runEvictionTest(f, evictionTestTimeout, noPressure, noStarvedResource, logDiskMetrics, []podEvictSpec{
    80  			{
    81  				evictionPriority: priority, // This pod should be evicted because of emptyDir violation only if quotas are enabled
    82  				pod: diskConcealingPod(fmt.Sprintf("emptydir-concealed-disk-over-sizelimit-quotas-%v", quotasRequested), useOverLimit, &v1.VolumeSource{
    83  					EmptyDir: &v1.EmptyDirVolumeSource{SizeLimit: &sizeLimit},
    84  				}, v1.ResourceRequirements{}),
    85  			},
    86  			{
    87  				evictionPriority: 0, // This pod should not be evicted because it uses less than its limit (test for quotas)
    88  				pod: diskConcealingPod(fmt.Sprintf("emptydir-concealed-disk-under-sizelimit-quotas-%v", quotasRequested), useUnderLimit, &v1.VolumeSource{
    89  					EmptyDir: &v1.EmptyDirVolumeSource{SizeLimit: &sizeLimit},
    90  				}, v1.ResourceRequirements{}),
    91  			},
    92  		})
    93  	})
    94  }
    95  
    96  // LocalStorageCapacityIsolationFSQuotaMonitoring tests that quotas are
    97  // used for monitoring rather than du.  The mechanism is to create a
    98  // pod that creates a file, deletes it, and writes data to it.  If
    99  // quotas are used to monitor, it will detect this deleted-but-in-use
   100  // file; if du is used to monitor, it will not detect this.
   101  var _ = SIGDescribe("LocalStorageCapacityIsolationFSQuotaMonitoring", framework.WithSlow(), framework.WithSerial(), framework.WithDisruptive(), feature.LocalStorageCapacityIsolationQuota, nodefeature.LSCIQuotaMonitoring, func() {
   102  	f := framework.NewDefaultFramework("localstorage-quota-monitoring-test")
   103  	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged
   104  	runOneQuotaTest(f, true)
   105  	runOneQuotaTest(f, false)
   106  })
   107  
   108  const (
   109  	writeConcealedPodCommand = `
   110  my $file = "%s.bin";
   111  open OUT, ">$file" || die "Cannot open $file: $!\n";
   112  unlink "$file" || die "Cannot unlink $file: $!\n";
   113  my $a = "a";
   114  foreach (1..20) { $a = "$a$a"; }
   115  foreach (1..%d) { syswrite(OUT, $a); }
   116  sleep 999999;`
   117  )
   118  
   119  // This is needed for testing eviction of pods using disk space in concealed files; the shell has no convenient
   120  // way of performing I/O to a concealed file, and the busybox image doesn't contain Perl.
   121  func diskConcealingPod(name string, diskConsumedMB int, volumeSource *v1.VolumeSource, resources v1.ResourceRequirements) *v1.Pod {
   122  	path := ""
   123  	volumeMounts := []v1.VolumeMount{}
   124  	volumes := []v1.Volume{}
   125  	if volumeSource != nil {
   126  		path = volumeMountPath
   127  		volumeMounts = []v1.VolumeMount{{MountPath: volumeMountPath, Name: volumeName}}
   128  		volumes = []v1.Volume{{Name: volumeName, VolumeSource: *volumeSource}}
   129  	}
   130  	return &v1.Pod{
   131  		ObjectMeta: metav1.ObjectMeta{Name: fmt.Sprintf("%s-pod", name)},
   132  		Spec: v1.PodSpec{
   133  			RestartPolicy: v1.RestartPolicyNever,
   134  			Containers: []v1.Container{
   135  				{
   136  					Image: imageutils.GetE2EImage(imageutils.Perl),
   137  					Name:  fmt.Sprintf("%s-container", name),
   138  					Command: []string{
   139  						"perl",
   140  						"-e",
   141  						fmt.Sprintf(writeConcealedPodCommand, filepath.Join(path, "file"), diskConsumedMB),
   142  					},
   143  					Resources:    resources,
   144  					VolumeMounts: volumeMounts,
   145  				},
   146  			},
   147  			Volumes: volumes,
   148  		},
   149  	}
   150  }
   151  
   152  // Don't bother returning an error; if something goes wrong,
   153  // simply treat it as "no".
   154  func supportsQuotas(dir string) bool {
   155  	supportsQuota, err := fsquota.SupportsQuotas(mount.New(""), dir)
   156  	return supportsQuota && err == nil
   157  }