k8s.io/kubernetes@v1.29.3/test/e2e/storage/empty_dir_wrapper.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package storage

import (
	"context"
	"fmt"
	"strconv"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/intstr"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/test/e2e/framework"
	e2enode "k8s.io/kubernetes/test/e2e/framework/node"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2erc "k8s.io/kubernetes/test/e2e/framework/rc"
	"k8s.io/kubernetes/test/e2e/storage/utils"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
)

const (
	// These numbers were obtained empirically.
	// If they are too low, the tests become flaky instead of
	// failing outright if the race bug reappears.
	// If the volume or pod counts are too high, the tests may fail
	// because mounting configmap/git_repo volumes is not very fast
	// and the tests may time out waiting for pods to become Running.
	// And of course, the higher the numbers, the slower the tests.
	wrappedVolumeRaceConfigMapVolumeCount    = 50
	wrappedVolumeRaceConfigMapPodCount       = 5
	wrappedVolumeRaceConfigMapIterationCount = 3
	wrappedVolumeRaceGitRepoVolumeCount      = 50
	wrappedVolumeRaceGitRepoPodCount         = 5
	wrappedVolumeRaceGitRepoIterationCount   = 3
	wrappedVolumeRaceRCNamePrefix            = "wrapped-volume-race-"
)

var _ = utils.SIGDescribe("EmptyDir wrapper volumes", func() {
	f := framework.NewDefaultFramework("emptydir-wrapper")
	f.NamespacePodSecurityLevel = admissionapi.LevelBaseline

	/*
		Release: v1.13
		Testname: EmptyDir Wrapper Volume, Secret and ConfigMap volumes, no conflict
		Description: A Secret volume and a ConfigMap volume are created with data. The Pod MUST be able to start with the Secret and ConfigMap volumes mounted into the container.
	*/
	framework.ConformanceIt("should not conflict", func(ctx context.Context) {
		name := "emptydir-wrapper-test-" + string(uuid.NewUUID())
		volumeName := "secret-volume"
		volumeMountPath := "/etc/secret-volume"

		secret := &v1.Secret{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      name,
			},
			Data: map[string][]byte{
				"data-1": []byte("value-1\n"),
			},
		}

		var err error
		if secret, err = f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Create(ctx, secret, metav1.CreateOptions{}); err != nil {
			framework.Failf("unable to create test secret %s: %v", secret.Name, err)
		}

		configMapVolumeName := "configmap-volume"
		configMapVolumeMountPath := "/etc/configmap-volume"

		configMap := &v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      name,
			},
			BinaryData: map[string][]byte{
				"data-1": []byte("value-1\n"),
			},
		}

		if configMap, err = f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{}); err != nil {
			framework.Failf("unable to create test configMap %s: %v", configMap.Name, err)
		}

		pod := &v1.Pod{
			ObjectMeta: metav1.ObjectMeta{
				Name: "pod-secrets-" + string(uuid.NewUUID()),
			},
			Spec: v1.PodSpec{
				Volumes: []v1.Volume{
					{
						Name: volumeName,
						VolumeSource: v1.VolumeSource{
							Secret: &v1.SecretVolumeSource{
								SecretName: name,
							},
						},
					},
					{
						Name: configMapVolumeName,
						VolumeSource: v1.VolumeSource{
							ConfigMap: &v1.ConfigMapVolumeSource{
								LocalObjectReference: v1.LocalObjectReference{
									Name: name,
								},
							},
						},
					},
				},
				Containers: []v1.Container{
					{
						Name:  "secret-test",
						Image: imageutils.GetE2EImage(imageutils.Agnhost),
						Args:  []string{"test-webserver"},
						VolumeMounts: []v1.VolumeMount{
							{
								Name:      volumeName,
								MountPath: volumeMountPath,
								ReadOnly:  true,
							},
							{
								Name:      configMapVolumeName,
								MountPath: configMapVolumeMountPath,
							},
						},
					},
				},
			},
		}
		pod = e2epod.NewPodClient(f).CreateSync(ctx, pod)
		ginkgo.DeferCleanup(func(ctx context.Context) {
			ginkgo.By("Cleaning up the secret")
			if err := f.ClientSet.CoreV1().Secrets(f.Namespace.Name).Delete(ctx, secret.Name, metav1.DeleteOptions{}); err != nil {
				framework.Failf("unable to delete secret %v: %v", secret.Name, err)
			}
			ginkgo.By("Cleaning up the configmap")
			if err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, configMap.Name, metav1.DeleteOptions{}); err != nil {
				framework.Failf("unable to delete configmap %v: %v", configMap.Name, err)
			}
			ginkgo.By("Cleaning up the pod")
			if err = f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, pod.Name, *metav1.NewDeleteOptions(0)); err != nil {
				framework.Failf("unable to delete pod %v: %v", pod.Name, err)
			}
		})
	})

	// The following two tests check for the problem fixed in #29641.
	// In order to reproduce it you need to revert the fix, e.g. via
	// git revert -n df1e925143daf34199b55ffb91d0598244888cce
	// or
	// curl -sL https://github.com/kubernetes/kubernetes/pull/29641.patch | patch -p1 -R
	//
	// After that these tests will fail because some of the pods
	// they create never enter Running state.
	//
	// They need to be [Serial] and [Slow] because they try to induce
	// the race by creating pods with many volumes and container volume mounts,
	// which takes considerable time and may interfere with other tests.
	//
	// Probably should also try making tests for secrets and downwardapi,
	// but these cases are harder because tmpfs-based emptyDir
	// appears to be less prone to the race problem.

	/*
		Release: v1.13
		Testname: EmptyDir Wrapper Volume, ConfigMap volumes, no race
		Description: Create 50 ConfigMap volumes and 5 replicas of a pod with these ConfigMap volumes mounted. Pods MUST NOT fail waiting for Volumes.
	*/
	framework.ConformanceIt("should not cause race condition when used for configmaps", f.WithSerial(), func(ctx context.Context) {
		configMapNames := createConfigmapsForRace(ctx, f)
		ginkgo.DeferCleanup(deleteConfigMaps, f, configMapNames)
		volumes, volumeMounts := makeConfigMapVolumes(configMapNames)
		for i := 0; i < wrappedVolumeRaceConfigMapIterationCount; i++ {
			testNoWrappedVolumeRace(ctx, f, volumes, volumeMounts, wrappedVolumeRaceConfigMapPodCount)
		}
	})

	// Slow by design [~150 seconds].
	// This test uses the deprecated GitRepo VolumeSource, so it MUST NOT be promoted to Conformance.
	// To provision a container with a git repo, mount an EmptyDir into an InitContainer that clones
	// the repo using git, then mount the EmptyDir into the Pod's container
	// (see the illustrative sketch below the Describe block).
	// The same approach could also be exercised with secret and downwardAPI VolumeSources,
	// but those are less prone to the race problem.
	f.It("should not cause race condition when used for git_repo", f.WithSerial(), f.WithSlow(), func(ctx context.Context) {
		gitURL, gitRepo, cleanup := createGitServer(ctx, f)
		defer cleanup()
		volumes, volumeMounts := makeGitRepoVolumes(gitURL, gitRepo)
		for i := 0; i < wrappedVolumeRaceGitRepoIterationCount; i++ {
			testNoWrappedVolumeRace(ctx, f, volumes, volumeMounts, wrappedVolumeRaceGitRepoPodCount)
		}
	})
})
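
// gitCloneViaInitContainerPod is a minimal, illustrative sketch (it is not
// used by the tests above) of the init-container pattern recommended as a
// replacement for the deprecated GitRepo volume source: an init container
// clones the repository into an emptyDir volume, and the main container
// mounts the same emptyDir. The image name, repository URL handling and
// mount paths below are assumptions for illustration only.
func gitCloneViaInitContainerPod(name, gitURL string) *v1.Pod {
	const repoVolumeName = "repo"
	return &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{Name: name},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{{
				Name:         repoVolumeName,
				VolumeSource: v1.VolumeSource{EmptyDir: &v1.EmptyDirVolumeSource{}},
			}},
			InitContainers: []v1.Container{{
				// Hypothetical image providing a git binary; it clones into the shared emptyDir.
				Name:         "clone",
				Image:        "alpine/git",
				Args:         []string{"clone", gitURL, "/repo"},
				VolumeMounts: []v1.VolumeMount{{Name: repoVolumeName, MountPath: "/repo"}},
			}},
			Containers: []v1.Container{{
				// The main container sees the cloned repository under /repo.
				Name:         "main",
				Image:        imageutils.GetE2EImage(imageutils.Pause),
				VolumeMounts: []v1.VolumeMount{{Name: repoVolumeName, MountPath: "/repo"}},
			}},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}
}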
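// createGitServer starts a fake git server pod (agnhost "fake-gitserver") plus
// a Service fronting it, and returns the clone URL, the repository directory
// name, and a cleanup function that deletes both objects.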
func createGitServer(ctx context.Context, f *framework.Framework) (gitURL string, gitRepo string, cleanup func()) {
	var err error
	gitServerPodName := "git-server-" + string(uuid.NewUUID())
	containerPort := int32(8000)

	labels := map[string]string{"name": gitServerPodName}

	gitServerPod := e2epod.NewAgnhostPod(f.Namespace.Name, gitServerPodName, nil, nil, []v1.ContainerPort{{ContainerPort: int32(containerPort)}}, "fake-gitserver")
	gitServerPod.ObjectMeta.Labels = labels
	e2epod.NewPodClient(f).CreateSync(ctx, gitServerPod)

	// Portal IP and port
	httpPort := 2345

	gitServerSvc := &v1.Service{
		ObjectMeta: metav1.ObjectMeta{
			Name: "git-server-svc",
		},
		Spec: v1.ServiceSpec{
			Selector: labels,
			Ports: []v1.ServicePort{
				{
					Name:       "http-portal",
					Port:       int32(httpPort),
					TargetPort: intstr.FromInt32(containerPort),
				},
			},
		},
	}

	if gitServerSvc, err = f.ClientSet.CoreV1().Services(f.Namespace.Name).Create(ctx, gitServerSvc, metav1.CreateOptions{}); err != nil {
		framework.Failf("unable to create test git server service %s: %v", gitServerSvc.Name, err)
	}

	return "http://" + gitServerSvc.Spec.ClusterIP + ":" + strconv.Itoa(httpPort), "test", func() {
		ginkgo.By("Cleaning up the git server pod")
		if err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Delete(ctx, gitServerPod.Name, *metav1.NewDeleteOptions(0)); err != nil {
			framework.Failf("unable to delete git server pod %v: %v", gitServerPod.Name, err)
		}
		ginkgo.By("Cleaning up the git server svc")
		if err := f.ClientSet.CoreV1().Services(f.Namespace.Name).Delete(ctx, gitServerSvc.Name, metav1.DeleteOptions{}); err != nil {
			framework.Failf("unable to delete git server svc %v: %v", gitServerSvc.Name, err)
		}
	}
}

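// makeGitRepoVolumes builds wrappedVolumeRaceGitRepoVolumeCount GitRepo volumes
// pointing at the given repository, together with matching volume mounts under
// /etc/git-volume-<i>.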
func makeGitRepoVolumes(gitURL, gitRepo string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
	for i := 0; i < wrappedVolumeRaceGitRepoVolumeCount; i++ {
		volumeName := fmt.Sprintf("racey-git-repo-%d", i)
		volumes = append(volumes, v1.Volume{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				GitRepo: &v1.GitRepoVolumeSource{
					Repository: gitURL,
					Directory:  gitRepo,
				},
			},
		})
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			Name:      volumeName,
			MountPath: fmt.Sprintf("/etc/git-volume-%d", i),
		})
	}
	return
}

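// createConfigmapsForRace creates wrappedVolumeRaceConfigMapVolumeCount small
// ConfigMaps in the test namespace and returns their names.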
func createConfigmapsForRace(ctx context.Context, f *framework.Framework) (configMapNames []string) {
	ginkgo.By(fmt.Sprintf("Creating %d configmaps", wrappedVolumeRaceConfigMapVolumeCount))
	for i := 0; i < wrappedVolumeRaceConfigMapVolumeCount; i++ {
		configMapName := fmt.Sprintf("racey-configmap-%d", i)
		configMapNames = append(configMapNames, configMapName)
		configMap := &v1.ConfigMap{
			ObjectMeta: metav1.ObjectMeta{
				Namespace: f.Namespace.Name,
				Name:      configMapName,
			},
			Data: map[string]string{
				"data-1": "value-1",
			},
		}
		_, err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Create(ctx, configMap, metav1.CreateOptions{})
		framework.ExpectNoError(err)
	}
	return
}

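// deleteConfigMaps removes the ConfigMaps created by createConfigmapsForRace.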
func deleteConfigMaps(ctx context.Context, f *framework.Framework, configMapNames []string) {
	ginkgo.By("Cleaning up the configMaps")
	for _, configMapName := range configMapNames {
		err := f.ClientSet.CoreV1().ConfigMaps(f.Namespace.Name).Delete(ctx, configMapName, metav1.DeleteOptions{})
		framework.ExpectNoError(err, "unable to delete configMap %v", configMapName)
	}
}

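// makeConfigMapVolumes builds one ConfigMap volume per name in configMapNames,
// together with matching volume mounts under /etc/config-<i>.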
func makeConfigMapVolumes(configMapNames []string) (volumes []v1.Volume, volumeMounts []v1.VolumeMount) {
	for i, configMapName := range configMapNames {
		volumeName := fmt.Sprintf("racey-configmap-%d", i)
		volumes = append(volumes, v1.Volume{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				ConfigMap: &v1.ConfigMapVolumeSource{
					LocalObjectReference: v1.LocalObjectReference{
						Name: configMapName,
					},
					Items: []v1.KeyToPath{
						{
							Key:  "data-1",
							Path: "data-1",
						},
					},
				},
			},
		})
		volumeMounts = append(volumeMounts, v1.VolumeMount{
			Name:      volumeName,
			MountPath: fmt.Sprintf("/etc/config-%d", i),
		})
	}
	return
}

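// testNoWrappedVolumeRace creates a ReplicationController whose pods all mount
// the given volumes, pins the pods to a single node, and then verifies that
// every pod reaches the Running state, i.e. that no wrapped-volume race occurred.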
func testNoWrappedVolumeRace(ctx context.Context, f *framework.Framework, volumes []v1.Volume, volumeMounts []v1.VolumeMount, podCount int32) {
	const nodeHostnameLabelKey = "kubernetes.io/hostname"

	rcName := wrappedVolumeRaceRCNamePrefix + string(uuid.NewUUID())
	targetNode, err := e2enode.GetRandomReadySchedulableNode(ctx, f.ClientSet)
	framework.ExpectNoError(err)

	ginkgo.By("Creating RC which spawns configmap-volume pods")
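	// Pin all pods of the RC to the single node chosen above so that every
	// volume mount happens on one kubelet at roughly the same time, which
	// maximizes the chance of triggering the wrapped-volume race.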
	affinity := &v1.Affinity{
		NodeAffinity: &v1.NodeAffinity{
			RequiredDuringSchedulingIgnoredDuringExecution: &v1.NodeSelector{
				NodeSelectorTerms: []v1.NodeSelectorTerm{
					{
						MatchExpressions: []v1.NodeSelectorRequirement{
							{
								Key:      nodeHostnameLabelKey,
								Operator: v1.NodeSelectorOpIn,
								Values:   []string{targetNode.Labels[nodeHostnameLabelKey]},
							},
						},
					},
				},
			},
		},
	}

	rc := &v1.ReplicationController{
		ObjectMeta: metav1.ObjectMeta{
			Name: rcName,
		},
		Spec: v1.ReplicationControllerSpec{
			Replicas: &podCount,
			Selector: map[string]string{
				"name": rcName,
			},
			Template: &v1.PodTemplateSpec{
				ObjectMeta: metav1.ObjectMeta{
					Labels: map[string]string{"name": rcName},
				},
				Spec: v1.PodSpec{
					Containers: []v1.Container{
						{
							Name:         "test-container",
							Image:        imageutils.GetE2EImage(imageutils.Pause),
							VolumeMounts: volumeMounts,
						},
					},
					Affinity:  affinity,
					DNSPolicy: v1.DNSDefault,
					Volumes:   volumes,
				},
			},
		},
	}
	_, err = f.ClientSet.CoreV1().ReplicationControllers(f.Namespace.Name).Create(ctx, rc, metav1.CreateOptions{})
	framework.ExpectNoError(err, "error creating replication controller")

	ginkgo.DeferCleanup(e2erc.DeleteRCAndWaitForGC, f.ClientSet, f.Namespace.Name, rcName)

	pods, err := e2epod.PodsCreated(ctx, f.ClientSet, f.Namespace.Name, rcName, podCount)
	framework.ExpectNoError(err, "error creating pods")

	ginkgo.By("Ensuring each pod is running")

	// Wait for each pod to enter the Running state. The wait loops until the
	// pod is running, so a pod that never starts running makes this test time out.
	for _, pod := range pods.Items {
		if pod.DeletionTimestamp != nil {
			continue
		}
		err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
		framework.ExpectNoError(err, "Failed waiting for pod %s to enter running state", pod.Name)
	}
}