k8s.io/kubernetes@v1.31.0-alpha.0.0.20240520171757-56147500dadc/test/e2e/node/security_context.go

/*
Copyright 2015 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

/* This test checks that SecurityContext parameters specified at the
 * pod or the container level work as intended. These tests cannot be
 * run when the 'SecurityContextDeny' admission controller is not used,
 * so they are skipped by default.
 */

package node

import (
	"context"
	"fmt"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/uuid"
	"k8s.io/kubernetes/test/e2e/framework"
	e2ekubectl "k8s.io/kubernetes/test/e2e/framework/kubectl"
	e2epod "k8s.io/kubernetes/test/e2e/framework/pod"
	e2eoutput "k8s.io/kubernetes/test/e2e/framework/pod/output"
	imageutils "k8s.io/kubernetes/test/utils/image"
	admissionapi "k8s.io/pod-security-admission/api"

	"github.com/onsi/ginkgo/v2"
	"github.com/onsi/gomega"
)

// SeccompProcStatusField is the field of /proc/$PID/status referencing the seccomp filter type.
const SeccompProcStatusField = "Seccomp:"

// ProcSelfStatusPath is the path to /proc/self/status.
const ProcSelfStatusPath = "/proc/self/status"

func scTestPod(hostIPC bool, hostPID bool) *v1.Pod {
	podName := "security-context-" + string(uuid.NewUUID())
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:        podName,
			Labels:      map[string]string{"name": podName},
			Annotations: map[string]string{},
		},
		Spec: v1.PodSpec{
			HostIPC:         hostIPC,
			HostPID:         hostPID,
			SecurityContext: &v1.PodSecurityContext{},
			Containers: []v1.Container{
				{
					Name:  "test-container",
					Image: imageutils.GetE2EImage(imageutils.BusyBox),
				},
			},
			RestartPolicy: v1.RestartPolicyNever,
		},
	}

	return pod
}

var _ = SIGDescribe("Security Context", func() {
	f := framework.NewDefaultFramework("security-context")
	f.NamespacePodSecurityLevel = admissionapi.LevelPrivileged

	ginkgo.It("should support pod.Spec.SecurityContext.SupplementalGroups [LinuxOnly]", func(ctx context.Context) {
		pod := scTestPod(false, false)
		pod.Spec.Containers[0].Command = []string{"id", "-G"}
		pod.Spec.SecurityContext.SupplementalGroups = []int64{1234, 5678}
		groups := []string{"1234", "5678"}
		e2eoutput.TestContainerOutput(ctx, f, "pod.Spec.SecurityContext.SupplementalGroups", pod, 0, groups)
	})
	ginkgo.When("if the container's primary UID belongs to some groups in the image [LinuxOnly]", func() {
		ginkgo.It("should add pod.Spec.SecurityContext.SupplementalGroups to them [LinuxOnly] in resultant supplementary groups for the container processes", func(ctx context.Context) {
			uidInImage := int64(1000)
			gidDefinedInImage := int64(50000)
			supplementalGroup := int64(60000)
			agnhost := imageutils.GetConfig(imageutils.Agnhost)
			pod := scTestPod(false, false)
			pod.Spec.Containers[0].Image = agnhost.GetE2EImage()
			pod.Spec.Containers[0].Command = []string{"id", "-G"}
			pod.Spec.SecurityContext.SupplementalGroups = []int64{supplementalGroup}
			pod.Spec.SecurityContext.RunAsUser = &uidInImage

			// In the specified image (agnhost E2E image),
			// - user-defined-in-image (uid=1000) is defined
			// - user-defined-in-image belongs to group-defined-in-image (gid=50000)
			// thus, the resultant supplementary groups of the container processes should be
			// - 1000: self
			// - 50000: pre-defined group, defined in the container image, of self (uid=1000)
			// - 60000: SupplementalGroups
			// $ id -G
			// 1000 50000 60000
			e2eoutput.TestContainerOutput(
				ctx,
				f,
				"pod.Spec.SecurityContext.SupplementalGroups with pre-defined-group in the image",
				pod, 0,
				[]string{fmt.Sprintf("%d %d %d", uidInImage, gidDefinedInImage, supplementalGroup)},
			)
		})
	})

	ginkgo.It("should support pod.Spec.SecurityContext.RunAsUser [LinuxOnly]", func(ctx context.Context) {
		pod := scTestPod(false, false)
		userID := int64(1001)
		pod.Spec.SecurityContext.RunAsUser = &userID
		pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"}

		e2eoutput.TestContainerOutput(ctx, f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
			fmt.Sprintf("uid=%v", userID),
			fmt.Sprintf("gid=%v", 0),
		})
	})

	/*
		Release: v1.21
		Testname: Security Context, test RunAsGroup at pod level
		Description: Container is created with runAsUser and runAsGroup option by passing uid 1001 and gid 2002 at pod level. Pod MUST be in Succeeded phase.
		[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID.
	*/
	framework.ConformanceIt("should support pod.Spec.SecurityContext.RunAsUser And pod.Spec.SecurityContext.RunAsGroup [LinuxOnly]", func(ctx context.Context) {
		pod := scTestPod(false, false)
		userID := int64(1001)
		groupID := int64(2002)
		pod.Spec.SecurityContext.RunAsUser = &userID
		pod.Spec.SecurityContext.RunAsGroup = &groupID
		pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"}

		e2eoutput.TestContainerOutput(ctx, f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
			fmt.Sprintf("uid=%v", userID),
			fmt.Sprintf("gid=%v", groupID),
		})
	})

	ginkgo.It("should support container.SecurityContext.RunAsUser [LinuxOnly]", func(ctx context.Context) {
		pod := scTestPod(false, false)
		userID := int64(1001)
		overrideUserID := int64(1002)
		pod.Spec.SecurityContext.RunAsUser = &userID
		pod.Spec.Containers[0].SecurityContext = new(v1.SecurityContext)
		pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUserID
		pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"}

		e2eoutput.TestContainerOutput(ctx, f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
			fmt.Sprintf("uid=%v", overrideUserID),
			fmt.Sprintf("gid=%v", 0),
		})
	})

	/*
		Release: v1.21
		Testname: Security Context, test RunAsGroup at container level
		Description: Container is created with runAsUser and runAsGroup option by passing uid 1001 and gid 2002 at container level. Pod MUST be in Succeeded phase.
		[LinuxOnly]: This test is marked as LinuxOnly since Windows does not support running as UID / GID.
	*/
	framework.ConformanceIt("should support container.SecurityContext.RunAsUser And container.SecurityContext.RunAsGroup [LinuxOnly]", func(ctx context.Context) {
		pod := scTestPod(false, false)
		userID := int64(1001)
		groupID := int64(2001)
		overrideUserID := int64(1002)
		overrideGroupID := int64(2002)
		pod.Spec.SecurityContext.RunAsUser = &userID
		pod.Spec.SecurityContext.RunAsGroup = &groupID
		pod.Spec.Containers[0].SecurityContext = new(v1.SecurityContext)
		pod.Spec.Containers[0].SecurityContext.RunAsUser = &overrideUserID
		pod.Spec.Containers[0].SecurityContext.RunAsGroup = &overrideGroupID
		pod.Spec.Containers[0].Command = []string{"sh", "-c", "id"}

		e2eoutput.TestContainerOutput(ctx, f, "pod.Spec.SecurityContext.RunAsUser", pod, 0, []string{
			fmt.Sprintf("uid=%v", overrideUserID),
			fmt.Sprintf("gid=%v", overrideGroupID),
		})
	})

	f.It("should support volume SELinux relabeling", f.WithFlaky(), f.WithLabel("LinuxOnly"), func(ctx context.Context) {
		testPodSELinuxLabeling(ctx, f, false, false)
	})

	f.It("should support volume SELinux relabeling when using hostIPC", f.WithFlaky(), f.WithLabel("LinuxOnly"), func(ctx context.Context) {
		testPodSELinuxLabeling(ctx, f, true, false)
	})

	f.It("should support volume SELinux relabeling when using hostPID", f.WithFlaky(), f.WithLabel("LinuxOnly"), func(ctx context.Context) {
		testPodSELinuxLabeling(ctx, f, false, true)
	})

	ginkgo.It("should support seccomp unconfined on the container [LinuxOnly]", func(ctx context.Context) {
		pod := scTestPod(false, false)
		pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}}
		pod.Spec.SecurityContext = &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}}
		pod.Spec.Containers[0].Command = []string{"grep", SeccompProcStatusField, ProcSelfStatusPath}
		e2eoutput.TestContainerOutput(ctx, f, "seccomp unconfined container", pod, 0, []string{"0"}) // seccomp disabled
	})

	ginkgo.It("should support seccomp unconfined on the pod [LinuxOnly]", func(ctx context.Context) {
		pod := scTestPod(false, false)
		pod.Spec.SecurityContext = &v1.PodSecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeUnconfined}}
		pod.Spec.Containers[0].Command = []string{"grep", SeccompProcStatusField, ProcSelfStatusPath}
		e2eoutput.TestContainerOutput(ctx, f, "seccomp unconfined pod", pod, 0, []string{"0"}) // seccomp disabled
	})

	ginkgo.It("should support seccomp runtime/default [LinuxOnly]", func(ctx context.Context) {
		pod := scTestPod(false, false)
		pod.Spec.Containers[0].SecurityContext = &v1.SecurityContext{SeccompProfile: &v1.SeccompProfile{Type: v1.SeccompProfileTypeRuntimeDefault}}
		pod.Spec.Containers[0].Command = []string{"grep", SeccompProcStatusField, ProcSelfStatusPath}
		e2eoutput.TestContainerOutput(ctx, f, "seccomp runtime/default", pod, 0, []string{"2"}) // seccomp filtered
	})

	ginkgo.It("should support seccomp default which is unconfined [LinuxOnly]", func(ctx context.Context) {
		pod := scTestPod(false, false)
		pod.Spec.Containers[0].Command = []string{"grep", SeccompProcStatusField, ProcSelfStatusPath}
		e2eoutput.TestContainerOutput(ctx, f, "seccomp default unconfined", pod, 0, []string{"0"}) // seccomp disabled
	})
})
// testPodSELinuxLabeling creates a pod with the given hostIPC/hostPID settings and
// the MCS label s0:c0,c1, writes a file into an emptyDir volume, and then verifies
// that a second pod with the same MCS label can read that file through a hostPath
// volume, while a pod with a different MCS label (s0:c2,c3) cannot when SELinux is
// in enforcing mode.
func testPodSELinuxLabeling(ctx context.Context, f *framework.Framework, hostIPC bool, hostPID bool) {
	// Write and read a file in an emptyDir volume
	// from a pod with the MCS label s0:c0,c1.
	pod := scTestPod(hostIPC, hostPID)
	volumeName := "test-volume"
	mountPath := "/mounted_volume"
	pod.Spec.Containers[0].VolumeMounts = []v1.VolumeMount{
		{
			Name:      volumeName,
			MountPath: mountPath,
		},
	}
	pod.Spec.Volumes = []v1.Volume{
		{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				EmptyDir: &v1.EmptyDirVolumeSource{
					Medium: v1.StorageMediumDefault,
				},
			},
		},
	}
	pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
		Level: "s0:c0,c1",
	}
	pod.Spec.Containers[0].Command = []string{"sleep", "6000"}

	client := f.ClientSet.CoreV1().Pods(f.Namespace.Name)
	pod, err := client.Create(ctx, pod, metav1.CreateOptions{})

	framework.ExpectNoError(err, "Error creating pod %v", pod)
	framework.ExpectNoError(e2epod.WaitForPodRunningInNamespace(ctx, f.ClientSet, pod))

	testContent := "hello"
	testFilePath := mountPath + "/TEST"
	tk := e2ekubectl.NewTestKubeconfig(framework.TestContext.CertDir, framework.TestContext.Host, framework.TestContext.KubeConfig, framework.TestContext.KubeContext, framework.TestContext.KubectlPath, f.Namespace.Name)
	err = tk.WriteFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath, testContent)
	framework.ExpectNoError(err)
	content, err := tk.ReadFileViaContainer(pod.Name, pod.Spec.Containers[0].Name, testFilePath)
	framework.ExpectNoError(err)
	gomega.Expect(content).To(gomega.ContainSubstring(testContent))

	foundPod, err := f.ClientSet.CoreV1().Pods(f.Namespace.Name).Get(ctx, pod.Name, metav1.GetOptions{})
	framework.ExpectNoError(err)

	// Confirm that the file can be accessed from a second
	// pod using hostPath with the same MCS label.
	volumeHostPath := fmt.Sprintf("%s/pods/%s/volumes/kubernetes.io~empty-dir/%s", framework.TestContext.KubeletRootDir, foundPod.UID, volumeName)
	ginkgo.By(fmt.Sprintf("confirming a container with the same label can read the file under --kubelet-root-dir=%s", framework.TestContext.KubeletRootDir))
	pod = scTestPod(hostIPC, hostPID)
	pod.Spec.NodeName = foundPod.Spec.NodeName
	volumeMounts := []v1.VolumeMount{
		{
			Name:      volumeName,
			MountPath: mountPath,
		},
	}
	volumes := []v1.Volume{
		{
			Name: volumeName,
			VolumeSource: v1.VolumeSource{
				HostPath: &v1.HostPathVolumeSource{
					Path: volumeHostPath,
				},
			},
		},
	}
	pod.Spec.Containers[0].VolumeMounts = volumeMounts
	pod.Spec.Volumes = volumes
	pod.Spec.Containers[0].Command = []string{"cat", testFilePath}
	pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
		Level: "s0:c0,c1",
	}
	e2eoutput.TestContainerOutput(ctx, f, "Pod with same MCS label reading test file", pod, 0, []string{testContent})

	// Confirm that the same pod with a different MCS
	// label cannot access the volume.
	ginkgo.By("confirming a container with a different MCS label is unable to read the file")
	pod = scTestPod(hostIPC, hostPID)
	pod.Spec.Volumes = volumes
	pod.Spec.Containers[0].VolumeMounts = volumeMounts
	pod.Spec.Containers[0].Command = []string{"sleep", "6000"}
	pod.Spec.SecurityContext.SELinuxOptions = &v1.SELinuxOptions{
		Level: "s0:c2,c3",
	}
	_, err = client.Create(ctx, pod, metav1.CreateOptions{})
	framework.ExpectNoError(err, "Error creating pod %v", pod)

	err = e2epod.WaitForPodNameRunningInNamespace(ctx, f.ClientSet, pod.Name, f.Namespace.Name)
	framework.ExpectNoError(err, "Error waiting for pod to run %v", pod)

	// For this to work, SELinux should be in enforcing mode, so let's check that first.
	isEnforced, err := tk.ReadFileViaContainer(pod.Name, "test-container", "/sys/fs/selinux/enforce")
	if err == nil && isEnforced == "1" {
		_, err = tk.ReadFileViaContainer(pod.Name, "test-container", testFilePath)
		gomega.Expect(err).To(gomega.HaveOccurred(), "expecting SELinux to not let the container with a different MCS label read the file")
	}
}
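
// The seccomp tests above assert on the numeric value of the "Seccomp:" field in
// /proc/self/status via `grep`. On Linux this field reports the seccomp mode of the
// process: 0 (disabled), 1 (strict), or 2 (filter, which is what a runtime/default
// profile installs). The following standalone sketch is not part of the e2e suite;
// it is a minimal illustration, assuming a Linux host, of reading the same field
// directly in Go instead of through a container command:
//
//	package main
//
//	import (
//		"bufio"
//		"fmt"
//		"os"
//		"strings"
//	)
//
//	func main() {
//		f, err := os.Open("/proc/self/status")
//		if err != nil {
//			panic(err)
//		}
//		defer f.Close()
//
//		scanner := bufio.NewScanner(f)
//		for scanner.Scan() {
//			line := scanner.Text()
//			// Same field the e2e containers grep for ("Seccomp:").
//			if strings.HasPrefix(line, "Seccomp:") {
//				mode := strings.TrimSpace(strings.TrimPrefix(line, "Seccomp:"))
//				fmt.Println("seccomp mode:", mode) // "0" unconfined, "2" filtered
//			}
//		}
//	}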