k8s.io/kubernetes@v1.29.3/pkg/kubelet/volumemanager/reconciler/reconstruct_new_test.go

/*
Copyright 2022 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconciler

import (
	"fmt"
	"os"
	"path/filepath"
	"reflect"
	"testing"

	v1 "k8s.io/api/core/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/util/sets"
	utilfeature "k8s.io/apiserver/pkg/util/feature"
	featuregatetesting "k8s.io/component-base/featuregate/testing"
	"k8s.io/klog/v2/ktesting"
	"k8s.io/kubernetes/pkg/features"
	"k8s.io/kubernetes/pkg/volume"
	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util"
)

func TestReconstructVolumes(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NewVolumeManagerReconstruction, true)()

	tests := []struct {
		name                                string
		volumePaths                         []string
		expectedVolumesNeedReportedInUse    []string
		expectedVolumesNeedDevicePath       []string
		expectedVolumesFailedReconstruction []string
		verifyFunc                          func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error
	}{
		{
			name: "when two pods are using same volume and both are deleted",
			volumePaths: []string{
				filepath.Join("pod1", "volumes", "fake-plugin", "pvc-abcdef"),
				filepath.Join("pod2", "volumes", "fake-plugin", "pvc-abcdef"),
			},
			expectedVolumesNeedReportedInUse:    []string{"fake-plugin/pvc-abcdef", "fake-plugin/pvc-abcdef"},
			expectedVolumesNeedDevicePath:       []string{"fake-plugin/pvc-abcdef", "fake-plugin/pvc-abcdef"},
			expectedVolumesFailedReconstruction: []string{},
			verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
				mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
				if len(mountedPods) != 0 {
					return fmt.Errorf("expected 0 mounted volumes in asw, got %d", len(mountedPods))
				}
				allPods := rcInstance.actualStateOfWorld.GetAllMountedVolumes()
				if len(allPods) != 2 {
					return fmt.Errorf("expected 2 mounted or uncertain volumes in asw, got %d", len(allPods))
				}
				volumes := rcInstance.actualStateOfWorld.GetPossiblyMountedVolumesForPod("pod1")
				if len(volumes) != 1 {
					return fmt.Errorf("expected 1 uncertain volume in asw, got %d", len(volumes))
				}
				// The volume should be marked as reconstructed in ASW.
				if reconstructed := rcInstance.actualStateOfWorld.IsVolumeReconstructed("fake-plugin/pvc-abcdef", "pod1"); !reconstructed {
					return fmt.Errorf("expected volume to be marked as reconstructed, got %v", reconstructed)
				}
				return nil
			},
		},
		{
			name: "when reconstruction fails for a volume, volumes should be cleaned up",
			volumePaths: []string{
				filepath.Join("pod1", "volumes", "missing-plugin", "pvc-abcdef"),
			},
			expectedVolumesNeedReportedInUse:    []string{},
			expectedVolumesNeedDevicePath:       []string{},
			expectedVolumesFailedReconstruction: []string{"pvc-abcdef"},
		},
	}
	for _, tc := range tests {
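		// Each case gets its own temporary kubelet directory;
		// reconstructVolumes scans <kubeletDir>/pods for the volume
		// directories created below.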
		t.Run(tc.name, func(t *testing.T) {
			tmpKubeletDir, err := os.MkdirTemp("", "")
			if err != nil {
				t.Fatalf("can't make a temp directory for kubeletPods: %v", err)
			}
			defer os.RemoveAll(tmpKubeletDir)

			// Create the kubelet pod directory.
			tmpKubeletPodDir := filepath.Join(tmpKubeletDir, "pods")
			os.MkdirAll(tmpKubeletPodDir, 0755)

			mountPaths := []string{}

			// Create pod and volume directories so the reconciler can find them.
			for _, volumePath := range tc.volumePaths {
				vp := filepath.Join(tmpKubeletPodDir, volumePath)
				mountPaths = append(mountPaths, vp)
				os.MkdirAll(vp, 0755)
			}

			rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths, nil /*custom kubeclient*/)
			rcInstance, _ := rc.(*reconciler)

			// Act
			rcInstance.reconstructVolumes()

			// Assert
			// Convert to []UniqueVolumeName
			expectedVolumes := make([]v1.UniqueVolumeName, len(tc.expectedVolumesNeedDevicePath))
			for i := range tc.expectedVolumesNeedDevicePath {
				expectedVolumes[i] = v1.UniqueVolumeName(tc.expectedVolumesNeedDevicePath[i])
			}
			if !reflect.DeepEqual(expectedVolumes, rcInstance.volumesNeedUpdateFromNodeStatus) {
				t.Errorf("Expected volumesNeedUpdateFromNodeStatus:\n%v\n got:\n%v", expectedVolumes, rcInstance.volumesNeedUpdateFromNodeStatus)
			}

			expectedVolumes = make([]v1.UniqueVolumeName, len(tc.expectedVolumesNeedReportedInUse))
			for i := range tc.expectedVolumesNeedReportedInUse {
				expectedVolumes[i] = v1.UniqueVolumeName(tc.expectedVolumesNeedReportedInUse[i])
			}
			if !reflect.DeepEqual(expectedVolumes, rcInstance.volumesNeedReportedInUse) {
				t.Errorf("Expected volumesNeedReportedInUse:\n%v\n got:\n%v", expectedVolumes, rcInstance.volumesNeedReportedInUse)
			}

			volumesFailedReconstruction := sets.NewString()
			for _, vol := range rcInstance.volumesFailedReconstruction {
				volumesFailedReconstruction.Insert(vol.volumeSpecName)
			}
			if !reflect.DeepEqual(volumesFailedReconstruction.List(), tc.expectedVolumesFailedReconstruction) {
				t.Errorf("Expected volumesFailedReconstruction:\n%v\n got:\n%v", tc.expectedVolumesFailedReconstruction, volumesFailedReconstruction.List())
			}

			if tc.verifyFunc != nil {
				if err := tc.verifyFunc(rcInstance, fakePlugin); err != nil {
					t.Errorf("Test %s failed: %v", tc.name, err)
				}
			}
		})
	}
}

func TestCleanOrphanVolumes(t *testing.T) {
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NewVolumeManagerReconstruction, true)()

	type podInfo struct {
		podName         string
		podUID          string
		outerVolumeName string
		innerVolumeName string
	}
	defaultPodInfo := podInfo{
		podName:         "pod1",
		podUID:          "pod1uid",
		outerVolumeName: "volume-name",
		innerVolumeName: "volume-name",
	}
	defaultVolume := podVolume{
		podName:        "pod1uid",
		volumeSpecName: "volume-name",
		volumePath:     "",
		pluginName:     "fake-plugin",
		volumeMode:     v1.PersistentVolumeFilesystem,
	}

	tests := []struct {
		name                        string
		podInfos                    []podInfo
		volumesFailedReconstruction []podVolume
		expectedUnmounts            int
	}{
		{
			name:                        "volume is in DSW and is not cleaned",
			podInfos:                    []podInfo{defaultPodInfo},
			volumesFailedReconstruction: []podVolume{defaultVolume},
			expectedUnmounts:            0,
		},
		{
			name:                        "volume is not in DSW and is cleaned",
			podInfos:                    []podInfo{},
			volumesFailedReconstruction: []podVolume{defaultVolume},
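			// The orphaned volume is not referenced by any pod in DSW,
			// so cleanOrphanVolumes is expected to tear it down once.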
			expectedUnmounts:            1,
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			// Arrange
			tmpKubeletDir, err := os.MkdirTemp("", "")
			if err != nil {
				t.Fatalf("can't make a temp directory for kubeletPods: %v", err)
			}
			defer os.RemoveAll(tmpKubeletDir)

			// Create the kubelet pod directory.
			tmpKubeletPodDir := filepath.Join(tmpKubeletDir, "pods")
			os.MkdirAll(tmpKubeletPodDir, 0755)

			mountPaths := []string{}

			rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths, nil /*custom kubeclient*/)
			rcInstance, _ := rc.(*reconciler)
			rcInstance.volumesFailedReconstruction = tc.volumesFailedReconstruction
			logger, _ := ktesting.NewTestContext(t)
			for _, tpodInfo := range tc.podInfos {
				pod := getInlineFakePod(tpodInfo.podName, tpodInfo.podUID, tpodInfo.outerVolumeName, tpodInfo.innerVolumeName)
				volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
				podName := util.GetUniquePodName(pod)
				volumeName, err := rcInstance.desiredStateOfWorld.AddPodToVolume(
					podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* SELinuxContext */)
				if err != nil {
					t.Fatalf("Error adding volume %s to dsw: %v", volumeSpec.Name(), err)
				}
				rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "")
			}

			// Act
			rcInstance.cleanOrphanVolumes()

			// Assert
			if len(rcInstance.volumesFailedReconstruction) != 0 {
				t.Errorf("Expected volumesFailedReconstruction to be empty, got %+v", rcInstance.volumesFailedReconstruction)
			}
			// Unmount runs in a goroutine; wait for it to finish.
			var lastErr error
			err = retryWithExponentialBackOff(testOperationBackOffDuration, func() (bool, error) {
				if err := verifyTearDownCalls(fakePlugin, tc.expectedUnmounts); err != nil {
					lastErr = err
					return false, nil
				}
				return true, nil
			})
			if err != nil {
				t.Errorf("Error waiting for volumes to get unmounted: %s: %s", err, lastErr)
			}
		})
	}
}

// verifyTearDownCalls returns nil if any of the plugin's unmounters recorded
// the expected number of TearDown calls, and an error otherwise.
func verifyTearDownCalls(plugin *volumetesting.FakeVolumePlugin, expected int) error {
	unmounters := plugin.GetUnmounters()
	if len(unmounters) == 0 && (expected == 0) {
		return nil
	}
	actualCallCount := 0
	for _, unmounter := range unmounters {
		actualCallCount = unmounter.GetTearDownCallCount()
		if actualCallCount == expected {
			return nil
		}
	}
	return fmt.Errorf("expected TearDown calls %d, got %d", expected, actualCallCount)
}

func TestReconstructVolumesMount(t *testing.T) {
	// This test checks volume reconstruction + a subsequent failed mount.
	// Since the volume is reconstructed, it must be marked as uncertain
	// even after a final SetUp error, see https://github.com/kubernetes/kubernetes/issues/96635
	// and https://github.com/kubernetes/kubernetes/pull/110670.
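	// The flow: reconstruct the volume (it becomes uncertain in ASW), add its
	// pod to DSW, let the reconciler attempt one mount, then check ASW state.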
	defer featuregatetesting.SetFeatureGateDuringTest(t, utilfeature.DefaultFeatureGate, features.NewVolumeManagerReconstruction, true)()

	tests := []struct {
		name            string
		volumePath      string
		expectMount     bool
		volumeMode      v1.PersistentVolumeMode
		deviceMountPath string
	}{
		{
			name:        "reconstructed volume is mounted",
			volumePath:  filepath.Join("pod1uid", "volumes", "fake-plugin", "volumename"),
			expectMount: true,
			volumeMode:  v1.PersistentVolumeFilesystem,
		},
		{
			name: "reconstructed volume fails to mount",
			// FailOnSetupVolumeName: MountDevice succeeds, SetUp fails
			volumePath:  filepath.Join("pod1uid", "volumes", "fake-plugin", volumetesting.FailOnSetupVolumeName),
			expectMount: false,
			volumeMode:  v1.PersistentVolumeFilesystem,
		},
		{
			name:            "reconstructed volume device map fails",
			volumePath:      filepath.Join("pod1uid", "volumeDevices", "fake-plugin", volumetesting.FailMountDeviceVolumeName),
			volumeMode:      v1.PersistentVolumeBlock,
			deviceMountPath: filepath.Join("plugins", "fake-plugin", "volumeDevices", "pluginDependentPath"),
		},
	}
	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			tmpKubeletDir, err := os.MkdirTemp("", "")
			if err != nil {
				t.Fatalf("can't make a temp directory for kubeletPods: %v", err)
			}
			defer os.RemoveAll(tmpKubeletDir)

			// Create the kubelet pod directory.
			tmpKubeletPodDir := filepath.Join(tmpKubeletDir, "pods")
			os.MkdirAll(tmpKubeletPodDir, 0755)

			// Create pod and volume directories so the reconciler can find them.
			vp := filepath.Join(tmpKubeletPodDir, tc.volumePath)
			mountPaths := []string{vp}
			os.MkdirAll(vp, 0755)

			// Arrange 2 - populate DSW
			outerName := filepath.Base(tc.volumePath)
			pod, pv, pvc := getPodPVCAndPV(tc.volumeMode, "pod1", outerName, "pvc1")
			volumeSpec := &volume.Spec{PersistentVolume: pv}
			kubeClient := createtestClientWithPVPVC(pv, pvc, v1.AttachedVolume{
				Name:       v1.UniqueVolumeName(fmt.Sprintf("fake-plugin/%s", outerName)),
				DevicePath: "fake/path",
			})

			rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths, kubeClient /*custom kubeclient*/)
			rcInstance, _ := rc.(*reconciler)

			// Act 1 - reconstruction
			rcInstance.reconstructVolumes()

			// Assert 1 - the volume is Uncertain
			mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
			if len(mountedPods) != 0 {
				t.Errorf("expected 0 mounted volumes, got %+v", mountedPods)
			}
			allPods := rcInstance.actualStateOfWorld.GetAllMountedVolumes()
			if len(allPods) != 1 {
				t.Errorf("expected 1 uncertain volume in asw, got %+v", allPods)
			}

			podName := util.GetUniquePodName(pod)
			volumeName, err := rcInstance.desiredStateOfWorld.AddPodToVolume(
				podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* SELinuxContext */)
			if err != nil {
				t.Fatalf("Error adding volume %s to dsw: %v", volumeSpec.Name(), err)
			}
			logger, _ := ktesting.NewTestContext(t)
			rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "")

			rcInstance.populatorHasAddedPods = func() bool {
				// Mark DSW populated to allow unmounting of volumes.
				return true
			}
			// Mark device paths as reconciled to allow unmounting of volumes.
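			// A nil slice means no reconstructed volume is still waiting for
			// its device path to be refreshed from node.status.volumesAttached.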
			rcInstance.volumesNeedUpdateFromNodeStatus = nil

			// Act 2 - reconcile once
			rcInstance.reconcileNew()

			// Assert 2
			// MountDevice was attempted; the check applies only to filesystem volumes here.
			var lastErr error
			err = retryWithExponentialBackOff(testOperationBackOffDuration, func() (bool, error) {
				if tc.volumeMode != v1.PersistentVolumeFilesystem {
					return true, nil
				}
				if err := volumetesting.VerifyMountDeviceCallCount(1, fakePlugin); err != nil {
					lastErr = err
					return false, nil
				}
				return true, nil
			})
			if err != nil {
				t.Errorf("Error waiting for volumes to get mounted: %s: %s", err, lastErr)
			}

			if tc.expectMount {
				// The volume should be fully mounted.
				waitForMount(t, fakePlugin, volumeName, rcInstance.actualStateOfWorld)
				// SetUp was called and succeeded.
				if err := volumetesting.VerifySetUpCallCount(1, fakePlugin); err != nil {
					t.Errorf("Expected SetUp() to be called, got %s", err)
				}
			} else {
				// The test does not expect any change in ASW, yet it needs to
				// wait for pending volume operations to finish.
				err = retryWithExponentialBackOff(testOperationBackOffDuration, func() (bool, error) {
					return !rcInstance.operationExecutor.IsOperationPending(volumeName, "pod1uid", nodeName), nil
				})
				if err != nil {
					t.Errorf("Error waiting for operation to finish: %s", err)
				}
				// The volume is still uncertain.
				mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
				if len(mountedPods) != 0 {
					t.Errorf("expected 0 mounted volumes after reconcile, got %+v", mountedPods)
				}
				allPods := rcInstance.actualStateOfWorld.GetAllMountedVolumes()
				if len(allPods) != 1 {
					t.Errorf("expected 1 mounted or uncertain volume after reconcile, got %+v", allPods)
				}
				if tc.deviceMountPath != "" {
					expectedDeviceMountPath := filepath.Join(tmpKubeletDir, tc.deviceMountPath)
					deviceMountPath := allPods[0].DeviceMountPath
					if expectedDeviceMountPath != deviceMountPath {
						t.Errorf("expected deviceMountPath to be %s, got %s", expectedDeviceMountPath, deviceMountPath)
					}
				}
			}

			// Unmount was *not* attempted in any case.
			if err := verifyTearDownCalls(fakePlugin, 0); err != nil {
				t.Errorf("Expected no TearDown calls, got: %v", err)
			}
		})
	}
}

func getPodPVCAndPV(volumeMode v1.PersistentVolumeMode, podName, pvName, pvcName string) (*v1.Pod, *v1.PersistentVolume, *v1.PersistentVolumeClaim) {
	pv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{
			Name: pvName,
			UID:  "pvuid",
		},
		Spec: v1.PersistentVolumeSpec{
			ClaimRef:   &v1.ObjectReference{Name: pvcName},
			VolumeMode: &volumeMode,
		},
	}
	pvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{
			Name: pvcName,
			UID:  "pvcuid",
		},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: pvName,
			VolumeMode: &volumeMode,
		},
	}
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: podName,
			UID:  "pod1uid",
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: "volume-name",
					VolumeSource: v1.VolumeSource{
						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
							ClaimName: pvc.Name,
						},
					},
				},
			},
		},
	}
	return pod, pv, pvc
}