k8s.io/kubernetes@v1.29.3/pkg/kubelet/volumemanager/reconciler/reconciler_test.go

/*
Copyright 2016 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package reconciler

import (
	"crypto/md5"
	"fmt"
	"os"
	"path/filepath"
	"testing"
	"time"

	csitrans "k8s.io/csi-translation-lib"
	"k8s.io/kubernetes/pkg/volume/csimigration"

	"github.com/stretchr/testify/assert"
	"k8s.io/mount-utils"

	v1 "k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/api/resource"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	k8stypes "k8s.io/apimachinery/pkg/types"
	"k8s.io/apimachinery/pkg/util/wait"
	"k8s.io/client-go/kubernetes/fake"
	core "k8s.io/client-go/testing"
	"k8s.io/client-go/tools/record"
	"k8s.io/klog/v2"
	"k8s.io/klog/v2/ktesting"
	"k8s.io/kubernetes/pkg/kubelet/volumemanager/cache"
	"k8s.io/kubernetes/pkg/volume"
	volumetesting "k8s.io/kubernetes/pkg/volume/testing"
	"k8s.io/kubernetes/pkg/volume/util"
	"k8s.io/kubernetes/pkg/volume/util/hostutil"
	"k8s.io/kubernetes/pkg/volume/util/operationexecutor"
	"k8s.io/kubernetes/pkg/volume/util/types"
)

const (
	// reconcilerLoopSleepDuration is the amount of time the reconciler loop
	// waits between successive executions
	reconcilerLoopSleepDuration = 1 * time.Nanosecond
	// waitForAttachTimeout is the maximum amount of time an
	// operationexecutor.Mount call will wait for a volume to be attached.
	waitForAttachTimeout = 1 * time.Second
	// nodeName is the name of the node the tests pretend the kubelet runs on.
	nodeName = k8stypes.NodeName("mynodename")
	// kubeletPodsDir is a fake kubelet pods directory passed to the reconciler.
	kubeletPodsDir = "fake-dir"
	// testOperationBackOffDuration is the initial backoff used when polling
	// for an expected state.
	testOperationBackOffDuration = 100 * time.Millisecond
	// reconcilerSyncWaitDuration is how long tests sleep to give the
	// reconciler time to sync.
	reconcilerSyncWaitDuration = 10 * time.Second
)

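// hasAddedPods is passed to NewReconciler as its sourcesReady indicator; it
// always returns true so the tests behave as if all pod sources have
// already been processed.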
func hasAddedPods() bool { return true }

// Calls Run()
// Verifies there are no calls to attach, detach, mount, unmount, etc.
func Test_Run_Positive_DoNothing(t *testing.T) {
	// Arrange
	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
	kubeClient := createTestClient()
	fakeRecorder := &record.FakeRecorder{}
	fakeHandler := volumetesting.NewBlockVolumePathHandler()
	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
		kubeClient,
		volumePluginMgr,
		fakeRecorder,
		fakeHandler,
	))
	reconciler := NewReconciler(
		kubeClient,
		false, /* controllerAttachDetachEnabled */
		reconcilerLoopSleepDuration,
		waitForAttachTimeout,
		nodeName,
		dsw,
		asw,
		hasAddedPods,
		oex,
		mount.NewFakeMounter(nil),
		hostutil.NewFakeHostUtil(nil),
		volumePluginMgr,
		kubeletPodsDir)

	// Act
	runReconciler(reconciler)

	// Assert
	assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroWaitForAttachCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroMountDeviceCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroSetUpCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}

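// Note: helpers such as runReconciler, waitForMount, waitForDetach,
// createTestClient, createtestClientWithPVPVC, and
// retryWithExponentialBackOff are defined later in this file.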
// Populates desiredStateOfWorld cache with one volume/pod.
// Calls Run()
// Verifies there are attach/mount/etc. calls and no detach/unmount calls.
func Test_Run_Positive_VolumeAttachAndMount(t *testing.T) {
	// Arrange
	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
	kubeClient := createTestClient()
	fakeRecorder := &record.FakeRecorder{}
	fakeHandler := volumetesting.NewBlockVolumePathHandler()
	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
		kubeClient,
		volumePluginMgr,
		fakeRecorder,
		fakeHandler))
	reconciler := NewReconciler(
		kubeClient,
		false, /* controllerAttachDetachEnabled */
		reconcilerLoopSleepDuration,
		waitForAttachTimeout,
		nodeName,
		dsw,
		asw,
		hasAddedPods,
		oex,
		mount.NewFakeMounter(nil),
		hostutil.NewFakeHostUtil(nil),
		volumePluginMgr,
		kubeletPodsDir)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod1",
			UID:  "pod1uid",
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: "volume-name",
					VolumeSource: v1.VolumeSource{
						GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
							PDName: "fake-device1",
						},
					},
				},
			},
		},
	}

	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
	podName := util.GetUniquePodName(pod)
	generatedVolumeName, err := dsw.AddPodToVolume(
		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)

	// Assert
	if err != nil {
		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act
	runReconciler(reconciler)
	waitForMount(t, fakePlugin, generatedVolumeName, asw)
	// Assert
	assert.NoError(t, volumetesting.VerifyAttachCallCount(
		1 /* expectedAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Calls Run()
// Verifies there are attach/mount/etc. calls and no detach/unmount calls.
func Test_Run_Positive_VolumeAttachAndMountMigrationEnabled(t *testing.T) {
	// Arrange
	intreeToCSITranslator := csitrans.New()
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: string(nodeName),
		},
		Spec: v1.NodeSpec{},
		Status: v1.NodeStatus{
			VolumesAttached: []v1.AttachedVolume{
				{
					Name:       v1.UniqueVolumeName(fmt.Sprintf("fake-plugin/%s", "pd.csi.storage.gke.io-fake-device1")),
					DevicePath: "fake/path",
				},
			},
		},
	}
	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgrWithNode(t, node)
	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)

	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
	kubeClient := createTestClient(v1.AttachedVolume{
		Name:       v1.UniqueVolumeName(fmt.Sprintf("fake-plugin/%s", "pd.csi.storage.gke.io-fake-device1")),
		DevicePath: "fake/path",
	})

	fakeRecorder := &record.FakeRecorder{}
	fakeHandler := volumetesting.NewBlockVolumePathHandler()
	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
		kubeClient,
		volumePluginMgr,
		fakeRecorder,
		fakeHandler))
	reconciler := NewReconciler(
		kubeClient,
		true, /* controllerAttachDetachEnabled */
		reconcilerLoopSleepDuration,
		waitForAttachTimeout,
		nodeName,
		dsw,
		asw,
		hasAddedPods,
		oex,
		mount.NewFakeMounter(nil),
		hostutil.NewFakeHostUtil(nil),
		volumePluginMgr,
		kubeletPodsDir)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod1",
			UID:  "pod1uid",
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: "volume-name",
					VolumeSource: v1.VolumeSource{
						GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
							PDName: "fake-device1",
						},
					},
				},
			},
		},
	}

	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
	migratedSpec, err := csimigration.TranslateInTreeSpecToCSI(volumeSpec, pod.Namespace, intreeToCSITranslator)
	if err != nil {
		t.Fatalf("unexpected error while translating spec %v: %v", volumeSpec, err)
	}

	podName := util.GetUniquePodName(pod)
	generatedVolumeName, err := dsw.AddPodToVolume(
		podName,
		pod,
		migratedSpec,
		migratedSpec.Name(),
		"",  /* volumeGidValue */
		nil, /* SELinuxContexts */
	)

	// Assert
	if err != nil {
		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
	}
	dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})

	// Act
	runReconciler(reconciler)
	waitForMount(t, fakePlugin, generatedVolumeName, asw)
	// Assert
	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Enables controllerAttachDetachEnabled.
// Calls Run()
// Verifies there is one mount call and no unmount calls.
// Verifies there are no attach/detach calls.
func Test_Run_Positive_VolumeMountControllerAttachEnabled(t *testing.T) {
	// Arrange
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: string(nodeName),
		},
		Status: v1.NodeStatus{
			VolumesAttached: []v1.AttachedVolume{
				{
					Name:       "fake-plugin/fake-device1",
					DevicePath: "fake/path",
				},
			},
		},
	}
	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgrWithNode(t, node)
	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
	kubeClient := createTestClient()
	fakeRecorder := &record.FakeRecorder{}
	fakeHandler := volumetesting.NewBlockVolumePathHandler()
	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
		kubeClient,
		volumePluginMgr,
		fakeRecorder,
		fakeHandler))
	reconciler := NewReconciler(
		kubeClient,
		true, /* controllerAttachDetachEnabled */
		reconcilerLoopSleepDuration,
		waitForAttachTimeout,
		nodeName,
		dsw,
		asw,
		hasAddedPods,
		oex,
		mount.NewFakeMounter(nil),
		hostutil.NewFakeHostUtil(nil),
		volumePluginMgr,
		kubeletPodsDir)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod1",
			UID:  "pod1uid",
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: "volume-name",
					VolumeSource: v1.VolumeSource{
						GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
							PDName: "fake-device1",
						},
					},
				},
			},
		},
	}

	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
	podName := util.GetUniquePodName(pod)
	generatedVolumeName, err := dsw.AddPodToVolume(
		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)
	dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})

	// Assert
	if err != nil {
		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act
	runReconciler(reconciler)
	waitForMount(t, fakePlugin, generatedVolumeName, asw)

	// Assert
	assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Enables controllerAttachDetachEnabled.
// Volume is not reported in-use.
// Calls Run()
// Verifies that there is no wait-for-mount call.
// Verifies that no exponential backoff is triggered.
func Test_Run_Negative_VolumeMountControllerAttachEnabled(t *testing.T) {
	// Arrange
	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
	kubeClient := createTestClient()
	fakeRecorder := &record.FakeRecorder{}
	fakeHandler := volumetesting.NewBlockVolumePathHandler()
	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
		kubeClient,
		volumePluginMgr,
		fakeRecorder,
		fakeHandler))
	reconciler := NewReconciler(
		kubeClient,
		true, /* controllerAttachDetachEnabled */
		reconcilerLoopSleepDuration,
		waitForAttachTimeout,
		nodeName,
		dsw,
		asw,
		hasAddedPods,
		oex,
		mount.NewFakeMounter(nil),
		hostutil.NewFakeHostUtil(nil),
		volumePluginMgr,
		kubeletPodsDir)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod1",
			UID:  "pod1uid",
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: "volume-name",
					VolumeSource: v1.VolumeSource{
						GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
							PDName: "fake-device1",
						},
					},
				},
			},
		},
	}

	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
	podName := util.GetUniquePodName(pod)
	generatedVolumeName, err := dsw.AddPodToVolume(
		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)

	// Assert
	if err != nil {
		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act
	runReconciler(reconciler)
	time.Sleep(reconcilerSyncWaitDuration)

	ok := oex.IsOperationSafeToRetry(generatedVolumeName, podName, nodeName, operationexecutor.VerifyControllerAttachedVolumeOpName)
	if !ok {
		t.Errorf("operation on volume %s is not safe to retry", generatedVolumeName)
	}

	// Assert
	assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
		0 /* expectedWaitForAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
		0 /* expectedMountDeviceCallCount */, fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Calls Run()
// Verifies there is one attach/mount/etc call and no detach calls.
// Deletes volume/pod from desired state of world.
// Verifies detach/unmount calls are issued.
func Test_Run_Positive_VolumeAttachMountUnmountDetach(t *testing.T) {
	// Arrange
	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
	kubeClient := createTestClient()
	fakeRecorder := &record.FakeRecorder{}
	fakeHandler := volumetesting.NewBlockVolumePathHandler()
	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
		kubeClient,
		volumePluginMgr,
		fakeRecorder,
		fakeHandler))
	reconciler := NewReconciler(
		kubeClient,
		false, /* controllerAttachDetachEnabled */
		reconcilerLoopSleepDuration,
		waitForAttachTimeout,
		nodeName,
		dsw,
		asw,
		hasAddedPods,
		oex,
		mount.NewFakeMounter(nil),
		hostutil.NewFakeHostUtil(nil),
		volumePluginMgr,
		kubeletPodsDir)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod1",
			UID:  "pod1uid",
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: "volume-name",
					VolumeSource: v1.VolumeSource{
						GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
							PDName: "fake-device1",
						},
					},
				},
			},
		},
	}

	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
	podName := util.GetUniquePodName(pod)
	generatedVolumeName, err := dsw.AddPodToVolume(
		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)

	// Assert
	if err != nil {
		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act
	runReconciler(reconciler)
	waitForMount(t, fakePlugin, generatedVolumeName, asw)
	// Assert
	assert.NoError(t, volumetesting.VerifyAttachCallCount(
		1 /* expectedAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))

	// Act
	dsw.DeletePodFromVolume(podName, generatedVolumeName)
	waitForDetach(t, generatedVolumeName, asw)

	// Assert
	assert.NoError(t, volumetesting.VerifyTearDownCallCount(
		1 /* expectedTearDownCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyDetachCallCount(
		1 /* expectedDetachCallCount */, fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Enables controllerAttachDetachEnabled.
// Calls Run()
// Verifies one mount call is made and no unmount calls.
// Deletes volume/pod from desired state of world.
// Verifies one unmount call is made.
// Verifies there are no attach/detach calls made.
func Test_Run_Positive_VolumeUnmountControllerAttachEnabled(t *testing.T) {
	// Arrange
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: string(nodeName),
		},
		Status: v1.NodeStatus{
			VolumesAttached: []v1.AttachedVolume{
				{
					Name:       "fake-plugin/fake-device1",
					DevicePath: "fake/path",
				},
			},
		},
	}
	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgrWithNode(t, node)
	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
	kubeClient := createTestClient()
	fakeRecorder := &record.FakeRecorder{}
	fakeHandler := volumetesting.NewBlockVolumePathHandler()
	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
		kubeClient,
		volumePluginMgr,
		fakeRecorder,
		fakeHandler))
	reconciler := NewReconciler(
		kubeClient,
		true, /* controllerAttachDetachEnabled */
		reconcilerLoopSleepDuration,
		waitForAttachTimeout,
		nodeName,
		dsw,
		asw,
		hasAddedPods,
		oex,
		mount.NewFakeMounter(nil),
		hostutil.NewFakeHostUtil(nil),
		volumePluginMgr,
		kubeletPodsDir)
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name: "pod1",
			UID:  "pod1uid",
		},
		Spec: v1.PodSpec{
			Volumes: []v1.Volume{
				{
					Name: "volume-name",
					VolumeSource: v1.VolumeSource{
						GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
							PDName: "fake-device1",
						},
					},
				},
			},
		},
	}

	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
	podName := util.GetUniquePodName(pod)
	generatedVolumeName, err := dsw.AddPodToVolume(
		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)

	// Assert
	if err != nil {
		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act
	runReconciler(reconciler)

	dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})
	waitForMount(t, fakePlugin, generatedVolumeName, asw)

	// Assert
	assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyMountDeviceCallCount(
		1 /* expectedMountDeviceCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifySetUpCallCount(
		1 /* expectedSetUpCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroTearDownCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))

	// Act
	dsw.DeletePodFromVolume(podName, generatedVolumeName)
	waitForDetach(t, generatedVolumeName, asw)

	// Assert
	assert.NoError(t, volumetesting.VerifyTearDownCallCount(
		1 /* expectedTearDownCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Calls Run()
// Verifies there are attach/get map paths/setupDevice calls and
// no detach/teardownDevice calls.
func Test_Run_Positive_VolumeAttachAndMap(t *testing.T) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pod1",
			UID:       "pod1uid",
			Namespace: "ns",
		},
		Spec: v1.PodSpec{},
	}

	mode := v1.PersistentVolumeBlock
	gcepv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
		Spec: v1.PersistentVolumeSpec{
			Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
			PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
				v1.ReadOnlyMany,
			},
			VolumeMode: &mode,
			ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
		},
	}

	gcepvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{UID: "pvc-001", Name: "pvc-volume-name", Namespace: "ns"},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "volume-name",
			VolumeMode: &mode,
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase:    v1.ClaimBound,
			Capacity: gcepv.Spec.Capacity,
		},
	}

	// Arrange
	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
	kubeClient := createtestClientWithPVPVC(gcepv, gcepvc)
	fakeRecorder := &record.FakeRecorder{}
	fakeHandler := volumetesting.NewBlockVolumePathHandler()
	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
		kubeClient,
		volumePluginMgr,
		fakeRecorder,
		fakeHandler))
	reconciler := NewReconciler(
		kubeClient,
		false, /* controllerAttachDetachEnabled */
		reconcilerLoopSleepDuration,
		waitForAttachTimeout,
		nodeName,
		dsw,
		asw,
		hasAddedPods,
		oex,
		mount.NewFakeMounter(nil),
		hostutil.NewFakeHostUtil(nil),
		volumePluginMgr,
		kubeletPodsDir)

	volumeSpec := &volume.Spec{
		PersistentVolume: gcepv,
	}
	podName := util.GetUniquePodName(pod)
	generatedVolumeName, err := dsw.AddPodToVolume(
		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)

	// Assert
	if err != nil {
		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act
	runReconciler(reconciler)
	waitForMount(t, fakePlugin, generatedVolumeName, asw)
	// Assert
	assert.NoError(t, volumetesting.VerifyAttachCallCount(
		1 /* expectedAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyGetMapPodDeviceCallCount(
		1 /* expectedGetMapDeviceCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Enables controllerAttachDetachEnabled.
// Calls Run()
// Verifies there are two get map path calls, a setupDevice call
// and no teardownDevice call.
// Verifies there are no attach/detach calls.
func Test_Run_Positive_BlockVolumeMapControllerAttachEnabled(t *testing.T) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pod1",
			UID:       "pod1uid",
			Namespace: "ns",
		},
		Spec: v1.PodSpec{},
	}

	mode := v1.PersistentVolumeBlock
	gcepv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
		Spec: v1.PersistentVolumeSpec{
			Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
			PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
				v1.ReadOnlyMany,
			},
			VolumeMode: &mode,
			ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
		},
	}
	gcepvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{UID: "pvc-001", Name: "pvc-volume-name", Namespace: "ns"},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "volume-name",
			VolumeMode: &mode,
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase:    v1.ClaimBound,
			Capacity: gcepv.Spec.Capacity,
		},
	}

	volumeSpec := &volume.Spec{
		PersistentVolume: gcepv,
	}
	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: string(nodeName),
		},
		Status: v1.NodeStatus{
			VolumesAttached: []v1.AttachedVolume{
				{
					Name:       "fake-plugin/fake-device1",
					DevicePath: "fake/path",
				},
			},
		},
	}

	// Arrange
	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgrWithNode(t, node)
	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
	kubeClient := createtestClientWithPVPVC(gcepv, gcepvc, v1.AttachedVolume{
		Name:       "fake-plugin/fake-device1",
		DevicePath: "/fake/path",
	})
	fakeRecorder := &record.FakeRecorder{}
	fakeHandler := volumetesting.NewBlockVolumePathHandler()
	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
		kubeClient,
		volumePluginMgr,
		fakeRecorder,
		fakeHandler))
	reconciler := NewReconciler(
		kubeClient,
		true, /* controllerAttachDetachEnabled */
		reconcilerLoopSleepDuration,
		waitForAttachTimeout,
		nodeName,
		dsw,
		asw,
		hasAddedPods,
		oex,
		mount.NewFakeMounter(nil),
		hostutil.NewFakeHostUtil(nil),
		volumePluginMgr,
		kubeletPodsDir)

	podName := util.GetUniquePodName(pod)
	generatedVolumeName, err := dsw.AddPodToVolume(
		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)
	dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})

	// Assert
	if err != nil {
		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act
	runReconciler(reconciler)
	waitForMount(t, fakePlugin, generatedVolumeName, asw)

	// Assert
	assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyGetMapPodDeviceCallCount(
		1 /* expectedGetMapDeviceCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Calls Run()
// Verifies there is one attach call, two get map path calls,
// a setupDevice call and no detach calls.
// Deletes volume/pod from desired state of world.
// Verifies one detach and one teardownDevice call are issued.
func Test_Run_Positive_BlockVolumeAttachMapUnmapDetach(t *testing.T) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pod1",
			UID:       "pod1uid",
			Namespace: "ns",
		},
		Spec: v1.PodSpec{},
	}

	mode := v1.PersistentVolumeBlock
	gcepv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
		Spec: v1.PersistentVolumeSpec{
			Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
			PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
				v1.ReadOnlyMany,
			},
			VolumeMode: &mode,
			ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
		},
	}
	gcepvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{UID: "pvc-001", Name: "pvc-volume-name", Namespace: "ns"},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "volume-name",
			VolumeMode: &mode,
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase:    v1.ClaimBound,
			Capacity: gcepv.Spec.Capacity,
		},
	}

	volumeSpec := &volume.Spec{
		PersistentVolume: gcepv,
	}

	// Arrange
	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgr(t)
	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
	kubeClient := createtestClientWithPVPVC(gcepv, gcepvc)
	fakeRecorder := &record.FakeRecorder{}
	fakeHandler := volumetesting.NewBlockVolumePathHandler()
	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
		kubeClient,
		volumePluginMgr,
		fakeRecorder,
		fakeHandler))
	reconciler := NewReconciler(
		kubeClient,
		false, /* controllerAttachDetachEnabled */
		reconcilerLoopSleepDuration,
		waitForAttachTimeout,
		nodeName,
		dsw,
		asw,
		hasAddedPods,
		oex,
		mount.NewFakeMounter(nil),
		hostutil.NewFakeHostUtil(nil),
		volumePluginMgr,
		kubeletPodsDir)

	podName := util.GetUniquePodName(pod)
	generatedVolumeName, err := dsw.AddPodToVolume(
		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)

	// Assert
	if err != nil {
		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act
	runReconciler(reconciler)
	waitForMount(t, fakePlugin, generatedVolumeName, asw)
	// Assert
	assert.NoError(t, volumetesting.VerifyAttachCallCount(
		1 /* expectedAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyGetMapPodDeviceCallCount(
		1 /* expectedGetMapDeviceCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))

	// Act
	dsw.DeletePodFromVolume(podName, generatedVolumeName)
	waitForDetach(t, generatedVolumeName, asw)

	// Assert
	assert.NoError(t, volumetesting.VerifyTearDownDeviceCallCount(
		1 /* expectedTearDownDeviceCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyDetachCallCount(
		1 /* expectedDetachCallCount */, fakePlugin))
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Enables controllerAttachDetachEnabled.
// Calls Run()
// Verifies two map path calls are made and no teardownDevice/detach calls.
// Deletes volume/pod from desired state of world.
// Verifies one teardownDevice call is made.
// Verifies there are no attach/detach calls made.
func Test_Run_Positive_VolumeUnmapControllerAttachEnabled(t *testing.T) {
	pod := &v1.Pod{
		ObjectMeta: metav1.ObjectMeta{
			Name:      "pod1",
			UID:       "pod1uid",
			Namespace: "ns",
		},
		Spec: v1.PodSpec{},
	}

	mode := v1.PersistentVolumeBlock
	gcepv := &v1.PersistentVolume{
		ObjectMeta: metav1.ObjectMeta{UID: "001", Name: "volume-name"},
		Spec: v1.PersistentVolumeSpec{
			Capacity:               v1.ResourceList{v1.ResourceName(v1.ResourceStorage): resource.MustParse("10G")},
			PersistentVolumeSource: v1.PersistentVolumeSource{GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{PDName: "fake-device1"}},
			AccessModes: []v1.PersistentVolumeAccessMode{
				v1.ReadWriteOnce,
				v1.ReadOnlyMany,
			},
			VolumeMode: &mode,
			ClaimRef:   &v1.ObjectReference{Namespace: "ns", Name: "pvc-volume-name"},
		},
	}
	gcepvc := &v1.PersistentVolumeClaim{
		ObjectMeta: metav1.ObjectMeta{UID: "pvc-001", Name: "pvc-volume-name", Namespace: "ns"},
		Spec: v1.PersistentVolumeClaimSpec{
			VolumeName: "volume-name",
			VolumeMode: &mode,
		},
		Status: v1.PersistentVolumeClaimStatus{
			Phase:    v1.ClaimBound,
			Capacity: gcepv.Spec.Capacity,
		},
	}

	volumeSpec := &volume.Spec{
		PersistentVolume: gcepv,
	}

	node := &v1.Node{
		ObjectMeta: metav1.ObjectMeta{
			Name: string(nodeName),
		},
		Status: v1.NodeStatus{
			VolumesAttached: []v1.AttachedVolume{
				{
					Name:       "fake-plugin/fake-device1",
					DevicePath: "/fake/path",
				},
			},
		},
	}

	// Arrange
	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgrWithNode(t, node)
	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
	kubeClient := createtestClientWithPVPVC(gcepv, gcepvc, v1.AttachedVolume{
		Name:       "fake-plugin/fake-device1",
		DevicePath: "/fake/path",
	})
	fakeRecorder := &record.FakeRecorder{}
	fakeHandler := volumetesting.NewBlockVolumePathHandler()
	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
		kubeClient,
		volumePluginMgr,
		fakeRecorder,
		fakeHandler))
	reconciler := NewReconciler(
		kubeClient,
		true, /* controllerAttachDetachEnabled */
		reconcilerLoopSleepDuration,
		waitForAttachTimeout,
		nodeName,
		dsw,
		asw,
		hasAddedPods,
		oex,
		mount.NewFakeMounter(nil),
		hostutil.NewFakeHostUtil(nil),
		volumePluginMgr,
		kubeletPodsDir)

	podName := util.GetUniquePodName(pod)
	generatedVolumeName, err := dsw.AddPodToVolume(
		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)

	// Assert
	if err != nil {
		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
	}

	// Act
	runReconciler(reconciler)

	dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})
	waitForMount(t, fakePlugin, generatedVolumeName, asw)

	// Assert
	assert.NoError(t, volumetesting.VerifyZeroAttachCalls(fakePlugin))
	assert.NoError(t, volumetesting.VerifyWaitForAttachCallCount(
		1 /* expectedWaitForAttachCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyGetMapPodDeviceCallCount(
		1 /* expectedGetMapDeviceCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))

	// Act
	dsw.DeletePodFromVolume(podName, generatedVolumeName)
	waitForDetach(t, generatedVolumeName, asw)

	// Assert
	assert.NoError(t, volumetesting.VerifyTearDownDeviceCallCount(
		1 /* expectedTearDownDeviceCallCount */, fakePlugin))
	assert.NoError(t, volumetesting.VerifyZeroDetachCallCount(fakePlugin))
}

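// Verifies that MountVolume of a block volume returns a descriptive error
// when no volume plugin, or no block volume mapper plugin, can be found
// for the given spec.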
func Test_GenerateMapVolumeFunc_Plugin_Not_Found(t *testing.T) {
	testCases := map[string]struct {
		volumePlugins  []volume.VolumePlugin
		expectErr      bool
		expectedErrMsg string
	}{
		"volumePlugin is nil": {
			volumePlugins:  []volume.VolumePlugin{},
			expectErr:      true,
			expectedErrMsg: "MapVolume.FindMapperPluginBySpec failed",
		},
		"blockVolumePlugin is nil": {
			volumePlugins:  volumetesting.NewFakeFileVolumePlugin(),
			expectErr:      true,
			expectedErrMsg: "MapVolume.FindMapperPluginBySpec failed to find BlockVolumeMapper plugin. Volume plugin is nil.",
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			volumePluginMgr := &volume.VolumePluginMgr{}
			volumePluginMgr.InitPlugins(tc.volumePlugins, nil, nil)
			asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
			oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
				nil, /* kubeClient */
				volumePluginMgr,
				nil, /* fakeRecorder */
				nil))

			pod := &v1.Pod{
				ObjectMeta: metav1.ObjectMeta{
					Name: "pod1",
					UID:  "pod1uid",
				},
				Spec: v1.PodSpec{},
			}
			volumeMode := v1.PersistentVolumeBlock
			tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
			volumeToMount := operationexecutor.VolumeToMount{
				Pod:        pod,
				VolumeSpec: tmpSpec}
			err := oex.MountVolume(waitForAttachTimeout, volumeToMount, asw, false)
			// Assert
			if assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expectedErrMsg)
			}
		})
	}
}

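// Verifies that UnmountVolume of a block volume returns a descriptive
// error when no matching block volume mapper plugin can be found by name.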
func Test_GenerateUnmapVolumeFunc_Plugin_Not_Found(t *testing.T) {
	testCases := map[string]struct {
		volumePlugins  []volume.VolumePlugin
		expectErr      bool
		expectedErrMsg string
	}{
		"volumePlugin is nil": {
			volumePlugins:  []volume.VolumePlugin{},
			expectErr:      true,
			expectedErrMsg: "UnmapVolume.FindMapperPluginByName failed",
		},
		"blockVolumePlugin is nil": {
			volumePlugins:  volumetesting.NewFakeFileVolumePlugin(),
			expectErr:      true,
			expectedErrMsg: "UnmapVolume.FindMapperPluginByName failed to find BlockVolumeMapper plugin. Volume plugin is nil.",
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			volumePluginMgr := &volume.VolumePluginMgr{}
			volumePluginMgr.InitPlugins(tc.volumePlugins, nil, nil)
			asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
			oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
				nil, /* kubeClient */
				volumePluginMgr,
				nil, /* fakeRecorder */
				nil))
			volumeMode := v1.PersistentVolumeBlock
			tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
			volumeToUnmount := operationexecutor.MountedVolume{
				PluginName: "fake-file-plugin",
				VolumeSpec: tmpSpec}
			err := oex.UnmountVolume(volumeToUnmount, asw, "" /* podsDir */)
			// Assert
			if assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expectedErrMsg)
			}
		})
	}
}

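// Verifies that UnmountDevice of a block volume returns a descriptive
// error when no matching block volume mapper plugin can be found by name.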
func Test_GenerateUnmapDeviceFunc_Plugin_Not_Found(t *testing.T) {
	testCases := map[string]struct {
		volumePlugins  []volume.VolumePlugin
		expectErr      bool
		expectedErrMsg string
	}{
		"volumePlugin is nil": {
			volumePlugins:  []volume.VolumePlugin{},
			expectErr:      true,
			expectedErrMsg: "UnmapDevice.FindMapperPluginByName failed",
		},
		"blockVolumePlugin is nil": {
			volumePlugins:  volumetesting.NewFakeFileVolumePlugin(),
			expectErr:      true,
			expectedErrMsg: "UnmapDevice.FindMapperPluginByName failed to find BlockVolumeMapper plugin. Volume plugin is nil.",
		},
	}

	for name, tc := range testCases {
		t.Run(name, func(t *testing.T) {
			volumePluginMgr := &volume.VolumePluginMgr{}
			volumePluginMgr.InitPlugins(tc.volumePlugins, nil, nil)
			asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
			oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
				nil, /* kubeClient */
				volumePluginMgr,
				nil, /* fakeRecorder */
				nil))
			var hostutil hostutil.HostUtils
			volumeMode := v1.PersistentVolumeBlock
			tmpSpec := &volume.Spec{PersistentVolume: &v1.PersistentVolume{Spec: v1.PersistentVolumeSpec{VolumeMode: &volumeMode}}}
			deviceToDetach := operationexecutor.AttachedVolume{VolumeSpec: tmpSpec, PluginName: "fake-file-plugin"}
			err := oex.UnmountDevice(deviceToDetach, asw, hostutil)
			// Assert
			if assert.Error(t, err) {
				assert.Contains(t, err.Error(), tc.expectedErrMsg)
			}
		})
	}
}

// Populates desiredStateOfWorld cache with one volume/pod.
// Enables controllerAttachDetachEnabled.
// Calls Run()
// Waits for the volume to be mounted.
// Marks the volume as fsResizeRequired in ASW.
// Verifies that the volume's fsResizeRequired flag is cleared later.
func Test_Run_Positive_VolumeFSResizeControllerAttachEnabled(t *testing.T) {
	blockMode := v1.PersistentVolumeBlock
	fsMode := v1.PersistentVolumeFilesystem

	var tests = []struct {
		name            string
		volumeMode      *v1.PersistentVolumeMode
		expansionFailed bool
		uncertainTest   bool
		pvName          string
		pvcSize         resource.Quantity
		pvcStatusSize   resource.Quantity
		oldPVSize       resource.Quantity
		newPVSize       resource.Quantity
	}{
		{
			name:          "expand-fs-volume",
			volumeMode:    &fsMode,
			pvName:        "pv",
			pvcSize:       resource.MustParse("10G"),
			pvcStatusSize: resource.MustParse("10G"),
			newPVSize:     resource.MustParse("15G"),
			oldPVSize:     resource.MustParse("10G"),
		},
		{
			name:          "expand-raw-block",
			volumeMode:    &blockMode,
			pvName:        "pv",
			pvcSize:       resource.MustParse("10G"),
			pvcStatusSize: resource.MustParse("10G"),
			newPVSize:     resource.MustParse("15G"),
			oldPVSize:     resource.MustParse("10G"),
		},
		{
			name:            "expand-fs-volume with in-use error",
			volumeMode:      &fsMode,
			expansionFailed: true,
			pvName:          volumetesting.FailWithInUseVolumeName,
			pvcSize:         resource.MustParse("10G"),
			pvcStatusSize:   resource.MustParse("10G"),
			newPVSize:       resource.MustParse("15G"),
			oldPVSize:       resource.MustParse("13G"),
		},
		{
			name:            "expand-fs-volume with unsupported error",
			volumeMode:      &fsMode,
			expansionFailed: false,
			pvName:          volumetesting.FailWithUnSupportedVolumeName,
			pvcSize:         resource.MustParse("10G"),
			pvcStatusSize:   resource.MustParse("10G"),
			newPVSize:       resource.MustParse("15G"),
			oldPVSize:       resource.MustParse("13G"),
		},
	}

	for _, tc := range tests {
		t.Run(tc.name, func(t *testing.T) {
			pv := getTestPV(tc.pvName, tc.volumeMode, tc.oldPVSize)
			pvc := getTestPVC("pv", tc.volumeMode, tc.pvcSize, tc.pvcStatusSize)
			pod := getTestPod(pvc.Name)

			// deep copy before reconciler runs to avoid data race.
			pvWithSize := pv.DeepCopy()
			node := &v1.Node{
				ObjectMeta: metav1.ObjectMeta{
					Name: string(nodeName),
				},
				Spec: v1.NodeSpec{},
				Status: v1.NodeStatus{
					VolumesAttached: []v1.AttachedVolume{
						{
							Name:       v1.UniqueVolumeName(fmt.Sprintf("fake-plugin/%s", tc.pvName)),
							DevicePath: "fake/path",
						},
					},
				},
			}
			volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgrWithNode(t, node)
			seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
			dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
			asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
			kubeClient := createtestClientWithPVPVC(pv, pvc, v1.AttachedVolume{
				Name:       v1.UniqueVolumeName(fmt.Sprintf("fake-plugin/%s", tc.pvName)),
				DevicePath: "fake/path",
			})
			fakeRecorder := &record.FakeRecorder{}
			fakeHandler := volumetesting.NewBlockVolumePathHandler()
			oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
				kubeClient,
				volumePluginMgr,
				fakeRecorder,
				fakeHandler))

			reconciler := NewReconciler(
				kubeClient,
				true, /* controllerAttachDetachEnabled */
				reconcilerLoopSleepDuration,
				waitForAttachTimeout,
				nodeName,
				dsw,
				asw,
				hasAddedPods,
				oex,
				mount.NewFakeMounter(nil),
				hostutil.NewFakeHostUtil(nil),
				volumePluginMgr,
				kubeletPodsDir)

			volumeSpec := &volume.Spec{PersistentVolume: pv}
			podName := util.GetUniquePodName(pod)
			volumeName, err := dsw.AddPodToVolume(
				podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)
			// Assert
			if err != nil {
				t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
			}
			dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{volumeName})

			// Start the reconciler to fill ASW.
			stopChan, stoppedChan := make(chan struct{}), make(chan struct{})
			go func() {
				defer close(stoppedChan)
				reconciler.Run(stopChan)
			}()
			waitForMount(t, fakePlugin, volumeName, asw)
			// Stop the reconciler.
			close(stopChan)
			<-stoppedChan

			// Simulate what DSOWP does
			pvWithSize.Spec.Capacity[v1.ResourceStorage] = tc.newPVSize
			volumeSpec = &volume.Spec{PersistentVolume: pvWithSize}
			dsw.AddPodToVolume(podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxContexts */)

			t.Logf("Changing size of the volume to %s", tc.newPVSize.String())
			newSize := tc.newPVSize.DeepCopy()
			dsw.UpdatePersistentVolumeSize(volumeName, &newSize)

			_, _, podExistErr := asw.PodExistsInVolume(podName, volumeName, newSize, "" /* SELinuxLabel */)
			if tc.expansionFailed {
				if cache.IsFSResizeRequiredError(podExistErr) {
					t.Fatalf("volume %s should not throw fsResizeRequired error: %v", volumeName, podExistErr)
				}
			} else {
				if !cache.IsFSResizeRequiredError(podExistErr) {
					t.Fatalf("Volume should be marked as fsResizeRequired, but receive unexpected error: %v", podExistErr)
				}
				go reconciler.Run(wait.NeverStop)

				waitErr := retryWithExponentialBackOff(testOperationBackOffDuration, func() (done bool, err error) {
					mounted, _, err := asw.PodExistsInVolume(podName, volumeName, newSize, "" /* SELinuxContext */)
					return mounted && err == nil, nil
				})
				if waitErr != nil {
					t.Fatalf("Volume resize should have succeeded: %v", waitErr)
				}
			}

		})
	}
}

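// getTestPVC returns a PVC named "pvc" bound to the PV pvName, with the
// given volume mode, requested (spec) size, and reported (status) capacity.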
  1401  func getTestPVC(pvName string, volumeMode *v1.PersistentVolumeMode, specSize, statusSize resource.Quantity) *v1.PersistentVolumeClaim {
  1402  	pvc := &v1.PersistentVolumeClaim{
  1403  		ObjectMeta: metav1.ObjectMeta{
  1404  			Name: "pvc",
  1405  			UID:  "pvcuid",
  1406  		},
  1407  		Spec: v1.PersistentVolumeClaimSpec{
  1408  			Resources: v1.VolumeResourceRequirements{
  1409  				Requests: v1.ResourceList{
  1410  					v1.ResourceStorage: specSize,
  1411  				},
  1412  			},
  1413  			VolumeName: pvName,
  1414  			VolumeMode: volumeMode,
  1415  		},
  1416  		Status: v1.PersistentVolumeClaimStatus{
  1417  			Capacity: v1.ResourceList{
  1418  				v1.ResourceStorage: statusSize,
  1419  			},
  1420  		},
  1421  	}
  1422  	return pvc
  1423  }
  1424  
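        // getTestPV builds a PV fixture with the given name, volume mode, and
        // capacity, claimed by the "pvc" object built via getTestPVC.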
  1425  func getTestPV(pvName string, volumeMode *v1.PersistentVolumeMode, pvSize resource.Quantity) *v1.PersistentVolume {
  1426  	pv := &v1.PersistentVolume{
  1427  		ObjectMeta: metav1.ObjectMeta{
  1428  			Name: pvName,
  1429  			UID:  "pvuid",
  1430  		},
  1431  		Spec: v1.PersistentVolumeSpec{
  1432  			ClaimRef:   &v1.ObjectReference{Name: "pvc"},
  1433  			VolumeMode: volumeMode,
  1434  			Capacity: v1.ResourceList{
  1435  				v1.ResourceStorage: pvSize,
  1436  			},
  1437  		},
  1438  	}
  1439  	return pv
  1440  }
  1441  
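        // getTestPod builds a single-volume pod fixture referencing claimName
        // through a PersistentVolumeClaim volume source.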
  1442  func getTestPod(claimName string) *v1.Pod {
  1443  	pod := &v1.Pod{
  1444  		ObjectMeta: metav1.ObjectMeta{
  1445  			Name: "pod1",
  1446  			UID:  "pod1uid",
  1447  		},
  1448  		Spec: v1.PodSpec{
  1449  			Volumes: []v1.Volume{
  1450  				{
  1451  					Name: "volume-name",
  1452  					VolumeSource: v1.VolumeSource{
  1453  						PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
  1454  							ClaimName: claimName,
  1455  						},
  1456  					},
  1457  				},
  1458  			},
  1459  		},
  1460  	}
  1461  	return pod
  1462  }
  1463  
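        // Test_UncertainDeviceGlobalMounts verifies how device (global) mount
        // outcomes - timeouts, failures, and success-then-failure combinations -
        // are recorded in the actual state of world, and how many device unmount
        // calls each outcome should produce during cleanup. The sentinel volume
        // names from volumetesting (e.g. TimeoutOnMountDeviceVolumeName) instruct
        // the fake plugin to simulate the corresponding behavior.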
  1464  func Test_UncertainDeviceGlobalMounts(t *testing.T) {
  1465  	var tests = []struct {
  1466  		name                   string
  1467  		deviceState            operationexecutor.DeviceMountState
  1468  		unmountDeviceCallCount int
  1469  		volumeName             string
  1470  		supportRemount         bool
  1471  	}{
  1472  		{
  1473  			name:                   "timed out operations should result in device marked as uncertain",
  1474  			deviceState:            operationexecutor.DeviceMountUncertain,
  1475  			unmountDeviceCallCount: 1,
  1476  			volumeName:             volumetesting.TimeoutOnMountDeviceVolumeName,
  1477  		},
  1478  		{
  1479  			name:                   "failed operation should result in not-mounted device",
  1480  			deviceState:            operationexecutor.DeviceNotMounted,
  1481  			unmountDeviceCallCount: 0,
  1482  			volumeName:             volumetesting.FailMountDeviceVolumeName,
  1483  		},
  1484  		{
  1485  			name:                   "timeout followed by failed operation should result in non-mounted device",
  1486  			deviceState:            operationexecutor.DeviceNotMounted,
  1487  			unmountDeviceCallCount: 0,
  1488  			volumeName:             volumetesting.TimeoutAndFailOnMountDeviceVolumeName,
  1489  		},
  1490  		{
  1491  			name:                   "success followed by timeout operation should result in mounted device",
  1492  			deviceState:            operationexecutor.DeviceGloballyMounted,
  1493  			unmountDeviceCallCount: 1,
  1494  			volumeName:             volumetesting.SuccessAndTimeoutDeviceName,
  1495  			supportRemount:         true,
  1496  		},
  1497  		{
  1498  			name:                   "success followed by failed operation should result in mounted device",
  1499  			deviceState:            operationexecutor.DeviceGloballyMounted,
  1500  			unmountDeviceCallCount: 1,
  1501  			volumeName:             volumetesting.SuccessAndFailOnMountDeviceName,
  1502  			supportRemount:         true,
  1503  		},
  1504  	}
  1505  
  1506  	modes := []v1.PersistentVolumeMode{v1.PersistentVolumeBlock, v1.PersistentVolumeFilesystem}
  1507  
  1508  	for modeIndex := range modes {
  1509  		for tcIndex := range tests {
  1510  			mode := modes[modeIndex]
  1511  			tc := tests[tcIndex]
  1512  			testName := fmt.Sprintf("%s [%s]", tc.name, mode)
  1513  			uniqueTestString := fmt.Sprintf("global-mount-%s", testName)
  1514  			uniquePodDir := fmt.Sprintf("%s-%x", kubeletPodsDir, md5.Sum([]byte(uniqueTestString)))
  1515  			t.Run(testName, func(t *testing.T) {
  1516  				t.Parallel()
  1517  				pv := &v1.PersistentVolume{
  1518  					ObjectMeta: metav1.ObjectMeta{
  1519  						Name: tc.volumeName,
  1520  						UID:  "pvuid",
  1521  					},
  1522  					Spec: v1.PersistentVolumeSpec{
  1523  						ClaimRef:   &v1.ObjectReference{Name: "pvc"},
  1524  						VolumeMode: &mode,
  1525  					},
  1526  				}
  1527  				pvc := &v1.PersistentVolumeClaim{
  1528  					ObjectMeta: metav1.ObjectMeta{
  1529  						Name: "pvc",
  1530  						UID:  "pvcuid",
  1531  					},
  1532  					Spec: v1.PersistentVolumeClaimSpec{
  1533  						VolumeName: tc.volumeName,
  1534  						VolumeMode: &mode,
  1535  					},
  1536  				}
  1537  				pod := &v1.Pod{
  1538  					ObjectMeta: metav1.ObjectMeta{
  1539  						Name: "pod1",
  1540  						UID:  "pod1uid",
  1541  					},
  1542  					Spec: v1.PodSpec{
  1543  						Volumes: []v1.Volume{
  1544  							{
  1545  								Name: "volume-name",
  1546  								VolumeSource: v1.VolumeSource{
  1547  									PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
  1548  										ClaimName: pvc.Name,
  1549  									},
  1550  								},
  1551  							},
  1552  						},
  1553  					},
  1554  				}
  1555  
  1556  				node := &v1.Node{
  1557  					ObjectMeta: metav1.ObjectMeta{
  1558  						Name: string(nodeName),
  1559  					},
  1560  					Spec: v1.NodeSpec{},
  1561  					Status: v1.NodeStatus{
  1562  						VolumesAttached: []v1.AttachedVolume{
  1563  							{
  1564  								Name:       v1.UniqueVolumeName(fmt.Sprintf("fake-plugin/%s", tc.volumeName)),
  1565  								DevicePath: "fake/path",
  1566  							},
  1567  						},
  1568  					},
  1569  				}
  1570  				volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgrWithNode(t, node)
  1571  				fakePlugin.SupportsRemount = tc.supportRemount
  1572  				seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
  1573  
  1574  				dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
  1575  				asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
  1576  				kubeClient := createtestClientWithPVPVC(pv, pvc, v1.AttachedVolume{
  1577  					Name:       v1.UniqueVolumeName(fmt.Sprintf("fake-plugin/%s", tc.volumeName)),
  1578  					DevicePath: "fake/path",
  1579  				})
  1580  				fakeRecorder := &record.FakeRecorder{}
  1581  				fakeHandler := volumetesting.NewBlockVolumePathHandler()
  1582  				oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
  1583  					kubeClient,
  1584  					volumePluginMgr,
  1585  					fakeRecorder,
  1586  					fakeHandler))
  1587  
  1588  				reconciler := NewReconciler(
  1589  					kubeClient,
  1590  					true, /* controllerAttachDetachEnabled */
  1591  					reconcilerLoopSleepDuration,
  1592  					waitForAttachTimeout,
  1593  					nodeName,
  1594  					dsw,
  1595  					asw,
  1596  					hasAddedPods,
  1597  					oex,
  1598  					&mount.FakeMounter{},
  1599  					hostutil.NewFakeHostUtil(nil),
  1600  					volumePluginMgr,
  1601  					uniquePodDir)
  1602  				volumeSpec := &volume.Spec{PersistentVolume: pv}
  1603  				podName := util.GetUniquePodName(pod)
  1604  				volumeName, err := dsw.AddPodToVolume(
  1605  					podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)
  1606  				// Assert
  1607  				if err != nil {
  1608  					t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
  1609  				}
  1610  				dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{volumeName})
  1611  
  1612  				// Start the reconciler to fill ASW.
  1613  				stopChan, stoppedChan := make(chan struct{}), make(chan struct{})
  1614  				go func() {
  1615  					reconciler.Run(stopChan)
  1616  					close(stoppedChan)
  1617  				}()
  1618  				waitForVolumeToExistInASW(t, volumeName, asw)
  1619  				if tc.volumeName == volumetesting.TimeoutAndFailOnMountDeviceVolumeName {
  1620  					// Wait up to 10s for the reconciler to catch up
  1621  					time.Sleep(reconcilerSyncWaitDuration)
  1622  				}
  1623  
  1624  				if tc.volumeName == volumetesting.SuccessAndFailOnMountDeviceName ||
  1625  					tc.volumeName == volumetesting.SuccessAndTimeoutDeviceName {
  1626  					// wait for mount and then break it via remount
  1627  					waitForMount(t, fakePlugin, volumeName, asw)
  1628  					asw.MarkRemountRequired(podName)
  1629  					time.Sleep(reconcilerSyncWaitDuration)
  1630  				}
  1631  
  1632  				if tc.deviceState == operationexecutor.DeviceMountUncertain {
  1633  					waitForUncertainGlobalMount(t, volumeName, asw)
  1634  				}
  1635  
  1636  				if tc.deviceState == operationexecutor.DeviceGloballyMounted {
  1637  					waitForMount(t, fakePlugin, volumeName, asw)
  1638  				}
  1639  
  1640  				dsw.DeletePodFromVolume(podName, volumeName)
  1641  				waitForDetach(t, volumeName, asw)
  1642  				if mode == v1.PersistentVolumeFilesystem {
  1643  					err = volumetesting.VerifyUnmountDeviceCallCount(tc.unmountDeviceCallCount, fakePlugin)
  1644  				} else {
  1645  					if tc.unmountDeviceCallCount == 0 {
  1646  						err = volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin)
  1647  					} else {
  1648  						err = volumetesting.VerifyTearDownDeviceCallCount(tc.unmountDeviceCallCount, fakePlugin)
  1649  					}
  1650  				}
  1651  				if err != nil {
  1652  					t.Errorf("Error verifying device unmount/teardown call count: %v", err)
  1653  				}
  1654  			})
  1655  		}
  1656  	}
  1657  }
  1658  
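        // Test_UncertainVolumeMountState is the pod-level (SetUp) counterpart of
        // Test_UncertainDeviceGlobalMounts: it verifies how volume mount outcomes
        // are recorded in the actual state of world and how many unmount and
        // device-unmount calls each outcome should produce, including a case where
        // the mount succeeds but filesystem expansion fails.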
  1659  func Test_UncertainVolumeMountState(t *testing.T) {
  1660  	var tests = []struct {
  1661  		name                   string
  1662  		volumeState            operationexecutor.VolumeMountState
  1663  		unmountDeviceCallCount int
  1664  		unmountVolumeCount     int
  1665  		volumeName             string
  1666  		supportRemount         bool
  1667  		pvcStatusSize          resource.Quantity
  1668  		pvSize                 resource.Quantity
  1669  	}{
  1670  		{
  1671  			name:                   "timed out operations should result in volume marked as uncertain",
  1672  			volumeState:            operationexecutor.VolumeMountUncertain,
  1673  			unmountDeviceCallCount: 1,
  1674  			unmountVolumeCount:     1,
  1675  			volumeName:             volumetesting.TimeoutOnSetupVolumeName,
  1676  		},
  1677  		{
  1678  			name:                   "failed operation should result in not-mounted volume",
  1679  			volumeState:            operationexecutor.VolumeNotMounted,
  1680  			unmountDeviceCallCount: 1,
  1681  			unmountVolumeCount:     0,
  1682  			volumeName:             volumetesting.FailOnSetupVolumeName,
  1683  		},
  1684  		{
  1685  			name:                   "timeout followed by failed operation should result in non-mounted volume",
  1686  			volumeState:            operationexecutor.VolumeNotMounted,
  1687  			unmountDeviceCallCount: 1,
  1688  			unmountVolumeCount:     0,
  1689  			volumeName:             volumetesting.TimeoutAndFailOnSetupVolumeName,
  1690  		},
  1691  		{
  1692  			name:                   "success followed by timeout operation should result in mounted volume",
  1693  			volumeState:            operationexecutor.VolumeMounted,
  1694  			unmountDeviceCallCount: 1,
  1695  			unmountVolumeCount:     1,
  1696  			volumeName:             volumetesting.SuccessAndTimeoutSetupVolumeName,
  1697  			supportRemount:         true,
  1698  		},
  1699  		{
  1700  			name:                   "success followed by failed operation should result in mounted volume",
  1701  			volumeState:            operationexecutor.VolumeMounted,
  1702  			unmountDeviceCallCount: 1,
  1703  			unmountVolumeCount:     1,
  1704  			volumeName:             volumetesting.SuccessAndFailOnSetupVolumeName,
  1705  			supportRemount:         true,
  1706  		},
  1707  		{
  1708  			name:                   "mount success but fail to expand filesystem",
  1709  			volumeState:            operationexecutor.VolumeMountUncertain,
  1710  			unmountDeviceCallCount: 1,
  1711  			unmountVolumeCount:     1,
  1712  			volumeName:             volumetesting.FailVolumeExpansion,
  1713  			supportRemount:         true,
  1714  			pvSize:                 resource.MustParse("10G"),
  1715  			pvcStatusSize:          resource.MustParse("2G"),
  1716  		},
  1717  	}
  1718  	modes := []v1.PersistentVolumeMode{v1.PersistentVolumeBlock, v1.PersistentVolumeFilesystem}
  1719  
  1720  	for modeIndex := range modes {
  1721  		for tcIndex := range tests {
  1722  			mode := modes[modeIndex]
  1723  			tc := tests[tcIndex]
  1724  			testName := fmt.Sprintf("%s [%s]", tc.name, mode)
  1725  			uniqueTestString := fmt.Sprintf("local-mount-%s", testName)
  1726  			uniquePodDir := fmt.Sprintf("%s-%x", kubeletPodsDir, md5.Sum([]byte(uniqueTestString)))
  1727  			t.Run(testName, func(t *testing.T) {
  1728  				t.Parallel()
  1729  				pv := &v1.PersistentVolume{
  1730  					ObjectMeta: metav1.ObjectMeta{
  1731  						Name: tc.volumeName,
  1732  						UID:  "pvuid",
  1733  					},
  1734  					Spec: v1.PersistentVolumeSpec{
  1735  						ClaimRef:   &v1.ObjectReference{Name: "pvc"},
  1736  						VolumeMode: &mode,
  1737  					},
  1738  				}
  1739  				if tc.pvSize.CmpInt64(0) > 0 {
  1740  					pv.Spec.Capacity = v1.ResourceList{
  1741  						v1.ResourceStorage: tc.pvSize,
  1742  					}
  1743  				}
  1744  				pvc := &v1.PersistentVolumeClaim{
  1745  					ObjectMeta: metav1.ObjectMeta{
  1746  						Name: "pvc",
  1747  						UID:  "pvcuid",
  1748  					},
  1749  					Spec: v1.PersistentVolumeClaimSpec{
  1750  						VolumeName: tc.volumeName,
  1751  						VolumeMode: &mode,
  1752  					},
  1753  				}
  1754  				if tc.pvcStatusSize.CmpInt64(0) > 0 {
  1755  					pvc.Status = v1.PersistentVolumeClaimStatus{
  1756  						Capacity: v1.ResourceList{
  1757  							v1.ResourceStorage: tc.pvcStatusSize,
  1758  						},
  1759  					}
  1760  				}
  1761  				pod := &v1.Pod{
  1762  					ObjectMeta: metav1.ObjectMeta{
  1763  						Name: "pod1",
  1764  						UID:  "pod1uid",
  1765  					},
  1766  					Spec: v1.PodSpec{
  1767  						Volumes: []v1.Volume{
  1768  							{
  1769  								Name: "volume-name",
  1770  								VolumeSource: v1.VolumeSource{
  1771  									PersistentVolumeClaim: &v1.PersistentVolumeClaimVolumeSource{
  1772  										ClaimName: pvc.Name,
  1773  									},
  1774  								},
  1775  							},
  1776  						},
  1777  					},
  1778  				}
  1779  
  1780  				node := &v1.Node{
  1781  					ObjectMeta: metav1.ObjectMeta{
  1782  						Name: string(nodeName),
  1783  					},
  1784  					Status: v1.NodeStatus{
  1785  						VolumesAttached: []v1.AttachedVolume{
  1786  							{
  1787  								Name:       v1.UniqueVolumeName(fmt.Sprintf("fake-plugin/%s", tc.volumeName)),
  1788  								DevicePath: "fake/path",
  1789  							},
  1790  						},
  1791  					},
  1792  				}
  1793  
  1794  				volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgrWithNode(t, node)
  1795  				fakePlugin.SupportsRemount = tc.supportRemount
  1796  				seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
  1797  				dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
  1798  				asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
  1799  				kubeClient := createtestClientWithPVPVC(pv, pvc, v1.AttachedVolume{
  1800  					Name:       v1.UniqueVolumeName(fmt.Sprintf("fake-plugin/%s", tc.volumeName)),
  1801  					DevicePath: "fake/path",
  1802  				})
  1803  				fakeRecorder := &record.FakeRecorder{}
  1804  				fakeHandler := volumetesting.NewBlockVolumePathHandler()
  1805  				oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
  1806  					kubeClient,
  1807  					volumePluginMgr,
  1808  					fakeRecorder,
  1809  					fakeHandler))
  1810  
  1811  				reconciler := NewReconciler(
  1812  					kubeClient,
  1813  					true, /* controllerAttachDetachEnabled */
  1814  					reconcilerLoopSleepDuration,
  1815  					waitForAttachTimeout,
  1816  					nodeName,
  1817  					dsw,
  1818  					asw,
  1819  					hasAddedPods,
  1820  					oex,
  1821  					&mount.FakeMounter{},
  1822  					hostutil.NewFakeHostUtil(nil),
  1823  					volumePluginMgr,
  1824  					uniquePodDir)
  1825  				volumeSpec := &volume.Spec{PersistentVolume: pv}
  1826  				podName := util.GetUniquePodName(pod)
  1827  				volumeName, err := dsw.AddPodToVolume(
  1828  					podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)
  1829  				// Assert
  1830  				if err != nil {
  1831  					t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
  1832  				}
  1833  				dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{volumeName})
  1834  
  1835  				// Start the reconciler to fill ASW.
  1836  				stopChan, stoppedChan := make(chan struct{}), make(chan struct{})
  1837  				go func() {
  1838  					reconciler.Run(stopChan)
  1839  					close(stoppedChan)
  1840  				}()
  1841  				waitForVolumeToExistInASW(t, volumeName, asw)
  1842  				// all of these tests rely on the device being globally mounted; waiting for the
  1843  				// global mount ensures that UnmountDevice is called as expected.
  1844  				waitForGlobalMount(t, volumeName, asw)
  1845  				if tc.volumeName == volumetesting.TimeoutAndFailOnSetupVolumeName {
  1846  					// Wait up to 10s for the reconciler to catch up
  1847  					time.Sleep(reconcilerSyncWaitDuration)
  1848  				}
  1849  
  1850  				if tc.volumeName == volumetesting.SuccessAndFailOnSetupVolumeName ||
  1851  					tc.volumeName == volumetesting.SuccessAndTimeoutSetupVolumeName {
  1852  					// wait for mount and then break it via remount
  1853  					waitForMount(t, fakePlugin, volumeName, asw)
  1854  					asw.MarkRemountRequired(podName)
  1855  					time.Sleep(reconcilerSyncWaitDuration)
  1856  				}
  1857  
  1858  				if tc.volumeState == operationexecutor.VolumeMountUncertain {
  1859  					waitForUncertainPodMount(t, volumeName, podName, asw)
  1860  				}
  1861  
  1862  				if tc.volumeState == operationexecutor.VolumeMounted {
  1863  					waitForMount(t, fakePlugin, volumeName, asw)
  1864  				}
  1865  
  1866  				dsw.DeletePodFromVolume(podName, volumeName)
  1867  				waitForDetach(t, volumeName, asw)
  1868  
  1869  				if mode == v1.PersistentVolumeFilesystem {
  1870  					if err := volumetesting.VerifyUnmountDeviceCallCount(tc.unmountDeviceCallCount, fakePlugin); err != nil {
  1871  						t.Errorf("Error verifying UnmountDeviceCallCount: %v", err)
  1872  					}
  1873  					if err := volumetesting.VerifyTearDownCallCount(tc.unmountVolumeCount, fakePlugin); err != nil {
  1874  						t.Errorf("Error verifying TearDownCallCount: %v", err)
  1875  					}
  1876  				} else {
  1877  					if tc.unmountVolumeCount == 0 {
  1878  						if err := volumetesting.VerifyZeroUnmapPodDeviceCallCount(fakePlugin); err != nil {
  1879  							t.Errorf("Error verifying UnmapPodDeviceCallCount: %v", err)
  1880  						}
  1881  					} else {
  1882  						if err := volumetesting.VerifyUnmapPodDeviceCallCount(tc.unmountVolumeCount, fakePlugin); err != nil {
  1883  							t.Errorf("Error verifying UnmapPodDeviceCallCount: %v", err)
  1884  						}
  1885  					}
  1886  					if tc.unmountDeviceCallCount == 0 {
  1887  						if err := volumetesting.VerifyZeroTearDownDeviceCallCount(fakePlugin); err != nil {
  1888  							t.Errorf("Error verifying TearDownDeviceCallCount: %v", err)
  1889  						}
  1890  					} else {
  1891  						if err := volumetesting.VerifyTearDownDeviceCallCount(tc.unmountDeviceCallCount, fakePlugin); err != nil {
  1892  							t.Errorf("Error verifying TearDownDeviceCallCount: %v", err)
  1893  						}
  1894  					}
  1895  				}
  1896  			})
  1897  		}
  1898  	}
  1899  }
  1900  
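        // The waitFor* helpers below poll the actual state of world with
        // exponential backoff until the expected condition is observed, failing
        // the test if the backoff is exhausted first.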
  1901  func waitForUncertainGlobalMount(t *testing.T, volumeName v1.UniqueVolumeName, asw cache.ActualStateOfWorld) {
  1902  	// check if volume is globally mounted in uncertain state
  1903  	err := retryWithExponentialBackOff(
  1904  		testOperationBackOffDuration,
  1905  		func() (bool, error) {
  1906  			unmountedVolumes := asw.GetUnmountedVolumes()
  1907  			for _, v := range unmountedVolumes {
  1908  				if v.VolumeName == volumeName && v.DeviceMountState == operationexecutor.DeviceMountUncertain {
  1909  					return true, nil
  1910  				}
  1911  			}
  1912  			return false, nil
  1913  		},
  1914  	)
  1915  
  1916  	if err != nil {
  1917  		t.Fatalf("expected volume %s to be globally mounted in uncertain state", volumeName)
  1918  	}
  1919  }
  1920  
  1921  func waitForGlobalMount(t *testing.T, volumeName v1.UniqueVolumeName, asw cache.ActualStateOfWorld) {
  1922  	// check if volume is globally mounted
  1923  	err := retryWithExponentialBackOff(
  1924  		testOperationBackOffDuration,
  1925  		func() (bool, error) {
  1926  			mountedVolumes := asw.GetGloballyMountedVolumes()
  1927  			for _, v := range mountedVolumes {
  1928  				if v.VolumeName == volumeName {
  1929  					return true, nil
  1930  				}
  1931  			}
  1932  			return false, nil
  1933  		},
  1934  	)
  1935  
  1936  	if err != nil {
  1937  		t.Fatalf("expected volume devices %s to be mounted globally", volumeName)
  1938  	}
  1939  }
  1940  
  1941  func waitForUncertainPodMount(t *testing.T, volumeName v1.UniqueVolumeName, podName types.UniquePodName, asw cache.ActualStateOfWorld) {
  1942  	// check if volume is locally pod mounted in uncertain state
  1943  	err := retryWithExponentialBackOff(
  1944  		testOperationBackOffDuration,
  1945  		func() (bool, error) {
  1946  			mounted, _, err := asw.PodExistsInVolume(podName, volumeName, resource.Quantity{}, "" /* SELinuxContext */)
  1947  			if mounted || err != nil {
  1948  				return false, nil
  1949  			}
  1950  			allMountedVolumes := asw.GetAllMountedVolumes()
  1951  			for _, v := range allMountedVolumes {
  1952  				if v.VolumeName == volumeName {
  1953  					return true, nil
  1954  				}
  1955  			}
  1956  			return false, nil
  1957  		},
  1958  	)
  1959  
  1960  	if err != nil {
  1961  		t.Fatalf("expected volume %s to be mounted in uncertain state for the pod", volumeName)
  1962  	}
  1963  }
  1964  
  1965  func waitForMount(
  1966  	t *testing.T,
  1967  	fakePlugin *volumetesting.FakeVolumePlugin,
  1968  	volumeName v1.UniqueVolumeName,
  1969  	asw cache.ActualStateOfWorld) {
  1970  	err := retryWithExponentialBackOff(
  1971  		testOperationBackOffDuration,
  1972  		func() (bool, error) {
  1973  			mountedVolumes := asw.GetMountedVolumes()
  1974  			for _, mountedVolume := range mountedVolumes {
  1975  				if mountedVolume.VolumeName == volumeName {
  1976  					return true, nil
  1977  				}
  1978  			}
  1979  
  1980  			return false, nil
  1981  		},
  1982  	)
  1983  
  1984  	if err != nil {
  1985  		t.Fatalf("Timed out waiting for volume %q to be mounted.", volumeName)
  1986  	}
  1987  }
  1988  
  1989  func waitForVolumeToExistInASW(t *testing.T, volumeName v1.UniqueVolumeName, asw cache.ActualStateOfWorld) {
  1990  	err := retryWithExponentialBackOff(
  1991  		testOperationBackOffDuration,
  1992  		func() (bool, error) {
  1993  			if asw.VolumeExists(volumeName) {
  1994  				return true, nil
  1995  			}
  1996  			return false, nil
  1997  		},
  1998  	)
  1999  	if err != nil {
  2000  		t.Fatalf("Timed out waiting for volume %q to exist in asw.", volumeName)
  2001  	}
  2002  }
  2003  
  2004  func waitForDetach(
  2005  	t *testing.T,
  2006  	volumeName v1.UniqueVolumeName,
  2007  	asw cache.ActualStateOfWorld) {
  2008  	err := retryWithExponentialBackOff(
  2009  		testOperationBackOffDuration,
  2010  		func() (bool, error) {
  2011  			if asw.VolumeExists(volumeName) {
  2012  				return false, nil
  2013  			}
  2014  
  2015  			return true, nil
  2016  		},
  2017  	)
  2018  
  2019  	if err != nil {
  2020  		t.Fatalf("Timed out waiting for volume %q to be detached.", volumeName)
  2021  	}
  2022  }
  2023  
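        // retryWithExponentialBackOff retries fn until it reports done, sleeping
        // between attempts with a 3x backoff over 6 steps. With the usual
        // testOperationBackOffDuration of 100ms that is roughly 100ms, 300ms,
        // 900ms, 2.7s and 8.1s between attempts, i.e. about 12s in total.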
  2024  func retryWithExponentialBackOff(initialDuration time.Duration, fn wait.ConditionFunc) error {
  2025  	backoff := wait.Backoff{
  2026  		Duration: initialDuration,
  2027  		Factor:   3,
  2028  		Jitter:   0,
  2029  		Steps:    6,
  2030  	}
  2031  	return wait.ExponentialBackoff(backoff, fn)
  2032  }
  2033  
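        // createTestClient returns a fake clientset whose "get nodes" reactor
        // serves a node with the given attached volumes (defaulting to a single
        // fake-plugin/fake-device1 entry). Reactors are matched in registration
        // order, so the trailing catch-all turns any other API call into an error.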
  2034  func createTestClient(attachedVolumes ...v1.AttachedVolume) *fake.Clientset {
  2035  	fakeClient := &fake.Clientset{}
  2036  	if len(attachedVolumes) == 0 {
  2037  		attachedVolumes = append(attachedVolumes, v1.AttachedVolume{
  2038  			Name:       "fake-plugin/fake-device1",
  2039  			DevicePath: "fake/path",
  2040  		})
  2041  	}
  2042  	fakeClient.AddReactor("get", "nodes",
  2043  		func(action core.Action) (bool, runtime.Object, error) {
  2044  			return true, &v1.Node{
  2045  				ObjectMeta: metav1.ObjectMeta{Name: string(nodeName)},
  2046  				Status: v1.NodeStatus{
  2047  					VolumesAttached: attachedVolumes,
  2048  				},
  2049  			}, nil
  2050  		},
  2051  	)
  2052  
  2053  	fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
  2054  		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
  2055  	})
  2056  	return fakeClient
  2057  }
  2058  
  2059  func runReconciler(reconciler Reconciler) {
  2060  	go reconciler.Run(wait.NeverStop)
  2061  }
  2062  
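        // createtestClientWithPVPVC is like createTestClient but additionally
        // serves the given PV and PVC on "get", and accepts status patches to the
        // PVC so that expansion tests can update claim status.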
  2063  func createtestClientWithPVPVC(pv *v1.PersistentVolume, pvc *v1.PersistentVolumeClaim, attachedVolumes ...v1.AttachedVolume) *fake.Clientset {
  2064  	fakeClient := &fake.Clientset{}
  2065  	if len(attachedVolumes) == 0 {
  2066  		attachedVolumes = append(attachedVolumes, v1.AttachedVolume{
  2067  			Name:       "fake-plugin/pv",
  2068  			DevicePath: "fake/path",
  2069  		})
  2070  	}
  2071  	fakeClient.AddReactor("get", "nodes",
  2072  		func(action core.Action) (bool, runtime.Object, error) {
  2073  			return true, &v1.Node{
  2074  				ObjectMeta: metav1.ObjectMeta{Name: string(nodeName)},
  2075  				Status: v1.NodeStatus{
  2076  					VolumesAttached: attachedVolumes,
  2077  				},
  2078  			}, nil
  2079  		})
  2080  	fakeClient.AddReactor("get", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
  2081  		return true, pvc, nil
  2082  	})
  2083  	fakeClient.AddReactor("get", "persistentvolumes", func(action core.Action) (bool, runtime.Object, error) {
  2084  		return true, pv, nil
  2085  	})
  2086  	fakeClient.AddReactor("patch", "persistentvolumeclaims", func(action core.Action) (bool, runtime.Object, error) {
  2087  		if action.GetSubresource() == "status" {
  2088  			return true, pvc, nil
  2089  		}
  2090  		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
  2091  	})
  2092  	fakeClient.AddReactor("*", "*", func(action core.Action) (bool, runtime.Object, error) {
  2093  		return true, nil, fmt.Errorf("no reaction implemented for %s", action)
  2094  	})
  2095  	return fakeClient
  2096  }
  2097  
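        // Test_Run_Positive_VolumeMountControllerAttachEnabledRace reproduces a
        // race between unmounting and re-mounting a volume: while UnmountDevice is
        // in flight the pod is added back to the DSW, and the test then asserts
        // that the subsequent WaitForAttach still receives the devicePath from
        // Node.Status rather than an empty path cleared by the unmount.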
  2098  func Test_Run_Positive_VolumeMountControllerAttachEnabledRace(t *testing.T) {
  2099  	// Arrange
  2100  	node := &v1.Node{
  2101  		ObjectMeta: metav1.ObjectMeta{
  2102  			Name: string(nodeName),
  2103  		},
  2104  		Status: v1.NodeStatus{
  2105  			VolumesAttached: []v1.AttachedVolume{
  2106  				{
  2107  					Name:       "fake-plugin/fake-device1",
  2108  					DevicePath: "/fake/path",
  2109  				},
  2110  			},
  2111  		},
  2112  	}
  2113  	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgrWithNode(t, node)
  2114  	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
  2115  
  2116  	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
  2117  	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
  2118  	kubeClient := createTestClient()
  2119  	fakeRecorder := &record.FakeRecorder{}
  2120  	fakeHandler := volumetesting.NewBlockVolumePathHandler()
  2121  	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
  2122  		kubeClient,
  2123  		volumePluginMgr,
  2124  		fakeRecorder,
  2125  		fakeHandler))
  2126  	reconciler := NewReconciler(
  2127  		kubeClient,
  2128  		true, /* controllerAttachDetachEnabled */
  2129  		reconcilerLoopSleepDuration,
  2130  		waitForAttachTimeout,
  2131  		nodeName,
  2132  		dsw,
  2133  		asw,
  2134  		hasAddedPods,
  2135  		oex,
  2136  		mount.NewFakeMounter(nil),
  2137  		hostutil.NewFakeHostUtil(nil),
  2138  		volumePluginMgr,
  2139  		kubeletPodsDir)
  2140  	pod := &v1.Pod{
  2141  		ObjectMeta: metav1.ObjectMeta{
  2142  			Name: "pod1",
  2143  			UID:  "pod1uid",
  2144  		},
  2145  		Spec: v1.PodSpec{
  2146  			Volumes: []v1.Volume{
  2147  				{
  2148  					Name: "volume-name",
  2149  					VolumeSource: v1.VolumeSource{
  2150  						GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
  2151  							PDName: "fake-device1",
  2152  						},
  2153  					},
  2154  				},
  2155  			},
  2156  		},
  2157  	}
  2158  
  2159  	// Some steps execute out of order in callbacks; follow the numbered comments.
  2160  
  2161  	// 1. Add a volume to DSW and wait until it's mounted
  2162  	volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
  2163  	// copy before reconciler runs to avoid data race.
  2164  	volumeSpecCopy := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
  2165  	podName := util.GetUniquePodName(pod)
  2166  	generatedVolumeName, err := dsw.AddPodToVolume(
  2167  		podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)
  2168  	dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeName})
  2169  
  2170  	if err != nil {
  2171  		t.Fatalf("AddPodToVolume failed. Expected: <no error> Actual: <%v>", err)
  2172  	}
  2173  	// Start the reconciler to fill ASW.
  2174  	stopChan, stoppedChan := make(chan struct{}), make(chan struct{})
  2175  	go func() {
  2176  		reconciler.Run(stopChan)
  2177  		close(stoppedChan)
  2178  	}()
  2179  	waitForMount(t, fakePlugin, generatedVolumeName, asw)
  2180  	// Stop the reconciler.
  2181  	close(stopChan)
  2182  	<-stoppedChan
  2183  
  2184  	finished := make(chan interface{})
  2185  	fakePlugin.Lock()
  2186  	fakePlugin.UnmountDeviceHook = func(mountPath string) error {
  2187  		// Act:
  2188  		// 3. While a volume is being unmounted, add it back to the desired state of world
  2189  		klog.InfoS("UnmountDevice called")
  2190  		var generatedVolumeNameCopy v1.UniqueVolumeName
  2191  		generatedVolumeNameCopy, err = dsw.AddPodToVolume(
  2192  			podName, pod, volumeSpecCopy, volumeSpec.Name(), "" /* volumeGidValue */, nil /* seLinuxLabel */)
  2193  		dsw.MarkVolumesReportedInUse([]v1.UniqueVolumeName{generatedVolumeNameCopy})
  2194  		return nil
  2195  	}
  2196  
  2197  	fakePlugin.WaitForAttachHook = func(spec *volume.Spec, devicePath string, pod *v1.Pod, spectimeout time.Duration) (string, error) {
  2198  		// Assert
  2199  		// 4. When the volume is mounted again, expect that UnmountDevice operation did not clear devicePath
  2200  		if devicePath == "" {
  2201  			klog.ErrorS(nil, "Expected WaitForAttach called with devicePath from Node.Status")
  2202  			close(finished)
  2203  			return "", fmt.Errorf("expected devicePath from Node.Status")
  2204  		}
  2205  		close(finished)
  2206  		return devicePath, nil
  2207  	}
  2208  	fakePlugin.Unlock()
  2209  
  2210  	// Start the reconciler again.
  2211  	go reconciler.Run(wait.NeverStop)
  2212  
  2213  	// 2. Delete the volume from DSW (and wait for callbacks)
  2214  	dsw.DeletePodFromVolume(podName, generatedVolumeName)
  2215  
  2216  	<-finished
  2217  	waitForMount(t, fakePlugin, generatedVolumeName, asw)
  2218  }
  2219  
  2220  func getFakeNode() *v1.Node {
  2221  	return &v1.Node{
  2222  		ObjectMeta: metav1.ObjectMeta{
  2223  			Name: string(nodeName),
  2224  		},
  2225  		Status: v1.NodeStatus{
  2226  			VolumesAttached: []v1.AttachedVolume{
  2227  				{
  2228  					Name:       "fake-plugin/fake-device1",
  2229  					DevicePath: "/fake/path",
  2230  				},
  2231  			},
  2232  		},
  2233  	}
  2234  }
  2235  
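        // getInlineFakePod builds a pod fixture with a single inline
        // GCEPersistentDisk volume; outerName is the pod-level volume name and
        // innerName is the PD name used by the plugin.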
  2236  func getInlineFakePod(podName, podUUID, outerName, innerName string) *v1.Pod {
  2237  	pod := &v1.Pod{
  2238  		ObjectMeta: metav1.ObjectMeta{
  2239  			Name: podName,
  2240  			UID:  k8stypes.UID(podUUID),
  2241  		},
  2242  		Spec: v1.PodSpec{
  2243  			Volumes: []v1.Volume{
  2244  				{
  2245  					Name: outerName,
  2246  					VolumeSource: v1.VolumeSource{
  2247  						GCEPersistentDisk: &v1.GCEPersistentDiskVolumeSource{
  2248  							PDName: innerName,
  2249  						},
  2250  					},
  2251  				},
  2252  			},
  2253  		},
  2254  	}
  2255  	return pod
  2256  }
  2257  
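        // getReconciler wires up a reconciler rooted at kubeletDir whose fake
        // mounter pre-populates the given volumePaths as existing mount points;
        // state reconstruction in TestSyncStates scans <kubeletDir>/pods.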
  2258  func getReconciler(kubeletDir string, t *testing.T, volumePaths []string, kubeClient *fake.Clientset) (Reconciler, *volumetesting.FakeVolumePlugin) {
  2259  	node := getFakeNode()
  2260  	volumePluginMgr, fakePlugin := volumetesting.GetTestKubeletVolumePluginMgrWithNodeAndRoot(t, node, kubeletDir)
  2261  	tmpKubeletPodDir := filepath.Join(kubeletDir, "pods")
  2262  	seLinuxTranslator := util.NewFakeSELinuxLabelTranslator()
  2263  
  2264  	dsw := cache.NewDesiredStateOfWorld(volumePluginMgr, seLinuxTranslator)
  2265  	asw := cache.NewActualStateOfWorld(nodeName, volumePluginMgr)
  2266  	if kubeClient == nil {
  2267  		kubeClient = createTestClient()
  2268  	}
  2269  
  2270  	fakeRecorder := &record.FakeRecorder{}
  2271  	fakeHandler := volumetesting.NewBlockVolumePathHandler()
  2272  	oex := operationexecutor.NewOperationExecutor(operationexecutor.NewOperationGenerator(
  2273  		kubeClient,
  2274  		volumePluginMgr,
  2275  		fakeRecorder,
  2276  		fakeHandler))
  2277  	mountPoints := []mount.MountPoint{}
  2278  	for _, volumePath := range volumePaths {
  2279  		mountPoints = append(mountPoints, mount.MountPoint{Path: volumePath})
  2280  	}
  2281  	rc := NewReconciler(
  2282  		kubeClient,
  2283  		true, /* controllerAttachDetachEnabled */
  2284  		reconcilerLoopSleepDuration,
  2285  		waitForAttachTimeout,
  2286  		nodeName,
  2287  		dsw,
  2288  		asw,
  2289  		hasAddedPods,
  2290  		oex,
  2291  		mount.NewFakeMounter(mountPoints),
  2292  		hostutil.NewFakeHostUtil(nil),
  2293  		volumePluginMgr,
  2294  		tmpKubeletPodDir)
  2295  	return rc, fakePlugin
  2296  }
  2297  
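        // TestSyncStates exercises reconciler.syncStates, which reconstructs
        // volumes from the per-pod directories on disk and feeds them into the
        // actual state of world. The cases cover pods that were deleted, pods
        // still present in the DSW (tracked in skippedDuringReconstruction),
        // reconstruction failures, and mount points that no longer exist.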
  2298  func TestSyncStates(t *testing.T) {
  2299  	type podInfo struct {
  2300  		podName         string
  2301  		podUID          string
  2302  		outerVolumeName string
  2303  		innerVolumeName string
  2304  	}
  2305  	defaultPodInfo := podInfo{
  2306  		podName:         "pod1",
  2307  		podUID:          "pod1uid",
  2308  		outerVolumeName: "volume-name",
  2309  		innerVolumeName: "volume-name",
  2310  	}
  2311  	tests := []struct {
  2312  		name                 string
  2313  		volumePaths          []string
  2314  		createMountPoint     bool
  2315  		podInfos             []podInfo
  2316  		postSyncStateCallback func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error
  2317  		verifyFunc           func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error
  2318  	}{
  2319  		{
  2320  			name: "when two pods are using the same volume and both are deleted",
  2321  			volumePaths: []string{
  2322  				filepath.Join("pod1", "volumes", "fake-plugin", "pvc-abcdef"),
  2323  				filepath.Join("pod2", "volumes", "fake-plugin", "pvc-abcdef"),
  2324  			},
  2325  			createMountPoint: true,
  2326  			podInfos:         []podInfo{},
  2327  			verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
  2328  				mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
  2329  				if len(mountedPods) != 2 {
  2330  					return fmt.Errorf("expected 2 pods in asw, got %d", len(mountedPods))
  2331  				}
  2332  				return nil
  2333  			},
  2334  		},
  2335  		{
  2336  			name: "when two pods are using the same volume and one of them is deleted",
  2337  			volumePaths: []string{
  2338  				filepath.Join("pod1uid", "volumes", "fake-plugin", "volume-name"),
  2339  				filepath.Join("pod2uid", "volumes", "fake-plugin", "volume-name"),
  2340  			},
  2341  			createMountPoint: true,
  2342  			podInfos: []podInfo{
  2343  				{
  2344  					podName:         "pod2",
  2345  					podUID:          "pod2uid",
  2346  					outerVolumeName: "volume-name",
  2347  					innerVolumeName: "volume-name",
  2348  				},
  2349  			},
  2350  			verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
  2351  				// for a pod that has been deleted, the volume is still considered mounted
  2352  				mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
  2353  				if len(mountedPods) != 1 {
  2354  					return fmt.Errorf("expected 1 pod in asw, got %d", len(mountedPods))
  2355  				}
  2356  				if types.UniquePodName("pod1uid") != mountedPods[0].PodName {
  2357  					return fmt.Errorf("expected mounted pod to be %s got %s", "pod1uid", mountedPods[0].PodName)
  2358  				}
  2359  
  2360  				// for a pod that is still in the dsw, the volume is recorded in skippedDuringReconstruction
  2361  				skippedVolumes := rcInstance.skippedDuringReconstruction
  2362  				if len(skippedVolumes) != 1 {
  2363  					return fmt.Errorf("expected 1 volume in skippedDuringReconstruction, got %d", len(skippedVolumes))
  2364  				}
  2365  				if skippedVolumes["fake-plugin/volume-name"] == nil {
  2366  					return fmt.Errorf("expected %s is in skippedDuringReconstruction, got %+v", "fake-plugin/volume-name", skippedVolumes)
  2367  				}
  2368  				return nil
  2369  			},
  2370  		},
  2371  		{
  2372  			name: "when reconstruction fails for a volume, volumes should be cleaned up",
  2373  			volumePaths: []string{
  2374  				filepath.Join("pod1", "volumes", "fake-plugin", volumetesting.FailNewMounter),
  2375  			},
  2376  			createMountPoint: true,
  2377  			podInfos:         []podInfo{},
  2378  			verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
  2379  				return retryWithExponentialBackOff(reconcilerSyncWaitDuration, func() (bool, error) {
  2380  					err := volumetesting.VerifyTearDownCallCount(1, fakePlugin)
  2381  					if err != nil {
  2382  						return false, nil
  2383  					}
  2384  					return true, nil
  2385  				})
  2386  			},
  2387  		},
  2388  		{
  2389  			name: "when mount point does not exist, reconstruction should not fail, volumes should be added in asw",
  2390  			volumePaths: []string{
  2391  				filepath.Join("pod1", "volumes", "fake-plugin", "pvc-abcdef"),
  2392  			},
  2393  			createMountPoint: false,
  2394  			podInfos:         []podInfo{},
  2395  			verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
  2396  				mountedPods := rcInstance.actualStateOfWorld.GetMountedVolumes()
  2397  				if len(mountedPods) != 1 {
  2398  					return fmt.Errorf("expected 1 pod in asw, got %d", len(mountedPods))
  2399  				}
  2400  				return nil
  2401  			},
  2402  		},
  2403  		{
  2404  			name: "when mount point does not exist, reconstruction should not fail, if volume exists in dsw, volume should be recorded in skipped during reconstruction",
  2405  			volumePaths: []string{
  2406  				filepath.Join("pod1uid", "volumes", "fake-plugin", "volume-name"),
  2407  			},
  2408  			createMountPoint: false,
  2409  			podInfos:         []podInfo{defaultPodInfo},
  2410  			postSyncStateCallback: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
  2411  				skippedVolumes := rcInstance.skippedDuringReconstruction
  2412  				if len(skippedVolumes) != 1 {
  2413  					return fmt.Errorf("expected 1 volume in skippedDuringReconstruction, got %d", len(skippedVolumes))
  2414  				}
  2415  				rcInstance.processReconstructedVolumes()
  2416  				return nil
  2417  			},
  2418  			verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
  2419  				mountedPods := rcInstance.actualStateOfWorld.GetAllMountedVolumes()
  2420  				if len(mountedPods) != 1 {
  2421  					return fmt.Errorf("expected 1 pod in the mounted volume list, got %d", len(mountedPods))
  2422  				}
  2423  				mountedPodVolume := mountedPods[0]
  2424  				addedViaReconstruction := rcInstance.actualStateOfWorld.IsVolumeReconstructed(mountedPodVolume.VolumeName, mountedPodVolume.PodName)
  2425  				if !addedViaReconstruction {
  2426  					return fmt.Errorf("expected volume %s to be marked as added via reconstruction", mountedPodVolume.VolumeName)
  2427  				}
  2428  
  2429  				// check device mount state
  2430  				attachedVolumes := rcInstance.actualStateOfWorld.GetAttachedVolumes()
  2431  				if len(attachedVolumes) != 1 {
  2432  					return fmt.Errorf("expected 1 attached volume, got %d", len(attachedVolumes))
  2433  				}
  2434  				firstAttachedVolume := attachedVolumes[0]
  2435  				if !firstAttachedVolume.DeviceMayBeMounted() {
  2436  					return fmt.Errorf("expected %s volume to be mounted in uncertain state", firstAttachedVolume.VolumeName)
  2437  				}
  2438  
  2439  				// also skippedVolumes map should be empty
  2440  				skippedVolumes := rcInstance.skippedDuringReconstruction
  2441  				if len(skippedVolumes) > 0 {
  2442  					return fmt.Errorf("expected 0 volumes in skippedDuringReconstruction, found %d", len(skippedVolumes))
  2443  				}
  2444  				return nil
  2445  			},
  2446  		},
  2447  		{
  2448  			name: "when volume exists in dsw, volume should be recorded in skipped during reconstruction",
  2449  			volumePaths: []string{
  2450  				filepath.Join("pod1uid", "volumes", "fake-plugin", "volume-name"),
  2451  			},
  2452  			createMountPoint: true,
  2453  			podInfos:         []podInfo{defaultPodInfo},
  2454  			postSyncStateCallback: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
  2455  				skippedVolumes := rcInstance.skippedDuringReconstruction
  2456  				if len(skippedVolumes) != 1 {
  2457  					return fmt.Errorf("expected 1 volume in skippedDuringReconstruction, got %d", len(skippedVolumes))
  2458  				}
  2459  				rcInstance.processReconstructedVolumes()
  2460  				return nil
  2461  			},
  2462  			verifyFunc: func(rcInstance *reconciler, fakePlugin *volumetesting.FakeVolumePlugin) error {
  2463  				mountedPods := rcInstance.actualStateOfWorld.GetAllMountedVolumes()
  2464  				if len(mountedPods) != 1 {
  2465  					return fmt.Errorf("expected 1 pod in the mounted volume list, got %d", len(mountedPods))
  2466  				}
  2467  				mountedPodVolume := mountedPods[0]
  2468  				addedViaReconstruction := rcInstance.actualStateOfWorld.IsVolumeReconstructed(mountedPodVolume.VolumeName, mountedPodVolume.PodName)
  2469  				if !addedViaReconstruction {
  2470  					return fmt.Errorf("expected volume %s to be marked as added via reconstruction", mountedPodVolume.VolumeName)
  2471  				}
  2472  
  2473  				// check device mount state
  2474  				attachedVolumes := rcInstance.actualStateOfWorld.GetAttachedVolumes()
  2475  				if len(attachedVolumes) != 1 {
  2476  					return fmt.Errorf("expected 1 attached volume, got %d", len(attachedVolumes))
  2477  				}
  2478  				firstAttachedVolume := attachedVolumes[0]
  2479  				if !firstAttachedVolume.DeviceMayBeMounted() {
  2480  					return fmt.Errorf("expected %s volume to be mounted in uncertain state", firstAttachedVolume.VolumeName)
  2481  				}
  2482  
  2483  				// also skippedVolumes map should be empty
  2484  				skippedVolumes := rcInstance.skippedDuringReconstruction
  2485  				if len(skippedVolumes) > 0 {
  2486  					return fmt.Errorf("expected 0 volumes in skippedDuringReconstruction, found %d", len(skippedVolumes))
  2487  				}
  2488  				return nil
  2489  			},
  2490  		},
  2491  	}
  2492  	for _, tc := range tests {
  2493  		t.Run(tc.name, func(t *testing.T) {
  2494  			tmpKubeletDir, err := os.MkdirTemp("", "")
  2495  			if err != nil {
  2496  				t.Fatalf("can't make a temp directory for kubeletPods: %v", err)
  2497  			}
  2498  			defer os.RemoveAll(tmpKubeletDir)
  2499  
  2500  			// create kubelet pod directory
  2501  			tmpKubeletPodDir := filepath.Join(tmpKubeletDir, "pods")
  2502  			os.MkdirAll(tmpKubeletPodDir, 0755)
  2503  
  2504  			mountPaths := []string{}
  2505  
  2506  			// create pod and volume directories so the reconciler can find them.
  2507  			for _, volumePath := range tc.volumePaths {
  2508  				vp := filepath.Join(tmpKubeletPodDir, volumePath)
  2509  				if tc.createMountPoint {
  2510  					mountPaths = append(mountPaths, vp)
  2511  				}
  2512  				os.MkdirAll(vp, 0755)
  2513  			}
  2514  
  2515  			rc, fakePlugin := getReconciler(tmpKubeletDir, t, mountPaths, nil /*custom kubeclient*/)
  2516  			rcInstance, _ := rc.(*reconciler)
  2517  			logger, _ := ktesting.NewTestContext(t)
  2518  			for _, tpodInfo := range tc.podInfos {
  2519  				pod := getInlineFakePod(tpodInfo.podName, tpodInfo.podUID, tpodInfo.outerVolumeName, tpodInfo.innerVolumeName)
  2520  				volumeSpec := &volume.Spec{Volume: &pod.Spec.Volumes[0]}
  2521  				podName := util.GetUniquePodName(pod)
  2522  				volumeName, err := rcInstance.desiredStateOfWorld.AddPodToVolume(
  2523  					podName, pod, volumeSpec, volumeSpec.Name(), "" /* volumeGidValue */, nil /* SELinuxContext */)
  2524  				if err != nil {
  2525  					t.Fatalf("error adding volume %s to dsw: %v", volumeSpec.Name(), err)
  2526  				}
  2527  				rcInstance.actualStateOfWorld.MarkVolumeAsAttached(logger, volumeName, volumeSpec, nodeName, "")
  2528  			}
  2529  
  2530  			rcInstance.syncStates(tmpKubeletPodDir)
  2531  			if tc.postSyncStateCallback != nil {
  2532  				err := tc.postSyncStateCallback(rcInstance, fakePlugin)
  2533  				if err != nil {
  2534  					t.Errorf("test %s, postSyncStateCallback failed: %v", tc.name, err)
  2535  				}
  2536  			}
  2536  			}
  2537  
  2538  			if err := tc.verifyFunc(rcInstance, fakePlugin); err != nil {
  2539  				t.Errorf("test %s failed: %v", tc.name, err)
  2540  			}
  2541  		})
  2542  	}
  2543  }