github.com/Ilhicas/nomad@v1.0.4-0.20210304152020-e86851182bc3/client/pluginmanager/csimanager/volume_test.go (about)

     1  package csimanager
     2  
     3  import (
     4  	"context"
     5  	"errors"
     6  	"io/ioutil"
     7  	"os"
     8  	"runtime"
     9  	"testing"
    10  
    11  	"github.com/hashicorp/nomad/helper/mount"
    12  	"github.com/hashicorp/nomad/helper/testlog"
    13  	"github.com/hashicorp/nomad/nomad/mock"
    14  	"github.com/hashicorp/nomad/nomad/structs"
    15  	"github.com/hashicorp/nomad/plugins/csi"
    16  	csifake "github.com/hashicorp/nomad/plugins/csi/fake"
    17  	"github.com/stretchr/testify/require"
    18  )
    19  
    20  func tmpDir(t testing.TB) string {
    21  	t.Helper()
    22  	dir, err := ioutil.TempDir("", "nomad")
    23  	require.NoError(t, err)
    24  	return dir
    25  }
    26  
    27  func checkMountSupport() bool {
    28  	path, err := os.Getwd()
    29  	if err != nil {
    30  		return false
    31  	}
    32  
    33  	m := mount.New()
    34  	_, err = m.IsNotAMountPoint(path)
    35  	return err == nil
    36  }
    37  
// TestVolumeManager_ensureStagingDir verifies that ensureStagingDir creates
// the per-volume staging directory when it does not exist, tolerates a
// pre-existing directory, and reports whether a mount was already detected
// at that path.
func TestVolumeManager_ensureStagingDir(t *testing.T) {
	if !checkMountSupport() {
		t.Skip("mount point detection not supported for this platform")
	}
	t.Parallel()

	cases := []struct {
		Name         string
		Volume       *structs.CSIVolume
		UsageOptions *UsageOptions
		// CreateDirAheadOfTime pre-creates the staging directory before the
		// call under test; MountDirAheadOfTime additionally requires a mount
		// at that directory (and implies CreateDirAheadOfTime).
		CreateDirAheadOfTime bool
		MountDirAheadOfTime  bool

		ExpectedErr        error
		ExpectedMountState bool
	}{
		{
			Name:         "Creates a directory when one does not exist",
			Volume:       &structs.CSIVolume{ID: "foo"},
			UsageOptions: &UsageOptions{},
		},
		{
			Name:                 "Does not fail because of a pre-existing directory",
			Volume:               &structs.CSIVolume{ID: "foo"},
			UsageOptions:         &UsageOptions{},
			CreateDirAheadOfTime: true,
		},
		{
			Name:         "Returns negative mount info",
			UsageOptions: &UsageOptions{},
			Volume:       &structs.CSIVolume{ID: "foo"},
		},
		{
			Name:                 "Returns positive mount info",
			Volume:               &structs.CSIVolume{ID: "foo"},
			UsageOptions:         &UsageOptions{},
			CreateDirAheadOfTime: true,
			MountDirAheadOfTime:  true,
			ExpectedMountState:   true,
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			// Step 1: Validate that the test case makes sense
			if !tc.CreateDirAheadOfTime && tc.MountDirAheadOfTime {
				require.Fail(t, "Cannot Mount without creating a dir")
			}

			if tc.MountDirAheadOfTime {
				// We can enable these tests by either mounting a fake device on linux
				// e.g shipping a small ext4 image file and using that as a loopback
				//     device, but there's no convenient way to implement this.
				t.Skip("TODO: Skipped because we don't detect bind mounts")
			}

			// Step 2: Test Setup
			tmpPath := tmpDir(t)
			defer os.RemoveAll(tmpPath)

			csiFake := &csifake.Client{}
			eventer := func(e *structs.NodeEvent) {}
			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
			// The manager computes the staging path deterministically, so we can
			// pre-create it when the test case requires an existing directory.
			expectedStagingPath := manager.stagingDirForVolume(tmpPath, tc.Volume.ID, tc.UsageOptions)

			if tc.CreateDirAheadOfTime {
				err := os.MkdirAll(expectedStagingPath, 0700)
				require.NoError(t, err)
			}

			// Step 3: Now we can do some testing

			path, detectedMount, testErr := manager.ensureStagingDir(tc.Volume, tc.UsageOptions)
			if tc.ExpectedErr != nil {
				require.EqualError(t, testErr, tc.ExpectedErr.Error())
				return // We don't perform extra validation if an error was detected.
			}

			require.NoError(t, testErr)
			require.Equal(t, tc.ExpectedMountState, detectedMount)

			// If the ensureStagingDir call had to create a directory itself, then here
			// we validate that the directory exists and its permissions
			if !tc.CreateDirAheadOfTime {
				file, err := os.Lstat(path)
				require.NoError(t, err)
				require.True(t, file.IsDir())

				// TODO: Figure out a windows equivalent of this test
				if runtime.GOOS != "windows" {
					require.Equal(t, os.FileMode(0700), file.Mode().Perm())
				}
			}
		})
	}
}
   134  
   135  func TestVolumeManager_stageVolume(t *testing.T) {
   136  	if !checkMountSupport() {
   137  		t.Skip("mount point detection not supported for this platform")
   138  	}
   139  	t.Parallel()
   140  
   141  	cases := []struct {
   142  		Name         string
   143  		Volume       *structs.CSIVolume
   144  		UsageOptions *UsageOptions
   145  		PluginErr    error
   146  		ExpectedErr  error
   147  	}{
   148  		{
   149  			Name: "Returns an error when an invalid AttachmentMode is provided",
   150  			Volume: &structs.CSIVolume{
   151  				ID:             "foo",
   152  				AttachmentMode: "nonsense",
   153  			},
   154  			UsageOptions: &UsageOptions{},
   155  			ExpectedErr:  errors.New("Unknown volume attachment mode: nonsense"),
   156  		},
   157  		{
   158  			Name: "Returns an error when an invalid AccessMode is provided",
   159  			Volume: &structs.CSIVolume{
   160  				ID:             "foo",
   161  				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
   162  				AccessMode:     "nonsense",
   163  			},
   164  			UsageOptions: &UsageOptions{},
   165  			ExpectedErr:  errors.New("Unknown volume access mode: nonsense"),
   166  		},
   167  		{
   168  			Name: "Returns an error when the plugin returns an error",
   169  			Volume: &structs.CSIVolume{
   170  				ID:             "foo",
   171  				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
   172  				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
   173  			},
   174  			UsageOptions: &UsageOptions{},
   175  			PluginErr:    errors.New("Some Unknown Error"),
   176  			ExpectedErr:  errors.New("Some Unknown Error"),
   177  		},
   178  		{
   179  			Name: "Happy Path",
   180  			Volume: &structs.CSIVolume{
   181  				ID:             "foo",
   182  				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
   183  				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
   184  			},
   185  			UsageOptions: &UsageOptions{},
   186  			PluginErr:    nil,
   187  			ExpectedErr:  nil,
   188  		},
   189  	}
   190  
   191  	for _, tc := range cases {
   192  		t.Run(tc.Name, func(t *testing.T) {
   193  			tmpPath := tmpDir(t)
   194  			defer os.RemoveAll(tmpPath)
   195  
   196  			csiFake := &csifake.Client{}
   197  			csiFake.NextNodeStageVolumeErr = tc.PluginErr
   198  
   199  			eventer := func(e *structs.NodeEvent) {}
   200  			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
   201  			ctx := context.Background()
   202  
   203  			err := manager.stageVolume(ctx, tc.Volume, tc.UsageOptions, nil)
   204  
   205  			if tc.ExpectedErr != nil {
   206  				require.EqualError(t, err, tc.ExpectedErr.Error())
   207  			} else {
   208  				require.NoError(t, err)
   209  			}
   210  		})
   211  	}
   212  }
   213  
   214  func TestVolumeManager_unstageVolume(t *testing.T) {
   215  	if !checkMountSupport() {
   216  		t.Skip("mount point detection not supported for this platform")
   217  	}
   218  	t.Parallel()
   219  
   220  	cases := []struct {
   221  		Name                 string
   222  		Volume               *structs.CSIVolume
   223  		UsageOptions         *UsageOptions
   224  		PluginErr            error
   225  		ExpectedErr          error
   226  		ExpectedCSICallCount int64
   227  	}{
   228  		{
   229  			Name: "Returns an error when the plugin returns an error",
   230  			Volume: &structs.CSIVolume{
   231  				ID: "foo",
   232  			},
   233  			UsageOptions:         &UsageOptions{},
   234  			PluginErr:            errors.New("Some Unknown Error"),
   235  			ExpectedErr:          errors.New("Some Unknown Error"),
   236  			ExpectedCSICallCount: 1,
   237  		},
   238  		{
   239  			Name: "Happy Path",
   240  			Volume: &structs.CSIVolume{
   241  				ID: "foo",
   242  			},
   243  			UsageOptions:         &UsageOptions{},
   244  			PluginErr:            nil,
   245  			ExpectedErr:          nil,
   246  			ExpectedCSICallCount: 1,
   247  		},
   248  	}
   249  
   250  	for _, tc := range cases {
   251  		t.Run(tc.Name, func(t *testing.T) {
   252  			tmpPath := tmpDir(t)
   253  			defer os.RemoveAll(tmpPath)
   254  
   255  			csiFake := &csifake.Client{}
   256  			csiFake.NextNodeUnstageVolumeErr = tc.PluginErr
   257  
   258  			eventer := func(e *structs.NodeEvent) {}
   259  			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
   260  			ctx := context.Background()
   261  
   262  			err := manager.unstageVolume(ctx,
   263  				tc.Volume.ID, tc.Volume.RemoteID(), tc.UsageOptions)
   264  
   265  			if tc.ExpectedErr != nil {
   266  				require.EqualError(t, err, tc.ExpectedErr.Error())
   267  			} else {
   268  				require.NoError(t, err)
   269  			}
   270  
   271  			require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodeUnstageVolumeCallCount)
   272  		})
   273  	}
   274  }
   275  
// TestVolumeManager_publishVolume exercises publishVolume against a fake CSI
// client: plugin failure, the happy path, and that volume-level mount options
// (and per-request overrides in UsageOptions) are translated into the
// csi.VolumeCapability passed to the plugin.
func TestVolumeManager_publishVolume(t *testing.T) {
	if !checkMountSupport() {
		t.Skip("mount point detection not supported for this platform")
	}
	t.Parallel()

	cases := []struct {
		Name                 string
		Allocation           *structs.Allocation
		Volume               *structs.CSIVolume
		UsageOptions         *UsageOptions
		PluginErr            error
		ExpectedErr          error
		ExpectedCSICallCount int64
		// ExpectedVolumeCapability, when non-nil, is compared against the
		// capability the fake client recorded from the publish call.
		ExpectedVolumeCapability *csi.VolumeCapability
	}{
		{
			Name:       "Returns an error when the plugin returns an error",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID:             "foo",
				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            errors.New("Some Unknown Error"),
			ExpectedErr:          errors.New("Some Unknown Error"),
			ExpectedCSICallCount: 1,
		},
		{
			Name:       "Happy Path",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID:             "foo",
				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
		},
		{
			// Mount flags set on the volume itself flow through to the
			// capability sent to the plugin.
			Name:       "Mount options in the volume",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID:             "foo",
				AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
				MountOptions: &structs.CSIMountOptions{
					MountFlags: []string{"ro"},
				},
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
			ExpectedVolumeCapability: &csi.VolumeCapability{
				AccessType: csi.VolumeAccessTypeMount,
				AccessMode: csi.VolumeAccessModeMultiNodeMultiWriter,
				MountVolume: &structs.CSIMountOptions{
					MountFlags: []string{"ro"},
				},
			},
		},
		{
			// Mount flags in the usage request take precedence over the
			// flags declared on the volume ("rw" overrides "ro").
			Name:       "Mount options override in the request",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID:             "foo",
				AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
				MountOptions: &structs.CSIMountOptions{
					MountFlags: []string{"ro"},
				},
			},
			UsageOptions: &UsageOptions{
				MountOptions: &structs.CSIMountOptions{
					MountFlags: []string{"rw"},
				},
			},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
			ExpectedVolumeCapability: &csi.VolumeCapability{
				AccessType: csi.VolumeAccessTypeMount,
				AccessMode: csi.VolumeAccessModeMultiNodeMultiWriter,
				MountVolume: &structs.CSIMountOptions{
					MountFlags: []string{"rw"},
				},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			tmpPath := tmpDir(t)
			defer os.RemoveAll(tmpPath)

			csiFake := &csifake.Client{}
			csiFake.NextNodePublishVolumeErr = tc.PluginErr

			eventer := func(e *structs.NodeEvent) {}
			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
			ctx := context.Background()

			_, err := manager.publishVolume(ctx, tc.Volume, tc.Allocation, tc.UsageOptions, nil)

			if tc.ExpectedErr != nil {
				require.EqualError(t, err, tc.ExpectedErr.Error())
			} else {
				require.NoError(t, err)
			}

			// The plugin must be invoked exactly once, even on failure.
			require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodePublishVolumeCallCount)

			if tc.ExpectedVolumeCapability != nil {
				require.Equal(t, tc.ExpectedVolumeCapability, csiFake.PrevVolumeCapability)
			}

		})
	}
}
   399  
   400  func TestVolumeManager_unpublishVolume(t *testing.T) {
   401  	if !checkMountSupport() {
   402  		t.Skip("mount point detection not supported for this platform")
   403  	}
   404  	t.Parallel()
   405  
   406  	cases := []struct {
   407  		Name                 string
   408  		Allocation           *structs.Allocation
   409  		Volume               *structs.CSIVolume
   410  		UsageOptions         *UsageOptions
   411  		PluginErr            error
   412  		ExpectedErr          error
   413  		ExpectedCSICallCount int64
   414  	}{
   415  		{
   416  			Name:       "Returns an error when the plugin returns an error",
   417  			Allocation: structs.MockAlloc(),
   418  			Volume: &structs.CSIVolume{
   419  				ID: "foo",
   420  			},
   421  			UsageOptions:         &UsageOptions{},
   422  			PluginErr:            errors.New("Some Unknown Error"),
   423  			ExpectedErr:          errors.New("Some Unknown Error"),
   424  			ExpectedCSICallCount: 1,
   425  		},
   426  		{
   427  			Name:       "Happy Path",
   428  			Allocation: structs.MockAlloc(),
   429  			Volume: &structs.CSIVolume{
   430  				ID: "foo",
   431  			},
   432  			UsageOptions:         &UsageOptions{},
   433  			PluginErr:            nil,
   434  			ExpectedErr:          nil,
   435  			ExpectedCSICallCount: 1,
   436  		},
   437  	}
   438  
   439  	for _, tc := range cases {
   440  		t.Run(tc.Name, func(t *testing.T) {
   441  			tmpPath := tmpDir(t)
   442  			defer os.RemoveAll(tmpPath)
   443  
   444  			csiFake := &csifake.Client{}
   445  			csiFake.NextNodeUnpublishVolumeErr = tc.PluginErr
   446  
   447  			eventer := func(e *structs.NodeEvent) {}
   448  			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
   449  			ctx := context.Background()
   450  
   451  			err := manager.unpublishVolume(ctx,
   452  				tc.Volume.ID, tc.Volume.RemoteID(), tc.Allocation.ID, tc.UsageOptions)
   453  
   454  			if tc.ExpectedErr != nil {
   455  				require.EqualError(t, err, tc.ExpectedErr.Error())
   456  			} else {
   457  				require.NoError(t, err)
   458  			}
   459  
   460  			require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodeUnpublishVolumeCallCount)
   461  		})
   462  	}
   463  }
   464  
   465  func TestVolumeManager_MountVolumeEvents(t *testing.T) {
   466  	if !checkMountSupport() {
   467  		t.Skip("mount point detection not supported for this platform")
   468  	}
   469  	t.Parallel()
   470  
   471  	tmpPath := tmpDir(t)
   472  	defer os.RemoveAll(tmpPath)
   473  
   474  	csiFake := &csifake.Client{}
   475  
   476  	var events []*structs.NodeEvent
   477  	eventer := func(e *structs.NodeEvent) {
   478  		events = append(events, e)
   479  	}
   480  
   481  	manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
   482  	ctx := context.Background()
   483  	vol := &structs.CSIVolume{
   484  		ID:         "vol",
   485  		Namespace:  "ns",
   486  		AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter,
   487  	}
   488  	alloc := mock.Alloc()
   489  	usage := &UsageOptions{}
   490  	pubCtx := map[string]string{}
   491  
   492  	_, err := manager.MountVolume(ctx, vol, alloc, usage, pubCtx)
   493  	require.Error(t, err, "Unknown volume attachment mode: ")
   494  	require.Equal(t, 1, len(events))
   495  	e := events[0]
   496  	require.Equal(t, "Mount volume", e.Message)
   497  	require.Equal(t, "Storage", e.Subsystem)
   498  	require.Equal(t, "vol", e.Details["volume_id"])
   499  	require.Equal(t, "false", e.Details["success"])
   500  	require.Equal(t, "Unknown volume attachment mode: ", e.Details["error"])
   501  	events = events[1:]
   502  
   503  	vol.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
   504  	_, err = manager.MountVolume(ctx, vol, alloc, usage, pubCtx)
   505  	require.NoError(t, err)
   506  
   507  	require.Equal(t, 1, len(events))
   508  	e = events[0]
   509  	require.Equal(t, "Mount volume", e.Message)
   510  	require.Equal(t, "Storage", e.Subsystem)
   511  	require.Equal(t, "vol", e.Details["volume_id"])
   512  	require.Equal(t, "true", e.Details["success"])
   513  	events = events[1:]
   514  
   515  	err = manager.UnmountVolume(ctx, vol.ID, vol.RemoteID(), alloc.ID, usage)
   516  	require.NoError(t, err)
   517  
   518  	require.Equal(t, 1, len(events))
   519  	e = events[0]
   520  	require.Equal(t, "Unmount volume", e.Message)
   521  	require.Equal(t, "Storage", e.Subsystem)
   522  	require.Equal(t, "vol", e.Details["volume_id"])
   523  	require.Equal(t, "true", e.Details["success"])
   524  }