github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/client/pluginmanager/csimanager/volume_test.go

package csimanager

import (
	"context"
	"errors"
	"os"
	"runtime"
	"testing"

	"github.com/hashicorp/nomad/ci"
	"github.com/hashicorp/nomad/helper/mount"
	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/hashicorp/nomad/plugins/csi"
	csifake "github.com/hashicorp/nomad/plugins/csi/fake"
	"github.com/stretchr/testify/require"
)

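// checkMountSupport reports whether mount point detection works on this
// platform; every test in this file is skipped when it does not.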
func checkMountSupport() bool {
	path, err := os.Getwd()
	if err != nil {
		return false
	}

	m := mount.New()
	_, err = m.IsNotAMountPoint(path)
	return err == nil
}

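// TestVolumeManager_ensureStagingDir verifies that ensureStagingDir creates
// the per-volume staging directory when it is missing, tolerates a
// pre-existing directory, and reports whether a mount is already present.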
func TestVolumeManager_ensureStagingDir(t *testing.T) {
	if !checkMountSupport() {
		t.Skip("mount point detection not supported for this platform")
	}
	ci.Parallel(t)

	cases := []struct {
		Name                 string
		Volume               *structs.CSIVolume
		UsageOptions         *UsageOptions
		CreateDirAheadOfTime bool
		MountDirAheadOfTime  bool

		ExpectedErr        error
		ExpectedMountState bool
	}{
		{
			Name:         "Creates a directory when one does not exist",
			Volume:       &structs.CSIVolume{ID: "foo"},
			UsageOptions: &UsageOptions{},
		},
		{
			Name:                 "Does not fail because of a pre-existing directory",
			Volume:               &structs.CSIVolume{ID: "foo"},
			UsageOptions:         &UsageOptions{},
			CreateDirAheadOfTime: true,
		},
		{
			Name:         "Returns negative mount info",
			UsageOptions: &UsageOptions{},
			Volume:       &structs.CSIVolume{ID: "foo"},
		},
		{
			Name:                 "Returns positive mount info",
			Volume:               &structs.CSIVolume{ID: "foo"},
			UsageOptions:         &UsageOptions{},
			CreateDirAheadOfTime: true,
			MountDirAheadOfTime:  true,
			ExpectedMountState:   true,
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			// Step 1: Validate that the test case makes sense
			if !tc.CreateDirAheadOfTime && tc.MountDirAheadOfTime {
				require.Fail(t, "Cannot Mount without creating a dir")
			}

			if tc.MountDirAheadOfTime {
				// We could enable these cases on Linux by mounting a fake
				// device, e.g. shipping a small ext4 image file and mounting
				// it as a loopback device (see the sketch below), but there
				// is no convenient, portable way to implement this.
				t.Skip("TODO: Skipped because we don't detect bind mounts")
			}
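			// Untested sketch of one way to do it (assumes root privileges
			// and Linux-only tooling, which is why these cases stay skipped):
			//
			//	dd if=/dev/zero of=/tmp/vol.img bs=1M count=4
			//	mkfs.ext4 /tmp/vol.img
			//	mount -o loop /tmp/vol.img <expectedStagingPath>
			//
			// plus a t.Cleanup that runs `umount <expectedStagingPath>`.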

			// Step 2: Test Setup
			tmpPath := t.TempDir()

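			// csifake.Client is an in-memory CSI plugin stub: it counts each
			// Node* call and returns whatever error was preloaded into the
			// corresponding Next*Err field, so no real plugin is needed.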
			csiFake := &csifake.Client{}
			eventer := func(e *structs.NodeEvent) {}
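			// The host mount root and the container mount root both point at
			// tmpPath; the trailing bool is requiresStaging (going by the
			// volumeManager constructor), which makes staging paths apply.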
			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
			expectedStagingPath := manager.stagingDirForVolume(tmpPath, tc.Volume.ID, tc.UsageOptions)

			if tc.CreateDirAheadOfTime {
				err := os.MkdirAll(expectedStagingPath, 0700)
				require.NoError(t, err)
			}

			// Step 3: Now we can do some testing

			path, detectedMount, testErr := manager.ensureStagingDir(tc.Volume, tc.UsageOptions)
			if tc.ExpectedErr != nil {
				require.EqualError(t, testErr, tc.ExpectedErr.Error())
				return // We don't perform extra validation if an error was detected.
			}

			require.NoError(t, testErr)
			require.Equal(t, tc.ExpectedMountState, detectedMount)

			// If ensureStagingDir had to create the directory itself, validate
			// that the directory exists and has the expected permissions.
			if !tc.CreateDirAheadOfTime {
				file, err := os.Lstat(path)
				require.NoError(t, err)
				require.True(t, file.IsDir())

				// TODO: Figure out a windows equivalent of this test
				if runtime.GOOS != "windows" {
					require.Equal(t, os.FileMode(0700), file.Mode().Perm())
				}
			}
		})
	}
}

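// TestVolumeManager_stageVolume verifies that stageVolume rejects invalid
// attachment and access modes before calling the plugin, and that plugin
// errors are passed through to the caller.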
func TestVolumeManager_stageVolume(t *testing.T) {
	if !checkMountSupport() {
		t.Skip("mount point detection not supported for this platform")
	}
	ci.Parallel(t)

	cases := []struct {
		Name         string
		Volume       *structs.CSIVolume
		UsageOptions *UsageOptions
		PluginErr    error
		ExpectedErr  error
	}{
		{
			Name: "Returns an error when an invalid AttachmentMode is provided",
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions: &UsageOptions{AttachmentMode: "nonsense"},
			ExpectedErr:  errors.New("unknown volume attachment mode: nonsense"),
		},
		{
			Name: "Returns an error when an invalid AccessMode is provided",
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions: &UsageOptions{
				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
				AccessMode:     "nonsense",
			},
			ExpectedErr: errors.New("unknown volume access mode: nonsense"),
		},
		{
			Name: "Returns an error when the plugin returns an error",
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions: &UsageOptions{
				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
			},
			PluginErr:   errors.New("Some Unknown Error"),
			ExpectedErr: errors.New("Some Unknown Error"),
		},
		{
			Name: "Happy Path",
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions: &UsageOptions{
				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
			},
			PluginErr:   nil,
			ExpectedErr: nil,
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			tmpPath := t.TempDir()

			csiFake := &csifake.Client{}
			csiFake.NextNodeStageVolumeErr = tc.PluginErr

			eventer := func(e *structs.NodeEvent) {}
			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
			ctx := context.Background()

			err := manager.stageVolume(ctx, tc.Volume, tc.UsageOptions, nil)

			if tc.ExpectedErr != nil {
				require.EqualError(t, err, tc.ExpectedErr.Error())
			} else {
				require.NoError(t, err)
			}
		})
	}
}

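// TestVolumeManager_unstageVolume verifies that unstageVolume issues exactly
// one NodeUnstageVolume call per invocation and surfaces any plugin error.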
func TestVolumeManager_unstageVolume(t *testing.T) {
	if !checkMountSupport() {
		t.Skip("mount point detection not supported for this platform")
	}
	ci.Parallel(t)

	cases := []struct {
		Name                 string
		Volume               *structs.CSIVolume
		UsageOptions         *UsageOptions
		PluginErr            error
		ExpectedErr          error
		ExpectedCSICallCount int64
	}{
		{
			Name: "Returns an error when the plugin returns an error",
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            errors.New("Some Unknown Error"),
			ExpectedErr:          errors.New("Some Unknown Error"),
			ExpectedCSICallCount: 1,
		},
		{
			Name: "Happy Path",
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			tmpPath := t.TempDir()

			csiFake := &csifake.Client{}
			csiFake.NextNodeUnstageVolumeErr = tc.PluginErr

			eventer := func(e *structs.NodeEvent) {}
			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
			ctx := context.Background()

			err := manager.unstageVolume(ctx,
				tc.Volume.ID, tc.Volume.RemoteID(), tc.UsageOptions)

			if tc.ExpectedErr != nil {
				require.EqualError(t, err, tc.ExpectedErr.Error())
			} else {
				require.NoError(t, err)
			}

			require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodeUnstageVolumeCallCount)
		})
	}
}

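// TestVolumeManager_publishVolume verifies that publishVolume surfaces plugin
// errors and translates mount options into the CSI VolumeCapability sent to
// the plugin, with options in the request overriding those on the volume.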
func TestVolumeManager_publishVolume(t *testing.T) {
	if !checkMountSupport() {
		t.Skip("mount point detection not supported for this platform")
	}

	ci.Parallel(t)

	cases := []struct {
		Name                     string
		Allocation               *structs.Allocation
		Volume                   *structs.CSIVolume
		UsageOptions             *UsageOptions
		PluginErr                error
		ExpectedErr              error
		ExpectedCSICallCount     int64
		ExpectedVolumeCapability *csi.VolumeCapability
	}{
		{
			Name:       "Returns an error when the plugin returns an error",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions: &UsageOptions{
				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
			},
			PluginErr:            errors.New("Some Unknown Error"),
			ExpectedErr:          errors.New("Some Unknown Error"),
			ExpectedCSICallCount: 1,
		},
		{
			Name:       "Happy Path",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions: &UsageOptions{
				AttachmentMode: structs.CSIVolumeAttachmentModeBlockDevice,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
			},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
		},
		{
			Name:       "Mount options in the volume",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID: "foo",
				MountOptions: &structs.CSIMountOptions{
					MountFlags: []string{"ro"},
				},
			},
			UsageOptions: &UsageOptions{
				AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
			},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
			ExpectedVolumeCapability: &csi.VolumeCapability{
				AccessType: csi.VolumeAccessTypeMount,
				AccessMode: csi.VolumeAccessModeMultiNodeMultiWriter,
				MountVolume: &structs.CSIMountOptions{
					MountFlags: []string{"ro"},
				},
			},
		},
		{
			Name:       "Mount options override in the request",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID: "foo",
				MountOptions: &structs.CSIMountOptions{
					MountFlags: []string{"ro"},
				},
			},
			UsageOptions: &UsageOptions{
				AttachmentMode: structs.CSIVolumeAttachmentModeFilesystem,
				AccessMode:     structs.CSIVolumeAccessModeMultiNodeMultiWriter,
				MountOptions: &structs.CSIMountOptions{
					MountFlags: []string{"rw"},
				},
			},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
			ExpectedVolumeCapability: &csi.VolumeCapability{
				AccessType: csi.VolumeAccessTypeMount,
				AccessMode: csi.VolumeAccessModeMultiNodeMultiWriter,
				MountVolume: &structs.CSIMountOptions{
					MountFlags: []string{"rw"},
				},
			},
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			tmpPath := t.TempDir()

			csiFake := &csifake.Client{}
			csiFake.NextNodePublishVolumeErr = tc.PluginErr

			eventer := func(e *structs.NodeEvent) {}
			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
			ctx := context.Background()

			_, err := manager.publishVolume(ctx, tc.Volume, tc.Allocation, tc.UsageOptions, nil)

			if tc.ExpectedErr != nil {
				require.EqualError(t, err, tc.ExpectedErr.Error())
			} else {
				require.NoError(t, err)
			}

			require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodePublishVolumeCallCount)

			if tc.ExpectedVolumeCapability != nil {
				require.Equal(t, tc.ExpectedVolumeCapability, csiFake.PrevVolumeCapability)
			}
		})
	}
}

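// TestVolumeManager_unpublishVolume verifies that unpublishVolume issues
// exactly one NodeUnpublishVolume call per invocation and surfaces any
// plugin error.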
func TestVolumeManager_unpublishVolume(t *testing.T) {
	if !checkMountSupport() {
		t.Skip("mount point detection not supported for this platform")
	}
	ci.Parallel(t)

	cases := []struct {
		Name                 string
		Allocation           *structs.Allocation
		Volume               *structs.CSIVolume
		UsageOptions         *UsageOptions
		PluginErr            error
		ExpectedErr          error
		ExpectedCSICallCount int64
	}{
		{
			Name:       "Returns an error when the plugin returns an error",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            errors.New("Some Unknown Error"),
			ExpectedErr:          errors.New("Some Unknown Error"),
			ExpectedCSICallCount: 1,
		},
		{
			Name:       "Happy Path",
			Allocation: structs.MockAlloc(),
			Volume: &structs.CSIVolume{
				ID: "foo",
			},
			UsageOptions:         &UsageOptions{},
			PluginErr:            nil,
			ExpectedErr:          nil,
			ExpectedCSICallCount: 1,
		},
	}

	for _, tc := range cases {
		t.Run(tc.Name, func(t *testing.T) {
			tmpPath := t.TempDir()

			csiFake := &csifake.Client{}
			csiFake.NextNodeUnpublishVolumeErr = tc.PluginErr

			eventer := func(e *structs.NodeEvent) {}
			manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
			ctx := context.Background()

			err := manager.unpublishVolume(ctx,
				tc.Volume.ID, tc.Volume.RemoteID(), tc.Allocation.ID, tc.UsageOptions)

			if tc.ExpectedErr != nil {
				require.EqualError(t, err, tc.ExpectedErr.Error())
			} else {
				require.NoError(t, err)
			}

			require.Equal(t, tc.ExpectedCSICallCount, csiFake.NodeUnpublishVolumeCallCount)
		})
	}
}

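// TestVolumeManager_MountVolumeEvents verifies that MountVolume and
// UnmountVolume emit a node event for every attempt, with success or the
// validation error recorded in the event details.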
func TestVolumeManager_MountVolumeEvents(t *testing.T) {
	if !checkMountSupport() {
		t.Skip("mount point detection not supported for this platform")
	}
	ci.Parallel(t)

	tmpPath := t.TempDir()

	csiFake := &csifake.Client{}

	var events []*structs.NodeEvent
	eventer := func(e *structs.NodeEvent) {
		events = append(events, e)
	}

	manager := newVolumeManager(testlog.HCLogger(t), eventer, csiFake, tmpPath, tmpPath, true)
	ctx := context.Background()
	vol := &structs.CSIVolume{
		ID:        "vol",
		Namespace: "ns",
	}
	alloc := mock.Alloc()
	usage := &UsageOptions{
		AccessMode: structs.CSIVolumeAccessModeMultiNodeMultiWriter,
	}
	pubCtx := map[string]string{}

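	// The usage options carry no AttachmentMode yet, so this first attempt
	// fails validation and should emit a failed "Mount volume" event that
	// records the error.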
	_, err := manager.MountVolume(ctx, vol, alloc, usage, pubCtx)
	require.EqualError(t, err, "unknown volume attachment mode: ")
	require.Equal(t, 1, len(events))
	e := events[0]
	require.Equal(t, "Mount volume", e.Message)
	require.Equal(t, "Storage", e.Subsystem)
	require.Equal(t, "vol", e.Details["volume_id"])
	require.Equal(t, "false", e.Details["success"])
	require.Equal(t, "unknown volume attachment mode: ", e.Details["error"])
	events = events[1:]

	usage.AttachmentMode = structs.CSIVolumeAttachmentModeFilesystem
	_, err = manager.MountVolume(ctx, vol, alloc, usage, pubCtx)
	require.NoError(t, err)

	require.Equal(t, 1, len(events))
	e = events[0]
	require.Equal(t, "Mount volume", e.Message)
	require.Equal(t, "Storage", e.Subsystem)
	require.Equal(t, "vol", e.Details["volume_id"])
	require.Equal(t, "true", e.Details["success"])
	events = events[1:]

	err = manager.UnmountVolume(ctx, vol.ID, vol.RemoteID(), alloc.ID, usage)
	require.NoError(t, err)

	require.Equal(t, 1, len(events))
	e = events[0]
	require.Equal(t, "Unmount volume", e.Message)
	require.Equal(t, "Storage", e.Subsystem)
	require.Equal(t, "vol", e.Details["volume_id"])
	require.Equal(t, "true", e.Details["success"])
}