github.com/anth0d/nomad@v0.0.0-20221214183521-ae3a0a2cad06/nomad/structs/csi_test.go

package structs

import (
	"reflect"
	"testing"
	"time"

	"github.com/hashicorp/nomad/ci"
	"github.com/stretchr/testify/require"
)

// TestCSIVolumeClaim ensures that the volume claim workflows work as expected.
func TestCSIVolumeClaim(t *testing.T) {
	ci.Parallel(t)

	vol := NewCSIVolume("vol0", 0)
	vol.Schedulable = true
	vol.AccessMode = CSIVolumeAccessModeUnknown
	vol.AttachmentMode = CSIVolumeAttachmentModeUnknown
	vol.RequestedCapabilities = []*CSIVolumeCapability{
		{
			AccessMode:     CSIVolumeAccessModeMultiNodeSingleWriter,
			AttachmentMode: CSIVolumeAttachmentModeFilesystem,
		},
		{
			AccessMode:     CSIVolumeAccessModeMultiNodeReader,
			AttachmentMode: CSIVolumeAttachmentModeFilesystem,
		},
	}

	alloc1 := &Allocation{ID: "a1", Namespace: "n", JobID: "j"}
	alloc2 := &Allocation{ID: "a2", Namespace: "n", JobID: "j"}
	alloc3 := &Allocation{ID: "a3", Namespace: "n", JobID: "j3"}
	claim := &CSIVolumeClaim{
		AllocationID: alloc1.ID,
		NodeID:       "foo",
		State:        CSIVolumeClaimStateTaken,
	}
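	// note: this single claim value is mutated and re-submitted throughout
	// the test to exercise each transition of the claim workflow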

	// claim a read and ensure we are still schedulable
	claim.Mode = CSIVolumeClaimRead
	claim.AccessMode = CSIVolumeAccessModeMultiNodeReader
	claim.AttachmentMode = CSIVolumeAttachmentModeFilesystem
	require.NoError(t, vol.Claim(claim, alloc1))
	require.True(t, vol.ReadSchedulable())
	require.False(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 2)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[1].AccessMode)

	// claim a write and ensure we can't upgrade capabilities.
	claim.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter
	claim.Mode = CSIVolumeClaimWrite
	claim.AllocationID = alloc2.ID
	require.EqualError(t, vol.Claim(claim, alloc2), ErrCSIVolumeUnschedulable.Error())
	require.True(t, vol.ReadSchedulable())
	require.False(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// release our last claim, including unpublish workflow
	claim.AllocationID = alloc1.ID
	claim.Mode = CSIVolumeClaimRead
	claim.State = CSIVolumeClaimStateReadyToFree
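	// the error from Claim is deliberately ignored during release; the
	// assertions below verify the resulting state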
	vol.Claim(claim, nil)
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 0)
	require.Equal(t, CSIVolumeAccessModeUnknown, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeUnknown, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 2)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[1].AccessMode)

	// claim a write on the now-unclaimed volume and ensure we can upgrade
	// capabilities so long as they're in our RequestedCapabilities.
	claim.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter
	claim.Mode = CSIVolumeClaimWrite
	claim.State = CSIVolumeClaimStateTaken
	claim.AllocationID = alloc2.ID
	require.NoError(t, vol.Claim(claim, alloc2))
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 2)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[1].AccessMode)

	// make the claim again to ensure it's idempotent, and that the volume's
	// access mode is unchanged.
	require.NoError(t, vol.Claim(claim, alloc2))
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 1)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// claim a read. ensure we are still schedulable and that we haven't
	// changed the access mode
	claim.AllocationID = alloc1.ID
	claim.Mode = CSIVolumeClaimRead
	claim.AccessMode = CSIVolumeAccessModeMultiNodeReader
	claim.AttachmentMode = CSIVolumeAttachmentModeFilesystem
	require.NoError(t, vol.Claim(claim, alloc1))
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 1)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// ensure we can't change the attachment mode for a claimed volume
	claim.AttachmentMode = CSIVolumeAttachmentModeBlockDevice
	claim.AllocationID = alloc3.ID
	require.EqualError(t, vol.Claim(claim, alloc3),
		"cannot change attachment mode of claimed volume")
	claim.AttachmentMode = CSIVolumeAttachmentModeFilesystem

	// denormalize-on-read (simulating a volume we've gotten out of the state
	// store) and then ensure we cannot claim another write
	vol.WriteAllocs[alloc2.ID] = alloc2
	claim.Mode = CSIVolumeClaimWrite
	require.EqualError(t, vol.Claim(claim, alloc3), ErrCSIVolumeMaxClaims.Error())

	// release the write claim but ensure it doesn't free up write claims
	// until after we've unpublished
	claim.AllocationID = alloc2.ID
	claim.State = CSIVolumeClaimStateUnpublishing
	vol.Claim(claim, nil)
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 1) // claim still exists until we're done
	require.Len(t, vol.PastClaims, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// complete the unpublish workflow
	claim.State = CSIVolumeClaimStateReadyToFree
	vol.Claim(claim, nil)
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.True(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.WriteAllocs, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// release our last claim, including unpublish workflow
	claim.AllocationID = alloc1.ID
	claim.Mode = CSIVolumeClaimRead
	vol.Claim(claim, nil)
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 0)
	require.Equal(t, CSIVolumeAccessModeUnknown, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeUnknown, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 2)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[1].AccessMode)
}
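
// For reference, the claim/release cycle exercised above, distilled to its
// core transitions (a sketch only; setup, assertions, and error handling
// are omitted):
//
//	claim := &CSIVolumeClaim{AllocationID: alloc.ID, NodeID: "node",
//		Mode: CSIVolumeClaimWrite, State: CSIVolumeClaimStateTaken}
//	_ = vol.Claim(claim, alloc)                   // take the claim
//	claim.State = CSIVolumeClaimStateUnpublishing // begin unpublish
//	_ = vol.Claim(claim, nil)
//	claim.State = CSIVolumeClaimStateReadyToFree  // finish unpublish
//	_ = vol.Claim(claim, nil)                     // claim is freed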

// TestCSIVolumeClaim_CompatOldClaims ensures that a volume created before
// v1.1.0, with claims that predate v1.1.0, still works.
//
// COMPAT(1.3.0): safe to remove this test, but not the code, for 1.3.0
func TestCSIVolumeClaim_CompatOldClaims(t *testing.T) {
	ci.Parallel(t)

	vol := NewCSIVolume("vol0", 0)
	vol.Schedulable = true
	vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter
	vol.AttachmentMode = CSIVolumeAttachmentModeFilesystem

	alloc1 := &Allocation{ID: "a1", Namespace: "n", JobID: "j"}
	alloc2 := &Allocation{ID: "a2", Namespace: "n", JobID: "j"}
	alloc3 := &Allocation{ID: "a3", Namespace: "n", JobID: "j3"}
	claim := &CSIVolumeClaim{
		AllocationID: alloc1.ID,
		NodeID:       "foo",
		State:        CSIVolumeClaimStateTaken,
	}

	// claim a read and ensure we are still schedulable
	claim.Mode = CSIVolumeClaimRead
	require.NoError(t, vol.Claim(claim, alloc1))
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.True(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)

	// claim a write and ensure we no longer have free write claims
	claim.Mode = CSIVolumeClaimWrite
	claim.AllocationID = alloc2.ID
	require.NoError(t, vol.Claim(claim, alloc2))
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 1)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// denormalize-on-read (simulating a volume we've gotten out of the state
	// store) and then ensure we cannot claim another write
	vol.WriteAllocs[alloc2.ID] = alloc2
	claim.AllocationID = alloc3.ID
	require.EqualError(t, vol.Claim(claim, alloc3), ErrCSIVolumeMaxClaims.Error())

	// release the write claim but ensure it doesn't free up write claims
	// until after we've unpublished
	claim.AllocationID = alloc2.ID
	claim.State = CSIVolumeClaimStateUnpublishing
	vol.Claim(claim, nil)
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 1) // claim still exists until we're done
	require.Len(t, vol.PastClaims, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// complete the unpublish workflow
	claim.State = CSIVolumeClaimStateReadyToFree
	vol.Claim(claim, nil)
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.True(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.WriteAllocs, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// release our last claim, including unpublish workflow
	claim.AllocationID = alloc1.ID
	claim.Mode = CSIVolumeClaimRead
	vol.Claim(claim, nil)
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 0)
	require.Equal(t, CSIVolumeAccessModeUnknown, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeUnknown, vol.AttachmentMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)
}

// TestCSIVolumeClaim_CompatNewClaimsOK ensures that a volume created
// before v1.1.0 is compatible with new claims.
//
// COMPAT(1.3.0): safe to remove this test, but not the code, for 1.3.0
func TestCSIVolumeClaim_CompatNewClaimsOK(t *testing.T) {
	ci.Parallel(t)

	vol := NewCSIVolume("vol0", 0)
	vol.Schedulable = true
	vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter
	vol.AttachmentMode = CSIVolumeAttachmentModeFilesystem

	alloc1 := &Allocation{ID: "a1", Namespace: "n", JobID: "j"}
	alloc2 := &Allocation{ID: "a2", Namespace: "n", JobID: "j"}
	alloc3 := &Allocation{ID: "a3", Namespace: "n", JobID: "j3"}
	claim := &CSIVolumeClaim{
		AllocationID: alloc1.ID,
		NodeID:       "foo",
		State:        CSIVolumeClaimStateTaken,
	}

	// claim a read and ensure we are still schedulable
	claim.Mode = CSIVolumeClaimRead
	claim.AccessMode = CSIVolumeAccessModeMultiNodeReader
	claim.AttachmentMode = CSIVolumeAttachmentModeFilesystem
	require.NoError(t, vol.Claim(claim, alloc1))
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.True(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)

	// claim a write and ensure we no longer have free write claims
	claim.Mode = CSIVolumeClaimWrite
	claim.AllocationID = alloc2.ID
	require.NoError(t, vol.Claim(claim, alloc2))
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 1)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// ensure we can't change the attachment mode for a claimed volume
	claim.AttachmentMode = CSIVolumeAttachmentModeBlockDevice
	require.EqualError(t, vol.Claim(claim, alloc2),
		"cannot change attachment mode of claimed volume")
	claim.AttachmentMode = CSIVolumeAttachmentModeFilesystem

	// denormalize-on-read (simulating a volume we've gotten out of the state
	// store) and then ensure we cannot claim another write
	vol.WriteAllocs[alloc2.ID] = alloc2
	claim.AllocationID = alloc3.ID
	require.EqualError(t, vol.Claim(claim, alloc3), ErrCSIVolumeMaxClaims.Error())

	// release the write claim but ensure it doesn't free up write claims
	// until after we've unpublished
	claim.AllocationID = alloc2.ID
	claim.State = CSIVolumeClaimStateUnpublishing
	vol.Claim(claim, nil)
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 1) // claim still exists until we're done
	require.Len(t, vol.PastClaims, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// complete the unpublish workflow
	claim.State = CSIVolumeClaimStateReadyToFree
	vol.Claim(claim, nil)
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.True(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.WriteAllocs, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// release our last claim, including unpublish workflow
	claim.AllocationID = alloc1.ID
	claim.Mode = CSIVolumeClaimRead
	vol.Claim(claim, nil)
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 0)
	require.Equal(t, CSIVolumeAccessModeUnknown, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeUnknown, vol.AttachmentMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)
}

// TestCSIVolumeClaim_CompatNewClaimsNoUpgrade ensures that a volume created
// before v1.1.0 is compatible with new claims, but prevents unexpected
// capability upgrades.
//
// COMPAT(1.3.0): safe to remove this test, but not the code, for 1.3.0
func TestCSIVolumeClaim_CompatNewClaimsNoUpgrade(t *testing.T) {
	ci.Parallel(t)

	vol := NewCSIVolume("vol0", 0)
	vol.Schedulable = true
	vol.AccessMode = CSIVolumeAccessModeMultiNodeReader
	vol.AttachmentMode = CSIVolumeAttachmentModeFilesystem

	alloc1 := &Allocation{ID: "a1", Namespace: "n", JobID: "j"}
	alloc2 := &Allocation{ID: "a2", Namespace: "n", JobID: "j"}
	claim := &CSIVolumeClaim{
		AllocationID: alloc1.ID,
		NodeID:       "foo",
		State:        CSIVolumeClaimStateTaken,
	}

	// claim a read and ensure we are still schedulable
	claim.Mode = CSIVolumeClaimRead
	claim.AccessMode = CSIVolumeAccessModeMultiNodeReader
	claim.AttachmentMode = CSIVolumeAttachmentModeFilesystem
	require.NoError(t, vol.Claim(claim, alloc1))
	require.True(t, vol.ReadSchedulable())
	require.False(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)

	// claim a write and ensure we can't upgrade capabilities.
	claim.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter
	claim.Mode = CSIVolumeClaimWrite
	claim.AllocationID = alloc2.ID
	require.EqualError(t, vol.Claim(claim, alloc2), ErrCSIVolumeUnschedulable.Error())
	require.True(t, vol.ReadSchedulable())
	require.False(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)

	// release our last claim, including unpublish workflow
	claim.AllocationID = alloc1.ID
	claim.Mode = CSIVolumeClaimRead
	claim.State = CSIVolumeClaimStateReadyToFree
	vol.Claim(claim, nil)
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 0)
	require.Equal(t, CSIVolumeAccessModeUnknown, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeUnknown, vol.AttachmentMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)

	// claim a write on the now-unclaimed volume and ensure we still can't
	// upgrade capabilities.
	claim.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter
	claim.Mode = CSIVolumeClaimWrite
	claim.State = CSIVolumeClaimStateTaken
	claim.AllocationID = alloc2.ID
	require.EqualError(t, vol.Claim(claim, alloc2), ErrCSIVolumeUnschedulable.Error())
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 0)
	require.Equal(t, CSIVolumeAccessModeUnknown, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeUnknown, vol.AttachmentMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)
}

func TestVolume_Copy(t *testing.T) {
	ci.Parallel(t)

	a1 := MockAlloc()
	a2 := MockAlloc()
	a3 := MockAlloc()
	c1 := &CSIVolumeClaim{
		AllocationID:   a1.ID,
		NodeID:         a1.NodeID,
		ExternalNodeID: "c1",
		Mode:           CSIVolumeClaimRead,
		State:          CSIVolumeClaimStateTaken,
	}
	c2 := &CSIVolumeClaim{
		AllocationID:   a2.ID,
		NodeID:         a2.NodeID,
		ExternalNodeID: "c2",
		Mode:           CSIVolumeClaimRead,
		State:          CSIVolumeClaimStateNodeDetached,
	}
	c3 := &CSIVolumeClaim{
		AllocationID:   a3.ID,
		NodeID:         a3.NodeID,
		ExternalNodeID: "c3",
		Mode:           CSIVolumeClaimWrite,
		State:          CSIVolumeClaimStateTaken,
	}

	v1 := &CSIVolume{
		ID:             "vol1",
		Name:           "vol1",
		ExternalID:     "vol-abcdef",
		Namespace:      "default",
		Topologies:     []*CSITopology{{Segments: map[string]string{"AZ1": "123"}}},
		AccessMode:     CSIVolumeAccessModeSingleNodeWriter,
		AttachmentMode: CSIVolumeAttachmentModeBlockDevice,
		MountOptions:   &CSIMountOptions{FSType: "ext4", MountFlags: []string{"ro", "noatime"}},
		Secrets:        CSISecrets{"mysecret": "myvalue"},
		Parameters:     map[string]string{"param1": "val1"},
		Context:        map[string]string{"ctx1": "val1"},

		ReadAllocs:  map[string]*Allocation{a1.ID: a1, a2.ID: nil},
		WriteAllocs: map[string]*Allocation{a3.ID: a3},

		ReadClaims:  map[string]*CSIVolumeClaim{a1.ID: c1, a2.ID: c2},
		WriteClaims: map[string]*CSIVolumeClaim{a3.ID: c3},
		PastClaims:  map[string]*CSIVolumeClaim{},

		Schedulable:         true,
		PluginID:            "moosefs",
		Provider:            "n/a",
		ProviderVersion:     "1.0",
		ControllerRequired:  true,
		ControllersHealthy:  2,
		ControllersExpected: 2,
		NodesHealthy:        4,
		NodesExpected:       5,
		ResourceExhausted:   time.Now(),
	}

	v2 := v1.Copy()
	if !reflect.DeepEqual(v1, v2) {
		t.Fatalf("Copy() returned an unequal Volume; got %#v; want %#v", v2, v1)
	}

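	// mutate the original; none of these changes should be visible in the copy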
	v1.ReadClaims[a1.ID].State = CSIVolumeClaimStateReadyToFree
	v1.ReadAllocs[a2.ID] = a2
	v1.WriteAllocs[a3.ID].ClientStatus = AllocClientStatusComplete
	v1.MountOptions.FSType = "zfs"

	if v2.ReadClaims[a1.ID].State == CSIVolumeClaimStateReadyToFree {
		t.Fatalf("Volume.Copy() failed; changes to original ReadClaims seen in copy")
	}
	if v2.ReadAllocs[a2.ID] != nil {
		t.Fatalf("Volume.Copy() failed; changes to original ReadAllocs seen in copy")
	}
	if v2.WriteAllocs[a3.ID].ClientStatus == AllocClientStatusComplete {
		t.Fatalf("Volume.Copy() failed; changes to original WriteAllocs seen in copy")
	}
	if v2.MountOptions.FSType == "zfs" {
		t.Fatalf("Volume.Copy() failed; changes to original MountOptions seen in copy")
	}
}

func TestCSIVolume_Validate(t *testing.T) {
	ci.Parallel(t)

	vol := &CSIVolume{
		ID:         "test",
		PluginID:   "test",
		SnapshotID: "test-snapshot",
		CloneID:    "test-clone",
		RequestedTopologies: &CSITopologyRequest{
			Required: []*CSITopology{{}, {}},
		},
	}
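	// every validation failure is reported in a single pass, including one
	// "missing segments" error per empty required topology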
	err := vol.Validate()
	require.EqualError(t, err, "validation: missing namespace, only one of snapshot_id and clone_id is allowed, must include at least one capability block, required topology is missing segments field, required topology is missing segments field")
}

func TestCSIVolume_Merge(t *testing.T) {
	ci.Parallel(t)

	testCases := []struct {
		name     string
		v        *CSIVolume
		update   *CSIVolume
		expected string
		expectFn func(t *testing.T, v *CSIVolume)
	}{
		{
			name: "invalid capacity update",
			v:    &CSIVolume{Capacity: 100},
			update: &CSIVolume{
				RequestedCapacityMax: 300, RequestedCapacityMin: 200},
			expected: "volume requested capacity update was not compatible with existing capacity",
			expectFn: func(t *testing.T, v *CSIVolume) {
				require.NotEqual(t, 300, v.RequestedCapacityMax)
				require.NotEqual(t, 200, v.RequestedCapacityMin)
			},
		},
		{
			name: "invalid capability update",
			v: &CSIVolume{
				AccessMode:     CSIVolumeAccessModeMultiNodeReader,
				AttachmentMode: CSIVolumeAttachmentModeFilesystem,
			},
			update: &CSIVolume{
				RequestedCapabilities: []*CSIVolumeCapability{
					{
						AccessMode:     CSIVolumeAccessModeSingleNodeWriter,
						AttachmentMode: CSIVolumeAttachmentModeFilesystem,
					},
				},
			},
			expected: "volume requested capabilities update was not compatible with existing capability in use",
		},
		{
			name: "invalid topology update - removed",
			v: &CSIVolume{
				RequestedTopologies: &CSITopologyRequest{
					Required: []*CSITopology{
						{Segments: map[string]string{"rack": "R1"}},
					},
				},
				Topologies: []*CSITopology{
					{Segments: map[string]string{"rack": "R1"}},
				},
			},
			update:   &CSIVolume{},
			expected: "volume topology request update was not compatible with existing topology",
			expectFn: func(t *testing.T, v *CSIVolume) {
				require.Len(t, v.Topologies, 1)
			},
		},
		{
			name: "invalid topology requirement added",
			v: &CSIVolume{
				Topologies: []*CSITopology{
					{Segments: map[string]string{"rack": "R1"}},
				},
			},
			update: &CSIVolume{
				RequestedTopologies: &CSITopologyRequest{
					Required: []*CSITopology{
						{Segments: map[string]string{"rack": "R1"}},
						{Segments: map[string]string{"rack": "R3"}},
					},
				},
			},
			expected: "volume topology request update was not compatible with existing topology",
			expectFn: func(t *testing.T, v *CSIVolume) {
				require.Len(t, v.Topologies, 1)
				require.Equal(t, "R1", v.Topologies[0].Segments["rack"])
			},
		},
		{
			name: "invalid topology preference removed",
			v: &CSIVolume{
				Topologies: []*CSITopology{
					{Segments: map[string]string{"rack": "R1"}},
				},
				RequestedTopologies: &CSITopologyRequest{
					Preferred: []*CSITopology{
						{Segments: map[string]string{"rack": "R1"}},
						{Segments: map[string]string{"rack": "R3"}},
					},
				},
			},
			update: &CSIVolume{
				Topologies: []*CSITopology{
					{Segments: map[string]string{"rack": "R1"}},
				},
				RequestedTopologies: &CSITopologyRequest{
					Preferred: []*CSITopology{
						{Segments: map[string]string{"rack": "R3"}},
					},
				},
			},
			expected: "volume topology request update was not compatible with existing topology",
		},
		{
			name: "valid update",
			v: &CSIVolume{
				Topologies: []*CSITopology{
					{Segments: map[string]string{"rack": "R1"}},
					{Segments: map[string]string{"rack": "R2"}},
				},
				AccessMode:     CSIVolumeAccessModeMultiNodeReader,
				AttachmentMode: CSIVolumeAttachmentModeFilesystem,
				MountOptions: &CSIMountOptions{
					FSType:     "ext4",
					MountFlags: []string{"noatime"},
				},
				RequestedTopologies: &CSITopologyRequest{
					Required: []*CSITopology{
						{Segments: map[string]string{"rack": "R1"}},
					},
					Preferred: []*CSITopology{
						{Segments: map[string]string{"rack": "R2"}},
					},
				},
			},
			update: &CSIVolume{
				Topologies: []*CSITopology{
					{Segments: map[string]string{"rack": "R1"}},
					{Segments: map[string]string{"rack": "R2"}},
				},
				MountOptions: &CSIMountOptions{
					FSType:     "ext4",
					MountFlags: []string{"noatime"},
				},
				RequestedTopologies: &CSITopologyRequest{
					Required: []*CSITopology{
						{Segments: map[string]string{"rack": "R1"}},
					},
					Preferred: []*CSITopology{
						{Segments: map[string]string{"rack": "R2"}},
					},
				},
				RequestedCapabilities: []*CSIVolumeCapability{
					{
						AccessMode:     CSIVolumeAccessModeMultiNodeReader,
						AttachmentMode: CSIVolumeAttachmentModeFilesystem,
					},
					{
						AccessMode:     CSIVolumeAccessModeMultiNodeReader,
						AttachmentMode: CSIVolumeAttachmentModeFilesystem,
					},
				},
			},
		},
	}
	for _, tc := range testCases {
		tc := tc
		t.Run(tc.name, func(t *testing.T) {
			err := tc.v.Merge(tc.update)
			if tc.expected == "" {
				require.NoError(t, err)
			} else {
				if tc.expectFn != nil {
					tc.expectFn(t, tc.v)
				}
				require.Error(t, err, tc.expected)
				require.Contains(t, err.Error(), tc.expected)
			}
		})
	}
}

func TestCSIPluginJobs(t *testing.T) {
	ci.Parallel(t)

	plug := NewCSIPlugin("foo", 1000)
	controller := &Job{
		ID:   "job",
		Type: "service",
		TaskGroups: []*TaskGroup{{
			Name:  "foo",
			Count: 11,
			Tasks: []*Task{{
				CSIPluginConfig: &TaskCSIPluginConfig{
					ID:   "foo",
					Type: CSIPluginTypeController,
				},
			}},
		}},
	}

	summary := &JobSummary{}

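	// for a service job, the expected controller count comes from the task
	// group's Count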
	plug.AddJob(controller, summary)
	require.Equal(t, 11, plug.ControllersExpected)

	// New job id & make it a system node plugin job
	node := controller.Copy()
	node.ID = "bar"
	node.Type = "system"
	node.TaskGroups[0].Tasks[0].CSIPluginConfig.Type = CSIPluginTypeNode

	summary = &JobSummary{
		Summary: map[string]TaskGroupSummary{
			"foo": {
				Queued:   1,
				Running:  1,
				Starting: 1,
			},
		},
	}

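	// for a system job, the expected node count is derived from the summary
	// instead (1 queued + 1 running + 1 starting = 3)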
	plug.AddJob(node, summary)
	require.Equal(t, 3, plug.NodesExpected)

	plug.DeleteJob(node, summary)
	require.Equal(t, 0, plug.NodesExpected)
	require.Empty(t, plug.NodeJobs[""])

	plug.DeleteJob(controller, nil)
	require.Equal(t, 0, plug.ControllersExpected)
	require.Empty(t, plug.ControllerJobs[""])
}

func TestCSIPluginCleanup(t *testing.T) {
	ci.Parallel(t)

	plug := NewCSIPlugin("foo", 1000)
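	// register the same node as both controller and node, as a monolith
	// plugin would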
	plug.AddPlugin("n0", &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		ControllerInfo:           &CSIControllerInfo{},
	})

	plug.AddPlugin("n0", &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		NodeInfo:                 &CSINodeInfo{},
	})

	require.Equal(t, 1, plug.ControllersHealthy)
	require.Equal(t, 1, plug.NodesHealthy)

	err := plug.DeleteNode("n0")
	require.NoError(t, err)

	require.Equal(t, 0, plug.ControllersHealthy)
	require.Equal(t, 0, plug.NodesHealthy)

	require.Equal(t, 0, len(plug.Controllers))
	require.Equal(t, 0, len(plug.Nodes))
}

func TestDeleteNodeForType_Controller(t *testing.T) {
	ci.Parallel(t)

	info := &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		ControllerInfo:           &CSIControllerInfo{},
	}

	plug := NewCSIPlugin("foo", 1000)

	plug.Controllers["n0"] = info
	plug.ControllersHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeController)
	require.NoError(t, err)

	require.Equal(t, 0, plug.ControllersHealthy)
	require.Equal(t, 0, len(plug.Controllers))
}

func TestDeleteNodeForType_NilController(t *testing.T) {
	ci.Parallel(t)

	plug := NewCSIPlugin("foo", 1000)

	plug.Controllers["n0"] = nil
	plug.ControllersHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeController)
	require.Error(t, err)
	require.Equal(t, 1, len(plug.Controllers))

	_, ok := plug.Controllers["foo"]
	require.False(t, ok)
}

func TestDeleteNodeForType_Node(t *testing.T) {
	ci.Parallel(t)

	info := &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		NodeInfo:                 &CSINodeInfo{},
	}

	plug := NewCSIPlugin("foo", 1000)

	plug.Nodes["n0"] = info
	plug.NodesHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeNode)
	require.NoError(t, err)

	require.Equal(t, 0, plug.NodesHealthy)
	require.Equal(t, 0, len(plug.Nodes))
}

func TestDeleteNodeForType_NilNode(t *testing.T) {
	ci.Parallel(t)

	plug := NewCSIPlugin("foo", 1000)

	plug.Nodes["n0"] = nil
	plug.NodesHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeNode)
	require.Error(t, err)
	require.Equal(t, 1, len(plug.Nodes))

	_, ok := plug.Nodes["foo"]
	require.False(t, ok)
}

func TestDeleteNodeForType_Monolith(t *testing.T) {
	ci.Parallel(t)

	controllerInfo := &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		ControllerInfo:           &CSIControllerInfo{},
	}

	nodeInfo := &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		NodeInfo:                 &CSINodeInfo{},
	}

	plug := NewCSIPlugin("foo", 1000)

	plug.Controllers["n0"] = controllerInfo
	plug.ControllersHealthy = 1

	plug.Nodes["n0"] = nodeInfo
	plug.NodesHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeMonolith)
	require.NoError(t, err)

	require.Equal(t, 0, len(plug.Controllers))
	require.Equal(t, 0, len(plug.Nodes))

	_, ok := plug.Nodes["foo"]
	require.False(t, ok)

	_, ok = plug.Controllers["foo"]
	require.False(t, ok)
}

func TestDeleteNodeForType_Monolith_NilController(t *testing.T) {
	ci.Parallel(t)

	plug := NewCSIPlugin("foo", 1000)

	plug.Controllers["n0"] = nil
	plug.ControllersHealthy = 1

	nodeInfo := &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		NodeInfo:                 &CSINodeInfo{},
	}

	plug.Nodes["n0"] = nodeInfo
	plug.NodesHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeMonolith)
	require.Error(t, err)

	require.Equal(t, 1, len(plug.Controllers))
	require.Equal(t, 0, len(plug.Nodes))

	_, ok := plug.Nodes["foo"]
	require.False(t, ok)

	_, ok = plug.Controllers["foo"]
	require.False(t, ok)
}

func TestDeleteNodeForType_Monolith_NilNode(t *testing.T) {
	ci.Parallel(t)

	plug := NewCSIPlugin("foo", 1000)

	plug.Nodes["n0"] = nil
	plug.NodesHealthy = 1

	controllerInfo := &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		ControllerInfo:           &CSIControllerInfo{},
	}

	plug.Controllers["n0"] = controllerInfo
	plug.ControllersHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeMonolith)
	require.Error(t, err)

	require.Equal(t, 0, len(plug.Controllers))
	require.Equal(t, 1, len(plug.Nodes))

	_, ok := plug.Nodes["foo"]
	require.False(t, ok)

	_, ok = plug.Controllers["foo"]
	require.False(t, ok)
}