github.com/hernad/nomad@v1.6.112/nomad/structs/csi_test.go

// Copyright (c) HashiCorp, Inc.
// SPDX-License-Identifier: MPL-2.0

package structs

import (
	"reflect"
	"testing"
	"time"

	"github.com/hernad/nomad/ci"
	"github.com/shoenig/test/must"
	"github.com/stretchr/testify/require"
)

// TestCSIVolumeClaim ensures that volume claim workflows work as expected.
func TestCSIVolumeClaim(t *testing.T) {
	ci.Parallel(t)

	vol := NewCSIVolume("vol0", 0)
	vol.Schedulable = true
	vol.AccessMode = CSIVolumeAccessModeUnknown
	vol.AttachmentMode = CSIVolumeAttachmentModeUnknown
	vol.RequestedCapabilities = []*CSIVolumeCapability{
		{
			AccessMode:     CSIVolumeAccessModeMultiNodeSingleWriter,
			AttachmentMode: CSIVolumeAttachmentModeFilesystem,
		},
		{
			AccessMode:     CSIVolumeAccessModeMultiNodeReader,
			AttachmentMode: CSIVolumeAttachmentModeFilesystem,
		},
	}

	alloc1 := &Allocation{ID: "a1", Namespace: "n", JobID: "j"}
	alloc2 := &Allocation{ID: "a2", Namespace: "n", JobID: "j"}
	alloc3 := &Allocation{ID: "a3", Namespace: "n", JobID: "j3"}
	claim := &CSIVolumeClaim{
		AllocationID: alloc1.ID,
		NodeID:       "foo",
		State:        CSIVolumeClaimStateTaken,
	}
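	// The steps below walk this claim through its lifecycle: it is
	// CSIVolumeClaimStateTaken while an allocation holds it,
	// CSIVolumeClaimStateUnpublishing once the allocation is done with the
	// volume, and CSIVolumeClaimStateReadyToFree when it can be dropped.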

	// claim a read and ensure we are still schedulable
	claim.Mode = CSIVolumeClaimRead
	claim.AccessMode = CSIVolumeAccessModeMultiNodeReader
	claim.AttachmentMode = CSIVolumeAttachmentModeFilesystem
	require.NoError(t, vol.Claim(claim, alloc1))
	require.True(t, vol.ReadSchedulable())
	require.False(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 2)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[1].AccessMode)

	// claim a write and ensure we can't upgrade capabilities while the
	// read claim is still active.
	claim.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter
	claim.Mode = CSIVolumeClaimWrite
	claim.AllocationID = alloc2.ID
	require.EqualError(t, vol.Claim(claim, alloc2), ErrCSIVolumeUnschedulable.Error())
	require.True(t, vol.ReadSchedulable())
	require.False(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// release our last claim, including unpublish workflow
	claim.AllocationID = alloc1.ID
	claim.Mode = CSIVolumeClaimRead
	claim.State = CSIVolumeClaimStateReadyToFree
	vol.Claim(claim, nil)
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 0)
	require.Equal(t, CSIVolumeAccessModeUnknown, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeUnknown, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 2)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[1].AccessMode)

	// claim a write on the now-unclaimed volume and ensure we can upgrade
	// capabilities so long as they're in our RequestedCapabilities.
	claim.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter
	claim.Mode = CSIVolumeClaimWrite
	claim.State = CSIVolumeClaimStateTaken
	claim.AllocationID = alloc2.ID
	require.NoError(t, vol.Claim(claim, alloc2))
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 2)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[1].AccessMode)

	// make the claim again to ensure it's idempotent, and that the volume's
	// access mode is unchanged.
	require.NoError(t, vol.Claim(claim, alloc2))
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 1)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// claim a read and ensure we are still schedulable and that the access
	// mode is unchanged
	claim.AllocationID = alloc1.ID
	claim.Mode = CSIVolumeClaimRead
	claim.AccessMode = CSIVolumeAccessModeMultiNodeReader
	claim.AttachmentMode = CSIVolumeAttachmentModeFilesystem
	require.NoError(t, vol.Claim(claim, alloc1))
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 1)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// ensure we can't change the attachment mode for a claimed volume
	claim.AttachmentMode = CSIVolumeAttachmentModeBlockDevice
	claim.AllocationID = alloc3.ID
	require.EqualError(t, vol.Claim(claim, alloc3),
		"cannot change attachment mode of claimed volume")
	claim.AttachmentMode = CSIVolumeAttachmentModeFilesystem

	// denormalize-on-read (simulating a volume we've gotten out of the state
	// store) and then ensure we cannot claim another write
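	// The single-writer slot is already held by alloc2, both as a claim and
	// (after denormalizing) as a write allocation, so a second write claim
	// fails with ErrCSIVolumeMaxClaims.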
	vol.WriteAllocs[alloc2.ID] = alloc2
	claim.Mode = CSIVolumeClaimWrite
	require.EqualError(t, vol.Claim(claim, alloc3), ErrCSIVolumeMaxClaims.Error())

	// release the write claim but ensure it doesn't free up write claims
	// until after we've unpublished
	claim.AllocationID = alloc2.ID
	claim.State = CSIVolumeClaimStateUnpublishing
	vol.Claim(claim, nil)
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 1) // claim still exists until we're done
	require.Len(t, vol.PastClaims, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// complete the unpublish workflow
	claim.State = CSIVolumeClaimStateReadyToFree
	vol.Claim(claim, nil)
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.True(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.WriteAllocs, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// release our last claim, including unpublish workflow
	claim.AllocationID = alloc1.ID
	claim.Mode = CSIVolumeClaimRead
	vol.Claim(claim, nil)
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 0)
	require.Equal(t, CSIVolumeAccessModeUnknown, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeUnknown, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 2)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[1].AccessMode)
}

// TestCSIVolumeClaim_CompatOldClaims ensures that a volume created before
// v1.1.0, with claims that predate v1.1.0, still works.
//
// COMPAT(1.3.0): safe to remove this test, but not the code, for 1.3.0
func TestCSIVolumeClaim_CompatOldClaims(t *testing.T) {
	ci.Parallel(t)

	vol := NewCSIVolume("vol0", 0)
	vol.Schedulable = true
	vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter
	vol.AttachmentMode = CSIVolumeAttachmentModeFilesystem

	alloc1 := &Allocation{ID: "a1", Namespace: "n", JobID: "j"}
	alloc2 := &Allocation{ID: "a2", Namespace: "n", JobID: "j"}
	alloc3 := &Allocation{ID: "a3", Namespace: "n", JobID: "j3"}
	claim := &CSIVolumeClaim{
		AllocationID: alloc1.ID,
		NodeID:       "foo",
		State:        CSIVolumeClaimStateTaken,
	}
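	// note this claim never sets AccessMode or AttachmentMode: claims
	// written before v1.1.0 carried no per-claim capabilities, so the
	// volume's own top-level fields stand in for them here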

	// claim a read and ensure we are still schedulable
	claim.Mode = CSIVolumeClaimRead
	require.NoError(t, vol.Claim(claim, alloc1))
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.True(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)

	// claim a write and ensure we no longer have free write claims
	claim.Mode = CSIVolumeClaimWrite
	claim.AllocationID = alloc2.ID
	require.NoError(t, vol.Claim(claim, alloc2))
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 1)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// denormalize-on-read (simulating a volume we've gotten out of the state
	// store) and then ensure we cannot claim another write
	vol.WriteAllocs[alloc2.ID] = alloc2
	claim.AllocationID = alloc3.ID
	require.EqualError(t, vol.Claim(claim, alloc3), ErrCSIVolumeMaxClaims.Error())

	// release the write claim but ensure it doesn't free up write claims
	// until after we've unpublished
	claim.AllocationID = alloc2.ID
	claim.State = CSIVolumeClaimStateUnpublishing
	vol.Claim(claim, nil)
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 1) // claim still exists until we're done
	require.Len(t, vol.PastClaims, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// complete the unpublish workflow
	claim.State = CSIVolumeClaimStateReadyToFree
	vol.Claim(claim, nil)
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.True(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.WriteAllocs, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// release our last claim, including unpublish workflow
	claim.AllocationID = alloc1.ID
	claim.Mode = CSIVolumeClaimRead
	vol.Claim(claim, nil)
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 0)
	require.Equal(t, CSIVolumeAccessModeUnknown, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeUnknown, vol.AttachmentMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)
}

// TestCSIVolumeClaim_CompatNewClaimsOK ensures that a volume created
// before v1.1.0 is compatible with new claims.
//
// COMPAT(1.3.0): safe to remove this test, but not the code, for 1.3.0
func TestCSIVolumeClaim_CompatNewClaimsOK(t *testing.T) {
	ci.Parallel(t)

	vol := NewCSIVolume("vol0", 0)
	vol.Schedulable = true
	vol.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter
	vol.AttachmentMode = CSIVolumeAttachmentModeFilesystem

	alloc1 := &Allocation{ID: "a1", Namespace: "n", JobID: "j"}
	alloc2 := &Allocation{ID: "a2", Namespace: "n", JobID: "j"}
	alloc3 := &Allocation{ID: "a3", Namespace: "n", JobID: "j3"}
	claim := &CSIVolumeClaim{
		AllocationID: alloc1.ID,
		NodeID:       "foo",
		State:        CSIVolumeClaimStateTaken,
	}

	// claim a read and ensure we are still schedulable
	claim.Mode = CSIVolumeClaimRead
	claim.AccessMode = CSIVolumeAccessModeMultiNodeReader
	claim.AttachmentMode = CSIVolumeAttachmentModeFilesystem
	require.NoError(t, vol.Claim(claim, alloc1))
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.True(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)

	// claim a write and ensure we no longer have free write claims
	claim.Mode = CSIVolumeClaimWrite
	claim.AllocationID = alloc2.ID
	require.NoError(t, vol.Claim(claim, alloc2))
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 1)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// ensure we can't change the attachment mode for a claimed volume
	claim.AttachmentMode = CSIVolumeAttachmentModeBlockDevice
	require.EqualError(t, vol.Claim(claim, alloc2),
		"cannot change attachment mode of claimed volume")
	claim.AttachmentMode = CSIVolumeAttachmentModeFilesystem

	// denormalize-on-read (simulating a volume we've gotten out of the state
	// store) and then ensure we cannot claim another write
	vol.WriteAllocs[alloc2.ID] = alloc2
	claim.AllocationID = alloc3.ID
	require.EqualError(t, vol.Claim(claim, alloc3), ErrCSIVolumeMaxClaims.Error())

	// release the write claim but ensure it doesn't free up write claims
	// until after we've unpublished
	claim.AllocationID = alloc2.ID
	claim.State = CSIVolumeClaimStateUnpublishing
	vol.Claim(claim, nil)
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 1) // claim still exists until we're done
	require.Len(t, vol.PastClaims, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// complete the unpublish workflow
	claim.State = CSIVolumeClaimStateReadyToFree
	vol.Claim(claim, nil)
	require.True(t, vol.ReadSchedulable())
	require.True(t, vol.WriteSchedulable())
	require.True(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.WriteAllocs, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)

	// release our last claim, including unpublish workflow
	claim.AllocationID = alloc1.ID
	claim.Mode = CSIVolumeClaimRead
	vol.Claim(claim, nil)
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 0)
	require.Equal(t, CSIVolumeAccessModeUnknown, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeUnknown, vol.AttachmentMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeSingleWriter,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)
}

// TestCSIVolumeClaim_CompatNewClaimsNoUpgrade ensures that a volume created
// before v1.1.0 is compatible with new claims, but prevents unexpected
// capability upgrades.
//
// COMPAT(1.3.0): safe to remove this test, but not the code, for 1.3.0
func TestCSIVolumeClaim_CompatNewClaimsNoUpgrade(t *testing.T) {
	ci.Parallel(t)

	vol := NewCSIVolume("vol0", 0)
	vol.Schedulable = true
	vol.AccessMode = CSIVolumeAccessModeMultiNodeReader
	vol.AttachmentMode = CSIVolumeAttachmentModeFilesystem

	alloc1 := &Allocation{ID: "a1", Namespace: "n", JobID: "j"}
	alloc2 := &Allocation{ID: "a2", Namespace: "n", JobID: "j"}
	claim := &CSIVolumeClaim{
		AllocationID: alloc1.ID,
		NodeID:       "foo",
		State:        CSIVolumeClaimStateTaken,
	}

	// claim a read and ensure we are still schedulable
	claim.Mode = CSIVolumeClaimRead
	claim.AccessMode = CSIVolumeAccessModeMultiNodeReader
	claim.AttachmentMode = CSIVolumeAttachmentModeFilesystem
	require.NoError(t, vol.Claim(claim, alloc1))
	require.True(t, vol.ReadSchedulable())
	require.False(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)

	// claim a write and ensure we can't upgrade capabilities.
	claim.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter
	claim.Mode = CSIVolumeClaimWrite
	claim.AllocationID = alloc2.ID
	require.EqualError(t, vol.Claim(claim, alloc2), ErrCSIVolumeUnschedulable.Error())
	require.True(t, vol.ReadSchedulable())
	require.False(t, vol.WriteSchedulable())
	require.False(t, vol.HasFreeWriteClaims())
	require.Len(t, vol.ReadClaims, 1)
	require.Len(t, vol.WriteClaims, 0)
	require.Len(t, vol.PastClaims, 0)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem, vol.AttachmentMode)
	require.Len(t, vol.RequestedCapabilities, 1)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)

	// release our last claim, including unpublish workflow
	claim.AllocationID = alloc1.ID
	claim.Mode = CSIVolumeClaimRead
	claim.State = CSIVolumeClaimStateReadyToFree
	vol.Claim(claim, nil)
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 0)
	require.Equal(t, CSIVolumeAccessModeUnknown, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeUnknown, vol.AttachmentMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)

	// claim a write on the now-unclaimed volume and ensure we still can't
	// upgrade capabilities.
	claim.AccessMode = CSIVolumeAccessModeMultiNodeSingleWriter
	claim.Mode = CSIVolumeClaimWrite
	claim.State = CSIVolumeClaimStateTaken
	claim.AllocationID = alloc2.ID
	require.EqualError(t, vol.Claim(claim, alloc2), ErrCSIVolumeUnschedulable.Error())
	require.Len(t, vol.ReadClaims, 0)
	require.Len(t, vol.WriteClaims, 0)
	require.Equal(t, CSIVolumeAccessModeUnknown, vol.AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeUnknown, vol.AttachmentMode)
	require.Equal(t, CSIVolumeAccessModeMultiNodeReader,
		vol.RequestedCapabilities[0].AccessMode)
	require.Equal(t, CSIVolumeAttachmentModeFilesystem,
		vol.RequestedCapabilities[0].AttachmentMode)
}

func TestVolume_Copy(t *testing.T) {
	ci.Parallel(t)

	a1 := MockAlloc()
	a2 := MockAlloc()
	a3 := MockAlloc()
	c1 := &CSIVolumeClaim{
		AllocationID:   a1.ID,
		NodeID:         a1.NodeID,
		ExternalNodeID: "c1",
		Mode:           CSIVolumeClaimRead,
		State:          CSIVolumeClaimStateTaken,
	}
	c2 := &CSIVolumeClaim{
		AllocationID:   a2.ID,
		NodeID:         a2.NodeID,
		ExternalNodeID: "c2",
		Mode:           CSIVolumeClaimRead,
		State:          CSIVolumeClaimStateNodeDetached,
	}
	c3 := &CSIVolumeClaim{
		AllocationID:   a3.ID,
		NodeID:         a3.NodeID,
		ExternalNodeID: "c3",
		Mode:           CSIVolumeClaimWrite,
		State:          CSIVolumeClaimStateTaken,
	}

	v1 := &CSIVolume{
		ID:             "vol1",
		Name:           "vol1",
		ExternalID:     "vol-abcdef",
		Namespace:      "default",
		Topologies:     []*CSITopology{{Segments: map[string]string{"AZ1": "123"}}},
		AccessMode:     CSIVolumeAccessModeSingleNodeWriter,
		AttachmentMode: CSIVolumeAttachmentModeBlockDevice,
		MountOptions:   &CSIMountOptions{FSType: "ext4", MountFlags: []string{"ro", "noatime"}},
		Secrets:        CSISecrets{"mysecret": "myvalue"},
		Parameters:     map[string]string{"param1": "val1"},
		Context:        map[string]string{"ctx1": "val1"},

		ReadAllocs:  map[string]*Allocation{a1.ID: a1, a2.ID: nil},
		WriteAllocs: map[string]*Allocation{a3.ID: a3},

		ReadClaims:  map[string]*CSIVolumeClaim{a1.ID: c1, a2.ID: c2},
		WriteClaims: map[string]*CSIVolumeClaim{a3.ID: c3},
		PastClaims:  map[string]*CSIVolumeClaim{},

		Schedulable:         true,
		PluginID:            "moosefs",
		Provider:            "n/a",
		ProviderVersion:     "1.0",
		ControllerRequired:  true,
		ControllersHealthy:  2,
		ControllersExpected: 2,
		NodesHealthy:        4,
		NodesExpected:       5,
		ResourceExhausted:   time.Now(),
	}

	v2 := v1.Copy()
	if !reflect.DeepEqual(v1, v2) {
		t.Fatalf("Copy() returned an unequal Volume; got %#v; want %#v", v2, v1)
	}

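	// mutate the original; none of these changes should be visible in the
	// copy if Copy has deep-copied its nested maps and structs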
	v1.ReadClaims[a1.ID].State = CSIVolumeClaimStateReadyToFree
	v1.ReadAllocs[a2.ID] = a2
	v1.WriteAllocs[a3.ID].ClientStatus = AllocClientStatusComplete
	v1.MountOptions.FSType = "zfs"

	if v2.ReadClaims[a1.ID].State == CSIVolumeClaimStateReadyToFree {
		t.Fatalf("Volume.Copy() failed; changes to original ReadClaims seen in copy")
	}
	if v2.ReadAllocs[a2.ID] != nil {
		t.Fatalf("Volume.Copy() failed; changes to original ReadAllocs seen in copy")
	}
	if v2.WriteAllocs[a3.ID].ClientStatus == AllocClientStatusComplete {
		t.Fatalf("Volume.Copy() failed; changes to original WriteAllocs seen in copy")
	}
	if v2.MountOptions.FSType == "zfs" {
		t.Fatalf("Volume.Copy() failed; changes to original MountOptions seen in copy")
	}
}

func TestCSIVolume_Validate(t *testing.T) {
	ci.Parallel(t)

	vol := &CSIVolume{
		ID:         "test",
		PluginID:   "test",
		SnapshotID: "test-snapshot",
		CloneID:    "test-clone",
		RequestedTopologies: &CSITopologyRequest{
			Required: []*CSITopology{{}, {}},
		},
	}
	err := vol.Validate()
	require.EqualError(t, err, "validation: missing namespace, only one of snapshot_id and clone_id is allowed, must include at least one capability block, required topology is missing segments field, required topology is missing segments field")
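
	// For contrast, a minimal sketch of a volume we would expect to pass
	// Validate, assuming the only problems are the ones flagged in the
	// error above (the field values here are illustrative):
	//
	//	vol := &CSIVolume{
	//		ID:        "test",
	//		PluginID:  "test",
	//		Namespace: "default",
	//		RequestedCapabilities: []*CSIVolumeCapability{{
	//			AccessMode:     CSIVolumeAccessModeSingleNodeWriter,
	//			AttachmentMode: CSIVolumeAttachmentModeFilesystem,
	//		}},
	//	}
	//	require.NoError(t, vol.Validate())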
}

func TestCSIVolume_Merge(t *testing.T) {
	ci.Parallel(t)

	testCases := []struct {
		name     string
		v        *CSIVolume
		update   *CSIVolume
		expected string
		expectFn func(t *testing.T, v *CSIVolume)
	}{
		{
			name: "invalid capacity update",
			v:    &CSIVolume{Capacity: 100},
			update: &CSIVolume{
				RequestedCapacityMax: 300, RequestedCapacityMin: 200},
			expected: "volume requested capacity update was not compatible with existing capacity",
			expectFn: func(t *testing.T, v *CSIVolume) {
				// compare as int64 so a type mismatch can't mask a real change
				require.NotEqual(t, int64(300), v.RequestedCapacityMax)
				require.NotEqual(t, int64(200), v.RequestedCapacityMin)
			},
		},
		{
			name: "invalid capability update",
			v: &CSIVolume{
				AccessMode:     CSIVolumeAccessModeMultiNodeReader,
				AttachmentMode: CSIVolumeAttachmentModeFilesystem,
			},
			update: &CSIVolume{
				RequestedCapabilities: []*CSIVolumeCapability{
					{
						AccessMode:     CSIVolumeAccessModeSingleNodeWriter,
						AttachmentMode: CSIVolumeAttachmentModeFilesystem,
					},
				},
			},
			expected: "volume requested capabilities update was not compatible with existing capability in use",
		},
		{
			name: "invalid topology update - removed",
			v: &CSIVolume{
				RequestedTopologies: &CSITopologyRequest{
					Required: []*CSITopology{
						{Segments: map[string]string{"rack": "R1"}},
					},
				},
				Topologies: []*CSITopology{
					{Segments: map[string]string{"rack": "R1"}},
				},
			},
			update:   &CSIVolume{},
			expected: "volume topology request update was not compatible with existing topology",
			expectFn: func(t *testing.T, v *CSIVolume) {
				require.Len(t, v.Topologies, 1)
			},
		},
		{
			name: "invalid topology requirement added",
			v: &CSIVolume{
				Topologies: []*CSITopology{
					{Segments: map[string]string{"rack": "R1"}},
				},
			},
			update: &CSIVolume{
				RequestedTopologies: &CSITopologyRequest{
					Required: []*CSITopology{
						{Segments: map[string]string{"rack": "R1"}},
						{Segments: map[string]string{"rack": "R3"}},
					},
				},
			},
			expected: "volume topology request update was not compatible with existing topology",
			expectFn: func(t *testing.T, v *CSIVolume) {
				require.Len(t, v.Topologies, 1)
				require.Equal(t, "R1", v.Topologies[0].Segments["rack"])
			},
		},
		{
			name: "invalid topology preference removed",
			v: &CSIVolume{
				Topologies: []*CSITopology{
					{Segments: map[string]string{"rack": "R1"}},
				},
				RequestedTopologies: &CSITopologyRequest{
					Preferred: []*CSITopology{
						{Segments: map[string]string{"rack": "R1"}},
						{Segments: map[string]string{"rack": "R3"}},
					},
				},
			},
			update: &CSIVolume{
				Topologies: []*CSITopology{
					{Segments: map[string]string{"rack": "R1"}},
				},
				RequestedTopologies: &CSITopologyRequest{
					Preferred: []*CSITopology{
						{Segments: map[string]string{"rack": "R3"}},
					},
				},
			},
			expected: "volume topology request update was not compatible with existing topology",
		},
		{
			name: "valid update",
			v: &CSIVolume{
				Topologies: []*CSITopology{
					{Segments: map[string]string{"rack": "R1"}},
					{Segments: map[string]string{"rack": "R2"}},
				},
				AccessMode:     CSIVolumeAccessModeMultiNodeReader,
				AttachmentMode: CSIVolumeAttachmentModeFilesystem,
				MountOptions: &CSIMountOptions{
					FSType:     "ext4",
					MountFlags: []string{"noatime"},
				},
				RequestedTopologies: &CSITopologyRequest{
					Required: []*CSITopology{
						{Segments: map[string]string{"rack": "R1"}},
					},
					Preferred: []*CSITopology{
						{Segments: map[string]string{"rack": "R2"}},
					},
				},
			},
			update: &CSIVolume{
				Topologies: []*CSITopology{
					{Segments: map[string]string{"rack": "R1"}},
					{Segments: map[string]string{"rack": "R2"}},
				},
				MountOptions: &CSIMountOptions{
					FSType:     "ext4",
					MountFlags: []string{"noatime"},
				},
				RequestedTopologies: &CSITopologyRequest{
					Required: []*CSITopology{
						{Segments: map[string]string{"rack": "R1"}},
					},
					Preferred: []*CSITopology{
						{Segments: map[string]string{"rack": "R2"}},
					},
				},
				RequestedCapabilities: []*CSIVolumeCapability{
					{
						AccessMode:     CSIVolumeAccessModeMultiNodeReader,
						AttachmentMode: CSIVolumeAttachmentModeFilesystem,
					},
					{
						AccessMode:     CSIVolumeAccessModeMultiNodeReader,
						AttachmentMode: CSIVolumeAttachmentModeFilesystem,
					},
				},
			},
		},
	}
	for _, tc := range testCases {
		t.Run(tc.name, func(t *testing.T) {
			err := tc.v.Merge(tc.update)
			if tc.expected == "" {
				require.NoError(t, err)
			} else {
				if tc.expectFn != nil {
					tc.expectFn(t, tc.v)
				}
				require.Error(t, err)
				require.Contains(t, err.Error(), tc.expected)
			}
		})
	}
}

func TestCSIPluginJobs(t *testing.T) {
	ci.Parallel(t)

	plug := NewCSIPlugin("foo", 1000)
	controller := &Job{
		ID:   "job",
		Type: "service",
		TaskGroups: []*TaskGroup{{
			Name:  "foo",
			Count: 11,
			Tasks: []*Task{{
				CSIPluginConfig: &TaskCSIPluginConfig{
					ID:   "foo",
					Type: CSIPluginTypeController,
				},
			}},
		}},
	}

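	// with an empty summary carrying no allocation counts, the expected
	// controller count here matches the task group Count (11)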
	summary := &JobSummary{}

	plug.AddJob(controller, summary)
	require.Equal(t, 11, plug.ControllersExpected)

	// new job ID, and make it a system node plugin job
	node := controller.Copy()
	node.ID = "bar"
	node.Type = "system"
	node.TaskGroups[0].Tasks[0].CSIPluginConfig.Type = CSIPluginTypeNode

	summary = &JobSummary{
		Summary: map[string]TaskGroupSummary{
			"foo": {
				Queued:   1,
				Running:  1,
				Starting: 1,
			},
		},
	}

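	// for the system node plugin job, the expected node count here comes
	// from the summary's allocation counts (1 queued + 1 running + 1
	// starting = 3) rather than from the task group Count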
	plug.AddJob(node, summary)
	require.Equal(t, 3, plug.NodesExpected)

	plug.DeleteJob(node, summary)
	require.Equal(t, 0, plug.NodesExpected)
	require.Empty(t, plug.NodeJobs[""])

	plug.DeleteJob(controller, nil)
	require.Equal(t, 0, plug.ControllersExpected)
	require.Empty(t, plug.ControllerJobs[""])
}

func TestCSIPluginCleanup(t *testing.T) {
	ci.Parallel(t)

	plug := NewCSIPlugin("foo", 1000)
	plug.AddPlugin("n0", &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		ControllerInfo:           &CSIControllerInfo{},
	})

	plug.AddPlugin("n0", &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		NodeInfo:                 &CSINodeInfo{},
	})

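	// registering both a controller and a node fingerprint from the same
	// client node models a monolith plugin; deleting the node should drop
	// both entries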
	require.Equal(t, 1, plug.ControllersHealthy)
	require.Equal(t, 1, plug.NodesHealthy)

	err := plug.DeleteNode("n0")
	require.NoError(t, err)

	require.Equal(t, 0, plug.ControllersHealthy)
	require.Equal(t, 0, plug.NodesHealthy)

	require.Equal(t, 0, len(plug.Controllers))
	require.Equal(t, 0, len(plug.Nodes))
}

func TestDeleteNodeForType_Controller(t *testing.T) {
	ci.Parallel(t)

	info := &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		ControllerInfo:           &CSIControllerInfo{},
	}

	plug := NewCSIPlugin("foo", 1000)

	plug.Controllers["n0"] = info
	plug.ControllersHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeController)
	require.NoError(t, err)

	require.Equal(t, 0, plug.ControllersHealthy)
	require.Equal(t, 0, len(plug.Controllers))
}

func TestDeleteNodeForType_NilController(t *testing.T) {
	ci.Parallel(t)

	plug := NewCSIPlugin("foo", 1000)

	plug.Controllers["n0"] = nil
	plug.ControllersHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeController)
	require.Error(t, err)
	require.Equal(t, 1, len(plug.Controllers))

	_, ok := plug.Controllers["foo"]
	require.False(t, ok)
}

func TestDeleteNodeForType_Node(t *testing.T) {
	ci.Parallel(t)

	info := &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		NodeInfo:                 &CSINodeInfo{},
	}

	plug := NewCSIPlugin("foo", 1000)

	plug.Nodes["n0"] = info
	plug.NodesHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeNode)
	require.NoError(t, err)

	require.Equal(t, 0, plug.NodesHealthy)
	require.Equal(t, 0, len(plug.Nodes))
}

func TestDeleteNodeForType_NilNode(t *testing.T) {
	ci.Parallel(t)

	plug := NewCSIPlugin("foo", 1000)

	plug.Nodes["n0"] = nil
	plug.NodesHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeNode)
	require.Error(t, err)
	require.Equal(t, 1, len(plug.Nodes))

	_, ok := plug.Nodes["foo"]
	require.False(t, ok)
}

func TestDeleteNodeForType_Monolith(t *testing.T) {
	ci.Parallel(t)

	controllerInfo := &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		ControllerInfo:           &CSIControllerInfo{},
	}

	nodeInfo := &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		NodeInfo:                 &CSINodeInfo{},
	}

	plug := NewCSIPlugin("foo", 1000)

	plug.Controllers["n0"] = controllerInfo
	plug.ControllersHealthy = 1

	plug.Nodes["n0"] = nodeInfo
	plug.NodesHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeMonolith)
	require.NoError(t, err)

	require.Equal(t, 0, len(plug.Controllers))
	require.Equal(t, 0, len(plug.Nodes))

	_, ok := plug.Nodes["foo"]
	require.False(t, ok)

	_, ok = plug.Controllers["foo"]
	require.False(t, ok)
}

func TestDeleteNodeForType_Monolith_NilController(t *testing.T) {
	ci.Parallel(t)

	plug := NewCSIPlugin("foo", 1000)

	plug.Controllers["n0"] = nil
	plug.ControllersHealthy = 1

	nodeInfo := &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		NodeInfo:                 &CSINodeInfo{},
	}

	plug.Nodes["n0"] = nodeInfo
	plug.NodesHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeMonolith)
	require.Error(t, err)

	require.Equal(t, 1, len(plug.Controllers))
	require.Equal(t, 0, len(plug.Nodes))

	_, ok := plug.Nodes["foo"]
	require.False(t, ok)

	_, ok = plug.Controllers["foo"]
	require.False(t, ok)
}

func TestDeleteNodeForType_Monolith_NilNode(t *testing.T) {
	ci.Parallel(t)

	plug := NewCSIPlugin("foo", 1000)

	plug.Nodes["n0"] = nil
	plug.NodesHealthy = 1

	controllerInfo := &CSIInfo{
		PluginID:                 "foo",
		AllocID:                  "a0",
		Healthy:                  true,
		Provider:                 "foo-provider",
		RequiresControllerPlugin: true,
		RequiresTopologies:       false,
		ControllerInfo:           &CSIControllerInfo{},
	}

	plug.Controllers["n0"] = controllerInfo
	plug.ControllersHealthy = 1

	err := plug.DeleteNodeForType("n0", CSIPluginTypeMonolith)
	require.Error(t, err)

	require.Equal(t, 0, len(plug.Controllers))
	require.Equal(t, 1, len(plug.Nodes))

	_, ok := plug.Nodes["foo"]
	require.False(t, ok)

	_, ok = plug.Controllers["foo"]
	require.False(t, ok)
}

func TestTaskCSIPluginConfig_Equal(t *testing.T) {
	ci.Parallel(t)

	must.Equal[*TaskCSIPluginConfig](t, nil, nil)
	must.NotEqual[*TaskCSIPluginConfig](t, nil, new(TaskCSIPluginConfig))

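	// each Tweak below mutates a single field on a copy of the base config;
	// must.StructEqual asserts the tweaked copy no longer Equals the
	// original, so every field listed here must be covered by Equal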
	must.StructEqual(t, &TaskCSIPluginConfig{
		ID:                  "abc123",
		Type:                CSIPluginTypeMonolith,
		MountDir:            "/opt/csi/mount",
		StagePublishBaseDir: "/base",
		HealthTimeout:       42 * time.Second,
	}, []must.Tweak[*TaskCSIPluginConfig]{{
		Field: "ID",
		Apply: func(c *TaskCSIPluginConfig) { c.ID = "def345" },
	}, {
		Field: "Type",
		Apply: func(c *TaskCSIPluginConfig) { c.Type = CSIPluginTypeNode },
	}, {
		Field: "MountDir",
		Apply: func(c *TaskCSIPluginConfig) { c.MountDir = "/csi" },
	}, {
		Field: "StagePublishBaseDir",
		Apply: func(c *TaskCSIPluginConfig) { c.StagePublishBaseDir = "/opt/base" },
	}, {
		Field: "HealthTimeout",
		Apply: func(c *TaskCSIPluginConfig) { c.HealthTimeout = 1 * time.Second },
	}})
}