github.com/iqoqo/nomad@v0.11.3-0.20200911112621-d7021c74d101/nomad/volumewatcher/batcher_test.go

package volumewatcher

import (
	"context"
	"fmt"
	"sync"
	"testing"

	"github.com/hashicorp/nomad/helper/testlog"
	"github.com/hashicorp/nomad/nomad/mock"
	"github.com/hashicorp/nomad/nomad/state"
	"github.com/hashicorp/nomad/nomad/structs"
	"github.com/stretchr/testify/require"
)

// TestVolumeWatch_Batcher tests the update batching logic
func TestVolumeWatch_Batcher(t *testing.T) {
	t.Parallel()
	require := require.New(t)

	ctx, exitFn := context.WithCancel(context.Background())
	defer exitFn()

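	// the mock RPC server uses a real in-memory state store and the real
	// volume update batcher, and counts the RPC calls and claim upserts
	// it receives so the assertions below can check batching behavior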
	srv := &MockBatchingRPCServer{}
	srv.state = state.TestStateStore(t)
	srv.volumeUpdateBatcher = NewVolumeUpdateBatcher(CrossVolumeUpdateBatchDuration, srv, ctx)
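	// force the controller detach RPC to fail so that vol0's past claim
	// stops in the node-detached state, as asserted below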
	srv.nextCSIControllerDetachError = fmt.Errorf("some controller plugin error")

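	// a CSI plugin and a node that both test volumes will be attached to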
	plugin := mock.CSIPlugin()
	node := testNode(nil, plugin, srv.State())

	// because each watcher's updateClaims call blocks until the results
	// of its batch come back, we can only observe batching across
	// multiple volume watchers. create 2 volumes and a watcher for each.
	alloc0 := mock.Alloc()
	alloc0.ClientStatus = structs.AllocClientStatusComplete
	vol0 := testVolume(nil, plugin, alloc0, node.ID)
	w0 := &volumeWatcher{
		v:            vol0,
		rpc:          srv,
		state:        srv.State(),
		updateClaims: srv.UpdateClaims,
		logger:       testlog.HCLogger(t),
	}

	alloc1 := mock.Alloc()
	alloc1.ClientStatus = structs.AllocClientStatusComplete
	vol1 := testVolume(nil, plugin, alloc1, node.ID)
	w1 := &volumeWatcher{
		v:            vol1,
		rpc:          srv,
		state:        srv.State(),
		updateClaims: srv.UpdateClaims,
		logger:       testlog.HCLogger(t),
	}

	var wg sync.WaitGroup
	wg.Add(2)

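	// vol0 goes through a full reap, which exercises the node and
	// controller detach RPCs as well as a claim update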
	go func() {
		w0.volumeReapImpl(vol0)
		wg.Done()
	}()
	go func() {
		// send an artificial updateClaims call rather than doing a full
		// reap so that we can guarantee we exercise deduplication
		w1.updateClaims([]structs.CSIVolumeClaimRequest{
			{
				VolumeID: vol1.ID,
				WriteRequest: structs.WriteRequest{
					Namespace: vol1.Namespace,
				},
			},
			{
				VolumeID: vol1.ID,
				WriteRequest: structs.WriteRequest{
					Namespace: vol1.Namespace,
				},
			},
		})
		wg.Done()
	}()

	wg.Wait()

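	// vol0's reap made exactly one node detach and one controller detach
	// RPC, and each watcher made exactly one updateClaims call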
	require.Equal(structs.CSIVolumeClaimStateNodeDetached, vol0.PastClaims[alloc0.ID].State)
	require.Equal(1, srv.countCSINodeDetachVolume)
	require.Equal(1, srv.countCSIControllerDetachVolume)
	require.Equal(2, srv.countUpdateClaims)

	// note: it's technically possible that the goroutines under test get
	// de-scheduled and the two updates don't land in the same batch. that
	// seems very unlikely, so we assert both the lower bound and the
	// expected single batch; if this test ever flakes, the failing
	// assertion makes the cause clear.
	require.GreaterOrEqual(srv.countUpsertVolumeClaims, 1)
	require.Equal(1, srv.countUpsertVolumeClaims)
}