github.com/iqoqo/nomad@v0.11.3-0.20200911112621-d7021c74d101/nomad/volumewatcher/batcher.go

package volumewatcher

import (
	"context"
	"time"

	"github.com/hashicorp/nomad/nomad/structs"
)

// encoding a 100-claim batch is about 31K on the wire, which
// is a reasonable batch size
const maxBatchSize = 100

// VolumeUpdateBatcher is used to batch the updates for volume claims
type VolumeUpdateBatcher struct {
	// batchDuration is the batching duration
	batchDuration time.Duration

	// raft is used to actually commit the updates
	raft VolumeRaftEndpoints

	// workCh is used to pass claim updates to the batcher goroutine
	workCh chan *updateWrapper

	// ctx is used to exit the batcher goroutine
	ctx context.Context
}

// NewVolumeUpdateBatcher returns a VolumeUpdateBatcher that uses the
// passed raft endpoints to create the updates to volume claims, and
// stops the batcher when the passed context is canceled.
func NewVolumeUpdateBatcher(batchDuration time.Duration, raft VolumeRaftEndpoints, ctx context.Context) *VolumeUpdateBatcher {
	b := &VolumeUpdateBatcher{
		batchDuration: batchDuration,
		raft:          raft,
		ctx:           ctx,
		workCh:        make(chan *updateWrapper, 10),
	}

	go b.batcher()
	return b
}

// CreateUpdate batches the volume claim update and returns a future
// that can be used to track the completion of the batch. Note that we
// return only the *last* future if the claims get broken up across
// multiple batches, because only the last one carries useful
// information for the caller.
func (b *VolumeUpdateBatcher) CreateUpdate(claims []structs.CSIVolumeClaimRequest) *BatchFuture {
	wrapper := &updateWrapper{
		claims: claims,
		f:      make(chan *BatchFuture, 1),
	}

	b.workCh <- wrapper
	return <-wrapper.f
}
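
// exampleCreateUpdate is a hypothetical usage sketch, not part of the
// upstream file: it assumes the caller already has a VolumeRaftEndpoints
// implementation, a context tied to leadership, and a non-empty claims
// slice (an empty slice would get back a future that is never set). The
// batch window below is an arbitrary example value.
func exampleCreateUpdate(ctx context.Context, raft VolumeRaftEndpoints, claims []structs.CSIVolumeClaimRequest) (uint64, error) {
	batcher := NewVolumeUpdateBatcher(250*time.Millisecond, raft, ctx)

	// CreateUpdate returns the future for the last batch the claims
	// landed in; Results blocks until that batch has been committed.
	future := batcher.CreateUpdate(claims)
	return future.Results()
}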

// updateWrapper carries a set of claim updates and the channel on
// which the batcher returns the future for the last batch they land in
type updateWrapper struct {
	claims []structs.CSIVolumeClaimRequest
	f      chan *BatchFuture
}

// claimBatch holds the pending claim updates for a single raft batch,
// keyed by volume ID + namespace so updates can be deduplicated
type claimBatch struct {
	claims map[string]structs.CSIVolumeClaimRequest
	future *BatchFuture
}

// batcher is the long-lived batcher goroutine
func (b *VolumeUpdateBatcher) batcher() {

	// we track claimBatches rather than a slice of
	// CSIVolumeClaimBatchRequest so that we can deduplicate updates
	// for the same volume
	batches := []*claimBatch{{
		claims: make(map[string]structs.CSIVolumeClaimRequest),
		future: NewBatchFuture(),
	}}
	ticker := time.NewTicker(b.batchDuration)
	defer ticker.Stop()
	for {
		select {
		case <-b.ctx.Done():
			// note: we can't flush here because we're likely no
			// longer the leader
			return
		case w := <-b.workCh:
			future := NewBatchFuture()

		NEXT_CLAIM:
			// de-dupe and store the claim update, and attach the future
			for _, upd := range w.claims {
				// claims are keyed by volume ID + namespace
				id := upd.VolumeID + upd.RequestNamespace()

				for _, batch := range batches {
					// first see if we can dedupe the update
					_, ok := batch.claims[id]
					if ok {
						batch.claims[id] = upd
						future = batch.future
						continue NEXT_CLAIM
					}
					// otherwise append to the first non-full batch
					if len(batch.claims) < maxBatchSize {
						batch.claims[id] = upd
						future = batch.future
						continue NEXT_CLAIM
					}
				}
				// all batches were full, so add a new batch
				newBatch := &claimBatch{
					claims: map[string]structs.CSIVolumeClaimRequest{id: upd},
					future: NewBatchFuture(),
				}
				batches = append(batches, newBatch)
				future = newBatch.future
			}

			// we send batches to raft FIFO, so we return the last
			// future to the caller so that it can wait until the
			// last batch has been sent
			w.f <- future

		case <-ticker.C:
			if len(batches) > 0 && len(batches[0].claims) > 0 {
				batch := batches[0]
				f := batch.future

				// Create the batch request for the oldest batch
				req := structs.CSIVolumeClaimBatchRequest{}
				for _, claim := range batch.claims {
					req.Claims = append(req.Claims, claim)
				}

				// Commit the claims via raft. Note that the argument
				// to Set is evaluated here in the batcher goroutine,
				// so batches reach raft in FIFO order; only Set itself
				// runs asynchronously.
				go f.Set(b.raft.UpsertVolumeClaims(&req))

				// Remove the oldest batch from the head of the list
				batches = batches[1:]
			}
		}
	}
}
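
// sameBatchEntry is a hypothetical sketch, not part of the upstream
// file, that makes the dedup rule above explicit: two claim updates
// share a batch entry exactly when their volume ID + namespace keys
// match, in which case the later update overwrites the earlier one.
func sameBatchEntry(a, b structs.CSIVolumeClaimRequest) bool {
	return a.VolumeID+a.RequestNamespace() == b.VolumeID+b.RequestNamespace()
}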

// BatchFuture is a future that can be used to retrieve the index for
// the update or any error in the update process
type BatchFuture struct {
	index  uint64
	err    error
	waitCh chan struct{}
}

// NewBatchFuture returns a new BatchFuture
func NewBatchFuture() *BatchFuture {
	return &BatchFuture{
		waitCh: make(chan struct{}),
	}
}

// Set sets the results of the future, unblocking any client. It must
// be called at most once; a second call would panic closing the
// already-closed wait channel.
func (f *BatchFuture) Set(index uint64, err error) {
	f.index = index
	f.err = err
	close(f.waitCh)
}

// Results returns the raft index of the update and any error, blocking
// until Set has been called.
func (f *BatchFuture) Results() (uint64, error) {
	<-f.waitCh
	return f.index, f.err
}
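
// exampleBatchFuture is a hypothetical sketch, not part of the upstream
// file, showing the future's handshake: a single producer calls Set once
// with the result of a raft apply, and any number of consumers block in
// Results until then. The index value below is an arbitrary example.
func exampleBatchFuture() (uint64, error) {
	f := NewBatchFuture()
	go f.Set(42, nil)  // producer publishes the index and error, closing waitCh
	return f.Results() // consumer blocks until Set has run
}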