github.com/keybase/client/go@v0.0.0-20240309051027-028f7c731f8b/kbfs/data/dirty_bcache.go

// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package data

import (
	"fmt"
	"sync"
	"time"

	"github.com/keybase/client/go/kbfs/idutil"
	"github.com/keybase/client/go/kbfs/kbfsblock"
	"github.com/keybase/client/go/kbfs/tlf"
	"github.com/keybase/client/go/libkb"
	"github.com/keybase/client/go/logger"
	"golang.org/x/net/context"
)

type dirtyBlockID struct {
	id       kbfsblock.ID
	refNonce kbfsblock.RefNonce
	branch   BranchName
}

type dirtyReq struct {
	respChan chan<- struct{}
	bytes    int64
	start    time.Time
	deadline time.Time
}

const (
	resetBufferCapTimeDefault = 5 * time.Minute
)

// DirtyBlockCacheStandard implements the DirtyBlockCache interface by
// storing blocks in an in-memory cache.  Dirty blocks are identified
// by their block ID, branch name, and reference nonce, since the same
// block may be forked and modified on different branches and under
// different references simultaneously.
//
// DirtyBlockCacheStandard controls how fast users can write into KBFS,
// and does so with a TCP-like slow-start algorithm that adjusts
// itself according to how fast bytes are synced to the server.
// Conceptually, there are two buffers:
//
//	syncBuf: The bytes that are currently syncing, or have finished
//	syncing, back to the servers.  Each TLF has only one sync at a
//	time, but multiple TLFs may be syncing at the same time.  We also
//	track how many bytes within this buffer have finished syncing.
//
//	waitBuf: The bytes that have not yet begun syncing to the
//	servers.  Again, this can be for multiple TLFs, and from multiple
//	files within a TLF.  In the TCP analogy, think of this as the
//	congestion window (cwnd).
//
// The goal is to make sure that syncBuf can always be transmitted to
// the server within the file system operation timeout forced on us by
// the layer that interacts with the file system (19 seconds on OS X
// and Windows, defaults to 30 seconds for other layers if not already
// set).  In fact, ideally the data would be transmitted in HALF of
// the file system operation timeout, in case a user Sync operation
// gets blocked behind a background Sync operation when there is
// significant data in waitBuf.  At the same time, we want it to be as
// big as possible, because we get the best performance when writing
// lots of blocks in parallel to the servers.  So, we want an
// algorithm that allows waitBuf to grow, without causing the next
// sync (or write, or setattr, etc.) operation to time out.  For the
// purposes of this discussion, let's assume there is only one active
// TLF at a time.
//
// We allow the user to set a min, start, and max size for waitBuf.
// Whenever a sync starts, bytes are transferred from waitBuf into
// syncBuf and a timer is started.  When a sync completes
// successfully, the number of bytes synced is added to the allowed
// size of waitBuf (i.e., "additive increase" == exponential growth).
// However, if the number of sync'd bytes is smaller than the min
// waitBuf size, we don't do additive increase (because we haven't
// really tested the throughput of the server connection in that case).
//
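// As an illustration (the numbers are hypothetical, not configured
// defaults): if the buffer capacity is 1 MB and a 1 MB sync completes
// in time, the capacity grows to 2 MB; a subsequent successful 2 MB
// sync grows it to 4 MB, and so on up to the max -- additive per-sync
// steps that compound into slow-start-style exponential growth.
//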
// If the sync takes more than 33% of half the overall operation
// timeout, the size of waitBuf is reduced by that same percentage
// (i.e., "multiplicative decrease"), and any outstanding bytes in the
// sync will not be used in the "additive increase" phase when the
// sync completes (they are considered "lost" in the TCP analogy, even
// though they should eventually succeed).  The 33% limit was chosen
// mostly by trial and error, although if you assume that
// capacity(waitBuf) will double after each sync, then `2*len(syncBuf)
// == capacity(waitBuf)`, so at any given point there can be about
// 3*capacity(syncBuf) bytes buffered; so if syncBuf can't be sync'd
// in a third of the overall timeout, the next waitBuf should be
// reduced.
//
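// A hypothetical example of the decrease: with a 19-second operation
// timeout, half of it is 9.5 seconds.  If a sync has been blocking
// writes for about 4.75 seconds (50% of that half), the capacity is
// cut by roughly 50% -- say from 8 MB to 4 MB, never dropping below
// the configured minimum -- and the bytes still in flight are
// excluded from the next additive increase.
//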
// Finally, we need to make sure that the Write calls that are filling
// up waitBuf while a sync is happening don't time out.  But, we also
// need to fill waitBuf quickly, so that the next sync is ready to go
// as soon as the first one completes.  Here we implement a
// compromise.  Allow waitBuf to fill up instantly until it holds
// capacity(syncBuf) bytes.  After that, allow it to fill up to
// 2*capacity(syncBuf), but artificially delay each write by adding
// backpressure, by some fraction of the system operation timeout that
// matches the fraction of the progress the buffer has made between
// capacity(syncBuf) and 2*capacity(syncBuf).  As soon as the sync
// completes, any delayed write is unblocked and gets to start filling
// up the buffers again.
//
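// For instance (illustrative numbers only): if capacity(syncBuf) is
// 4 MB and waitBuf already holds 6 MB, the overage is halfway between
// 4 MB and 8 MB, so a new write is delayed for roughly half of its
// allowed operation time before it may dirty its bytes.
//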
// To avoid keeping the buffer capacity large when network conditions
// suddenly worsen (say after a laptop sleep when it comes back online
// on a new, worse network), the capacity is reset back to the minimum
// if resetBufferCapTime passes without any large syncs.  TODO: in the
// future it might make sense to decrease the buffer capacity, rather
// than resetting it to the minimum?
type DirtyBlockCacheStandard struct {
	clock idutil.Clock
	log   logger.Logger
	vlog  *libkb.VDebugLog
	reqWg sync.WaitGroup

	// requestsChan is a queue for channels that should be closed when
	// permission is granted to dirty new data.
	requestsChan chan dirtyReq
	// bytesDecreasedChan is signalled when syncs have finished or dirty
	// blocks have been deleted.
	bytesDecreasedChan chan struct{}
	// shutdownChan is closed when Shutdown is called.
	shutdownChan chan struct{}
	// blockedChanForTesting sends out the number of bytes of the
	// request currently waiting.  Sends out -1 when the request is
	// accepted. Used only for testing.
	blockedChanForTesting chan<- int64

	// The minimum (and initial) capacity of the sync buffer.
	minSyncBufCap int64
	// The maximum capacity of the sync buffer.  Also used as the
	// denominator when calculating backpressure, such that the closer
	// we are to reaching the maximum size (over and above the current
	// sync buffer), the more write requests will be delayed.
	maxSyncBufCap int64

	// After how long without a syncBufferCap-sized sync will
	// syncBufferCap be reset automatically back down to the minimum,
	// to avoid keeping it too high as network conditions change?
	resetBufferCapTime time.Duration

	shutdownLock sync.RWMutex
	isShutdown   bool

	lock            sync.RWMutex
	cache           map[dirtyBlockID]Block
	syncBufBytes    int64
	waitBufBytes    int64
	syncBufferCap   int64
	ignoreSyncBytes int64 // these bytes have "timed out"
	syncStarted     time.Time
	resetter        *time.Timer
}

// NewDirtyBlockCacheStandard constructs a new DirtyBlockCacheStandard
// instance.  The min and max buffer capacities define the possible
// range of how many bytes we'll try to sync in any one sync, and the
// start size defines the initial buffer size.
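//
// An illustrative construction (the clock, log, and vlog values come
// from the caller's config, and the byte capacities below are
// arbitrary example values, not recommended defaults):
//
//	dirtyBcache := NewDirtyBlockCacheStandard(
//		clock, log, vlog,
//		512<<10 /* min */, 16<<20 /* max */, 1<<20 /* start */)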
func NewDirtyBlockCacheStandard(
	clock idutil.Clock, log logger.Logger, vlog *libkb.VDebugLog,
	minSyncBufCap int64, maxSyncBufCap int64,
	startSyncBufCap int64) *DirtyBlockCacheStandard {
	d := &DirtyBlockCacheStandard{
		clock:              clock,
		log:                log,
		vlog:               vlog,
		requestsChan:       make(chan dirtyReq, 1000),
		bytesDecreasedChan: make(chan struct{}, 1),
		shutdownChan:       make(chan struct{}),
		cache:              make(map[dirtyBlockID]Block),
		minSyncBufCap:      minSyncBufCap,
		maxSyncBufCap:      maxSyncBufCap,
		syncBufferCap:      startSyncBufCap,
		resetBufferCapTime: resetBufferCapTimeDefault,
	}
	d.reqWg.Add(1)
	go d.processPermission()
	return d
}

// SimpleDirtyBlockCacheStandard returns a dirty block cache that can
// only handle block put/get/delete requests; it cannot track dirty
// bytes.
func SimpleDirtyBlockCacheStandard() *DirtyBlockCacheStandard {
	return &DirtyBlockCacheStandard{
		cache: make(map[dirtyBlockID]Block),
	}
}

// Get implements the DirtyBlockCache interface for
// DirtyBlockCacheStandard.
func (d *DirtyBlockCacheStandard) Get(
	_ context.Context, _ tlf.ID, ptr BlockPointer, branch BranchName) (
	Block, error) {
	block := func() Block {
		dirtyID := dirtyBlockID{
			id:       ptr.ID,
			refNonce: ptr.RefNonce,
			branch:   branch,
		}
		d.lock.RLock()
		defer d.lock.RUnlock()
		return d.cache[dirtyID]
	}()
	if block != nil {
		return block, nil
	}

	return nil, NoSuchBlockError{ptr.ID}
}

// Put implements the DirtyBlockCache interface for
// DirtyBlockCacheStandard.
func (d *DirtyBlockCacheStandard) Put(
	_ context.Context, _ tlf.ID, ptr BlockPointer, branch BranchName,
	block Block) error {
	dirtyID := dirtyBlockID{
		id:       ptr.ID,
		refNonce: ptr.RefNonce,
		branch:   branch,
	}

	d.lock.Lock()
	defer d.lock.Unlock()
	d.cache[dirtyID] = block
	return nil
}

// Delete implements the DirtyBlockCache interface for
// DirtyBlockCacheStandard.
func (d *DirtyBlockCacheStandard) Delete(_ tlf.ID, ptr BlockPointer,
	branch BranchName) error {
	dirtyID := dirtyBlockID{
		id:       ptr.ID,
		refNonce: ptr.RefNonce,
		branch:   branch,
	}

	d.lock.Lock()
	defer d.lock.Unlock()
	delete(d.cache, dirtyID)
	return nil
}

// IsDirty implements the DirtyBlockCache interface for
// DirtyBlockCacheStandard.
func (d *DirtyBlockCacheStandard) IsDirty(_ tlf.ID, ptr BlockPointer,
	branch BranchName) (isDirty bool) {
	dirtyID := dirtyBlockID{
		id:       ptr.ID,
		refNonce: ptr.RefNonce,
		branch:   branch,
	}

	d.lock.RLock()
	defer d.lock.RUnlock()
	_, isDirty = d.cache[dirtyID]
	return
}

// IsAnyDirty implements the DirtyBlockCache interface for
// DirtyBlockCacheStandard.
func (d *DirtyBlockCacheStandard) IsAnyDirty(_ tlf.ID) bool {
	d.lock.RLock()
	defer d.lock.RUnlock()
	return len(d.cache) > 0 || d.syncBufBytes > 0 || d.waitBufBytes > 0
}

const backpressureSlack = 1 * time.Second

// calcBackpressure returns how much longer a given request should be
// blocked, as a function of its deadline and how far past full the
// syncing buffer is.  Over its lifetime, the request should be blocked
// for roughly the same fraction of its total deadline as the fraction
// by which the buffer is past full.  This lets KBFS slow down writes
// according to how slow the background Syncs are, so we don't
// accumulate more bytes to Sync than we can handle.  See KBFS-731.
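//
// For example (hypothetical numbers): with 10 seconds between start
// and deadline, the slack leaves 9 seconds of usable time; if
// waitBufBytes sits at 1.5x syncBufferCap, the overage fraction is
// 0.5, giving 4.5 seconds of total backpressure, so a request that
// has already waited 1 second is blocked for about 3.5 more.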
func (d *DirtyBlockCacheStandard) calcBackpressure(start time.Time,
	deadline time.Time) time.Duration {
	d.lock.RLock()
	defer d.lock.RUnlock()
	// We don't want to use the whole deadline, so cut it some slack.
	totalReqTime := deadline.Sub(start) - backpressureSlack
	if totalReqTime <= 0 {
		return 0
	}

	// Keep the window full in preparation for the next sync; once
	// it's full, start applying backpressure.
	if d.waitBufBytes < d.syncBufferCap {
		return 0
	}

	// The backpressure is proportional to how far our overage is
	// toward filling up our next sync buffer.
	backpressureFrac := float64(d.waitBufBytes-d.syncBufferCap) /
		float64(d.syncBufferCap)
	if backpressureFrac > 1.0 {
		backpressureFrac = 1.0
	}
	totalBackpressure := time.Duration(
		float64(totalReqTime) * backpressureFrac)
	timeSpentSoFar := d.clock.Now().Sub(start)
	if totalBackpressure <= timeSpentSoFar {
		return 0
	}

	// How much time do we have left, given how much time this request
	// has waited so far?
	return totalBackpressure - timeSpentSoFar
}

func (d *DirtyBlockCacheStandard) acceptNewWrite(newBytes int64) bool {
	d.lock.Lock()
	defer d.lock.Unlock()
	// Accept any write, as long as we're not already over the limits.
	// Allow the total dirty bytes to get close to double the max
	// buffer size, to allow us to fill up the buffer for the next
	// sync.
	canAccept := d.waitBufBytes < d.maxSyncBufCap*2
	if canAccept {
		d.waitBufBytes += newBytes
	}

	return canAccept
}

func (d *DirtyBlockCacheStandard) maybeDecreaseBuffer(start time.Time,
	deadline time.Time, soFar float64) (bool, time.Duration, float64) {
	// Update syncBufferCap if the write has been blocked for more
	// than half of its timeout.  (We use half the timeout in case a
	// user Sync operation, which can't be subjected to backpressure,
	// is blocked by a background Sync operation when waitBuf is
	// nearly full.)
	allowedTimeout := float64(deadline.Sub(start)) / 2.0
	timeoutUsed := d.clock.Now().Sub(start)
	fracTimeoutUsed := float64(timeoutUsed) / allowedTimeout
	if fracTimeoutUsed >= 0.33 {
		d.lock.Lock()
		defer d.lock.Unlock()
		// Decrease the syncBufferCap by the percentage of the timeout
		// we're using, minus the percentage we've already decreased
		// it so far.  TODO: a more logical algorithm would probably
		// keep track of what the syncBufferCap was before the Sync
		// started, and multiply that by the entire fracTimeoutUsed,
		// since subtracting percentages in this way doesn't make a
		// whole lot of sense.
		d.syncBufferCap = int64(float64(d.syncBufferCap) *
			(1 - (fracTimeoutUsed - soFar)))
		if d.syncBufferCap < d.minSyncBufCap {
			d.syncBufferCap = d.minSyncBufCap
		}
		d.log.CDebugf(context.TODO(), "Writes blocked for %s (%f%% of timeout), "+
			"syncBufferCap=%d", timeoutUsed, fracTimeoutUsed*100,
			d.syncBufferCap)
		if d.syncBufBytes > d.ignoreSyncBytes {
			d.ignoreSyncBytes = d.syncBufBytes
		}
		return true, time.Duration(allowedTimeout), fracTimeoutUsed
	}

	// If we haven't decreased the buffer yet, make sure we get a
	// wake-up call at the right time.
	maxWakeup := allowedTimeout / 3.0
	return false, time.Duration(maxWakeup) - timeoutUsed, soFar
}

func (d *DirtyBlockCacheStandard) getSyncStarted() time.Time {
	d.lock.RLock()
	defer d.lock.RUnlock()
	return d.syncStarted
}

func (d *DirtyBlockCacheStandard) getSyncBufferCap() int64 {
	d.lock.RLock()
	defer d.lock.RUnlock()
	return d.syncBufferCap
}

func (d *DirtyBlockCacheStandard) processPermission() {
	defer d.reqWg.Done()
	// Keep track of the most-recently seen request across loop
	// iterations, because we aren't necessarily going to be able to
	// deal with it as soon as we see it (since we might be past our
	// limits already).
	var currentReq dirtyReq
	var backpressure time.Duration
	var maxWakeup time.Duration
	decreased := false
	var fracDeadlineSoFar float64
	var lastKnownTimeout time.Duration
	for {
		reqChan := d.requestsChan
		if currentReq.respChan != nil {
			// We are already waiting on a request, so don't bother
			// trying to read another request from the requests chan.
			reqChan = nil

			// If we haven't decreased the buffer size yet, make sure
			// we wake up in time to do that.
			if !decreased && (backpressure <= 0 || maxWakeup < backpressure) {
				backpressure = maxWakeup
			}
		} else if !d.getSyncStarted().IsZero() {
			// There are no requests pending, but there is still a
			// sync pending.
			backpressure = maxWakeup
		}

		var bpTimer <-chan time.Time
		if backpressure > 0 {
			bpTimer = time.After(backpressure)
		}

		newReq := false
		select {
		case <-d.shutdownChan:
			return
		case <-d.bytesDecreasedChan:
		case <-bpTimer:
		case r := <-reqChan:
			currentReq = r
			newReq = true
			decreased = false
		}

		if currentReq.respChan != nil || maxWakeup > 0 {
			syncStarted := d.getSyncStarted()
			// Has this sync been blocking so long that we should
			// decrease the buffer size?
			if !syncStarted.IsZero() {
				deadline := syncStarted.Add(lastKnownTimeout)
				decreased, maxWakeup, fracDeadlineSoFar =
					d.maybeDecreaseBuffer(syncStarted,
						deadline, fracDeadlineSoFar)
			} else {
				maxWakeup = 0
			}
		}

		if currentReq.respChan != nil {
			lastKnownTimeout = currentReq.deadline.Sub(currentReq.start)
			// Apply any backpressure?
			backpressure = d.calcBackpressure(currentReq.start,
				currentReq.deadline)
			switch {
			case backpressure == 0 && d.acceptNewWrite(currentReq.bytes):
				// If we have an active request, and we have room in
				// our buffers to deal with it, grant permission to
				// the requestor by closing the response channel.
				close(currentReq.respChan)
				currentReq = dirtyReq{}
				if d.blockedChanForTesting != nil {
					d.blockedChanForTesting <- -1
				}
			case d.blockedChanForTesting != nil && newReq:
				// Otherwise, if this is the first time we've
				// considered this request, inform any tests that the
				// request is blocked.
				d.blockedChanForTesting <- currentReq.bytes
			case backpressure != 0:
				func() {
					d.lock.Lock()
					defer d.lock.Unlock()
					if d.syncStarted.IsZero() {
						// TODO: in this case where there are multiple
						// concurrent Syncs from multiple TLFs, this
						// might not correctly capture the start time
						// of the Nth Sync.  We might want to assign
						// each Sync its own unique ID somehow, so we
						// can track them separately and more
						// accurately.
						d.syncStarted = d.clock.Now()
						fracDeadlineSoFar = 0
					}
					d.log.CDebugf(context.TODO(), "Applying backpressure %s", backpressure)
				}()
			}
		}
	}
}

// RequestPermissionToDirty implements the DirtyBlockCache interface
// for DirtyBlockCacheStandard.
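//
// A sketch of the expected caller pattern (the identifiers below are
// illustrative, not part of this package's API):
//
//	permCh, err := dirtyBcache.RequestPermissionToDirty(ctx, tlfID, nBytes)
//	if err != nil {
//		return err
//	}
//	select {
//	case <-permCh: // closed once the write may proceed
//	case <-ctx.Done():
//		return ctx.Err()
//	}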
func (d *DirtyBlockCacheStandard) RequestPermissionToDirty(
	ctx context.Context, _ tlf.ID, estimatedDirtyBytes int64) (
	DirtyPermChan, error) {
	d.shutdownLock.RLock()
	defer d.shutdownLock.RUnlock()
	if d.isShutdown {
		return nil, ShutdownHappenedError{}
	}

	if estimatedDirtyBytes < 0 {
		panic("Must request permission for a non-negative number of bytes.")
	}
	c := make(chan struct{})

	// No need to wait to write 0 bytes.
	if estimatedDirtyBytes == 0 {
		close(c)
		return c, nil
	}

	now := d.clock.Now()
	deadline, ok := ctx.Deadline()
	defaultDeadline := now.Add(BackgroundTaskTimeout / 2)
	if !ok || deadline.After(defaultDeadline) {
		// Use half of the background task timeout, to make sure we
		// never get close to a timeout in a background task.
		deadline = defaultDeadline
	}
	req := dirtyReq{c, estimatedDirtyBytes, now, deadline}
	select {
	case d.requestsChan <- req:
		return c, nil
	case <-ctx.Done():
		return nil, ctx.Err()
	}
}

func (d *DirtyBlockCacheStandard) signalDecreasedBytes() {
	select {
	case d.bytesDecreasedChan <- struct{}{}:
	default:
		// Already something queued there, and one is enough.
	}
}

func (d *DirtyBlockCacheStandard) updateWaitBufLocked(bytes int64) {
	d.waitBufBytes += bytes
	if d.waitBufBytes < 0 {
		// It would be better if we didn't have this check, but it's
		// hard for folderBlockOps to account correctly when bytes in
		// a syncing block are overwritten, and then the write is
		// deferred (see KBFS-2157).
		d.waitBufBytes = 0
	}
}

// UpdateUnsyncedBytes implements the DirtyBlockCache interface for
// DirtyBlockCacheStandard.
func (d *DirtyBlockCacheStandard) UpdateUnsyncedBytes(_ tlf.ID,
	newUnsyncedBytes int64, wasSyncing bool) {
	d.lock.Lock()
	defer d.lock.Unlock()
	if wasSyncing {
		d.syncBufBytes += newUnsyncedBytes
	} else {
		d.updateWaitBufLocked(newUnsyncedBytes)
	}
	if newUnsyncedBytes < 0 {
		d.signalDecreasedBytes()
	}
}

// UpdateSyncingBytes implements the DirtyBlockCache interface for
// DirtyBlockCacheStandard.
func (d *DirtyBlockCacheStandard) UpdateSyncingBytes(_ tlf.ID, size int64) {
	d.lock.Lock()
	defer d.lock.Unlock()
	d.syncBufBytes += size
	d.updateWaitBufLocked(-size)
	d.signalDecreasedBytes()
}

// BlockSyncFinished implements the DirtyBlockCache interface for
// DirtyBlockCacheStandard.
func (d *DirtyBlockCacheStandard) BlockSyncFinished(_ tlf.ID, size int64) {
	d.lock.Lock()
	defer d.lock.Unlock()
	if size > 0 {
		d.syncBufBytes -= size
	} else {
		// The block will be retried, so put it back on the waitBuf
		d.updateWaitBufLocked(-size)
	}
	if size > 0 {
		d.signalDecreasedBytes()
	}
}

func (d *DirtyBlockCacheStandard) resetBufferCap() {
	d.lock.Lock()
	defer d.lock.Unlock()
	d.log.CDebugf(context.TODO(), "Resetting syncBufferCap from %d to %d", d.syncBufferCap,
		d.minSyncBufCap)
	d.syncBufferCap = d.minSyncBufCap
	d.resetter = nil
	if d.blockedChanForTesting != nil {
		d.blockedChanForTesting <- -1
	}
}

// SyncFinished implements the DirtyBlockCache interface for
// DirtyBlockCacheStandard.
func (d *DirtyBlockCacheStandard) SyncFinished(_ tlf.ID, size int64) {
	d.lock.Lock()
	defer d.lock.Unlock()
	if size <= 0 {
		return
	}
	d.syncStarted = time.Time{}

	// If the outstanding bytes have timed out, don't count them
	// towards the buffer increase.
	ignore := d.ignoreSyncBytes
	if ignore > size {
		ignore = size
	}
	bufferIncrease := size - ignore
	d.ignoreSyncBytes -= ignore

	// If the sync was a reasonably large fraction of the current
	// buffer capacity, restart the reset timer.
	if size >= d.syncBufferCap/2 {
		if d.resetter != nil {
			d.resetter.Stop()
		}
		d.resetter = time.AfterFunc(d.resetBufferCapTime, d.resetBufferCap)
	}

	// Only increase the buffer size if we sent over a lot of bytes.
	// We don't want a series of small writes to increase the buffer
	// size, since that doesn't give us any real information about the
	// throughput of the connection.
	if bufferIncrease >= d.syncBufferCap {
		d.syncBufferCap += bufferIncrease
		if d.syncBufferCap > d.maxSyncBufCap {
			d.syncBufferCap = d.maxSyncBufCap
		}
	}
	d.signalDecreasedBytes()
	d.vlog.CLogf(
		context.TODO(), libkb.VLog1,
		"Finished syncing %d bytes, syncBufferCap=%d, waitBuf=%d, ignored=%d",
		size, d.syncBufferCap, d.waitBufBytes, ignore)
}

// ShouldForceSync implements the DirtyBlockCache interface for
// DirtyBlockCacheStandard.
func (d *DirtyBlockCacheStandard) ShouldForceSync(_ tlf.ID) bool {
	d.lock.RLock()
	defer d.lock.RUnlock()
	// TODO: Fill up to likely block boundaries?
	return d.waitBufBytes >= d.syncBufferCap
}

// Size returns the number of blocks currently in the cache.
func (d *DirtyBlockCacheStandard) Size() int {
	d.lock.RLock()
	defer d.lock.RUnlock()
	return len(d.cache)
}

// Shutdown implements the DirtyBlockCache interface for
// DirtyBlockCacheStandard.
func (d *DirtyBlockCacheStandard) Shutdown() error {
	func() {
		d.shutdownLock.Lock()
		defer d.shutdownLock.Unlock()
		d.isShutdown = true
		close(d.shutdownChan)
	}()

	d.reqWg.Wait()
	close(d.requestsChan)
	d.lock.Lock()
	defer d.lock.Unlock()
	// Clear out the remaining requests
	for req := range d.requestsChan {
		d.updateWaitBufLocked(req.bytes)
	}
	if d.syncBufBytes != 0 || d.waitBufBytes != 0 || d.ignoreSyncBytes != 0 {
		return fmt.Errorf("Unexpected dirty bytes leftover on shutdown: "+
			"syncBuf=%d, waitBuf=%d, ignore=%d",
			d.syncBufBytes, d.waitBufBytes, d.ignoreSyncBytes)
	}
	return nil
}