github.com/keybase/client/go@v0.0.0-20241007131713-f10651d043c8/kbfs/libkbfs/backpressure_disk_limiter.go

     1  // Copyright 2017 Keybase Inc. All rights reserved.
     2  // Use of this source code is governed by a BSD
     3  // license that can be found in the LICENSE file.
     4  
     5  package libkbfs
     6  
     7  import (
     8  	"fmt"
     9  	"math"
    10  	"sync"
    11  	"time"
    12  
    13  	"github.com/keybase/client/go/kbfs/kbfssync"
    14  	"github.com/keybase/client/go/logger"
    15  	"github.com/keybase/client/go/protocol/keybase1"
    16  	"github.com/pkg/errors"
    17  	"golang.org/x/net/context"
    18  )
    19  
    20  // backpressureTracker keeps track of the variables used to calculate
    21  // backpressure. It keeps track of a generic resource (which can be
    22  // either bytes or files).
    23  //
    24  // Let U be the (approximate) resource usage of the journal and F be
    25  // the free resources. Then we want to enforce
    26  //
    27  //	U <= min(k(U+F), L),
    28  //
    29  // where 0 < k <= 1 is some fraction, and L > 0 is the absolute
    30  // resource usage limit. But in addition to that, we want to set
    31  // thresholds 0 <= m <= M <= 1 such that we apply proportional
    32  // backpressure (with a given maximum delay) when
    33  //
    34  //	m <= max(U/(k(U+F)), U/L) <= M,
    35  //
    36  // which is equivalent to
    37  //
    38  //	m <= U/min(k(U+F), L) <= M.
    39  //
    40  // Note that this type doesn't do any locking, so it's the caller's
    41  // responsibility to do so.
    42  type backpressureTracker struct {
    43  	// minThreshold is m in the above.
    44  	minThreshold float64
    45  	// maxThreshold is M in the above.
    46  	maxThreshold float64
    47  	// limitFrac is k in the above.
    48  	limitFrac float64
    49  	// limit is L in the above.
    50  	limit int64
    51  
    52  	// used is U in the above.
    53  	used int64
    54  	// free is F in the above.
    55  	free int64
    56  
    57  	// semaphoreMax is the last calculated value of currLimit(),
    58  	// which is min(k(U+F), L).
    59  	semaphoreMax int64
    60  	// The count of the semaphore is semaphoreMax - U - I, where I
    61  	// is the resource count that is currently "in-flight", i.e.
    62  	// reserved via reserve() but not yet committed or rolled back.
    63  	semaphore *kbfssync.Semaphore
    64  }
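
// Editor's note (not part of the original file): a worked example of the
// thresholds above, assuming k = 0.85, L = 100 GiB, m = 0.5 and M = 0.95.
// With U = 40 GiB used and F = 60 GiB free, the current limit is
// min(0.85*(40+60), 100) = 85 GiB, so U/limit ~= 0.47 < m and no delay is
// applied. If U grows to 70 GiB (leaving F = 30 GiB), the limit stays at
// 85 GiB, U/limit ~= 0.82, and the delay scale becomes
// (0.82-0.5)/(0.95-0.5) ~= 0.72 of the maximum delay.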
    65  
    66  func newBackpressureTracker(minThreshold, maxThreshold, limitFrac float64,
    67  	limit, initialFree int64) (*backpressureTracker, error) {
    68  	if minThreshold < 0.0 {
    69  		return nil, errors.Errorf("minThreshold=%f < 0.0",
    70  			minThreshold)
    71  	}
    72  	if maxThreshold < minThreshold {
    73  		return nil, errors.Errorf(
    74  			"maxThreshold=%f < minThreshold=%f",
    75  			maxThreshold, minThreshold)
    76  	}
    77  	if 1.0 < maxThreshold {
    78  		return nil, errors.Errorf("1.0 < maxThreshold=%f",
    79  			maxThreshold)
    80  	}
    81  	if limitFrac <= 0 {
    82  		return nil, errors.Errorf("limitFrac=%f <= 0", limitFrac)
    83  	}
    84  	if limitFrac > 1.0 {
    85  		return nil, errors.Errorf("limitFrac=%f > 1.0", limitFrac)
    86  	}
    87  	if limit < 0 {
    88  		return nil, errors.Errorf("limit=%d < 0", limit)
    89  	}
    90  	if initialFree < 0 {
    91  		return nil, errors.Errorf("initialFree=%d < 0", initialFree)
    92  	}
    93  	bt := &backpressureTracker{
    94  		minThreshold, maxThreshold, limitFrac, limit,
    95  		0, initialFree, 0, kbfssync.NewSemaphore(),
    96  	}
    97  	bt.updateSemaphoreMax()
    98  	return bt, nil
    99  }
   100  
   101  // currLimit returns the resource limit, taking into account the
   102  // amount of free resources left. This is min(k(U+F), L).
   103  func (bt backpressureTracker) currLimit() float64 {
   104  	// Calculate k(U+F), converting to float64 first to avoid
   105  	// overflow, although losing some precision in the process.
   106  	usedFloat := float64(bt.used)
   107  	freeFloat := float64(bt.free)
   108  	limit := bt.limitFrac * (usedFloat + freeFloat)
   109  	minLimit := math.Min(limit, float64(bt.limit))
   110  	// Based on local tests, the magic number of 512 gets us past overflow
   111  	// issues at the limit due to floating point precision.
   112  	maxFloatForInt64 := float64(math.MaxInt64 - 512)
   113  	if minLimit > maxFloatForInt64 {
   114  		minLimit = maxFloatForInt64
   115  	}
   116  	return minLimit
   117  }
   118  
   119  func (bt backpressureTracker) usedFrac() float64 {
   120  	return float64(bt.used) / bt.currLimit()
   121  }
   122  
   123  func (bt backpressureTracker) usedResources() int64 {
   124  	return bt.used
   125  }
   126  
   127  // delayScale returns a number between 0 and 1, which should be
   128  // multiplied with the maximum delay to get the backpressure delay to
   129  // apply.
   130  func (bt backpressureTracker) delayScale() float64 {
   131  	usedFrac := bt.usedFrac()
   132  
   133  	// We want the delay to be 0 if usedFrac <= m and the max
   134  	// delay if usedFrac >= M, so linearly interpolate the delay
   135  	// scale.
   136  	m := bt.minThreshold
   137  	M := bt.maxThreshold
   138  	return math.Min(1.0, math.Max(0.0, (usedFrac-m)/(M-m)))
   139  }
   140  
   141  // updateSemaphoreMax must be called whenever bt.used or bt.free
   142  // changes.
   143  func (bt *backpressureTracker) updateSemaphoreMax() {
   144  	newMax := int64(bt.currLimit())
   145  	delta := newMax - bt.semaphoreMax
   146  	// These operations are adjusting the *maximum* value of
   147  	// bt.semaphore.
   148  	if delta > 0 {
   149  		bt.semaphore.Release(delta)
   150  	} else if delta < 0 {
   151  		bt.semaphore.ForceAcquire(-delta)
   152  	}
   153  	bt.semaphoreMax = newMax
   154  }
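
// Editor's note (not part of the original file): kbfssync.Semaphore has no
// explicit maximum, so the maximum is maintained implicitly above. For
// example, if the recomputed limit grows from 80 to 100 units, Release(20)
// adds 20 permits; if it shrinks from 100 to 80, ForceAcquire(20) removes 20
// permits, which may leave the semaphore count negative until enough
// resources are released.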
   155  
   156  func (bt *backpressureTracker) onEnable(usedResources int64) (
   157  	availableResources int64) {
   158  	bt.used += usedResources
   159  	bt.updateSemaphoreMax()
   160  	if usedResources == 0 {
   161  		return bt.semaphore.Count()
   162  	}
   163  	return bt.semaphore.ForceAcquire(usedResources)
   164  }
   165  
   166  func (bt *backpressureTracker) onDisable(usedResources int64) {
   167  	bt.used -= usedResources
   168  	bt.updateSemaphoreMax()
   169  	if usedResources > 0 {
   170  		bt.semaphore.Release(usedResources)
   171  	}
   172  }
   173  
   174  func (bt *backpressureTracker) updateFree(freeResources int64) {
   175  	bt.free = freeResources
   176  	bt.updateSemaphoreMax()
   177  }
   178  
   179  func (bt *backpressureTracker) reserve(
   180  	ctx context.Context, blockResources int64) (
   181  	availableResources int64, err error) {
   182  	return bt.semaphore.Acquire(ctx, blockResources)
   183  }
   184  
   185  func (bt *backpressureTracker) commit(blockResources int64) {
   186  	bt.used += blockResources
   187  	bt.updateSemaphoreMax()
   188  }
   189  
   190  func (bt *backpressureTracker) rollback(blockResources int64) {
   191  	bt.semaphore.Release(blockResources)
   192  }
   193  
   194  func (bt *backpressureTracker) commitOrRollback(
   195  	blockResources int64, shouldCommit bool) {
   196  	if shouldCommit {
   197  		bt.commit(blockResources)
   198  	} else {
   199  		bt.rollback(blockResources)
   200  	}
   201  }
   202  
   203  func (bt *backpressureTracker) release(blockResources int64) {
   204  	if blockResources == 0 {
   205  		return
   206  	}
   207  
   208  	bt.semaphore.Release(blockResources)
   209  
   210  	bt.used -= blockResources
   211  	bt.updateSemaphoreMax()
   212  }
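
// exampleByteTrackerUsage is an editor-added sketch (not part of the
// original file) of the intended reserve -> commit/rollback -> release flow
// on a backpressureTracker; the thresholds and sizes are made up for
// illustration.
func exampleByteTrackerUsage(ctx context.Context) error {
	// 50%/95% thresholds, up to 85% of (used+free) may be consumed, with a
	// 100 GiB hard cap and 60 GiB currently free.
	bt, err := newBackpressureTracker(
		0.5, 0.95, 0.85, 100*1024*1024*1024, 60*1024*1024*1024)
	if err != nil {
		return err
	}
	const blockBytes = 1 << 20 // a 1 MiB block
	// Block until the semaphore has room for the block (the caller applies
	// any backpressure delay before getting here).
	if _, err := bt.reserve(ctx, blockBytes); err != nil {
		return err
	}
	// Commit on a successful put (used += blockBytes); a failed put would
	// pass false to roll the reservation back instead.
	bt.commitOrRollback(blockBytes, true)
	// Later, when the block leaves the journal, return its bytes.
	bt.release(blockBytes)
	return nil
}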
   213  
   214  func (bt *backpressureTracker) tryReserve(blockResources int64) (
   215  	availableResources int64) {
   216  	return bt.semaphore.TryAcquire(blockResources)
   217  }
   218  
   219  func (bt *backpressureTracker) getLimitInfo() (used int64, limit float64) {
   220  	return bt.used, bt.currLimit()
   221  }
   222  
   223  type backpressureTrackerStatus struct {
   224  	// Derived numbers.
   225  	UsedFrac   float64
   226  	DelayScale float64
   227  
   228  	// Constants.
   229  	MinThreshold float64
   230  	MaxThreshold float64
   231  	LimitFrac    float64
   232  	Limit        int64
   233  
   234  	// Raw numbers.
   235  	Used  int64
   236  	Free  int64
   237  	Max   int64
   238  	Count int64
   239  }
   240  
   241  func (bt *backpressureTracker) getStatus() backpressureTrackerStatus {
   242  	return backpressureTrackerStatus{
   243  		UsedFrac:   bt.usedFrac(),
   244  		DelayScale: bt.delayScale(),
   245  
   246  		MinThreshold: bt.minThreshold,
   247  		MaxThreshold: bt.maxThreshold,
   248  		LimitFrac:    bt.limitFrac,
   249  		Limit:        bt.limit,
   250  
   251  		Used:  bt.used,
   252  		Free:  bt.free,
   253  		Max:   bt.semaphoreMax,
   254  		Count: bt.semaphore.Count(),
   255  	}
   256  }
   257  
   258  // quotaBackpressureTracker keeps track of the variables used to
   259  // calculate quota-related backpressure.
   260  //
   261  // Let U be the (approximate) unflushed bytes in the journal, R be the
   262  // remote quota usage, and Q be the quota. Then we want to set
   263  // thresholds 0 <= m <= M such that we apply proportional backpressure
   264  // (with a given maximum delay) when
   265  //
   266  //	m <= (U+R)/Q <= M.
   267  //
   268  // Note that this type doesn't do any locking, so it's the caller's
   269  // responsibility to do so.
   270  type quotaBackpressureTracker struct {
   271  	// minThreshold is m in the above.
   272  	minThreshold float64
   273  	// maxThreshold is M in the above.
   274  	maxThreshold float64
   275  
   276  	// unflushedBytes is U in the above.
   277  	unflushedBytes int64
   278  	// remoteUsedBytes is R in the above.
   279  	remoteUsedBytes int64
   280  	// quotaBytes is Q in the above.
   281  	quotaBytes int64
   282  }
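
// Editor's note (not part of the original file): a worked example of the
// quota thresholds above, using the defaults m = 1.0 and M = 1.2 set later
// in this file. With R = 9 GiB of remote usage, U = 3 GiB unflushed and a
// Q = 10 GiB quota, (U+R)/Q = 1.2, so the full maximum delay applies; at
// (U+R)/Q = 1.1 the delay scale would be (1.1-1.0)/(1.2-1.0) = 0.5.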
   283  
   284  func newQuotaBackpressureTracker(minThreshold, maxThreshold float64) (
   285  	*quotaBackpressureTracker, error) {
   286  	if minThreshold < 0.0 {
   287  		return nil, errors.Errorf("minThreshold=%f < 0.0",
   288  			minThreshold)
   289  	}
   290  	if maxThreshold < minThreshold {
   291  		return nil, errors.Errorf(
   292  			"maxThreshold=%f < minThreshold=%f",
   293  			maxThreshold, minThreshold)
   294  	}
   295  	qbt := &quotaBackpressureTracker{
   296  		minThreshold, maxThreshold, 0, 0, math.MaxInt64,
   297  	}
   298  	return qbt, nil
   299  }
   300  
   301  func (qbt quotaBackpressureTracker) usedFrac() float64 {
   302  	return (float64(qbt.unflushedBytes) + float64(qbt.remoteUsedBytes)) /
   303  		float64(qbt.quotaBytes)
   304  }
   305  
   306  // delayScale returns a number between 0 and 1, which should be
   307  // multiplied with the maximum delay to get the backpressure delay to
   308  // apply.
   309  func (qbt quotaBackpressureTracker) delayScale() float64 {
   310  	usedFrac := qbt.usedFrac()
   311  
   312  	// We want the delay to be 0 if usedFrac <= m and the max
   313  	// delay if usedFrac >= M, so linearly interpolate the delay
   314  	// scale.
   315  	m := qbt.minThreshold
   316  	M := qbt.maxThreshold
   317  	return math.Min(1.0, math.Max(0.0, (usedFrac-m)/(M-m)))
   318  }
   319  
   320  func (qbt quotaBackpressureTracker) getQuotaInfo() (
   321  	usedQuotaBytes, quotaBytes int64) {
   322  	usedQuotaBytes = qbt.unflushedBytes + qbt.remoteUsedBytes
   323  	quotaBytes = qbt.quotaBytes
   324  	return usedQuotaBytes, quotaBytes
   325  }
   326  
   327  func (qbt *quotaBackpressureTracker) onJournalEnable(unflushedBytes int64) {
   328  	qbt.unflushedBytes += unflushedBytes
   329  }
   330  
   331  func (qbt *quotaBackpressureTracker) onJournalDisable(unflushedBytes int64) {
   332  	qbt.unflushedBytes -= unflushedBytes
   333  }
   334  
   335  func (qbt *quotaBackpressureTracker) updateRemote(
   336  	remoteUsedBytes, quotaBytes int64) {
   337  	qbt.remoteUsedBytes = remoteUsedBytes
   338  	qbt.quotaBytes = quotaBytes
   339  }
   340  
   341  func (qbt *quotaBackpressureTracker) afterBlockPut(
   342  	blockBytes int64, putData bool) {
   343  	if putData {
   344  		qbt.unflushedBytes += blockBytes
   345  	}
   346  }
   347  
   348  func (qbt *quotaBackpressureTracker) onBlocksFlush(blockBytes int64) {
   349  	qbt.unflushedBytes -= blockBytes
   350  }
   351  
   352  type quotaBackpressureTrackerStatus struct {
   353  	// Derived numbers.
   354  	UsedFrac   float64
   355  	DelayScale float64
   356  
   357  	// Constants.
   358  	MinThreshold float64
   359  	MaxThreshold float64
   360  
   361  	// Raw numbers.
   362  	UnflushedBytes  int64
   363  	RemoteUsedBytes int64
   364  	QuotaBytes      int64
   365  }
   366  
   367  func (qbt *quotaBackpressureTracker) getStatus() quotaBackpressureTrackerStatus {
   368  	return quotaBackpressureTrackerStatus{
   369  		UsedFrac:   qbt.usedFrac(),
   370  		DelayScale: qbt.delayScale(),
   371  
   372  		MinThreshold: qbt.minThreshold,
   373  		MaxThreshold: qbt.maxThreshold,
   374  
   375  		UnflushedBytes:  qbt.unflushedBytes,
   376  		RemoteUsedBytes: qbt.remoteUsedBytes,
   377  		QuotaBytes:      qbt.quotaBytes,
   378  	}
   379  }
   380  
   381  // journalTracker aggregates all the journal trackers. This type also
   382  // doesn't do any locking, so it's the caller's responsibility to do
   383  // so.
   384  type journalTracker struct {
   385  	byte, file        *backpressureTracker
   386  	quota             map[keybase1.UserOrTeamID]*quotaBackpressureTracker
   387  	quotaMinThreshold float64
   388  	quotaMaxThreshold float64
   389  }
   390  
   391  func newJournalTracker(
   392  	minThreshold, maxThreshold, quotaMinThreshold, quotaMaxThreshold, journalFrac float64,
   393  	byteLimit, fileLimit, freeBytes, freeFiles int64) (
   394  	journalTracker, error) {
   395  	// byteLimit and fileLimit must be scaled by the proportion of
   396  	// the limit that the journal should consume. Adding 0.5 rounds
   397  	// to the nearest integer.
   398  	journalByteLimit := int64((float64(byteLimit) * journalFrac) + 0.5)
   399  	byteTracker, err := newBackpressureTracker(
   400  		minThreshold, maxThreshold, journalFrac, journalByteLimit,
   401  		freeBytes)
   402  	if err != nil {
   403  		return journalTracker{}, err
   404  	}
   405  	// The fileLimit is only used by the journal, so in theory we
   406  	// don't have to scale it by journalFrac, but in the interest
   407  	// of consistency with how we treat the byteLimit, we do so
   408  	// anyway. Adding 0.5 rounds to the nearest integer.
   409  	journalFileLimit := int64((float64(fileLimit) * journalFrac) + 0.5)
   410  	fileTracker, err := newBackpressureTracker(
   411  		minThreshold, maxThreshold, journalFrac, journalFileLimit,
   412  		freeFiles)
   413  	if err != nil {
   414  		return journalTracker{}, err
   415  	}
   416  
   417  	// Validate the quota parameters here -- actual quota trackers
   418  	// will be created on a per-chargedTo-ID basis.
   419  	_, err = newQuotaBackpressureTracker(quotaMinThreshold, quotaMaxThreshold)
   420  	if err != nil {
   421  		return journalTracker{}, err
   422  	}
   423  
   424  	return journalTracker{
   425  		byte: byteTracker,
   426  		file: fileTracker,
   427  		quota: make(
   428  			map[keybase1.UserOrTeamID]*quotaBackpressureTracker),
   429  		quotaMinThreshold: quotaMinThreshold,
   430  		quotaMaxThreshold: quotaMaxThreshold,
   431  	}, nil
   432  }
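
// Editor's note (not part of the original file): with the default
// journalFrac of 0.85 and byteLimit of 200 GiB, journalByteLimit above works
// out to int64(0.85*200 GiB + 0.5) = 170 GiB, and the same scaling applied
// to a 6,000,000 fileLimit gives a 5,100,000-file journal cap.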
   433  
   434  func (jt journalTracker) getQuotaTracker(
   435  	chargedTo keybase1.UserOrTeamID) *quotaBackpressureTracker {
   436  	quota, ok := jt.quota[chargedTo]
   437  	if !ok {
   438  		var err error
   439  		quota, err = newQuotaBackpressureTracker(
   440  			jt.quotaMinThreshold, jt.quotaMaxThreshold)
   441  		if err != nil {
   442  			// We already tested the parameters, so this shouldn't
   443  			// ever happen.
   444  			panic(err)
   445  		}
   446  		jt.quota[chargedTo] = quota
   447  	}
   448  	return quota
   449  }
   450  
   451  type jtSnapshot struct {
   452  	used  int64
   453  	free  int64
   454  	max   int64
   455  	count int64
   456  }
   457  
   458  func (jt journalTracker) getSnapshotsForTest(chargedTo keybase1.UserOrTeamID) (
   459  	byteSnapshot, fileSnapshot, quotaSnapshot jtSnapshot) {
   460  	byteSnapshot = jtSnapshot{jt.byte.used, jt.byte.free,
   461  		jt.byte.semaphoreMax, jt.byte.semaphore.Count()}
   462  	fileSnapshot = jtSnapshot{jt.file.used, jt.file.free,
   463  		jt.file.semaphoreMax, jt.file.semaphore.Count()}
   464  	usedQuotaBytes, quotaBytes := jt.getQuotaTracker(chargedTo).getQuotaInfo()
   465  	free := quotaBytes - usedQuotaBytes
   466  	quotaSnapshot = jtSnapshot{usedQuotaBytes, free, 0, 0}
   467  	return byteSnapshot, fileSnapshot, quotaSnapshot
   469  }
   470  
   471  func (jt journalTracker) onEnable(storedBytes, unflushedBytes, files int64,
   472  	chargedTo keybase1.UserOrTeamID) (availableBytes, availableFiles int64) {
   473  	// storedBytes should be >= unflushedBytes; if it isn't, it's
   474  	// not too bad to let the update go through anyway.
   475  	availableBytes = jt.byte.onEnable(storedBytes)
   476  	availableFiles = jt.file.onEnable(files)
   477  	jt.getQuotaTracker(chargedTo).onJournalEnable(unflushedBytes)
   478  	return availableBytes, availableFiles
   479  }
   480  
   481  func (jt journalTracker) onDisable(storedBytes, unflushedBytes, files int64,
   482  	chargedTo keybase1.UserOrTeamID) {
   483  	// As above, storedBytes should be >= unflushedBytes. Let it
   484  	// go through here, too.
   485  	jt.byte.onDisable(storedBytes)
   486  	jt.file.onDisable(files)
   487  	jt.getQuotaTracker(chargedTo).onJournalDisable(unflushedBytes)
   488  }
   489  
   490  func (jt journalTracker) getDelayScale(
   491  	chargedTo keybase1.UserOrTeamID) float64 {
   492  	byteDelayScale := jt.byte.delayScale()
   493  	fileDelayScale := jt.file.delayScale()
   494  	quotaDelayScale := jt.getQuotaTracker(chargedTo).delayScale()
   495  	delayScale := math.Max(
   496  		math.Max(byteDelayScale, fileDelayScale), quotaDelayScale)
   497  	return delayScale
   498  }
   499  
   500  func (jt journalTracker) updateFree(
   501  	freeBytes, overallUsedBytes, freeFiles int64) {
   502  	// We calculate the total free bytes by adding the reported free bytes and
   503  	// the non-journal used bytes.
   504  	jt.byte.updateFree(freeBytes + overallUsedBytes - jt.byte.used)
   505  	jt.file.updateFree(freeFiles)
   506  }
   507  
   508  func (jt journalTracker) updateRemote(remoteUsedBytes, quotaBytes int64,
   509  	chargedTo keybase1.UserOrTeamID) {
   510  	jt.getQuotaTracker(chargedTo).updateRemote(remoteUsedBytes, quotaBytes)
   511  }
   512  
   513  func (jt journalTracker) getSemaphoreCounts() (byteCount, fileCount int64) {
   514  	return jt.byte.semaphore.Count(), jt.file.semaphore.Count()
   515  }
   516  
   517  func (jt journalTracker) reserve(
   518  	ctx context.Context, blockBytes, blockFiles int64) (
   519  	availableBytes, availableFiles int64, err error) {
   520  	availableBytes, err = jt.byte.reserve(ctx, blockBytes)
   521  	if err != nil {
   522  		return availableBytes, jt.file.semaphore.Count(), err
   523  	}
   524  	defer func() {
   525  		if err != nil {
   526  			jt.byte.rollback(blockBytes)
   527  			availableBytes = jt.byte.semaphore.Count()
   528  		}
   529  	}()
   530  
   531  	availableFiles, err = jt.file.reserve(ctx, blockFiles)
   532  	if err != nil {
   533  		return availableBytes, availableFiles, err
   534  	}
   535  
   536  	return availableBytes, availableFiles, nil
   537  }
   538  
   539  func (jt journalTracker) commitOrRollback(
   540  	blockBytes, blockFiles int64, putData bool,
   541  	chargedTo keybase1.UserOrTeamID) {
   542  	jt.byte.commitOrRollback(blockBytes, putData)
   543  	jt.file.commitOrRollback(blockFiles, putData)
   544  	jt.getQuotaTracker(chargedTo).afterBlockPut(blockBytes, putData)
   545  }
   546  
   547  func (jt journalTracker) onBlocksFlush(
   548  	blockBytes int64, chargedTo keybase1.UserOrTeamID) {
   549  	jt.getQuotaTracker(chargedTo).onBlocksFlush(blockBytes)
   550  }
   551  
   552  func (jt journalTracker) release(blockBytes, blockFiles int64) {
   553  	jt.byte.release(blockBytes)
   554  	jt.file.release(blockFiles)
   555  }
   556  
   557  func (jt journalTracker) getStatusLine(chargedTo keybase1.UserOrTeamID) string {
   558  	quota := jt.getQuotaTracker(chargedTo)
   559  	return fmt.Sprintf("journalBytes=%d, freeBytes=%d, "+
   560  		"journalFiles=%d, freeFiles=%d, "+
   561  		"quotaUnflushedBytes=%d, quotaRemoteUsedBytes=%d, "+
   562  		"quotaBytes=%d",
   563  		jt.byte.used, jt.byte.free,
   564  		jt.file.used, jt.file.free,
   565  		quota.unflushedBytes, quota.remoteUsedBytes, quota.quotaBytes)
   566  }
   567  
   568  func (jt journalTracker) getQuotaInfo(chargedTo keybase1.UserOrTeamID) (
   569  	usedQuotaBytes, quotaBytes int64) {
   570  	return jt.getQuotaTracker(chargedTo).getQuotaInfo()
   571  }
   572  
   573  func (jt journalTracker) getDiskLimitInfo() (
   574  	usedBytes int64, limitBytes float64, usedFiles int64, limitFiles float64) {
   575  	usedBytes, limitBytes = jt.byte.getLimitInfo()
   576  	usedFiles, limitFiles = jt.file.getLimitInfo()
   577  	return usedBytes, limitBytes, usedFiles, limitFiles
   578  }
   579  
   580  type journalTrackerStatus struct {
   581  	ByteStatus  backpressureTrackerStatus
   582  	FileStatus  backpressureTrackerStatus
   583  	QuotaStatus quotaBackpressureTrackerStatus
   584  }
   585  
   586  func (jt journalTracker) getStatus(
   587  	chargedTo keybase1.UserOrTeamID) journalTrackerStatus {
   588  	return journalTrackerStatus{
   589  		ByteStatus:  jt.byte.getStatus(),
   590  		FileStatus:  jt.file.getStatus(),
   591  		QuotaStatus: jt.getQuotaTracker(chargedTo).getStatus(),
   592  	}
   593  }
   594  
   595  type diskLimiterQuotaFn func(
   596  	ctx context.Context, chargedTo keybase1.UserOrTeamID) (int64, int64)
   597  
   598  // backpressureDiskLimiter is an implementation of diskLimiter that
   599  // uses backpressure to slow down block puts before they hit the disk
   600  // limits.
   601  type backpressureDiskLimiter struct {
   602  	log logger.Logger
   603  
   604  	maxDelay            time.Duration
   605  	delayFn             func(context.Context, time.Duration) error
   606  	freeBytesAndFilesFn func() (int64, int64, error)
   607  	quotaFn             diskLimiterQuotaFn
   608  
   609  	// lock protects everything in journalTracker and
   610  	// diskCacheByteTracker, including the (implicit) maximum
   611  	// values of the semaphores, but not the actual semaphores
   612  	// themselves.
   613  	lock sync.RWMutex
   614  	// overallByteTracker tracks the overall number of bytes used by Keybase.
   615  	overallByteTracker *backpressureTracker
   616  	// journalTracker tracks the journal bytes and files used.
   617  	journalTracker journalTracker
   618  	// diskCacheByteTracker tracks the disk cache bytes used.
   619  	diskCacheByteTracker *backpressureTracker
   620  	// syncCacheByteTracker tracks the sync cache bytes used.
   621  	syncCacheByteTracker *backpressureTracker
   622  }
   623  
   624  var _ DiskLimiter = (*backpressureDiskLimiter)(nil)
   625  
   626  type backpressureDiskLimiterParams struct {
   627  	// minThreshold is the fraction of the free bytes/files at
   628  	// which we start to apply backpressure.
   629  	minThreshold float64
   630  	// maxThreshold is the fraction of the free bytes/files at
   631  	// which we max out on backpressure.
   632  	maxThreshold float64
   633  	// quotaMinThreshold is the fraction of used quota at which we
   634  	// start to apply backpressure.
   635  	quotaMinThreshold float64
   636  	// quotaMaxThreshold is the fraction of used quota at which we
   637  	// max out on backpressure.
   638  	quotaMaxThreshold float64
   639  	// journalFrac is the fraction of the free bytes/files that the
   640  	// journal is allowed to use.
   641  	journalFrac float64
   642  	// diskCacheFrac is the fraction of the free bytes that the
   643  	// disk cache is allowed to use. The disk cache doesn't store
   644  	// individual files.
   645  	diskCacheFrac float64
   646  	// syncCacheFrac is the fraction of the free bytes that the
   647  	// sync cache is allowed to use.
   648  	syncCacheFrac float64
   649  	// byteLimit is the total cap for free bytes. The journal will
   650  	// be allowed to use at most journalFrac*byteLimit, and the
   651  	// disk cache will be allowed to use at most
   652  	// diskCacheFrac*byteLimit.
   653  	byteLimit int64
   654  	// fileLimit is the cap for free files. The journal will be
   655  	// allowed to use at most journalFrac*fileLimit. This limit
   656  	// doesn't apply to the disk cache, since it doesn't store
   657  	// individual files.
   658  	fileLimit int64
   659  	// maxDelay is the maximum delay used for backpressure.
   660  	maxDelay time.Duration
   661  	// delayFn is a function that takes a context and a duration
   662  	// and returns after sleeping for that duration, or if the
   663  	// context is cancelled. Overridable for testing.
   664  	delayFn func(context.Context, time.Duration) error
   665  	// freeBytesAndFilesFn is a function that returns the current
   666  	// free bytes and files on the disk containing the
   667  	// journal/disk cache directory. Overridable for testing.
   668  	freeBytesAndFilesFn func() (int64, int64, error)
   669  	// quotaFn is a function that returns the current used and
   670  	// total quota bytes. Overridable for testing.
   671  	quotaFn diskLimiterQuotaFn
   672  }
   673  
   674  // defaultDiskLimitMaxDelay is the maximum amount to delay a block
   675  // put. Exposed as a constant as it is used by
   676  // tlfJournalConfigAdapter.
   677  const defaultDiskLimitMaxDelay = 10 * time.Second
   678  
   679  type quotaUsageGetter func(
   680  	chargedTo keybase1.UserOrTeamID) *EventuallyConsistentQuotaUsage
   681  
   682  func makeDefaultBackpressureDiskLimiterParams(
   683  	storageRoot string,
   684  	quotaUsage quotaUsageGetter, diskCacheFrac float64, syncCacheFrac float64) backpressureDiskLimiterParams {
   685  	return backpressureDiskLimiterParams{
   686  		// Start backpressure when 50% of free bytes or files
   687  		// are used...
   688  		minThreshold: 0.5,
   689  		// ...and max it out at 95% (slightly less than 100%
   690  		// to allow for inaccuracies in estimates).
   691  		maxThreshold: 0.95,
   692  		// Start backpressure when we've used 100% of our quota...
   693  		quotaMinThreshold: 1.0,
   694  		// ...and max it out at 120% of quota.
   695  		quotaMaxThreshold: 1.2,
   696  		// Cap journal usage to 85% of free bytes and files...
   697  		journalFrac: 0.85,
   698  		// ...and cap disk cache usage as specified. The
   699  		// disk cache doesn't store individual files.
   700  		diskCacheFrac: diskCacheFrac,
   701  		// Also cap the sync cache usage for offline files.
   702  		syncCacheFrac: syncCacheFrac,
   703  		// Set the byte limit to 200 GiB, which translates to
   704  		// having the journal take up at most 170 GiB, and the
   705  		// disk cache at most diskCacheFrac of those 200 GiB.
   706  		byteLimit: 200 * 1024 * 1024 * 1024,
   707  		// Set the file limit to 6 million files, which
   708  		// translates to having the journal take up at most
   709  		// 5.1 million files.
   710  		fileLimit: 6000000,
   711  		maxDelay:  defaultDiskLimitMaxDelay,
   712  		delayFn:   defaultDoDelay,
   713  		freeBytesAndFilesFn: func() (int64, int64, error) {
   714  			return defaultGetFreeBytesAndFiles(storageRoot)
   715  		},
   716  		quotaFn: func(ctx context.Context, chargedTo keybase1.UserOrTeamID) (
   717  			int64, int64) {
   718  			timestamp, usageBytes, _, limitBytes, err :=
   719  				quotaUsage(chargedTo).Get(ctx, 1*time.Minute, math.MaxInt64)
   720  			if err != nil {
   721  				return 0, math.MaxInt64
   722  			}
   723  
   724  			if timestamp.IsZero() {
   725  				return 0, math.MaxInt64
   726  			}
   727  
   728  			return usageBytes, limitBytes
   729  		},
   730  	}
   731  }
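
// newDefaultLimiterSketch is an editor-added illustration (not part of the
// original file) of wiring the default parameters above into a limiter. The
// 0.10 and 1.00 cache fractions are example values chosen here, not defaults
// mandated by this package.
func newDefaultLimiterSketch(
	log logger.Logger, storageRoot string, quotaUsage quotaUsageGetter) (
	*backpressureDiskLimiter, error) {
	params := makeDefaultBackpressureDiskLimiterParams(
		storageRoot, quotaUsage, 0.10, 1.00)
	return newBackpressureDiskLimiter(log, params)
}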
   732  
   733  // newBackpressureDiskLimiter constructs a new backpressureDiskLimiter
   734  // with the given params.
   735  func newBackpressureDiskLimiter(
   736  	log logger.Logger, params backpressureDiskLimiterParams) (
   737  	*backpressureDiskLimiter, error) {
   738  	freeBytes, freeFiles, err := params.freeBytesAndFilesFn()
   739  	if err != nil {
   740  		return nil, err
   741  	}
   742  
   743  	journalTracker, err := newJournalTracker(
   744  		params.minThreshold, params.maxThreshold,
   745  		params.quotaMinThreshold, params.quotaMaxThreshold,
   746  		params.journalFrac, params.byteLimit, params.fileLimit,
   747  		freeBytes, freeFiles)
   748  	if err != nil {
   749  		return nil, err
   750  	}
   751  
   752  	// byteLimit must be scaled by the proportion of the limit
   753  	// that the disk cache should consume. Add 0.5 for rounding.
   754  	diskCacheByteLimit := int64(
   755  		(float64(params.byteLimit) * params.diskCacheFrac) + 0.5)
   756  	// The byte limit doesn't apply to the sync cache.
   757  	syncCacheByteLimit := int64(math.MaxInt64)
   758  	overallByteTracker, err := newBackpressureTracker(
   759  		1.0, 1.0, 1.0, params.byteLimit, freeBytes)
   760  	if err != nil {
   761  		return nil, err
   762  	}
   763  
   764  	diskCacheByteTracker, err := newBackpressureTracker(
   765  		1.0, 1.0, params.diskCacheFrac, diskCacheByteLimit, freeBytes)
   766  	if err != nil {
   767  		return nil, err
   768  	}
   769  	syncCacheByteTracker, err := newBackpressureTracker(
   770  		1.0, 1.0, params.syncCacheFrac, syncCacheByteLimit, freeBytes)
   771  	if err != nil {
   772  		return nil, err
   773  	}
   774  
   775  	bdl := &backpressureDiskLimiter{
   776  		log:                  log,
   777  		maxDelay:             params.maxDelay,
   778  		delayFn:              params.delayFn,
   779  		freeBytesAndFilesFn:  params.freeBytesAndFilesFn,
   780  		quotaFn:              params.quotaFn,
   781  		lock:                 sync.RWMutex{},
   782  		overallByteTracker:   overallByteTracker,
   783  		journalTracker:       journalTracker,
   784  		diskCacheByteTracker: diskCacheByteTracker,
   785  		syncCacheByteTracker: syncCacheByteTracker,
   786  	}
   787  	return bdl, nil
   788  }
   789  
   790  // defaultDoDelay uses a timer to delay by the given duration.
   791  func defaultDoDelay(ctx context.Context, delay time.Duration) error {
   792  	if delay == 0 {
   793  		return nil
   794  	}
   795  
   796  	timer := time.NewTimer(delay)
   797  	select {
   798  	case <-timer.C:
   799  		return nil
   800  	case <-ctx.Done():
   801  		timer.Stop()
   802  		return errors.WithStack(ctx.Err())
   803  	}
   804  }
   805  
   806  func defaultGetFreeBytesAndFiles(path string) (int64, int64, error) {
   807  	// getDiskLimits returns availableBytes and availableFiles, but
   808  	// we call them freeBytes and freeFiles here to avoid confusing
   809  	// them with the available counts tracked by the semaphores.
   810  	freeBytes, _, freeFiles, _, err := getDiskLimits(path)
   811  	if err != nil {
   812  		return 0, 0, err
   813  	}
   814  
   815  	if freeBytes > uint64(math.MaxInt64) {
   816  		freeBytes = math.MaxInt64
   817  	}
   818  	if freeFiles > uint64(math.MaxInt64) {
   819  		freeFiles = math.MaxInt64
   820  	}
   821  	return int64(freeBytes), int64(freeFiles), nil
   822  }
   823  
   824  func (bdl *backpressureDiskLimiter) simpleByteTrackerFromType(typ diskLimitTrackerType) (
   825  	tracker simpleResourceTracker, err error) {
   826  	switch typ {
   827  	case workingSetCacheLimitTrackerType:
   828  		return bdl.diskCacheByteTracker, nil
   829  	case syncCacheLimitTrackerType:
   830  		return bdl.syncCacheByteTracker, nil
   831  	default:
   832  		return nil, unknownTrackerTypeError{typ}
   833  	}
   834  }
   835  
   836  func (bdl *backpressureDiskLimiter) getJournalSnapshotsForTest(
   837  	chargedTo keybase1.UserOrTeamID) (
   838  	byteSnapshot, fileSnapshot, quotaSnapshot jtSnapshot) {
   839  	bdl.lock.RLock()
   840  	defer bdl.lock.RUnlock()
   841  	return bdl.journalTracker.getSnapshotsForTest(chargedTo)
   842  }
   843  
   844  func (bdl *backpressureDiskLimiter) onJournalEnable(
   845  	ctx context.Context,
   846  	journalStoredBytes, journalUnflushedBytes, journalFiles int64,
   847  	chargedTo keybase1.UserOrTeamID) (
   848  	availableBytes, availableFiles int64) {
   849  	bdl.lock.Lock()
   850  	defer bdl.lock.Unlock()
   851  	bdl.overallByteTracker.onEnable(journalStoredBytes)
   852  	return bdl.journalTracker.onEnable(
   853  		journalStoredBytes, journalUnflushedBytes, journalFiles, chargedTo)
   854  }
   855  
   856  func (bdl *backpressureDiskLimiter) onJournalDisable(
   857  	ctx context.Context,
   858  	journalStoredBytes, journalUnflushedBytes, journalFiles int64,
   859  	chargedTo keybase1.UserOrTeamID) {
   860  	bdl.lock.Lock()
   861  	defer bdl.lock.Unlock()
   862  	bdl.journalTracker.onDisable(
   863  		journalStoredBytes, journalUnflushedBytes, journalFiles, chargedTo)
   864  	bdl.overallByteTracker.onDisable(journalStoredBytes)
   865  }
   866  
   867  func (bdl *backpressureDiskLimiter) onSimpleByteTrackerEnable(ctx context.Context,
   868  	typ diskLimitTrackerType, diskCacheBytes int64) {
   869  	tracker, err := bdl.simpleByteTrackerFromType(typ)
   870  	if err != nil {
   871  		panic("Invalid tracker type passed to onSimpleByteTrackerEnable")
   872  	}
   873  	bdl.lock.Lock()
   874  	defer bdl.lock.Unlock()
   875  	bdl.overallByteTracker.onEnable(diskCacheBytes)
   876  	if typ != syncCacheLimitTrackerType {
   877  		tracker.onEnable(diskCacheBytes)
   878  	}
   879  }
   880  
   881  func (bdl *backpressureDiskLimiter) onSimpleByteTrackerDisable(ctx context.Context,
   882  	typ diskLimitTrackerType, diskCacheBytes int64) {
   883  	tracker, err := bdl.simpleByteTrackerFromType(typ)
   884  	if err != nil {
   885  		panic("Invalid tracker type passed to onSimpleByteTrackerDisable")
   886  	}
   887  	bdl.lock.Lock()
   888  	defer bdl.lock.Unlock()
   889  	tracker.onDisable(diskCacheBytes)
   890  	if typ != syncCacheLimitTrackerType {
   891  		bdl.overallByteTracker.onDisable(diskCacheBytes)
   892  	}
   893  }
   894  
   895  func (bdl *backpressureDiskLimiter) getDelayLocked(
   896  	ctx context.Context, now time.Time,
   897  	chargedTo keybase1.UserOrTeamID) time.Duration {
   898  	delayScale := bdl.journalTracker.getDelayScale(chargedTo)
   899  
   900  	// Set maxDelay to min(bdl.maxDelay, time until deadline - 1s).
   901  	maxDelay := bdl.maxDelay
   902  	if deadline, ok := ctx.Deadline(); ok {
   903  		// Subtract a second to allow for some slack.
   904  		remainingTime := deadline.Sub(now) - time.Second
   905  		if remainingTime < maxDelay {
   906  			maxDelay = remainingTime
   907  		}
   908  	}
   909  
   910  	return time.Duration(delayScale * float64(maxDelay))
   911  }
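
// Editor's note (not part of the original file): for example, with a delay
// scale of 0.72 and the default 10s maximum, the computed delay is 7.2s; if
// the context's deadline is only 5s away, maxDelay is first clamped to 4s
// (5s minus the 1s of slack), giving a delay of roughly 2.9s.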
   912  
   913  func (bdl *backpressureDiskLimiter) reserveError(err error) (
   914  	availableBytes, availableFiles int64, _ error) {
   915  	bdl.lock.RLock()
   916  	defer bdl.lock.RUnlock()
   917  	availableBytes, availableFiles =
   918  		bdl.journalTracker.getSemaphoreCounts()
   919  	return availableBytes, availableFiles, err
   920  }
   921  
   922  func (bdl *backpressureDiskLimiter) reserveWithBackpressure(
   923  	ctx context.Context, typ diskLimitTrackerType, blockBytes, blockFiles int64,
   924  	chargedTo keybase1.UserOrTeamID) (availableBytes, availableFiles int64,
   925  	err error) {
   926  	// TODO: if other backpressure consumers are introduced, remove this check.
   927  	if typ != journalLimitTrackerType {
   928  		return bdl.reserveError(errors.New(
   929  			"reserveWithBackpressure called with " +
   930  				"non-journal tracker type."))
   931  	}
   932  	if blockBytes == 0 {
   933  		// Better to return an error than to panic in Acquire.
   934  		return bdl.reserveError(errors.New(
   935  			"reserveWithBackpressure called with 0 blockBytes"))
   936  	}
   937  	if blockFiles == 0 {
   938  		// Better to return an error than to panic in Acquire.
   939  		return bdl.reserveError(errors.New(
   940  			"reserveWithBackpressure called with 0 blockFiles"))
   941  	}
   942  	delay, err := func() (time.Duration, error) {
   943  		bdl.lock.Lock()
   944  		defer bdl.lock.Unlock()
   945  
   946  		// Call this under lock to avoid problems with its
   947  		// return values going stale while blocking on
   948  		// bdl.lock.
   949  		freeBytes, freeFiles, err := bdl.freeBytesAndFilesFn()
   950  		if err != nil {
   951  			return 0, err
   952  		}
   953  
   954  		bdl.overallByteTracker.updateFree(freeBytes)
   955  		bdl.journalTracker.updateFree(freeBytes, bdl.overallByteTracker.used,
   956  			freeFiles)
   957  
   958  		remoteUsedBytes, quotaBytes := bdl.quotaFn(ctx, chargedTo)
   959  		bdl.journalTracker.updateRemote(remoteUsedBytes, quotaBytes, chargedTo)
   960  
   961  		delay := bdl.getDelayLocked(ctx, time.Now(), chargedTo)
   962  		if delay > 0 {
   963  			bdl.log.CDebugf(ctx, "Delaying block put of %d bytes and %d "+
   964  				"files by %f s (%s)", blockBytes, blockFiles, delay.Seconds(),
   965  				bdl.journalTracker.getStatusLine(chargedTo))
   966  		}
   967  
   968  		return delay, nil
   969  	}()
   970  	if err != nil {
   971  		return bdl.reserveError(err)
   972  	}
   973  
   974  	// TODO: Update delay if any variables change (i.e., we suddenly free up a
   975  	// lot of space).
   976  	err = bdl.delayFn(ctx, delay)
   977  	if err != nil {
   978  		return bdl.reserveError(err)
   979  	}
   980  	bdl.lock.Lock()
   981  	defer bdl.lock.Unlock()
   982  
   983  	_, err = bdl.overallByteTracker.reserve(ctx, blockBytes)
   984  	if err != nil {
   985  		// Just log this error -- let the journal tracker error stand
   986  		// as the real returned error.
   987  		bdl.log.CDebugf(ctx, "Error reserving overall tracker: %+v", err)
   988  	}
   989  	return bdl.journalTracker.reserve(ctx, blockBytes, blockFiles)
   990  }
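
// putBlockWithLimiterSketch is an editor-added illustration (not part of the
// original file) of the intended call pattern around a journal block put:
// reserve with backpressure first, then commit or roll back depending on
// whether the put actually stored data. doPut is a hypothetical stand-in for
// the actual block put.
func putBlockWithLimiterSketch(
	ctx context.Context, bdl *backpressureDiskLimiter,
	blockBytes, blockFiles int64, chargedTo keybase1.UserOrTeamID,
	doPut func() (putData bool, err error)) error {
	_, _, err := bdl.reserveWithBackpressure(
		ctx, journalLimitTrackerType, blockBytes, blockFiles, chargedTo)
	if err != nil {
		return err
	}
	putData, err := doPut()
	// Commit the reservation only if the put stored new data; otherwise the
	// reserved bytes and files are returned to the semaphores.
	bdl.commitOrRollback(ctx, journalLimitTrackerType, blockBytes, blockFiles,
		putData && err == nil, chargedTo)
	return err
}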
   991  
   992  func (bdl *backpressureDiskLimiter) commitOrRollback(ctx context.Context,
   993  	typ diskLimitTrackerType, blockBytes, blockFiles int64, shouldCommit bool,
   994  	chargedTo keybase1.UserOrTeamID) {
   995  	bdl.lock.Lock()
   996  	defer bdl.lock.Unlock()
   997  	switch typ {
   998  	case journalLimitTrackerType:
   999  		bdl.journalTracker.commitOrRollback(blockBytes, blockFiles,
  1000  			shouldCommit, chargedTo)
  1001  	default:
  1002  		tracker, err := bdl.simpleByteTrackerFromType(typ)
  1003  		if err != nil {
  1004  			panic("Bad tracker type for commitOrRollback")
  1005  		}
  1006  		tracker.commitOrRollback(blockBytes, shouldCommit)
  1007  	}
  1008  	if typ != syncCacheLimitTrackerType {
  1009  		bdl.overallByteTracker.commitOrRollback(blockBytes, shouldCommit)
  1010  	}
  1011  }
  1012  
  1013  func (bdl *backpressureDiskLimiter) onBlocksFlush(
  1014  	ctx context.Context, blockBytes int64, chargedTo keybase1.UserOrTeamID) {
  1015  	bdl.lock.Lock()
  1016  	defer bdl.lock.Unlock()
  1017  	bdl.journalTracker.onBlocksFlush(blockBytes, chargedTo)
  1018  }
  1019  
  1020  func (bdl *backpressureDiskLimiter) release(ctx context.Context,
  1021  	typ diskLimitTrackerType, blockBytes, blockFiles int64) {
  1022  	bdl.lock.Lock()
  1023  	defer bdl.lock.Unlock()
  1024  	switch typ {
  1025  	case journalLimitTrackerType:
  1026  		bdl.journalTracker.release(blockBytes, blockFiles)
  1027  	default:
  1028  		tracker, err := bdl.simpleByteTrackerFromType(typ)
  1029  		if err != nil {
  1030  			panic("Bad tracker type for release")
  1031  		}
  1032  		tracker.release(blockBytes)
  1033  	}
  1034  	if typ != syncCacheLimitTrackerType {
  1035  		bdl.overallByteTracker.release(blockBytes)
  1036  	}
  1037  }
  1038  
  1039  func (bdl *backpressureDiskLimiter) reserveBytes(
  1040  	ctx context.Context, typ diskLimitTrackerType, blockBytes int64) (
  1041  	availableBytes int64, err error) {
  1042  	if blockBytes == 0 {
  1043  		// Better to return an error than to panic in ForceAcquire.
  1044  		return 0, errors.New("reserveBytes called with 0 blockBytes")
  1045  	}
  1046  	tracker, err := bdl.simpleByteTrackerFromType(typ)
  1047  	if err != nil {
  1048  		return 0, err
  1049  	}
  1050  	bdl.lock.Lock()
  1051  	defer bdl.lock.Unlock()
  1052  
  1053  	// Call this under lock to avoid problems with its return
  1054  	// values going stale while blocking on bdl.lock.
  1055  	freeBytes, _, err := bdl.freeBytesAndFilesFn()
  1056  	if err != nil {
  1057  		return 0, err
  1058  	}
  1059  
  1060  	bdl.overallByteTracker.updateFree(freeBytes)
  1061  	if typ != syncCacheLimitTrackerType {
  1062  		count := bdl.overallByteTracker.tryReserve(blockBytes)
  1063  		if count < 0 {
  1064  			return count, nil
  1065  		}
  1066  		// We calculate the total free bytes by adding the reported free bytes and
  1067  		// the non-`tracker` used bytes.
  1068  		tracker.updateFree(freeBytes + bdl.overallByteTracker.used -
  1069  			tracker.usedResources())
  1070  	} else {
  1071  		// This allows the sync cache to take up to 100% of the free space
  1072  		// even if another cache is using some of it; their usage may overlap.
  1073  		tracker.updateFree(freeBytes + bdl.overallByteTracker.used)
  1074  	}
  1075  
  1076  	count := tracker.tryReserve(blockBytes)
  1077  	if count < 0 && typ != syncCacheLimitTrackerType {
  1078  		bdl.overallByteTracker.rollback(blockBytes)
  1079  	}
  1080  	return count, nil
  1081  }
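
// Editor's note (not part of the original file): a sketch of the intended
// call pattern for the working-set cache, where a negative return value
// (rather than an error) signals that there wasn't enough room; bdl,
// blockBytes and chargedTo are assumed to be in scope:
//
//	avail, err := bdl.reserveBytes(ctx, workingSetCacheLimitTrackerType, blockBytes)
//	if err != nil {
//		return err
//	}
//	if avail < 0 {
//		// Not enough room; evict something or skip caching this block.
//	}
//	// ...write the block to the cache, then commit (or roll back on error):
//	bdl.commitOrRollback(ctx, workingSetCacheLimitTrackerType, blockBytes, 0,
//		true, chargedTo)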
  1082  
  1083  func (bdl *backpressureDiskLimiter) getQuotaInfo(
  1084  	chargedTo keybase1.UserOrTeamID) (usedQuotaBytes, quotaBytes int64) {
  1085  	bdl.lock.RLock()
  1086  	defer bdl.lock.RUnlock()
  1087  	return bdl.journalTracker.getQuotaInfo(chargedTo)
  1088  }
  1089  
  1090  func (bdl *backpressureDiskLimiter) getDiskLimitInfo() (
  1091  	usedBytes int64, limitBytes float64, usedFiles int64, limitFiles float64) {
  1092  	bdl.lock.RLock()
  1093  	defer bdl.lock.RUnlock()
  1094  	return bdl.journalTracker.getDiskLimitInfo()
  1095  }
  1096  
  1097  type backpressureDiskLimiterStatus struct {
  1098  	Type string
  1099  
  1100  	// Derived stats.
  1101  	CurrentDelaySec float64
  1102  
  1103  	JournalTrackerStatus journalTrackerStatus
  1104  	DiskCacheByteStatus  backpressureTrackerStatus
  1105  	SyncCacheByteStatus  backpressureTrackerStatus
  1106  }
  1107  
  1108  func (bdl *backpressureDiskLimiter) getStatus(
  1109  	ctx context.Context, chargedTo keybase1.UserOrTeamID) interface{} {
  1110  	bdl.lock.Lock()
  1111  	defer bdl.lock.Unlock()
  1112  
  1113  	currentDelay := bdl.getDelayLocked(
  1114  		context.Background(), time.Now(), chargedTo)
  1115  
  1116  	jStatus := bdl.journalTracker.getStatus(chargedTo)
  1117  	// If we haven't updated the quota limit yet, update it now.
  1118  	if jStatus.QuotaStatus.QuotaBytes == math.MaxInt64 {
  1119  		remoteUsedBytes, quotaBytes := bdl.quotaFn(ctx, chargedTo)
  1120  		bdl.journalTracker.updateRemote(remoteUsedBytes, quotaBytes, chargedTo)
  1121  		jStatus = bdl.journalTracker.getStatus(chargedTo)
  1122  	}
  1123  
  1124  	return backpressureDiskLimiterStatus{
  1125  		Type: "BackpressureDiskLimiter",
  1126  
  1127  		CurrentDelaySec: currentDelay.Seconds(),
  1128  
  1129  		JournalTrackerStatus: jStatus,
  1130  		DiskCacheByteStatus:  bdl.diskCacheByteTracker.getStatus(),
  1131  		SyncCacheByteStatus:  bdl.syncCacheByteTracker.getStatus(),
  1132  	}
  1133  }