github.com/keybase/client/go@v0.0.0-20241007131713-f10651d043c8/kbfs/libkbfs/backpressure_disk_limiter_test.go

     1  // Copyright 2017 Keybase Inc. All rights reserved.
     2  // Use of this source code is governed by a BSD
     3  // license that can be found in the LICENSE file.
     4  
     5  package libkbfs
     6  
     7  import (
     8  	"fmt"
     9  	"math"
    10  	"testing"
    11  	"time"
    12  
    13  	"github.com/keybase/client/go/logger"
    14  	"github.com/keybase/client/go/protocol/keybase1"
    15  	"github.com/pkg/errors"
    16  	"github.com/stretchr/testify/require"
    17  	"golang.org/x/net/context"
    18  )
    19  
    20  // TestBackpressureTrackerCounters checks that a backpressure
    21  // tracker's counters are updated properly for each public method.
    22  func TestBackpressureTrackerCounters(t *testing.T) {
    23  	bt, err := newBackpressureTracker(0.1, 0.9, 0.25, 100, 200)
    24  	require.NoError(t, err)
    25  
    26  	// semaphoreMax = min(k(U+F), L) = min(0.25(0+200), 100) = 50.
    27  	require.Equal(t, int64(0), bt.used)
    28  	require.Equal(t, int64(200), bt.free)
    29  	require.Equal(t, int64(50), bt.semaphoreMax)
    30  	require.Equal(t, int64(50), bt.semaphore.Count())
    31  
    32  	// Increase U by 10, so that increases sM by 0.25*10 = 2.5, so
    33  	// sM is now 52.
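        	// The available count is then sM - U = 52 - 10 = 42.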
    34  
    35  	avail := bt.onEnable(10)
    36  	require.Equal(t, int64(42), avail)
    37  
    38  	require.Equal(t, int64(10), bt.used)
    39  	require.Equal(t, int64(200), bt.free)
    40  	require.Equal(t, int64(52), bt.semaphoreMax)
    41  	require.Equal(t, int64(42), bt.semaphore.Count())
    42  
    43  	// Decrease U by 9, so that decreases sM by 0.25*9 = 2.25, so
    44  	// sM is back to 50.
    45  
    46  	bt.onDisable(9)
    47  
    48  	require.Equal(t, int64(1), bt.used)
    49  	require.Equal(t, int64(200), bt.free)
    50  	require.Equal(t, int64(50), bt.semaphoreMax)
    51  	require.Equal(t, int64(49), bt.semaphore.Count())
    52  
    53  	// Increase U by 440, so that increases sM by 0.25*440 = 110,
    54  	// so sM maxes out at 100, and the semaphore should go negative.
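        	// The count bottoms out at sM - U = 100 - 441 = -341.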
    55  
    56  	avail = bt.onEnable(440)
    57  	require.Equal(t, int64(-341), avail)
    58  
    59  	require.Equal(t, int64(441), bt.used)
    60  	require.Equal(t, int64(200), bt.free)
    61  	require.Equal(t, int64(100), bt.semaphoreMax)
    62  	require.Equal(t, int64(-341), bt.semaphore.Count())
    63  
    64  	// Now revert that increase.
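        	// sM drops back to min(0.25(1+200), 100) = 50, and the count
        	// to 50 - 1 = 49.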
    65  
    66  	bt.onDisable(440)
    67  
    68  	require.Equal(t, int64(1), bt.used)
    69  	require.Equal(t, int64(200), bt.free)
    70  	require.Equal(t, int64(50), bt.semaphoreMax)
    71  	require.Equal(t, int64(49), bt.semaphore.Count())
    72  
    73  	// This should be a no-op.
    74  	avail = bt.onEnable(0)
    75  	require.Equal(t, int64(49), avail)
    76  
    77  	require.Equal(t, int64(1), bt.used)
    78  	require.Equal(t, int64(200), bt.free)
    79  	require.Equal(t, int64(50), bt.semaphoreMax)
    80  	require.Equal(t, int64(49), bt.semaphore.Count())
    81  
    82  	// So should this.
    83  	bt.onDisable(0)
    84  
    85  	require.Equal(t, int64(1), bt.used)
    86  	require.Equal(t, int64(200), bt.free)
    87  	require.Equal(t, int64(50), bt.semaphoreMax)
    88  	require.Equal(t, int64(49), bt.semaphore.Count())
    89  
    90  	// Add more free resources and put a block successfully.
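        	// With F = 400, sM = min(0.25(1+400), 100) = 100, so the count
        	// rises to 99, and reserving 10 leaves 89.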
    91  
    92  	bt.updateFree(400)
    93  
    94  	avail, err = bt.reserve(context.Background(), 10)
    95  	require.NoError(t, err)
    96  	require.Equal(t, int64(89), avail)
    97  
    98  	require.Equal(t, int64(1), bt.used)
    99  	require.Equal(t, int64(400), bt.free)
   100  	require.Equal(t, int64(100), bt.semaphoreMax)
   101  	require.Equal(t, int64(89), bt.semaphore.Count())
   102  
   103  	bt.commitOrRollback(10, true)
   104  
   105  	require.Equal(t, int64(11), bt.used)
   106  	require.Equal(t, int64(400), bt.free)
   107  	require.Equal(t, int64(100), bt.semaphoreMax)
   108  	require.Equal(t, int64(89), bt.semaphore.Count())
   109  
   110  	// Then try to put a block but fail it.
   111  
   112  	avail, err = bt.reserve(context.Background(), 9)
   113  	require.NoError(t, err)
   114  	require.Equal(t, int64(80), avail)
   115  
   116  	require.Equal(t, int64(11), bt.used)
   117  	require.Equal(t, int64(400), bt.free)
   118  	require.Equal(t, int64(100), bt.semaphoreMax)
   119  	require.Equal(t, int64(80), bt.semaphore.Count())
   120  
   121  	bt.commitOrRollback(9, false)
   122  
   123  	require.Equal(t, int64(11), bt.used)
   124  	require.Equal(t, int64(400), bt.free)
   125  	require.Equal(t, int64(100), bt.semaphoreMax)
   126  	require.Equal(t, int64(89), bt.semaphore.Count())
   127  
   128  	// Finally, delete a block.
   129  
   130  	bt.release(11)
   131  
   132  	require.Equal(t, int64(0), bt.used)
   133  	require.Equal(t, int64(400), bt.free)
   134  	require.Equal(t, int64(100), bt.semaphoreMax)
   135  	require.Equal(t, int64(100), bt.semaphore.Count())
   136  
   137  	// This should be a no-op.
   138  	bt.release(0)
   139  
   140  	require.Equal(t, int64(0), bt.used)
   141  	require.Equal(t, int64(400), bt.free)
   142  	require.Equal(t, int64(100), bt.semaphoreMax)
   143  	require.Equal(t, int64(100), bt.semaphore.Count())
   144  }
   145  
   146  // TestQuotaBackpressureTrackerCounters checks that a quota tracker's
   147  // counters are updated properly for each public method.
   148  func TestQuotaBackpressureTrackerCounters(t *testing.T) {
   149  	qbt, err := newQuotaBackpressureTracker(0.1, 0.9)
   150  	require.NoError(t, err)
   151  
   152  	require.Equal(t, int64(0), qbt.unflushedBytes)
   153  	require.Equal(t, int64(0), qbt.remoteUsedBytes)
   154  	require.Equal(t, int64(math.MaxInt64), qbt.quotaBytes)
   155  
   156  	qbt.onJournalEnable(10)
   157  	require.Equal(t, int64(10), qbt.unflushedBytes)
   158  	require.Equal(t, int64(0), qbt.remoteUsedBytes)
   159  	require.Equal(t, int64(math.MaxInt64), qbt.quotaBytes)
   160  
   161  	qbt.onJournalDisable(9)
   162  	require.Equal(t, int64(1), qbt.unflushedBytes)
   163  	require.Equal(t, int64(0), qbt.remoteUsedBytes)
   164  	require.Equal(t, int64(math.MaxInt64), qbt.quotaBytes)
   165  
   166  	// Update the remote usage and quota, then put a block successfully.
   167  
   168  	qbt.updateRemote(10, 100)
   169  
   170  	require.Equal(t, int64(1), qbt.unflushedBytes)
   171  	require.Equal(t, int64(10), qbt.remoteUsedBytes)
   172  	require.Equal(t, int64(100), qbt.quotaBytes)
   173  
   174  	qbt.afterBlockPut(10, true)
   175  
   176  	require.Equal(t, int64(11), qbt.unflushedBytes)
   177  	require.Equal(t, int64(10), qbt.remoteUsedBytes)
   178  	require.Equal(t, int64(100), qbt.quotaBytes)
   179  
   180  	// Then try to put a block but fail it.
   181  
   182  	qbt.afterBlockPut(9, false)
   183  
   184  	require.Equal(t, int64(11), qbt.unflushedBytes)
   185  	require.Equal(t, int64(10), qbt.remoteUsedBytes)
   186  	require.Equal(t, int64(100), qbt.quotaBytes)
   187  
   188  	// Finally, flush a block.
   189  
   190  	qbt.onBlocksFlush(10)
   191  
   192  	require.Equal(t, int64(1), qbt.unflushedBytes)
   193  	require.Equal(t, int64(10), qbt.remoteUsedBytes)
   194  	require.Equal(t, int64(100), qbt.quotaBytes)
   195  }
   196  
   197  // TestJournalTrackerCounters checks that a journal tracker's counters
   198  // are updated properly for each public method.
   199  func TestJournalTrackerCounters(t *testing.T) {
   200  	jt, err := newJournalTracker(
   201  		0.1,  // minThreshold
   202  		0.9,  // maxThreshold
   203  		1.0,  // quotaMinThreshold
   204  		1.2,  // quotaMaxThreshold
   205  		0.15, // journalFrac
   206  		400,  // byteLimit
   207  		800,  // fileLimit
   208  		100,  // freeBytes
   209  		200)  // freeFiles
   210  	require.NoError(t, err)
   211  
   212  	// max = count = min(k(U+F), L) = min(0.15(0+100), 400) = 15.
   213  	expectedByteSnapshot := jtSnapshot{
   214  		used:  0,
   215  		free:  100,
   216  		max:   15,
   217  		count: 15,
   218  	}
   219  	// max = count = min(k(U+F), L) = min(0.15(0+200), 800) = 30.
   220  	expectedFileSnapshot := jtSnapshot{
   221  		used:  0,
   222  		free:  200,
   223  		max:   30,
   224  		count: 30,
   225  	}
   226  	expectedQuotaSnapshot := jtSnapshot{
   227  		used: 0,
   228  		free: math.MaxInt64,
   229  	}
   230  
   231  	chargedTo := keybase1.MakeTestUID(1).AsUserOrTeam()
   232  	checkSnapshots := func() {
   233  		byteSnapshot, fileSnapshot, quotaSnapshot :=
   234  			jt.getSnapshotsForTest(chargedTo)
   235  		require.Equal(t, expectedByteSnapshot, byteSnapshot)
   236  		require.Equal(t, expectedFileSnapshot, fileSnapshot)
   237  		require.Equal(t, expectedQuotaSnapshot, quotaSnapshot)
   238  	}
   239  
   240  	checkSnapshots()
   241  
   242  	// For stored bytes, increase U by 10, so that increases max
   243  	// by 0.15*10 = 1.5, so max is now 16. For files, increase U
   244  	// by 20, so that increases max by 0.15*20 = 3, so max is now
   245  	// 33.
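        	// The available counts are then 16 - 10 = 6 bytes and
        	// 33 - 20 = 13 files.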
   246  
   247  	availBytes, availFiles := jt.onEnable(10, 5, 20, chargedTo)
   248  	require.Equal(t, int64(6), availBytes)
   249  	require.Equal(t, int64(13), availFiles)
   250  
   251  	expectedByteSnapshot = jtSnapshot{
   252  		used:  10,
   253  		free:  100,
   254  		max:   16,
   255  		count: 6,
   256  	}
   257  	expectedFileSnapshot = jtSnapshot{
   258  		used:  20,
   259  		free:  200,
   260  		max:   33,
   261  		count: 13,
   262  	}
   263  	expectedQuotaSnapshot = jtSnapshot{
   264  		used: 5,
   265  		free: math.MaxInt64 - 5,
   266  	}
   267  
   268  	checkSnapshots()
   269  
   270  	// For stored bytes, decrease U by 9, so that decreases max by
   271  	// 0.15*9 = 1.35, so max is back to 15. For files, decrease U
   272  	// by 19, so that decreases max by 0.15*19 = 2.85, so max back to
   273  	// 30.
   274  
   275  	jt.onDisable(9, 4, 19, chargedTo)
   276  
   277  	expectedByteSnapshot = jtSnapshot{
   278  		used:  1,
   279  		free:  100,
   280  		max:   15,
   281  		count: 14,
   282  	}
   283  	expectedFileSnapshot = jtSnapshot{
   284  		used:  1,
   285  		free:  200,
   286  		max:   30,
   287  		count: 29,
   288  	}
   289  	expectedQuotaSnapshot = jtSnapshot{
   290  		used: 1,
   291  		free: math.MaxInt64 - 1,
   292  	}
   293  
   294  	checkSnapshots()
   295  
   296  	// Update free resources.
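        	// The byte tracker's free becomes 240 (the 200 reported free plus,
        	// presumably, the 41 - 1 = 40 non-journal used bytes), so its max is
        	// min(0.15(1+240), 400) = 36; the file tracker's max is
        	// min(0.15(1+100), 800) = 15.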
   297  
   298  	jt.updateFree(200, 41, 100)
   299  
   300  	expectedByteSnapshot = jtSnapshot{
   301  		used:  1,
   302  		free:  240,
   303  		max:   36,
   304  		count: 35,
   305  	}
   306  	expectedFileSnapshot = jtSnapshot{
   307  		used:  1,
   308  		free:  100,
   309  		max:   15,
   310  		count: 14,
   311  	}
   312  
   313  	checkSnapshots()
   314  
   315  	// Update remote resources.
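        	// Quota used becomes 1 + 10 = 11, leaving 100 - 11 = 89 free.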
   316  
   317  	jt.updateRemote(10, 100, chargedTo)
   318  
   319  	expectedQuotaSnapshot = jtSnapshot{
   320  		used: 11,
   321  		free: 89,
   322  	}
   323  
   324  	checkSnapshots()
   325  
   326  	// Put a block successfully.
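        	// Reserving 10 bytes and 5 files leaves 35 - 10 = 25 bytes and
        	// 14 - 5 = 9 files available.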
   327  
   328  	availBytes, availFiles, err = jt.reserve(
   329  		context.Background(), 10, 5)
   330  	require.NoError(t, err)
   331  	require.Equal(t, int64(25), availBytes)
   332  	require.Equal(t, int64(9), availFiles)
   333  
   334  	expectedByteSnapshot.count -= 10
   335  	expectedFileSnapshot.count -= 5
   336  
   337  	checkSnapshots()
   338  
   339  	jt.commitOrRollback(10, 5, true, chargedTo)
   340  
   341  	// max = min(k(U+F), L) = min(0.15(11+240), 400) = 37.
   342  	expectedByteSnapshot = jtSnapshot{
   343  		used:  11,
   344  		free:  240,
   345  		max:   37,
   346  		count: 26,
   347  	}
   348  
   349  	// max = min(k(U+F), L) = min(0.15(6+100), 800) = 15.
   350  	expectedFileSnapshot = jtSnapshot{
   351  		used:  6,
   352  		free:  100,
   353  		max:   15,
   354  		count: 9,
   355  	}
   356  
   357  	expectedQuotaSnapshot = jtSnapshot{
   358  		used: 21,
   359  		free: 79,
   360  	}
   361  
   362  	checkSnapshots()
   363  
   364  	// Then try to put a block but fail it.
   365  
   366  	availBytes, availFiles, err = jt.reserve(
   367  		context.Background(), 10, 5)
   368  	require.NoError(t, err)
   369  	require.Equal(t, int64(16), availBytes)
   370  	require.Equal(t, int64(4), availFiles)
   371  
   372  	expectedByteSnapshot.count -= 10
   373  	expectedFileSnapshot.count -= 5
   374  
   375  	checkSnapshots()
   376  
   377  	jt.commitOrRollback(10, 5, false, chargedTo)
   378  
   379  	expectedByteSnapshot.count += 10
   380  	expectedFileSnapshot.count += 5
   381  
   382  	checkSnapshots()
   383  
   384  	// Now flush a block...
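        	// Flushing 10 bytes drops the quota tracker's used count from 21
        	// back to 11.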
   385  
   386  	jt.onBlocksFlush(10, chargedTo)
   387  
   388  	expectedQuotaSnapshot = jtSnapshot{
   389  		used: 11,
   390  		free: 89,
   391  	}
   392  
   393  	checkSnapshots()
   394  
   395  	// ...and, finally, delete it.
   396  
   397  	jt.release(10, 5)
   398  
   399  	// max = min(k(U+F), L) = min(0.15(1+240), 400) = 36.
   400  	expectedByteSnapshot = jtSnapshot{
   401  		used:  1,
   402  		free:  240,
   403  		max:   36,
   404  		count: 35,
   405  	}
   406  	// max = min(k(U+F), L) = min(0.15(1+100), 800) = 15.
   407  	expectedFileSnapshot = jtSnapshot{
   408  		used:  1,
   409  		free:  100,
   410  		max:   15,
   411  		count: 14,
   412  	}
   413  
   414  	checkSnapshots()
   415  }
   416  
   417  // TestDefaultDoDelayCancel checks that defaultDoDelay respects
   418  // context cancellation.
   419  func TestDefaultDoDelayCancel(t *testing.T) {
   420  	ctx, cancel := context.WithTimeout(
   421  		context.Background(), individualTestTimeout)
   422  	cancel()
   423  
   424  	err := defaultDoDelay(ctx, individualTestTimeout)
   425  	require.Equal(t, context.Canceled, errors.Cause(err))
   426  }
   427  
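        // makeTestBackpressureDiskLimiterParams returns a params struct with
        // small limits, a no-op delay function, and effectively unlimited free
        // space and quota, which the tests below tweak as needed.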
   428  func makeTestBackpressureDiskLimiterParams() backpressureDiskLimiterParams {
   429  	return backpressureDiskLimiterParams{
   430  		minThreshold:      0.1,
   431  		maxThreshold:      0.9,
   432  		quotaMinThreshold: 1.0,
   433  		quotaMaxThreshold: 1.2,
   434  		journalFrac:       0.25,
   435  		diskCacheFrac:     0.1,
   436  		syncCacheFrac:     1.0,
   437  		byteLimit:         400,
   438  		fileLimit:         40,
   439  		maxDelay:          8 * time.Second,
   440  		delayFn: func(context.Context, time.Duration) error {
   441  			return nil
   442  		},
   443  		freeBytesAndFilesFn: func() (int64, int64, error) {
   444  			return math.MaxInt64, math.MaxInt64, nil
   445  		},
   446  		quotaFn: func(context.Context, keybase1.UserOrTeamID) (int64, int64) {
   447  			return 0, math.MaxInt64
   448  		},
   449  	}
   450  }
   451  
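        // TestBackpressureDiskLimiterConstructorError checks that an error
        // from the free-bytes-and-files callback is propagated out of the
        // constructor.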
   452  func TestBackpressureDiskLimiterConstructorError(t *testing.T) {
   453  	log := logger.NewTestLogger(t)
   454  	fakeErr := errors.New("Fake error")
   455  	params := makeTestBackpressureDiskLimiterParams()
   456  	params.delayFn = nil
   457  	params.freeBytesAndFilesFn = func() (int64, int64, error) {
   458  		return 0, 0, fakeErr
   459  	}
   460  	_, err := newBackpressureDiskLimiter(log, params)
   461  	require.Equal(t, fakeErr, err)
   462  }
   463  
   464  // TestBackpressureDiskLimiterBeforeBlockPut checks that
   465  // backpressureDiskLimiter.reserveWithBackpressure keeps track of and
   466  // returns the available bytes/files correctly.
   467  func TestBackpressureDiskLimiterBeforeBlockPut(t *testing.T) {
   468  	log := logger.NewTestLogger(t)
   469  	params := makeTestBackpressureDiskLimiterParams()
   470  	params.byteLimit = 88
   471  	params.fileLimit = 20
   472  	bdl, err := newBackpressureDiskLimiter(log, params)
   473  	require.NoError(t, err)
   474  
   475  	chargedTo := keybase1.MakeTestUID(1).AsUserOrTeam()
   476  	availBytes, availFiles, err := bdl.reserveWithBackpressure(
   477  		context.Background(), journalLimitTrackerType, 10, 2, chargedTo)
   478  	require.NoError(t, err)
   479  	// (byteLimit=88) * (journalFrac=0.25) - 10 = 12.
   480  	require.Equal(t, int64(12), availBytes)
   481  	// (fileLimit=20) * (journalFrac=0.25) - 2 = 3.
   482  	require.Equal(t, int64(3), availFiles)
   483  }
   484  
   485  // TestBackpressureDiskLimiterBeforeBlockPutByteError checks that
   486  // backpressureDiskLimiter.reserveWithBackpressure handles errors
   487  // correctly when acquiring the byte semaphore; in particular, that we
   488  // return the right info even with a non-nil error.
   489  func TestBackpressureDiskLimiterBeforeBlockPutByteError(t *testing.T) {
   490  	log := logger.NewTestLogger(t)
   491  	params := makeTestBackpressureDiskLimiterParams()
   492  	params.byteLimit = 40
   493  	params.fileLimit = 4
   494  	bdl, err := newBackpressureDiskLimiter(log, params)
   495  	require.NoError(t, err)
   496  
   497  	ctx, cancel := context.WithCancel(context.Background())
   498  	cancel()
   499  
   500  	chargedTo := keybase1.MakeTestUID(1).AsUserOrTeam()
   501  	availBytes, availFiles, err := bdl.reserveWithBackpressure(ctx, journalLimitTrackerType, 11, 1,
   502  		chargedTo)
   503  	require.Equal(t, context.Canceled, errors.Cause(err))
   504  	require.Equal(t, int64(10), availBytes)
   505  	require.Equal(t, int64(1), availFiles)
   506  
   507  	require.Equal(t, int64(10), bdl.journalTracker.byte.semaphore.Count())
   508  	require.Equal(t, int64(1), bdl.journalTracker.file.semaphore.Count())
   509  }
   510  
   511  // TestBackpressureDiskLimiterBeforeBlockPutFileError checks that
   512  // backpressureDiskLimiter.reserveWithBackpressure handles errors
   513  // correctly when acquiring the file semaphore; in particular, that we
   514  // don't leak either bytes or files if either semaphore times out.
   515  func TestBackpressureDiskLimiterBeforeBlockPutFileError(t *testing.T) {
   516  	log := logger.NewTestLogger(t)
   517  	params := makeTestBackpressureDiskLimiterParams()
   518  	params.byteLimit = 40
   519  	params.fileLimit = 4
   520  	bdl, err := newBackpressureDiskLimiter(log, params)
   521  	require.NoError(t, err)
   522  
   523  	// We're relying on the fact that a semaphore acquire will
   524  	// succeed if it is immediately fulfillable, so that the byte
   525  	// acquire will succeed.
   526  	ctx, cancel := context.WithCancel(context.Background())
   527  	cancel()
   528  
   529  	chargedTo := keybase1.MakeTestUID(1).AsUserOrTeam()
   530  	availBytes, availFiles, err := bdl.reserveWithBackpressure(ctx, journalLimitTrackerType, 10, 2,
   531  		chargedTo)
   532  	require.Equal(t, context.Canceled, errors.Cause(err))
   533  	require.Equal(t, int64(10), availBytes)
   534  	require.Equal(t, int64(1), availFiles)
   535  
   536  	require.Equal(t, int64(10), bdl.journalTracker.byte.semaphore.Count())
   537  	require.Equal(t, int64(1), bdl.journalTracker.file.semaphore.Count())
   538  }
   539  
   540  // TestBackpressureDiskLimiterGetDelay tests the delay calculation.
   541  func TestBackpressureDiskLimiterGetDelay(t *testing.T) {
   542  	log := logger.NewTestLogger(t)
   543  	params := makeTestBackpressureDiskLimiterParams()
   544  	params.byteLimit = math.MaxInt64
   545  	params.fileLimit = math.MaxInt64
   546  	bdl, err := newBackpressureDiskLimiter(log, params)
   547  	require.NoError(t, err)
   548  
   549  	now := time.Now()
   550  
   551  	chargedTo := keybase1.MakeTestUID(1).AsUserOrTeam()
   552  	func() {
   553  		bdl.lock.Lock()
   554  		defer bdl.lock.Unlock()
   555  		// byteDelayScale should be 25/(.25(350 + 25)) =
   556  		// 0.267, which turns into a delay fraction of
   557  		// (0.267-0.1)/(0.9-0.1) = 0.209.
   558  		bdl.journalTracker.byte.used = 25
   559  		bdl.journalTracker.byte.free = 350
   560  		// fileDelayScale should be 50/(.25(350 + 50)) = 0.5,
   561  		// which turns into a delay fraction of
   562  		// (0.5-0.1)/(0.9-0.1) = 0.5.
   563  		bdl.journalTracker.file.used = 50
   564  		bdl.journalTracker.file.free = 350
   565  		// quotaDelayScale should be (100+5)/100 = 1.05, which
   566  		// turns into a delay fraction of (1.05-1.0)/(1.2-1.0)
   567  		// = 0.25.
   568  		bdl.journalTracker.getQuotaTracker(chargedTo).unflushedBytes = 100
   569  		bdl.journalTracker.getQuotaTracker(chargedTo).remoteUsedBytes = 5
   570  		bdl.journalTracker.getQuotaTracker(chargedTo).quotaBytes = 100
   571  	}()
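        	// The delay should be the 8s maxDelay scaled by the largest of the
        	// three fractions (0.5, from files): 8 * 0.5 = 4 seconds.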
   572  
   573  	ctx := context.Background()
   574  	delay := bdl.getDelayLocked(ctx, now, chargedTo)
   575  	require.InEpsilon(t, float64(4), delay.Seconds(), 0.01)
   576  
   577  	func() {
   578  		bdl.lock.Lock()
   579  		defer bdl.lock.Unlock()
   580  		// Swap byte and file delay fractions.
   581  		bdl.journalTracker.byte.used = 50
   582  		bdl.journalTracker.byte.free = 350
   583  
   584  		bdl.journalTracker.file.used = 25
   585  		bdl.journalTracker.file.free = 350
   586  	}()
   587  
   588  	delay = bdl.getDelayLocked(ctx, now, chargedTo)
   589  	require.InEpsilon(t, float64(4), delay.Seconds(), 0.01)
   590  
   591  	func() {
   592  		bdl.lock.Lock()
   593  		defer bdl.lock.Unlock()
   594  		// Reduce the byte and file delay fractions.
   595  		bdl.journalTracker.byte.used = 25
   596  		bdl.journalTracker.byte.free = 350
   597  
   598  		bdl.journalTracker.file.used = 25
   599  		bdl.journalTracker.file.free = 350
   600  
   601  		// quotaDelayScale should be (100+10)/100 = 1.1, which
   602  		// turns into a delay fraction of (1.1-1.0)/(1.2-1.0)
   603  		// = 0.5.
   604  		bdl.journalTracker.getQuotaTracker(chargedTo).unflushedBytes = 100
   605  		bdl.journalTracker.getQuotaTracker(chargedTo).remoteUsedBytes = 10
   606  		bdl.journalTracker.getQuotaTracker(chargedTo).quotaBytes = 100
   607  	}()
   608  
   609  	delay = bdl.getDelayLocked(ctx, now, chargedTo)
   610  	require.InEpsilon(t, float64(4), delay.Seconds(), 0.01)
   611  }
   612  
   613  // TestBackpressureDiskLimiterGetDelayWithDeadline makes sure the
   614  // delay calculation takes into account the context deadline.
   615  func TestBackpressureDiskLimiterGetDelayWithDeadline(t *testing.T) {
   616  	log := logger.NewTestLogger(t)
   617  	params := makeTestBackpressureDiskLimiterParams()
   618  	params.byteLimit = math.MaxInt64
   619  	params.fileLimit = math.MaxInt64
   620  	bdl, err := newBackpressureDiskLimiter(log, params)
   621  	require.NoError(t, err)
   622  
   623  	now := time.Now()
   624  
   625  	func() {
   626  		bdl.lock.Lock()
   627  		defer bdl.lock.Unlock()
   628  		// fileDelayScale should be 50/(.25(350 + 50)) = 0.5,
   629  		// which turns into a delay fraction of
   630  		// (0.5-0.1)/(0.9-0.1) = 0.5.
   631  		bdl.journalTracker.file.used = 50
   632  		bdl.journalTracker.file.free = 350
   633  	}()
   634  
   635  	deadline := now.Add(5 * time.Second)
   636  	ctx, cancel := context.WithDeadline(context.Background(), deadline)
   637  	defer cancel()
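        	// With the deadline only 5 seconds away, the effective max delay is
        	// capped below the configured 8s, so the 0.5 file fraction should
        	// yield roughly 2 seconds rather than 4.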
   638  
   639  	chargedTo := keybase1.MakeTestUID(1).AsUserOrTeam()
   640  	delay := bdl.getDelayLocked(ctx, now, chargedTo)
   641  	require.InEpsilon(t, float64(2), delay.Seconds(), 0.01)
   642  }
   643  
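        // backpressureTestType selects which resource (bytes or files) is made
        // the bottleneck in the delay tests below.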
   644  type backpressureTestType int
   645  
   646  const (
   647  	byteTest backpressureTestType = iota
   648  	fileTest
   649  )
   650  
   651  func (t backpressureTestType) String() string {
   652  	switch t {
   653  	case byteTest:
   654  		return "byteTest"
   655  	case fileTest:
   656  		return "fileTest"
   657  	default:
   658  		return fmt.Sprintf("backpressureTestType(%d)", t)
   659  	}
   660  }
   661  
   662  // testBackpressureDiskLimiterLargeDiskDelay checks the delays when
   663  // pretending to have a large disk.
   664  func testBackpressureDiskLimiterLargeDiskDelay(
   665  	t *testing.T, testType backpressureTestType) {
   666  	var lastDelay time.Duration
   667  	delayFn := func(ctx context.Context, delay time.Duration) error {
   668  		lastDelay = delay
   669  		return nil
   670  	}
   671  
   672  	const blockBytes = 100
   673  	const blockFiles = 10
   674  
   675  	// Set the bottleneck, based on the test type; i.e. set
   676  	// parameters so that semaphoreMax for the bottleneck always
   677  	// has value 10 * blockX when called in reserveWithBackpressure,
   678  	// and every block put beyond the min threshold adds 1 second of
   679  	// delay, up to the max.
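        	// (The raw limits below are multiplied by 4 to cancel out the
        	// journalFrac of 0.25.)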
   680  	var byteLimit, fileLimit int64
   681  	switch testType {
   682  	case byteTest:
   683  		// Make bytes be the bottleneck.
   684  		byteLimit = 10 * blockBytes
   685  		fileLimit = 20 * blockFiles
   686  	case fileTest:
   687  		// Make files be the bottleneck.
   688  		byteLimit = 20 * blockBytes
   689  		fileLimit = 10 * blockFiles
   690  	default:
   691  		panic(fmt.Sprintf("unknown test type %s", testType))
   692  	}
   693  
   694  	log := logger.NewTestLogger(t)
   695  	params := makeTestBackpressureDiskLimiterParams()
   696  	params.byteLimit = byteLimit * 4
   697  	params.fileLimit = fileLimit * 4
   698  	params.delayFn = delayFn
   699  	bdl, err := newBackpressureDiskLimiter(log, params)
   700  	require.NoError(t, err)
   701  
   702  	chargedTo := keybase1.MakeTestUID(1).AsUserOrTeam()
   703  	byteSnapshot, fileSnapshot, quotaSnapshot :=
   704  		bdl.getJournalSnapshotsForTest(chargedTo)
   705  	require.Equal(t, jtSnapshot{
   706  		used:  0,
   707  		free:  math.MaxInt64,
   708  		max:   byteLimit,
   709  		count: byteLimit,
   710  	}, byteSnapshot)
   711  	require.Equal(t, jtSnapshot{
   712  		used:  0,
   713  		free:  math.MaxInt64,
   714  		max:   fileLimit,
   715  		count: fileLimit,
   716  	}, fileSnapshot)
   717  	require.Equal(t, jtSnapshot{
   718  		used: 0,
   719  		free: math.MaxInt64,
   720  	}, quotaSnapshot)
   721  
   722  	ctx := context.Background()
   723  
   724  	var bytesPut, filesPut int64
   725  
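        	// checkCountersAfterBeforeBlockPut verifies the values returned by
        	// reserveWithBackpressure and the journal snapshots while the
        	// reservation for put i is still outstanding.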
   726  	checkCountersAfterBeforeBlockPut := func(
   727  		i int, availBytes, availFiles int64) {
   728  		byteSnapshot, fileSnapshot, quotaSnapshot :=
   729  			bdl.getJournalSnapshotsForTest(chargedTo)
   730  		expectedByteCount := byteLimit - bytesPut - blockBytes
   731  		expectedFileCount := fileLimit - filesPut - blockFiles
   732  		require.Equal(t, expectedByteCount, availBytes)
   733  		require.Equal(t, expectedFileCount, availFiles)
   734  		require.Equal(t, jtSnapshot{
   735  			used:  bytesPut,
   736  			free:  math.MaxInt64,
   737  			max:   byteLimit,
   738  			count: expectedByteCount,
   739  		}, byteSnapshot, "i=%d", i)
   740  		require.Equal(t, jtSnapshot{
   741  			used:  filesPut,
   742  			free:  math.MaxInt64,
   743  			max:   fileLimit,
   744  			count: expectedFileCount,
   745  		}, fileSnapshot, "i=%d", i)
   746  		require.Equal(t, jtSnapshot{
   747  			used: bytesPut,
   748  			free: math.MaxInt64 - bytesPut,
   749  		}, quotaSnapshot, "i=%d", i)
   750  	}
   751  
   752  	checkCountersAfterBlockPut := func(i int) {
   753  		byteSnapshot, fileSnapshot, quotaSnapshot :=
   754  			bdl.getJournalSnapshotsForTest(chargedTo)
   755  		require.Equal(t, jtSnapshot{
   756  			used:  bytesPut,
   757  			free:  math.MaxInt64,
   758  			max:   byteLimit,
   759  			count: byteLimit - bytesPut,
   760  		}, byteSnapshot, "i=%d", i)
   761  		require.Equal(t, jtSnapshot{
   762  			used:  filesPut,
   763  			free:  math.MaxInt64,
   764  			max:   fileLimit,
   765  			count: fileLimit - filesPut,
   766  		}, fileSnapshot, "i=%d", i)
   767  		require.Equal(t, jtSnapshot{
   768  			used: bytesPut,
   769  			free: math.MaxInt64 - bytesPut,
   770  		}, quotaSnapshot, "i=%d", i)
   771  	}
   772  
   773  	// The first two puts shouldn't encounter any backpressure...
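        	// (At reserve time the bottleneck's usage is still at or below the
        	// 0.1 minThreshold of its 10-block limit, so the delay stays zero.)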
   774  
   775  	for i := 0; i < 2; i++ {
   776  		availBytes, availFiles, err := bdl.reserveWithBackpressure(ctx, journalLimitTrackerType,
   777  			blockBytes, blockFiles, chargedTo)
   778  		require.NoError(t, err)
   779  		require.Equal(t, 0*time.Second, lastDelay)
   780  		checkCountersAfterBeforeBlockPut(i, availBytes, availFiles)
   781  
   782  		bdl.commitOrRollback(ctx, journalLimitTrackerType, blockBytes, blockFiles,
   783  			true, chargedTo)
   784  		bytesPut += blockBytes
   785  		filesPut += blockFiles
   786  		checkCountersAfterBlockPut(i)
   787  	}
   788  
   789  	// ...but the next eight should encounter increasing
   790  	// backpressure...
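        	// Each committed block raises the bottleneck's usage by another 0.1
        	// of its limit; with minThreshold 0.1, maxThreshold 0.9, and an 8s
        	// maxDelay, that works out to one more second of delay per put.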
   791  
   792  	for i := 1; i < 9; i++ {
   793  		availBytes, availFiles, err := bdl.reserveWithBackpressure(ctx, journalLimitTrackerType,
   794  			blockBytes, blockFiles, chargedTo)
   795  		require.NoError(t, err)
   796  		require.InEpsilon(t, float64(i), lastDelay.Seconds(),
   797  			0.01, "i=%d", i)
   798  		checkCountersAfterBeforeBlockPut(i, availBytes, availFiles)
   799  
   800  		bdl.commitOrRollback(ctx, journalLimitTrackerType, blockBytes, blockFiles,
   801  			true, chargedTo)
   802  		bytesPut += blockBytes
   803  		filesPut += blockFiles
   804  		checkCountersAfterBlockPut(i)
   805  	}
   806  
   807  	// ...and the last one should stall completely, if not for the
   808  	// cancelled context.
   809  
   810  	ctx2, cancel2 := context.WithCancel(ctx)
   811  	cancel2()
   812  	availBytes, availFiles, err := bdl.reserveWithBackpressure(ctx2, journalLimitTrackerType,
   813  		blockBytes, blockFiles, chargedTo)
   814  	require.Equal(t, context.Canceled, errors.Cause(err))
   815  	require.Equal(t, 8*time.Second, lastDelay)
   816  
   817  	// This does the same thing as checkCountersAfterBlockPut(),
   818  	// but only by coincidence; contrast with the similar block in
   819  	// testBackpressureDiskLimiterSmallDiskDelay below.
   820  	expectedByteCount := byteLimit - bytesPut
   821  	expectedFileCount := fileLimit - filesPut
   822  	require.Equal(t, expectedByteCount, availBytes)
   823  	require.Equal(t, expectedFileCount, availFiles)
   824  	byteSnapshot, fileSnapshot, quotaSnapshot =
   825  		bdl.getJournalSnapshotsForTest(chargedTo)
   826  	require.Equal(t, jtSnapshot{
   827  		used:  bytesPut,
   828  		free:  math.MaxInt64,
   829  		max:   byteLimit,
   830  		count: expectedByteCount,
   831  	}, byteSnapshot)
   832  	require.Equal(t, jtSnapshot{
   833  		used:  filesPut,
   834  		free:  math.MaxInt64,
   835  		max:   fileLimit,
   836  		count: expectedFileCount,
   837  	}, fileSnapshot)
   838  	require.Equal(t, jtSnapshot{
   839  		used: bytesPut,
   840  		free: math.MaxInt64 - bytesPut,
   841  	}, quotaSnapshot)
   842  }
   843  
   844  func TestBackpressureDiskLimiterLargeDiskDelay(t *testing.T) {
   845  	t.Run(byteTest.String(), func(t *testing.T) {
   846  		testBackpressureDiskLimiterLargeDiskDelay(t, byteTest)
   847  	})
   848  	t.Run(fileTest.String(), func(t *testing.T) {
   849  		testBackpressureDiskLimiterLargeDiskDelay(t, fileTest)
   850  	})
   851  }
   852  
   853  // TestBackpressureDiskLimiterJournalAndDiskCache checks that the limiter
   854  // correctly handles the interaction between changes to the disk cache and the
   855  // journal.
   856  func TestBackpressureDiskLimiterJournalAndDiskCache(t *testing.T) {
   857  	t.Parallel()
   858  	var lastDelay time.Duration
   859  	delayFn := func(ctx context.Context, delay time.Duration) error {
   860  		lastDelay = delay
   861  		return nil
   862  	}
   863  
   864  	const blockBytes int64 = 100
   865  	// Big number, but no risk of overflow
   866  	maxFreeBytes := int64(1 << 30)
   867  
   868  	// Set the bottleneck; i.e. set parameters so that semaphoreMax for the
   869  	// bottleneck always has value 10 * blockBytes when called in
   870  	// reserveWithBackpressure, and every block put beyond the min threshold
   871  	// adds 1 second of delay, up to the max.
   872  	byteLimit := 10 * blockBytes
   873  	// arbitrarily large number
   874  	var fileLimit int64 = math.MaxInt64
   875  
   876  	log := logger.NewTestLogger(t)
   877  	params := makeTestBackpressureDiskLimiterParams()
   878  	// 4 = 1/(journalFrac=0.25)
   879  	params.byteLimit = byteLimit * 4
   880  	params.fileLimit = fileLimit
   881  	params.delayFn = delayFn
   882  	params.freeBytesAndFilesFn = func() (int64, int64, error) {
   883  		return maxFreeBytes, math.MaxInt64, nil
   884  	}
   885  	bdl, err := newBackpressureDiskLimiter(log, params)
   886  	require.NoError(t, err)
   887  
   888  	chargedTo := keybase1.MakeTestUID(1).AsUserOrTeam()
   889  	byteSnapshot, _, _ := bdl.getJournalSnapshotsForTest(chargedTo)
   890  	require.Equal(t, jtSnapshot{
   891  		used:  0,
   892  		free:  maxFreeBytes,
   893  		max:   byteLimit,
   894  		count: byteLimit,
   895  	}, byteSnapshot)
   896  
   897  	ctx := context.Background()
   898  
   899  	var journalBytesPut int64
   900  	var diskCacheBytesPut int64
   901  
   902  	checkCountersAfterBeforeBlockPut := func(
   903  		i int, availBytes int64) {
   904  		byteSnapshot, _, _ := bdl.getJournalSnapshotsForTest(chargedTo)
   905  		expectedByteCount := byteLimit - journalBytesPut - blockBytes
   906  		require.Equal(t, expectedByteCount, availBytes)
   907  		require.Equal(t, jtSnapshot{
   908  			used:  journalBytesPut,
   909  			free:  maxFreeBytes + diskCacheBytesPut,
   910  			max:   byteLimit,
   911  			count: expectedByteCount,
   912  		}, byteSnapshot, "i=%d", i)
   913  	}
   914  
   915  	checkCountersAfterBlockPut := func(i int) {
   916  		byteSnapshot, _, _ := bdl.getJournalSnapshotsForTest(chargedTo)
   917  		require.Equal(t, jtSnapshot{
   918  			used:  journalBytesPut,
   919  			free:  maxFreeBytes + diskCacheBytesPut,
   920  			max:   byteLimit,
   921  			count: byteLimit - journalBytesPut,
   922  		}, byteSnapshot, "i=%d", i)
   923  	}
   924  
   925  	diskCacheByteLimit := int64(float64(params.byteLimit) *
   926  		params.diskCacheFrac)
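        	// diskCacheByteLimit = 4000 * 0.1 = 400, i.e. four blocks' worth
        	// of working-set cache.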
   927  
   928  	// The first two puts shouldn't encounter any backpressure...
   929  
   930  	for i := 0; i < 2; i++ {
   931  		// Ensure the disk block cache doesn't interfere with the journal
   932  		// limits.
   933  		availBytes, err := bdl.reserveBytes(ctx, workingSetCacheLimitTrackerType, blockBytes)
   934  		require.NoError(t, err)
   935  		require.Equal(t, diskCacheByteLimit-(int64(i)+1)*blockBytes, availBytes)
   936  		bdl.commitOrRollback(ctx, workingSetCacheLimitTrackerType, blockBytes, 0, true,
   937  			"")
   938  		diskCacheBytesPut += blockBytes
   939  
   940  		availBytes, _, err = bdl.reserveWithBackpressure(ctx,
   941  			journalLimitTrackerType, blockBytes, 1, chargedTo)
   942  		require.NoError(t, err)
   943  		require.Equal(t, 0*time.Second, lastDelay)
   944  		checkCountersAfterBeforeBlockPut(i, availBytes)
   945  
   946  		bdl.commitOrRollback(ctx, journalLimitTrackerType, blockBytes, 1, true,
   947  			chargedTo)
   948  		journalBytesPut += blockBytes
   949  		checkCountersAfterBlockPut(i)
   950  
   951  		// TODO: track disk cache puts as well
   952  	}
   953  
   954  	// ...but the next eight should encounter increasing
   955  	// backpressure...
   956  
   957  	for i := 1; i < 9; i++ {
   958  		// Ensure the disk block cache doesn't interfere with the journal
   959  		// limits.
   960  		_, err := bdl.reserveBytes(ctx, workingSetCacheLimitTrackerType, blockBytes)
   961  		require.NoError(t, err)
   962  		bdl.commitOrRollback(ctx, workingSetCacheLimitTrackerType, blockBytes, 0, true,
   963  			"")
   964  		diskCacheBytesPut += blockBytes
   965  
   966  		availBytes, _, err := bdl.reserveWithBackpressure(ctx,
   967  			journalLimitTrackerType, blockBytes, 1, chargedTo)
   968  		require.NoError(t, err)
   969  		require.InEpsilon(t, float64(i), lastDelay.Seconds(),
   970  			0.01, "i=%d", i)
   971  		checkCountersAfterBeforeBlockPut(i, availBytes)
   972  
   973  		bdl.commitOrRollback(ctx, journalLimitTrackerType, blockBytes, 1, true,
   974  			chargedTo)
   975  		journalBytesPut += blockBytes
   976  		checkCountersAfterBlockPut(i)
   977  	}
   978  
   979  	// ...and the last one should stall completely, if not for the
   980  	// cancelled context.
   981  
   982  	ctx2, cancel2 := context.WithCancel(ctx)
   983  	cancel2()
   984  	availBytes, _, err := bdl.reserveWithBackpressure(ctx2, journalLimitTrackerType, blockBytes, 1,
   985  		chargedTo)
   986  	require.Equal(t, context.Canceled, errors.Cause(err))
   987  	require.Equal(t, 8*time.Second, lastDelay)
   988  
   989  	// This does the same thing as checkCountersAfterBlockPut(),
   990  	// but only by coincidence; contrast with the similar block in
   991  	// testBackpressureDiskLimiterSmallDiskDelay below.
   992  	expectedByteCount := byteLimit - journalBytesPut
   993  	require.Equal(t, expectedByteCount, availBytes)
   994  	byteSnapshot, _, _ = bdl.getJournalSnapshotsForTest(chargedTo)
   995  	require.Equal(t, jtSnapshot{
   996  		used:  journalBytesPut,
   997  		free:  maxFreeBytes + diskCacheBytesPut,
   998  		max:   byteLimit,
   999  		count: expectedByteCount,
  1000  	}, byteSnapshot)
  1001  }
  1002  
  1003  // testBackpressureDiskLimiterSmallDiskDelay checks the delays when
  1004  // pretending to have a small disk.
  1005  func testBackpressureDiskLimiterSmallDiskDelay(
  1006  	t *testing.T, testType backpressureTestType) {
  1007  	var lastDelay time.Duration
  1008  	delayFn := func(ctx context.Context, delay time.Duration) error {
  1009  		lastDelay = delay
  1010  		return nil
  1011  	}
  1012  
  1013  	const blockBytes = 80
  1014  	const blockFiles = 8
  1015  
  1016  	// Set the bottleneck, based on the test type; i.e. set
  1017  	// parameters so that semaphoreMax for the bottleneck always
  1018  	// has value 10 * blockX when called in reserveWithBackpressure,
  1019  	// and every block put beyond the min threshold adds 1 second of
  1020  	// delay, up to the max.
  1021  	var diskBytes, diskFiles int64
  1022  	// Multiply by 4 to compensate for the 0.25 journalFrac.
  1023  	switch testType {
  1024  	case byteTest:
  1025  		// Make bytes be the bottleneck.
  1026  		diskBytes = 40 * blockBytes
  1027  		diskFiles = 400 * blockFiles
  1028  	case fileTest:
  1029  		// Make files be the bottleneck.
  1030  		diskBytes = 400 * blockBytes
  1031  		diskFiles = 40 * blockFiles
  1032  	default:
  1033  		panic(fmt.Sprintf("unknown test type %s", testType))
  1034  	}
  1035  
  1036  	var bdl *backpressureDiskLimiter
  1037  
  1038  	getFreeBytesAndFilesFn := func() (int64, int64, error) {
  1039  		// When called for the first time from the
  1040  		// constructor, bdl will be nil.
  1041  		if bdl == nil {
  1042  			return diskBytes, diskFiles, nil
  1043  		}
  1044  
  1045  		// On subsequent calls from
  1046  		// reserveWithBackpressure, simulate the journal
  1047  		// taking up space.
  1048  		return diskBytes - bdl.journalTracker.byte.used,
  1049  			diskFiles - bdl.journalTracker.file.used, nil
  1050  	}
  1051  
  1052  	log := logger.NewTestLogger(t)
  1053  	params := makeTestBackpressureDiskLimiterParams()
  1054  	params.byteLimit = math.MaxInt64
  1055  	params.fileLimit = math.MaxInt64
  1056  	params.delayFn = delayFn
  1057  	params.freeBytesAndFilesFn = getFreeBytesAndFilesFn
  1058  	bdl, err := newBackpressureDiskLimiter(log, params)
  1059  	require.NoError(t, err)
  1060  
  1061  	chargedTo := keybase1.MakeTestUID(1).AsUserOrTeam()
  1062  	byteSnapshot, fileSnapshot, quotaSnapshot :=
  1063  		bdl.getJournalSnapshotsForTest(chargedTo)
  1064  	require.Equal(t, jtSnapshot{
  1065  		used:  0,
  1066  		free:  diskBytes,
  1067  		max:   diskBytes / 4,
  1068  		count: diskBytes / 4,
  1069  	}, byteSnapshot)
  1070  	require.Equal(t, jtSnapshot{
  1071  		used:  0,
  1072  		free:  diskFiles,
  1073  		max:   diskFiles / 4,
  1074  		count: diskFiles / 4,
  1075  	}, fileSnapshot)
  1076  	require.Equal(t, jtSnapshot{
  1077  		used: 0,
  1078  		free: math.MaxInt64,
  1079  	}, quotaSnapshot)
  1080  
  1081  	ctx := context.Background()
  1082  
  1083  	var bytesPut, filesPut int64
  1084  
  1085  	checkCountersAfterBeforeBlockPut := func(
  1086  		i int, availBytes, availFiles int64) {
  1087  		expectedByteCount := diskBytes/4 - bytesPut - blockBytes
  1088  		expectedFileCount := diskFiles/4 - filesPut - blockFiles
  1089  		require.Equal(t, expectedByteCount, availBytes)
  1090  		require.Equal(t, expectedFileCount, availFiles)
  1091  		byteSnapshot, fileSnapshot, quotaSnapshot :=
  1092  			bdl.getJournalSnapshotsForTest(chargedTo)
  1093  		require.Equal(t, jtSnapshot{
  1094  			used:  bytesPut,
  1095  			free:  diskBytes - bytesPut,
  1096  			max:   diskBytes / 4,
  1097  			count: expectedByteCount,
  1098  		}, byteSnapshot, "i=%d", i)
  1099  		require.Equal(t, jtSnapshot{
  1100  			used:  filesPut,
  1101  			free:  diskFiles - filesPut,
  1102  			max:   diskFiles / 4,
  1103  			count: expectedFileCount,
  1104  		}, fileSnapshot, "i=%d", i)
  1105  		require.Equal(t, jtSnapshot{
  1106  			used: bytesPut,
  1107  			free: math.MaxInt64 - bytesPut,
  1108  		}, quotaSnapshot, "i=%d", i)
  1109  	}
  1110  
  1111  	checkCountersAfterBlockPut := func(i int) {
  1112  		// freeBytes is only updated on reserveWithBackpressure, so
  1113  		// we have to compensate for that.
  1114  		byteSnapshot, fileSnapshot, quotaSnapshot :=
  1115  			bdl.getJournalSnapshotsForTest(chargedTo)
  1116  		require.Equal(t, jtSnapshot{
  1117  			used:  bytesPut,
  1118  			free:  diskBytes - bytesPut + blockBytes,
  1119  			max:   diskBytes/4 + blockBytes/4,
  1120  			count: diskBytes/4 + blockBytes/4 - bytesPut,
  1121  		}, byteSnapshot, "i=%d", i)
  1122  		require.Equal(t, jtSnapshot{
  1123  			used:  filesPut,
  1124  			free:  diskFiles - filesPut + blockFiles,
  1125  			max:   diskFiles/4 + blockFiles/4,
  1126  			count: diskFiles/4 + blockFiles/4 - filesPut,
  1127  		}, fileSnapshot, "i=%d", i)
  1128  		require.Equal(t, jtSnapshot{
  1129  			used: bytesPut,
  1130  			free: math.MaxInt64 - bytesPut,
  1131  		}, quotaSnapshot, "i=%d", i)
  1132  	}
  1133  
  1134  	// The first two puts shouldn't encounter any backpressure...
  1135  
  1136  	for i := 0; i < 2; i++ {
  1137  		availBytes, availFiles, err := bdl.reserveWithBackpressure(ctx, journalLimitTrackerType,
  1138  			blockBytes, blockFiles, chargedTo)
  1139  		require.NoError(t, err)
  1140  		require.Equal(t, 0*time.Second, lastDelay)
  1141  		checkCountersAfterBeforeBlockPut(i, availBytes, availFiles)
  1142  
  1143  		bdl.commitOrRollback(ctx, journalLimitTrackerType, blockBytes, blockFiles,
  1144  			true, chargedTo)
  1145  		bytesPut += blockBytes
  1146  		filesPut += blockFiles
  1147  		checkCountersAfterBlockPut(i)
  1148  	}
  1149  
  1150  	// ...but the next eight should encounter increasing
  1151  	// backpressure...
  1152  
  1153  	for i := 1; i < 9; i++ {
  1154  		availBytes, availFiles, err := bdl.reserveWithBackpressure(ctx, journalLimitTrackerType,
  1155  			blockBytes, blockFiles, chargedTo)
  1156  		require.NoError(t, err)
  1157  		require.InEpsilon(t, float64(i), lastDelay.Seconds(),
  1158  			0.01, "i=%d", i)
  1159  		checkCountersAfterBeforeBlockPut(i, availBytes, availFiles)
  1160  
  1161  		bdl.commitOrRollback(ctx, journalLimitTrackerType, blockBytes, blockFiles,
  1162  			true, chargedTo)
  1163  		bytesPut += blockBytes
  1164  		filesPut += blockFiles
  1165  		checkCountersAfterBlockPut(i)
  1166  	}
  1167  
  1168  	// ...and the last one should stall completely, if not for the
  1169  	// cancelled context.
  1170  
  1171  	ctx2, cancel2 := context.WithCancel(ctx)
  1172  	cancel2()
  1173  	availBytes, availFiles, err := bdl.reserveWithBackpressure(ctx2, journalLimitTrackerType,
  1174  		blockBytes, blockFiles, chargedTo)
  1175  	require.Equal(t, context.Canceled, errors.Cause(err))
  1176  	require.Equal(t, 8*time.Second, lastDelay)
  1177  
  1178  	expectedByteCount := diskBytes/4 - bytesPut
  1179  	expectedFileCount := diskFiles/4 - filesPut
  1180  	require.Equal(t, expectedByteCount, availBytes)
  1181  	require.Equal(t, expectedFileCount, availFiles)
  1182  	byteSnapshot, fileSnapshot, quotaSnapshot =
  1183  		bdl.getJournalSnapshotsForTest(chargedTo)
  1184  	require.Equal(t, jtSnapshot{
  1185  		used:  bytesPut,
  1186  		free:  diskBytes - bytesPut,
  1187  		max:   diskBytes / 4,
  1188  		count: expectedByteCount,
  1189  	}, byteSnapshot)
  1190  	require.Equal(t, jtSnapshot{
  1191  		used:  filesPut,
  1192  		free:  diskFiles - filesPut,
  1193  		max:   diskFiles / 4,
  1194  		count: expectedFileCount,
  1195  	}, fileSnapshot)
  1196  	require.Equal(t, jtSnapshot{
  1197  		used: bytesPut,
  1198  		free: math.MaxInt64 - bytesPut,
  1199  	}, quotaSnapshot)
  1200  }
  1201  
  1202  func TestBackpressureDiskLimiterSmallDiskDelay(t *testing.T) {
  1203  	t.Run(byteTest.String(), func(t *testing.T) {
  1204  		testBackpressureDiskLimiterSmallDiskDelay(t, byteTest)
  1205  	})
  1206  	t.Run(fileTest.String(), func(t *testing.T) {
  1207  		testBackpressureDiskLimiterSmallDiskDelay(t, fileTest)
  1208  	})
  1209  }
  1210  
  1211  // TestBackpressureDiskLimiterNearQuota checks the delays when
  1212  // pretending to be near and over the quota limit.
  1213  func TestBackpressureDiskLimiterNearQuota(t *testing.T) {
  1214  	var lastDelay time.Duration
  1215  	delayFn := func(ctx context.Context, delay time.Duration) error {
  1216  		lastDelay = delay
  1217  		return nil
  1218  	}
  1219  
  1220  	const blockBytes = 100
  1221  	const blockFiles = 10
  1222  	const remoteUsedBytes = 400
  1223  	const quotaBytes = 1000
  1224  
  1225  	log := logger.NewTestLogger(t)
  1226  	params := makeTestBackpressureDiskLimiterParams()
  1227  	params.byteLimit = math.MaxInt64
  1228  	params.fileLimit = math.MaxInt64
  1229  	params.maxDelay = 2 * time.Second
  1230  	params.delayFn = delayFn
  1231  	params.quotaFn = func(
  1232  		_ context.Context, _ keybase1.UserOrTeamID) (int64, int64) {
  1233  		return remoteUsedBytes, quotaBytes
  1234  	}
  1235  	bdl, err := newBackpressureDiskLimiter(log, params)
  1236  	require.NoError(t, err)
  1237  
  1238  	chargedTo := keybase1.MakeTestUID(1).AsUserOrTeam()
  1239  	_, _, quotaSnapshot := bdl.getJournalSnapshotsForTest(chargedTo)
  1240  	require.Equal(t, jtSnapshot{
  1241  		used: 0,
  1242  		free: math.MaxInt64,
  1243  	}, quotaSnapshot)
  1244  
  1245  	ctx := context.Background()
  1246  
  1247  	var bytesPut int64
  1248  
  1249  	checkCounters := func(i int) {
  1250  		_, _, quotaSnapshot := bdl.getJournalSnapshotsForTest(chargedTo)
  1251  		used := remoteUsedBytes + bytesPut
  1252  		require.Equal(t, jtSnapshot{
  1253  			used: used,
  1254  			free: quotaBytes - used,
  1255  		}, quotaSnapshot, "i=%d", i)
  1256  	}
  1257  
  1258  	// The first seven puts shouldn't encounter any backpressure...
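        	// (remoteUsedBytes + unflushedBytes stays at or below quotaBytes =
        	// 1000 here, so the quota fraction never crosses the
        	// quotaMinThreshold of 1.0.)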
  1259  
  1260  	for i := 0; i < 7; i++ {
  1261  		_, _, err := bdl.reserveWithBackpressure(ctx, journalLimitTrackerType, blockBytes,
  1262  			blockFiles, chargedTo)
  1263  		require.NoError(t, err)
  1264  		require.Equal(t, 0*time.Second, lastDelay, "i=%d", i)
  1265  		checkCounters(i)
  1266  
  1267  		bdl.commitOrRollback(ctx, journalLimitTrackerType, blockBytes, blockFiles,
  1268  			true, chargedTo)
  1269  		bytesPut += blockBytes
  1270  		checkCounters(i)
  1271  	}
  1272  
  1273  	// ...but the next two should encounter increasing
  1274  	// backpressure...
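        	// The quota ratio reaches 1.1 and then 1.2, i.e. fractions 0.5 and
        	// 1.0 of the 2-second maxDelay.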
  1275  
  1276  	for i := 1; i <= 2; i++ {
  1277  		_, _, err := bdl.reserveWithBackpressure(ctx, journalLimitTrackerType, blockBytes,
  1278  			blockFiles, chargedTo)
  1279  		require.NoError(t, err)
  1280  		require.InEpsilon(t, float64(i), lastDelay.Seconds(),
  1281  			0.01, "i=%d", i)
  1282  		checkCounters(i)
  1283  
  1284  		bdl.commitOrRollback(ctx, journalLimitTrackerType, blockBytes, blockFiles,
  1285  			true, chargedTo)
  1286  		bytesPut += blockBytes
  1287  		checkCounters(i)
  1288  	}
  1289  
  1290  	// ...and the last one should encounter the max backpressure.
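        	// (400 + 900)/1000 = 1.3 exceeds the quotaMaxThreshold of 1.2, so
        	// the delay clamps to the 2-second maxDelay.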
  1291  
  1292  	_, _, err = bdl.reserveWithBackpressure(ctx, journalLimitTrackerType, blockBytes, blockFiles,
  1293  		chargedTo)
  1294  	require.NoError(t, err)
  1295  	require.Equal(t, 2*time.Second, lastDelay)
  1296  	checkCounters(0)
  1297  }