github.com/keybase/client/go@v0.0.0-20241007131713-f10651d043c8/kbfs/libkbfs/disk_block_cache_test.go (about)

     1  // Copyright 2017 Keybase Inc. All rights reserved.
     2  // Use of this source code is governed by a BSD
     3  // license that can be found in the LICENSE file.
     4  
     5  package libkbfs
     6  
     7  import (
     8  	"math"
     9  	"math/rand"
    10  	"os"
    11  	"sync"
    12  	"testing"
    13  	"time"
    14  
    15  	"github.com/keybase/client/go/kbfs/data"
    16  	"github.com/keybase/client/go/kbfs/env"
    17  	"github.com/keybase/client/go/kbfs/ioutil"
    18  	"github.com/keybase/client/go/kbfs/kbfsblock"
    19  	"github.com/keybase/client/go/kbfs/kbfscrypto"
    20  	"github.com/keybase/client/go/kbfs/kbfsmd"
    21  	"github.com/keybase/client/go/kbfs/test/clocktest"
    22  	"github.com/keybase/client/go/kbfs/tlf"
    23  	"github.com/keybase/client/go/protocol/keybase1"
    24  	"github.com/stretchr/testify/require"
    25  	"github.com/syndtr/goleveldb/leveldb/errors"
    26  	"github.com/syndtr/goleveldb/leveldb/util"
    27  	"golang.org/x/net/context"
    28  )
    29  
const (
	// testDiskBlockCacheMaxBytes is the byte limit (2 MiB) handed to the
	// backpressure disk limiter for these tests.
	testDiskBlockCacheMaxBytes int64 = 1 << 21
)
    33  
// testDiskBlockCacheConfig bundles the dependencies needed to build disk
// block caches in tests. Most behavior comes from the embedded test
// helpers; `limiter` and `bcache` are explicit fields so tests can swap
// them out (the limiter is filled in by newDiskBlockCacheForTest).
type testDiskBlockCacheConfig struct {
	codecGetter
	logMaker
	*testClockGetter
	limiter DiskLimiter
	syncedTlfGetterSetter
	initModeGetter
	bcache data.BlockCache
}
    43  
    44  func newTestDiskBlockCacheConfig(t *testing.T) *testDiskBlockCacheConfig {
    45  	return &testDiskBlockCacheConfig{
    46  		newTestCodecGetter(),
    47  		newTestLogMaker(t),
    48  		newTestClockGetter(),
    49  		nil,
    50  		newTestSyncedTlfGetterSetter(),
    51  		testInitModeGetter{InitDefault},
    52  		data.NewBlockCacheStandard(100, 100),
    53  	}
    54  }
    55  
// DiskLimiter returns the configured limiter, satisfying the
// diskBlockCacheConfig interface.
func (c testDiskBlockCacheConfig) DiskLimiter() DiskLimiter {
	return c.limiter
}
    59  
// BlockCache returns the in-memory block cache, satisfying the
// diskBlockCacheConfig interface.
func (c testDiskBlockCacheConfig) BlockCache() data.BlockCache {
	return c.bcache
}
    63  
// newDiskBlockCacheForTest constructs a diskBlockCacheWrapped (a
// working-set cache plus a sync cache) backed by a backpressure disk
// limiter whose free-space function simulates a disk holding maxBytes
// in total. The limiter is stored back into config.limiter so the
// caches and the test share the same instance.
func newDiskBlockCacheForTest(config *testDiskBlockCacheConfig,
	maxBytes int64) (*diskBlockCacheWrapped, error) {
	maxFiles := int64(10000)
	workingSetCache, err := newDiskBlockCacheLocalForTest(config,
		workingSetCacheLimitTrackerType)
	if err != nil {
		return nil, err
	}
	syncCache, err := newDiskBlockCacheLocalForTest(
		config, syncCacheLimitTrackerType)
	if err != nil {
		return nil, err
	}
	err = workingSetCache.WaitUntilStarted()
	if err != nil {
		return nil, err
	}
	err = syncCache.WaitUntilStarted()
	if err != nil {
		return nil, err
	}
	params := backpressureDiskLimiterParams{
		minThreshold:      0.5,
		maxThreshold:      0.95,
		quotaMinThreshold: 1.0,
		quotaMaxThreshold: 1.2,
		journalFrac:       0.25,
		diskCacheFrac:     0.25,
		syncCacheFrac:     0.25,
		byteLimit:         testDiskBlockCacheMaxBytes,
		fileLimit:         maxFiles,
		maxDelay:          time.Second,
		delayFn:           defaultDoDelay,
		freeBytesAndFilesFn: func() (int64, int64, error) {
			// hackity hackeroni: simulate the disk cache taking up space.
			syncBytes, workingBytes := testGetDiskCacheBytes(
				syncCache, workingSetCache)
			freeBytes := maxBytes - syncBytes - workingBytes
			return freeBytes, maxFiles, nil
		},
		quotaFn: func(
			context.Context, keybase1.UserOrTeamID) (int64, int64) {
			// Effectively unlimited quota for tests.
			return 0, math.MaxInt64
		},
	}
	config.limiter, err = newBackpressureDiskLimiter(
		config.MakeLogger(""), params)
	if err != nil {
		return nil, err
	}
	return &diskBlockCacheWrapped{
		config:          config,
		storageRoot:     "",
		workingSetCache: workingSetCache,
		syncCache:       syncCache,
	}, nil
}
   121  
   122  func initDiskBlockCacheTest(t *testing.T) (*diskBlockCacheWrapped,
   123  	*testDiskBlockCacheConfig) {
   124  	config := newTestDiskBlockCacheConfig(t)
   125  	cache, err := newDiskBlockCacheForTest(config,
   126  		testDiskBlockCacheMaxBytes)
   127  	require.NoError(t, err)
   128  	return cache, config
   129  }
   130  
// testDiskBlockCacheGetter hands out a DiskBlockCache under a
// read-write lock, for use as a diskBlockCacheGetter in tests.
type testDiskBlockCacheGetter struct {
	lock  sync.RWMutex
	cache DiskBlockCache
}
   135  
// DiskBlockCache returns the wrapped cache under a read lock.
func (dbcg *testDiskBlockCacheGetter) DiskBlockCache() DiskBlockCache {
	dbcg.lock.RLock()
	defer dbcg.lock.RUnlock()
	return dbcg.cache
}
   141  
// newTestDiskBlockCacheGetter wraps a cache in a testDiskBlockCacheGetter.
// The *testing.T parameter is currently unused; it's kept for call-site
// consistency with the other test constructors.
func newTestDiskBlockCacheGetter(t *testing.T,
	cache DiskBlockCache) *testDiskBlockCacheGetter {
	return &testDiskBlockCacheGetter{cache: cache}
}
   146  
   147  func shutdownDiskBlockCacheTest(cache DiskBlockCache) {
   148  	<-cache.Shutdown(context.Background())
   149  }
   150  
   151  func setupRealBlockForDiskCache(t *testing.T, ptr data.BlockPointer, block data.Block,
   152  	config diskBlockCacheConfig) ([]byte, kbfscrypto.BlockCryptKeyServerHalf) {
   153  	blockEncoded, err := config.Codec().Encode(block)
   154  	require.NoError(t, err)
   155  	serverHalf, err := kbfscrypto.MakeRandomBlockCryptKeyServerHalf()
   156  	require.NoError(t, err)
   157  	return blockEncoded, serverHalf
   158  }
   159  
   160  func setupBlockForDiskCache(t *testing.T, config diskBlockCacheConfig) (
   161  	data.BlockPointer, data.Block, []byte, kbfscrypto.BlockCryptKeyServerHalf) {
   162  	ptr := makeRandomBlockPointer(t)
   163  	block := makeFakeFileBlock(t, true)
   164  	blockEncoded, serverHalf :=
   165  		setupRealBlockForDiskCache(t, ptr, block, config)
   166  	return ptr, block, blockEncoded, serverHalf
   167  }
   168  
// TestDiskBlockCachePutAndGet checks the Put/Get round trip, the LRU
// metadata update performed by Get, and the error paths for a block
// that was never cached.
func TestDiskBlockCachePutAndGet(t *testing.T) {
	t.Parallel()
	t.Log("Test that basic disk cache Put and Get operations work.")
	cache, config := initDiskBlockCacheTest(t)
	defer shutdownDiskBlockCacheTest(cache)

	tlf1 := tlf.FakeID(0, tlf.Private)
	block1Ptr, _, block1Encoded, block1ServerHalf := setupBlockForDiskCache(
		t, config)

	ctx := context.Background()

	t.Log("Put a block into the cache.")
	err := cache.Put(
		ctx, tlf1, block1Ptr.ID, block1Encoded, block1ServerHalf,
		DiskBlockAnyCache)
	require.NoError(t, err)
	putMd, err := cache.GetMetadata(ctx, block1Ptr.ID)
	require.NoError(t, err)
	// Advance the clock so the Get below records a strictly later LRU
	// time than the Put did.
	config.TestClock().Add(time.Second)

	t.Log("Get that block from the cache. Verify that it's the same.")
	buf, serverHalf, _, err := cache.Get(
		ctx, tlf1, block1Ptr.ID, DiskBlockAnyCache)
	require.NoError(t, err)
	require.Equal(t, block1ServerHalf, serverHalf)
	require.Equal(t, block1Encoded, buf)

	t.Log("Verify that the Get updated the LRU time for the block.")
	getMd, err := cache.GetMetadata(ctx, block1Ptr.ID)
	require.NoError(t, err)
	require.True(t, getMd.LRUTime.After(putMd.LRUTime.Time), "Get LRU time isn't "+
		"after the Put LRU time. Put metadata: %+v, Get metadata: %+v",
		putMd, getMd)

	t.Log("Attempt to Get a block from the cache that isn't there." +
		" Verify that it fails.")
	ptr2 := makeRandomBlockPointer(t)
	buf, serverHalf, _, err = cache.Get(
		ctx, tlf1, ptr2.ID, DiskBlockAnyCache)
	require.EqualError(t, err, data.NoSuchBlockError{ID: ptr2.ID}.Error())
	require.Equal(t, kbfscrypto.BlockCryptKeyServerHalf{}, serverHalf)
	require.Nil(t, buf)

	t.Log("Verify that the cache returns no metadata for the missing block.")
	_, err = cache.GetMetadata(ctx, ptr2.ID)
	require.EqualError(t, err, errors.ErrNotFound.Error())
}
   217  
// TestDiskBlockCacheDelete checks that deleted blocks (and their
// metadata) disappear from the cache while unrelated blocks survive.
func TestDiskBlockCacheDelete(t *testing.T) {
	t.Parallel()
	t.Log("Test that disk cache deletion works.")
	cache, config := initDiskBlockCacheTest(t)
	defer shutdownDiskBlockCacheTest(cache)
	ctx := context.Background()

	t.Log("Seed the cache with some other TLFs")
	fakeTlfs := []byte{0, 1, 2, 4, 5}
	for _, f := range fakeTlfs {
		tlf := tlf.FakeID(f, tlf.Private)
		blockPtr, _, blockEncoded, serverHalf := setupBlockForDiskCache(
			t, config)
		err := cache.Put(
			ctx, tlf, blockPtr.ID, blockEncoded, serverHalf, DiskBlockAnyCache)
		require.NoError(t, err)
	}
	tlf1 := tlf.FakeID(3, tlf.Private)
	block1Ptr, _, block1Encoded, block1ServerHalf := setupBlockForDiskCache(t,
		config)
	block2Ptr, _, block2Encoded, block2ServerHalf := setupBlockForDiskCache(t,
		config)
	block3Ptr, _, block3Encoded, block3ServerHalf := setupBlockForDiskCache(t,
		config)

	t.Log("Put three blocks into the cache.")
	err := cache.Put(
		ctx, tlf1, block1Ptr.ID, block1Encoded, block1ServerHalf,
		DiskBlockAnyCache)
	require.NoError(t, err)
	err = cache.Put(
		ctx, tlf1, block2Ptr.ID, block2Encoded, block2ServerHalf,
		DiskBlockAnyCache)
	require.NoError(t, err)
	err = cache.Put(
		ctx, tlf1, block3Ptr.ID, block3Encoded, block3ServerHalf,
		DiskBlockAnyCache)
	require.NoError(t, err)

	t.Log("Delete two of the blocks from the cache.")
	_, _, err = cache.Delete(
		ctx, []kbfsblock.ID{block1Ptr.ID, block2Ptr.ID}, DiskBlockAnyCache)
	require.NoError(t, err)

	t.Log("Verify that only the non-deleted block is still in the cache.")
	_, _, _, err = cache.Get(ctx, tlf1, block1Ptr.ID, DiskBlockAnyCache)
	require.EqualError(t, err, data.NoSuchBlockError{ID: block1Ptr.ID}.Error())
	_, _, _, err = cache.Get(ctx, tlf1, block2Ptr.ID, DiskBlockAnyCache)
	require.EqualError(t, err, data.NoSuchBlockError{ID: block2Ptr.ID}.Error())
	_, _, _, err = cache.Get(ctx, tlf1, block3Ptr.ID, DiskBlockAnyCache)
	require.NoError(t, err)

	t.Log("Verify that the cache returns no LRU time for the missing blocks.")
	_, err = cache.GetMetadata(ctx, block1Ptr.ID)
	require.EqualError(t, err, errors.ErrNotFound.Error())
	_, err = cache.GetMetadata(ctx, block2Ptr.ID)
	require.EqualError(t, err, errors.ErrNotFound.Error())
}
   276  
// TestDiskBlockCacheEvictFromTLF repeatedly evicts batches of one TLF's
// blocks and checks that eviction is LRU-biased: the average LRU time
// of the surviving blocks should trend upward over the rounds.
func TestDiskBlockCacheEvictFromTLF(t *testing.T) {
	t.Parallel()
	t.Log("Test that disk cache eviction works for a single TLF.")
	cache, config := initDiskBlockCacheTest(t)
	standardCache := cache.workingSetCache
	defer shutdownDiskBlockCacheTest(cache)

	tlf1 := tlf.FakeID(3, tlf.Private)
	ctx := context.Background()
	clock := config.TestClock()
	initialTime := clock.Now()
	t.Log("Seed the cache with some other TLFs.")
	fakeTlfs := []byte{0, 1, 2, 4, 5}
	for _, f := range fakeTlfs {
		tlf := tlf.FakeID(f, tlf.Private)
		blockPtr, _, blockEncoded, serverHalf := setupBlockForDiskCache(
			t, config)
		err := standardCache.Put(
			ctx, tlf, blockPtr.ID, blockEncoded, serverHalf)
		require.NoError(t, err)
		clock.Add(time.Second)
	}
	tlf1NumBlocks := 100
	t.Log("Put 100 blocks into the cache.")
	for i := 0; i < tlf1NumBlocks; i++ {
		blockPtr, _, blockEncoded, serverHalf := setupBlockForDiskCache(
			t, config)
		err := standardCache.Put(
			ctx, tlf1, blockPtr.ID, blockEncoded, serverHalf)
		require.NoError(t, err)
		clock.Add(time.Second)
	}

	// Blocks were put one second apart, so the initial average LRU age
	// of the 100 tlf1 blocks is about 50 seconds.
	previousAvgDuration := 50 * time.Second
	averageDifference := float64(0)
	numEvictionDifferences := 0
	expectedCount := tlf1NumBlocks

	t.Log("Incrementally evict all the tlf1 blocks in the cache.")
	// Because the eviction algorithm is probabilistic, we can't rely on the
	// same number of blocks being evicted every time. So we have to be smart
	// about our assertions.
	for expectedCount != 0 {
		t.Log("Evict 10 blocks from the cache.")
		numRemoved, _, err := standardCache.evictFromTLFLocked(ctx, tlf1, 10)
		require.NoError(t, err)
		expectedCount -= numRemoved

		blockCount := 0
		var avgDuration time.Duration
		func() {
			// Iterate the tlf1 range of the TLF database to count the
			// surviving blocks and sum their LRU ages.
			tlfBytes := tlf1.Bytes()
			tlf1Range := util.BytesPrefix(tlfBytes)
			iter := standardCache.tlfDb.NewIterator(tlf1Range, nil)
			defer iter.Release()
			for iter.Next() {
				blockIDBytes := iter.Key()[len(tlfBytes):]
				blockID, err := kbfsblock.IDFromBytes(blockIDBytes)
				require.NoError(t, err)
				putMd, err := standardCache.GetMetadata(ctx, blockID)
				require.NoError(t, err)
				avgDuration += putMd.LRUTime.Sub(initialTime)
				blockCount++
			}
		}()
		t.Logf("Verify that there are %d blocks in the cache.", expectedCount)
		require.Equal(t, expectedCount, blockCount,
			"Removed %d blocks this round.", numRemoved)
		if expectedCount > 0 {
			avgDuration /= time.Duration(expectedCount)
			t.Logf("Average LRU time of remaining blocks: %.2f",
				avgDuration.Seconds())
			averageDifference += avgDuration.Seconds() -
				previousAvgDuration.Seconds()
			previousAvgDuration = avgDuration
			numEvictionDifferences++
		}
	}
	t.Log("Verify that, on average, the LRU time of the blocks remaining in" +
		" the queue keeps going up.")
	averageDifference /= float64(numEvictionDifferences)
	require.True(t, averageDifference > 3.0,
		"Average overall LRU delta from an eviction: %.2f", averageDifference)
}
   361  
// TestDiskBlockCacheEvictOverall evicts batches across all TLFs and
// checks that overall eviction is LRU-biased: the average LRU time of
// the surviving blocks should trend upward over the rounds.
func TestDiskBlockCacheEvictOverall(t *testing.T) {
	t.Parallel()
	t.Log("Test that disk cache eviction works overall.")
	cache, config := initDiskBlockCacheTest(t)
	standardCache := cache.workingSetCache
	defer shutdownDiskBlockCacheTest(cache)

	ctx := context.Background()
	clock := config.TestClock()
	initialTime := clock.Now()

	numTlfs := 10
	numBlocksPerTlf := 10
	totalBlocks := numTlfs * numBlocksPerTlf

	t.Log("Seed the cache with some other TLFs.")
	for i := byte(0); int(i) < numTlfs; i++ {
		currTlf := tlf.FakeID(i, tlf.Private)
		for j := 0; j < numBlocksPerTlf; j++ {
			blockPtr, _, blockEncoded, serverHalf := setupBlockForDiskCache(
				t, config)
			err := standardCache.Put(
				ctx, currTlf, blockPtr.ID, blockEncoded, serverHalf)
			require.NoError(t, err)
			clock.Add(time.Second)
		}
	}

	// Average LRU will initially be half the total number of blocks, in
	// seconds.
	previousAvgDuration := time.Duration(totalBlocks>>1) * time.Second
	averageDifference := float64(0)
	numEvictionDifferences := 0
	expectedCount := totalBlocks

	t.Log("Incrementally evict all the blocks in the cache.")
	// Because the eviction algorithm is probabilistic, we can't rely on the
	// same number of blocks being evicted every time. So we have to be smart
	// about our assertions.
	for expectedCount != 0 {
		t.Log("Evict 10 blocks from the cache.")
		numRemoved, _, err := standardCache.evictLocked(ctx, 10)
		require.NoError(t, err)
		expectedCount -= numRemoved

		blockCount := 0
		var avgDuration time.Duration
		func() {
			// Walk the entire metadata DB to count survivors and sum
			// their LRU ages.
			iter := standardCache.metaDb.NewIterator(nil, nil)
			defer iter.Release()
			for iter.Next() {
				metadata := DiskBlockCacheMetadata{}
				err = config.Codec().Decode(iter.Value(), &metadata)
				require.NoError(t, err)
				avgDuration += metadata.LRUTime.Sub(initialTime)
				blockCount++
			}
		}()
		t.Logf("Verify that there are %d blocks in the cache.", expectedCount)
		require.Equal(t, expectedCount, blockCount,
			"Removed %d blocks this round.", numRemoved)
		if expectedCount > 0 {
			avgDuration /= time.Duration(expectedCount)
			t.Logf("Average LRU time of remaining blocks: %.2f",
				avgDuration.Seconds())
			averageDifference += avgDuration.Seconds() -
				previousAvgDuration.Seconds()
			previousAvgDuration = avgDuration
			numEvictionDifferences++
		}
	}
	t.Log("Verify that, on average, the LRU time of the blocks remaining in" +
		" the queue keeps going up.")
	averageDifference /= float64(numEvictionDifferences)
	require.True(t, averageDifference > 3.0,
		"Average overall LRU delta from an eviction: %.2f", averageDifference)
}
   439  
// TestDiskBlockCacheStaticLimit pins the working-set cache's byte limit
// at its current usage and verifies that the next Put triggers a batch
// eviction.
func TestDiskBlockCacheStaticLimit(t *testing.T) {
	t.Parallel()
	t.Log("Test that disk cache eviction works when we hit the static limit.")
	cache, config := initDiskBlockCacheTest(t)
	standardCache := cache.workingSetCache
	defer shutdownDiskBlockCacheTest(cache)

	ctx := context.Background()
	clock := config.TestClock()

	numTlfs := 10
	numBlocksPerTlf := 5
	numBlocks := numTlfs * numBlocksPerTlf

	t.Log("Seed the cache with some blocks.")
	for i := byte(0); int(i) < numTlfs; i++ {
		currTlf := tlf.FakeID(i, tlf.Private)
		for j := 0; j < numBlocksPerTlf; j++ {
			blockPtr, _, blockEncoded, serverHalf := setupBlockForDiskCache(
				t, config)
			err := standardCache.Put(
				ctx, currTlf, blockPtr.ID, blockEncoded, serverHalf)
			require.NoError(t, err)
			clock.Add(time.Second)
		}
	}

	t.Log("Set the cache maximum bytes to the current total.")
	currBytes := int64(standardCache.currBytes)
	limiter := config.DiskLimiter().(*backpressureDiskLimiter)
	limiter.diskCacheByteTracker.limit = currBytes

	t.Log("Add a block to the cache. Verify that blocks were evicted.")
	blockPtr, _, blockEncoded, serverHalf := setupBlockForDiskCache(
		t, config)
	err := standardCache.Put(
		ctx, tlf.FakeID(10, tlf.Private), blockPtr.ID, blockEncoded, serverHalf)
	require.NoError(t, err)

	require.True(t, int64(standardCache.currBytes) < currBytes)
	// One minimum-size eviction batch went out, then one block came in.
	require.Equal(
		t, 1+numBlocks-minNumBlocksToEvictInBatch, standardCache.numBlocks)
}
   483  
// TestDiskBlockCacheDynamicLimit pins the dynamic (free-space-derived)
// limit at the cache's current usage and verifies that each subsequent
// Put is balanced by batch evictions so the block count cycles within
// one batch of the steady-state level.
func TestDiskBlockCacheDynamicLimit(t *testing.T) {
	t.Parallel()
	t.Log("Test that disk cache eviction works when we hit a dynamic limit.")
	cache, config := initDiskBlockCacheTest(t)
	standardCache := cache.workingSetCache
	defer shutdownDiskBlockCacheTest(cache)

	ctx := context.Background()
	clock := config.TestClock()

	numTlfs := 10
	numBlocksPerTlf := 5
	numBlocks := numTlfs * numBlocksPerTlf

	t.Log("Seed the cache with some blocks.")
	for i := byte(0); int(i) < numTlfs; i++ {
		currTlf := tlf.FakeID(i, tlf.Private)
		for j := 0; j < numBlocksPerTlf; j++ {
			blockPtr, _, blockEncoded, serverHalf := setupBlockForDiskCache(
				t, config)
			err := standardCache.Put(
				ctx, currTlf, blockPtr.ID, blockEncoded, serverHalf)
			require.NoError(t, err)
			clock.Add(time.Second)
		}
	}

	t.Log("Set the cache dynamic limit to its current value by tweaking the" +
		" free space function.")
	currBytes := int64(standardCache.currBytes)
	limiter := config.DiskLimiter().(*backpressureDiskLimiter)
	limiter.freeBytesAndFilesFn = func() (int64, int64, error) {
		// Since the limit is 25% of the total available space, make that true
		// for the current used byte count.  We do this by setting the free
		// byte count to 75% of the total, which is 3x used bytes.
		freeBytes := currBytes * 3
		// arbitrarily large number
		numFiles := int64(100000000)
		return freeBytes, numFiles, nil
	}

	t.Log("Add a round of blocks to the cache. Verify that blocks were" +
		" evicted each time we went past the limit.")
	start := numBlocks - minNumBlocksToEvictInBatch
	for i := 1; i <= numBlocks; i++ {
		blockPtr, _, blockEncoded, serverHalf := setupBlockForDiskCache(
			t, config)
		err := standardCache.Put(
			ctx, tlf.FakeID(10, tlf.Private), blockPtr.ID, blockEncoded,
			serverHalf)
		require.NoError(t, err)
		// Every Put either fits under the limit or triggers one batch
		// eviction, so the count cycles modulo the batch size.
		require.Equal(
			t, start+(i%minNumBlocksToEvictInBatch), standardCache.numBlocks)
	}

	require.True(t, int64(standardCache.currBytes) < currBytes)
	require.Equal(t, start, standardCache.numBlocks)
}
   542  
// TestDiskBlockCacheWithRetrievalQueue verifies that a block request
// can be served straight from the disk block cache when the retrieval
// queue has no workers available to fetch it any other way.
func TestDiskBlockCacheWithRetrievalQueue(t *testing.T) {
	t.Parallel()
	t.Log("Test the interaction of the disk block cache and retrieval queue.")
	cache, dbcConfig := initDiskBlockCacheTest(t)
	require.NotNil(t, cache)
	defer shutdownDiskBlockCacheTest(cache)

	t.Log("Create a queue with 0 workers to rule it out from serving blocks.")
	bg := newFakeBlockGetter(false)
	q := newBlockRetrievalQueue(
		0, 0, 0, newTestBlockRetrievalConfig(t, bg, cache),
		env.EmptyAppStateUpdater{})
	require.NotNil(t, q)
	defer endBlockRetrievalQueueTest(t, q)

	ctx := context.Background()
	kmd := makeKMD()
	ptr1, block1, block1Encoded, serverHalf1 := setupBlockForDiskCache(
		t, dbcConfig)
	err := cache.Put(
		ctx, kmd.TlfID(), ptr1.ID, block1Encoded, serverHalf1,
		DiskBlockAnyCache)
	require.NoError(t, err)
	// No workers initialized, so no need to clean up the continue ch since
	// there will be nothing blocking on it.
	_, _ = bg.setBlockToReturn(ptr1, block1)

	t.Log("Request a block retrieval for ptr1. " +
		"Verify the block against the one we put in the disk block cache.")
	block := &data.FileBlock{}
	ch := q.Request(
		ctx, 1, kmd, ptr1, block, data.TransientEntry, BlockRequestWithPrefetch)
	err = <-ch
	require.NoError(t, err)
	require.Equal(t, block1, block)
}
   579  
   580  func seedDiskBlockCacheForTest(ctx context.Context, t *testing.T,
   581  	cache *diskBlockCacheWrapped, config diskBlockCacheConfig, numTlfs,
   582  	numBlocksPerTlf int) {
   583  	t.Log("Seed the cache with some blocks.")
   584  	clock := config.Clock().(*clocktest.TestClock)
   585  	for i := byte(0); int(i) < numTlfs; i++ {
   586  		currTlf := tlf.FakeID(i, tlf.Private)
   587  		for j := 0; j < numBlocksPerTlf; j++ {
   588  			blockPtr, _, blockEncoded, serverHalf := setupBlockForDiskCache(
   589  				t, config)
   590  			err := cache.Put(
   591  				ctx, currTlf, blockPtr.ID, blockEncoded, serverHalf,
   592  				DiskBlockSyncCache)
   593  			require.NoError(t, err)
   594  			clock.Add(time.Second)
   595  		}
   596  	}
   597  }
   598  
// testPutBlockWhenSyncCacheFull fills the sync cache up to its byte
// limit, then verifies that a Put through putCache still succeeds and
// that nothing gets evicted from the sync cache.
func testPutBlockWhenSyncCacheFull(
	ctx context.Context, t *testing.T, putCache *DiskBlockCacheLocal,
	cache *diskBlockCacheWrapped, config *testDiskBlockCacheConfig) {
	numTlfs := 10
	numBlocksPerTlf := 5
	numBlocks := numTlfs * numBlocksPerTlf
	seedDiskBlockCacheForTest(ctx, t, cache, config, numTlfs, numBlocksPerTlf)

	t.Log("Set the cache maximum bytes to the current total.")
	require.Equal(t, 0, putCache.numBlocks)
	currBytes := int64(cache.syncCache.currBytes)
	limiter := config.DiskLimiter().(*backpressureDiskLimiter)
	limiter.syncCacheByteTracker.limit = currBytes

	t.Log("Add a block to the cache. Verify that no blocks were evicted " +
		"and the working set got a new block.")
	blockPtr, _, blockEncoded, serverHalf := setupBlockForDiskCache(
		t, config)
	err := putCache.Put(
		ctx, tlf.FakeID(0, tlf.Private), blockPtr.ID, blockEncoded, serverHalf)
	require.NoError(t, err)

	// The sync cache is untouched; the new block landed in putCache.
	require.Equal(t, int64(cache.syncCache.currBytes), currBytes)
	require.Equal(t, numBlocks, cache.syncCache.numBlocks)
	require.Equal(t, 1, putCache.numBlocks)
}
   625  
   626  func TestSyncBlockCacheStaticLimit(t *testing.T) {
   627  	t.Parallel()
   628  	t.Log("Test that disk cache eviction doesn't happen in sync cache")
   629  	cache, config := initDiskBlockCacheTest(t)
   630  	defer shutdownDiskBlockCacheTest(cache)
   631  	ctx := context.Background()
   632  
   633  	testPutBlockWhenSyncCacheFull(ctx, t, cache.workingSetCache, cache, config)
   634  }
   635  
   636  func TestCrDirtyBlockCacheStaticLimit(t *testing.T) {
   637  	t.Parallel()
   638  	t.Log("Test that cr cache accepts blocks even when sync limit is hit")
   639  	cache, config := initDiskBlockCacheTest(t)
   640  	defer shutdownDiskBlockCacheTest(cache)
   641  	crCache, err := newDiskBlockCacheLocalForTest(
   642  		config, crDirtyBlockCacheLimitTrackerType)
   643  	require.NoError(t, err)
   644  	ctx := context.Background()
   645  	defer crCache.Shutdown(ctx)
   646  
   647  	err = crCache.WaitUntilStarted()
   648  	require.NoError(t, err)
   649  
   650  	testPutBlockWhenSyncCacheFull(ctx, t, crCache, cache, config)
   651  }
   652  
// TestDiskBlockCacheLastUnrefPutAndGet exercises PutLastUnrefRev /
// GetLastUnrefRev per TLF, including the rule that a lower revision
// never overwrites a higher one, and persistence across a DB re-read.
func TestDiskBlockCacheLastUnrefPutAndGet(t *testing.T) {
	t.Parallel()
	t.Log("Test that basic disk cache last unref Put and Get operations work.")
	cache, _ := initDiskBlockCacheTest(t)
	defer shutdownDiskBlockCacheTest(cache)

	ctx := context.Background()

	t.Log("Put and get a last unref revision into the cache.")
	tlf1 := tlf.FakeID(0, tlf.Private)
	rev1 := kbfsmd.Revision(1)
	ct := DiskBlockWorkingSetCache
	err := cache.PutLastUnrefRev(ctx, tlf1, rev1, ct)
	require.NoError(t, err)
	getRev1, err := cache.GetLastUnrefRev(ctx, tlf1, ct)
	require.NoError(t, err)
	require.Equal(t, rev1, getRev1)

	t.Log("Put and get a last unref revision into the cache for another TLF.")
	tlf2 := tlf.FakeID(1, tlf.Public)
	rev2 := kbfsmd.Revision(200)
	err = cache.PutLastUnrefRev(ctx, tlf2, rev2, ct)
	require.NoError(t, err)
	getRev2, err := cache.GetLastUnrefRev(ctx, tlf2, ct)
	require.NoError(t, err)
	require.Equal(t, rev2, getRev2)

	t.Log("Put a lower revision; should be ignored")
	rev2b := kbfsmd.Revision(100)
	err = cache.PutLastUnrefRev(ctx, tlf2, rev2b, ct)
	require.NoError(t, err)
	getRev2, err = cache.GetLastUnrefRev(ctx, tlf2, ct)
	require.NoError(t, err)
	require.Equal(t, rev2, getRev2)

	// Force re-read from DB, to check the revisions were persisted and
	// not just held in memory.
	cache.syncCache.tlfLastUnrefs = nil
	err = cache.syncCache.syncBlockCountsAndUnrefsFromDb()
	require.NoError(t, err)
	getRev1, err = cache.GetLastUnrefRev(ctx, tlf1, ct)
	require.NoError(t, err)
	require.Equal(t, rev1, getRev1)
	getRev2, err = cache.GetLastUnrefRev(ctx, tlf2, ct)
	require.NoError(t, err)
	require.Equal(t, rev2, getRev2)
}
   699  
// TestDiskBlockCacheUnsyncTlf checks that disabling sync for a TLF
// asynchronously clears exactly that TLF's blocks from the sync cache.
func TestDiskBlockCacheUnsyncTlf(t *testing.T) {
	t.Parallel()
	t.Log("Test that blocks are cleaned up after unsyncing a TLF.")

	tempdir, err := ioutil.TempDir(os.TempDir(), "kbfscache")
	require.NoError(t, err)
	defer func() {
		err := ioutil.RemoveAll(tempdir)
		require.NoError(t, err)
	}()

	// Use a real config, since we need the real SetTlfSyncState
	// implementation.
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "u1")
	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)

	clock := clocktest.NewTestClockNow()
	config.SetClock(clock)

	err = config.EnableDiskLimiter(tempdir)
	require.NoError(t, err)
	err = config.loadSyncedTlfsLocked()
	require.NoError(t, err)
	config.diskCacheMode = DiskCacheModeLocal
	err = config.MakeDiskBlockCacheIfNotExists()
	require.NoError(t, err)
	cache := config.DiskBlockCache().(*diskBlockCacheWrapped)
	defer func() {
		<-cache.Shutdown(context.Background())
	}()
	standardCache := cache.syncCache
	err = standardCache.WaitUntilStarted()
	require.NoError(t, err)

	numTlfs := 3
	numBlocksPerTlf := 5
	numBlocks := numTlfs * numBlocksPerTlf
	seedDiskBlockCacheForTest(ctx, t, cache, config, numTlfs, numBlocksPerTlf)
	require.Equal(t, numBlocks, standardCache.numBlocks)

	// Make the background clearing loop run as fast as possible, one
	// block at a time.
	standardCache.clearTickerDuration = 0
	standardCache.numBlocksToEvictOnClear = 1

	tlfToUnsync := tlf.FakeID(1, tlf.Private)
	ch, err := config.SetTlfSyncState(ctx, tlfToUnsync, FolderSyncConfig{
		Mode: keybase1.FolderSyncMode_DISABLED,
	})
	require.NoError(t, err)
	t.Log("Waiting for unsynced blocks to be cleared.")
	err = <-ch
	require.NoError(t, err)
	require.Equal(t, numBlocks-numBlocksPerTlf, standardCache.numBlocks)
}
   753  
// TestDiskBlockCacheMoveBlock checks that Get with a different
// preferred cache moves a block from the working-set cache to the sync
// cache, and that the move downgrades the block's prefetch status.
func TestDiskBlockCacheMoveBlock(t *testing.T) {
	t.Parallel()
	t.Log("Test that blocks can be moved between caches.")
	cache, config := initDiskBlockCacheTest(t)
	defer shutdownDiskBlockCacheTest(cache)

	ctx := context.Background()
	tlf1 := tlf.FakeID(0, tlf.Private)
	block1Ptr, _, block1Encoded, block1ServerHalf := setupBlockForDiskCache(
		t, config)

	t.Log("Put a block into the default cache.")
	err := cache.Put(
		ctx, tlf1, block1Ptr.ID, block1Encoded, block1ServerHalf,
		DiskBlockAnyCache)
	require.NoError(t, err)
	err = cache.UpdateMetadata(
		ctx, tlf1, block1Ptr.ID, FinishedPrefetch, DiskBlockAnyCache)
	require.NoError(t, err)
	require.Equal(t, 1, cache.workingSetCache.numBlocks)
	require.Equal(t, 0, cache.syncCache.numBlocks)

	t.Log("Move the block by getting it with a different preferred cache.")
	_, _, _, err = cache.Get(ctx, tlf1, block1Ptr.ID, DiskBlockSyncCache)
	require.NoError(t, err)
	// The delete from the source cache happens asynchronously; wait for
	// it before counting blocks.
	err = cache.waitForDeletes(ctx)
	require.NoError(t, err)
	require.Equal(t, 1, cache.syncCache.numBlocks)
	require.Equal(t, 0, cache.workingSetCache.numBlocks)

	t.Log("After the move, make sure the prefetch status is downgraded.")
	_, _, prefetchStatus, err := cache.Get(
		ctx, tlf1, block1Ptr.ID, DiskBlockAnyCache)
	require.NoError(t, err)
	require.Equal(t, 1, cache.syncCache.numBlocks)
	require.Equal(t, TriggeredPrefetch, prefetchStatus)
}
   791  
   792  // seedTlf seeds the cache with blocks from a given TLF ID. Notably,
   793  // it does NOT give them different times,
   794  // because that makes TLFs filled first more likely to face eviction.
   795  func seedTlf(ctx context.Context, t *testing.T,
   796  	cache *diskBlockCacheWrapped, config diskBlockCacheConfig, tlfID tlf.ID,
   797  	numBlocksPerTlf int) {
   798  	for j := 0; j < numBlocksPerTlf; j++ {
   799  		blockPtr, _, blockEncoded, serverHalf := setupBlockForDiskCache(
   800  			t, config)
   801  		err := cache.Put(
   802  			ctx, tlfID, blockPtr.ID, blockEncoded, serverHalf,
   803  			DiskBlockSyncCache)
   804  		require.NoError(t, err)
   805  	}
   806  
   807  }
   808  
func TestDiskBlockCacheHomeDirPriorities(t *testing.T) {
	t.Parallel()
	t.Log("Test that blocks from a home directory aren't evicted when there" +
		" are other options.")
	cache, config := initDiskBlockCacheTest(t)
	defer shutdownDiskBlockCacheTest(cache)

	ctx := context.Background()

	// Seed the global RNG so the randomized eviction choices below are
	// reproducible across runs.
	rand.Seed(1)

	t.Log("Set home directories on the cache")
	homeTLF := tlf.FakeID(100, tlf.Private)
	err := cache.AddHomeTLF(ctx, homeTLF)
	require.NoError(t, err)
	homePublicTLF := tlf.FakeID(101, tlf.Public)
	err = cache.AddHomeTLF(ctx, homePublicTLF)
	require.NoError(t, err)

	t.Log("Seed the cache with blocks")
	totalBlocks := 0
	homeTLFBlocksEach := 50
	// Record each TLF's starting block count so we can check later how
	// many blocks each one lost to eviction.
	originalSizes := map[tlf.ID]int{
		homePublicTLF: homeTLFBlocksEach,
		homeTLF:       homeTLFBlocksEach,
	}

	seedTlf(ctx, t, cache, config, homePublicTLF, homeTLFBlocksEach)
	seedTlf(ctx, t, cache, config, homeTLF, homeTLFBlocksEach)
	totalBlocks += 2 * homeTLFBlocksEach
	otherTlfIds := []tlf.ID{
		tlf.FakeID(1, tlf.Private),
		tlf.FakeID(2, tlf.Public),
		tlf.FakeID(3, tlf.Private),
		tlf.FakeID(4, tlf.Private),
		tlf.FakeID(5, tlf.Public),
	}

	// Distribute the blocks exponentially over the non-home TLFs.
	// Use LOTS of blocks to get better statistical behavior.
	nextTlfSize := 200
	for _, tlfID := range otherTlfIds {
		seedTlf(ctx, t, cache, config, tlfID, nextTlfSize)
		originalSizes[tlfID] = nextTlfSize
		totalBlocks += nextTlfSize
		nextTlfSize *= 2
	}

	t.Log("Evict half the non-home TLF blocks using small eviction sizes.")
	evictionSize := 5
	// Enough evictions of evictionSize blocks each to remove half of
	// the non-home blocks (home blocks are excluded from the total).
	numEvictions := (totalBlocks - 2*homeTLFBlocksEach) / (evictionSize * 2)
	for i := 0; i < numEvictions; i++ {
		_, _, err := cache.syncCache.evictLocked(ctx, evictionSize)
		require.NoError(t, err)
		totalBlocks -= evictionSize
	}

	t.Log("Verify that the non-home TLFs have been reduced in size by about" +
		" half")
	// Allow a tolerance of .5, so 25-75% of the original size.
	for _, tlfID := range otherTlfIds {
		original := originalSizes[tlfID]
		current := cache.syncCache.tlfCounts[tlfID]
		t.Logf("ID: %v, Current: %d, Original: %d", tlfID, current, original)
		require.InEpsilon(t, original/2, current, 0.5)
	}
	// The home TLFs must be completely untouched at this point.
	require.Equal(t, homeTLFBlocksEach, cache.syncCache.tlfCounts[homeTLF])
	require.Equal(t, homeTLFBlocksEach, cache.syncCache.tlfCounts[homePublicTLF])

	t.Log("Evict the rest of the non-home TLF blocks in 2 evictions.")
	numEvictions = 2
	evictionSize1 := (totalBlocks - 2*homeTLFBlocksEach) / numEvictions
	// In the second eviction, evict enough blocks to touch the public home.
	publicHomeEvict := 10
	evictionSize2 := totalBlocks - 2*homeTLFBlocksEach - evictionSize1 + publicHomeEvict

	_, _, err = cache.syncCache.evictLocked(ctx, evictionSize1)
	require.NoError(t, err)

	// Make sure the home TLFs are not touched.
	require.Equal(t, homeTLFBlocksEach, cache.syncCache.tlfCounts[homeTLF])
	require.Equal(t, homeTLFBlocksEach, cache.syncCache.tlfCounts[homePublicTLF])

	_, _, err = cache.syncCache.evictLocked(ctx, evictionSize2)
	require.NoError(t, err)

	// Make sure the home TLFs are minimally touched: the overshoot of
	// publicHomeEvict blocks must come out of the public home only,
	// leaving the private home full.
	require.Equal(t, homeTLFBlocksEach, cache.syncCache.tlfCounts[homeTLF])
	require.Equal(t, homeTLFBlocksEach-publicHomeEvict,
		cache.syncCache.tlfCounts[homePublicTLF])

	t.Log("Evict enough blocks to get rid of the public home TLF.")
	_, _, err = cache.syncCache.evictLocked(ctx, homeTLFBlocksEach-publicHomeEvict)
	require.NoError(t, err)
	// The private home is still preferred over the public home.
	require.Equal(t, homeTLFBlocksEach, cache.syncCache.tlfCounts[homeTLF])
	require.Equal(t, 0, cache.syncCache.tlfCounts[homePublicTLF])

	t.Log("Evict enough blocks to get rid of the private home TLF.")
	_, _, err = cache.syncCache.evictLocked(ctx, homeTLFBlocksEach)
	require.NoError(t, err)
	require.Equal(t, 0, cache.syncCache.tlfCounts[homeTLF])
	require.Equal(t, 0, cache.syncCache.tlfCounts[homePublicTLF])
}
   912  
   913  func TestDiskBlockCacheMark(t *testing.T) {
   914  	t.Parallel()
   915  	t.Log("Test that basic disk cache marking and deleting work.")
   916  	cache, config := initDiskBlockCacheTest(t)
   917  	defer shutdownDiskBlockCacheTest(cache)
   918  	standardCache := cache.syncCache
   919  	ctx := context.Background()
   920  
   921  	t.Log("Insert lots of blocks.")
   922  	numTlfs := 3
   923  	numBlocksPerTlf := 5
   924  	numBlocks := numTlfs * numBlocksPerTlf
   925  	seedDiskBlockCacheForTest(ctx, t, cache, config, numTlfs, numBlocksPerTlf)
   926  	require.Equal(t, numBlocks, standardCache.numBlocks)
   927  
   928  	t.Log("Generate some blocks we can mark.")
   929  	tlfID := tlf.FakeID(1, tlf.Private)
   930  	ids := make([]kbfsblock.ID, numBlocksPerTlf)
   931  	for i := 0; i < numBlocksPerTlf; i++ {
   932  		blockPtr, _, blockEncoded, serverHalf := setupBlockForDiskCache(
   933  			t, config)
   934  		err := cache.Put(
   935  			ctx, tlfID, blockPtr.ID, blockEncoded, serverHalf,
   936  			DiskBlockSyncCache)
   937  		require.NoError(t, err)
   938  		ids[i] = blockPtr.ID
   939  	}
   940  	numBlocks += numBlocksPerTlf
   941  	require.Equal(t, numBlocks, standardCache.numBlocks)
   942  
   943  	t.Log("Mark a couple blocks.")
   944  	tag := "mark"
   945  	err := cache.Mark(ctx, ids[1], tag, DiskBlockSyncCache)
   946  	require.NoError(t, err)
   947  	err = cache.Mark(ctx, ids[3], tag, DiskBlockSyncCache)
   948  	require.NoError(t, err)
   949  
   950  	t.Log("Delete all unmarked blocks.")
   951  	standardCache.clearTickerDuration = 0
   952  	standardCache.numUnmarkedBlocksToCheck = 1
   953  	err = cache.DeleteUnmarked(ctx, tlfID, tag, DiskBlockSyncCache)
   954  	require.NoError(t, err)
   955  	require.Equal(t, numBlocks-(2*numBlocksPerTlf-2), standardCache.numBlocks)
   956  	_, _, _, err = cache.Get(ctx, tlfID, ids[0], DiskBlockAnyCache)
   957  	require.EqualError(t, err, data.NoSuchBlockError{ID: ids[0]}.Error())
   958  	_, _, _, err = cache.Get(ctx, tlfID, ids[2], DiskBlockAnyCache)
   959  	require.EqualError(t, err, data.NoSuchBlockError{ID: ids[2]}.Error())
   960  	_, _, _, err = cache.Get(ctx, tlfID, ids[4], DiskBlockAnyCache)
   961  	require.EqualError(t, err, data.NoSuchBlockError{ID: ids[4]}.Error())
   962  }
   963  
   964  func TestDiskBlockCacheRemoveBrokenBlocks(t *testing.T) {
   965  	t.Parallel()
   966  	t.Log("Test that blocks with corrupt metadata are removed.")
   967  	cache, config := initDiskBlockCacheTest(t)
   968  	defer shutdownDiskBlockCacheTest(cache)
   969  	wsCache := cache.workingSetCache
   970  	ctx := context.Background()
   971  
   972  	t.Log("Add one block, then corrupt its metadata.")
   973  	tlfID := tlf.FakeID(1, tlf.Private)
   974  	blockPtr1, _, blockEncoded1, serverHalf1 := setupBlockForDiskCache(
   975  		t, config)
   976  	err := cache.Put(
   977  		ctx, tlfID, blockPtr1.ID, blockEncoded1, serverHalf1,
   978  		DiskBlockWorkingSetCache)
   979  	require.NoError(t, err)
   980  	require.Equal(t, 1, wsCache.numBlocks)
   981  
   982  	err = wsCache.metaDb.Delete(blockPtr1.ID.Bytes(), nil)
   983  	require.NoError(t, err)
   984  
   985  	t.Log("Make the cache full, and put a new block, which should succeed " +
   986  		"and will remove the broken block.")
   987  	currBytes := int64(cache.workingSetCache.currBytes)
   988  	limiter := config.DiskLimiter().(*backpressureDiskLimiter)
   989  	limiter.diskCacheByteTracker.limit = currBytes
   990  
   991  	blockPtr2, _, blockEncoded2, serverHalf2 := setupBlockForDiskCache(
   992  		t, config)
   993  	err = cache.Put(
   994  		ctx, tlfID, blockPtr2.ID, blockEncoded2, serverHalf2,
   995  		DiskBlockWorkingSetCache)
   996  	require.NoError(t, err)
   997  	require.Equal(t, 1, wsCache.numBlocks)
   998  }