github.com/keybase/client/go@v0.0.0-20241007131713-f10651d043c8/kbfs/libkbfs/block_journal_test.go (about)

     1  // Copyright 2016 Keybase Inc. All rights reserved.
     2  // Use of this source code is governed by a BSD
     3  // license that can be found in the LICENSE file.
     4  
     5  package libkbfs
     6  
     7  import (
     8  	"math"
     9  	"os"
    10  	"testing"
    11  
    12  	kbfsdata "github.com/keybase/client/go/kbfs/data"
    13  	"github.com/keybase/client/go/kbfs/ioutil"
    14  	"github.com/keybase/client/go/kbfs/kbfsblock"
    15  	"github.com/keybase/client/go/kbfs/kbfscodec"
    16  	"github.com/keybase/client/go/kbfs/kbfscrypto"
    17  	"github.com/keybase/client/go/kbfs/kbfsmd"
    18  	"github.com/keybase/client/go/kbfs/tlf"
    19  	"github.com/keybase/client/go/libkb"
    20  	"github.com/keybase/client/go/logger"
    21  	"github.com/keybase/client/go/protocol/keybase1"
    22  	"github.com/keybase/go-codec/codec"
    23  	"github.com/stretchr/testify/assert"
    24  	"github.com/stretchr/testify/require"
    25  	"golang.org/x/net/context"
    26  )
    27  
// blockJournalEntryFuture is the "future" version of
// blockJournalEntry: the current struct plus kbfscodec.Extra, so
// tests can simulate decoding an entry that carries unknown fields
// written by a newer client version.
type blockJournalEntryFuture struct {
	blockJournalEntry
	kbfscodec.Extra
}
    32  
// toCurrent strips the extra future fields and returns the embedded
// current-version blockJournalEntry.
func (ef blockJournalEntryFuture) toCurrent() blockJournalEntry {
	return ef.blockJournalEntry
}
    36  
// ToCurrentStruct returns the current-version equivalent of this
// future struct as a kbfscodec.CurrentStruct, for use with
// testStructUnknownFields.
func (ef blockJournalEntryFuture) ToCurrentStruct() kbfscodec.CurrentStruct {
	return ef.toCurrent()
}
    40  
    41  func makeFakeBlockJournalEntryFuture(t *testing.T) blockJournalEntryFuture {
    42  	ef := blockJournalEntryFuture{
    43  		blockJournalEntry{
    44  			blockPutOp,
    45  			kbfsblock.ContextMap{
    46  				kbfsblock.FakeID(1): {
    47  					makeFakeBlockContext(t),
    48  					makeFakeBlockContext(t),
    49  					makeFakeBlockContext(t),
    50  				},
    51  			},
    52  			kbfsmd.RevisionInitial,
    53  			false,
    54  			nil,
    55  			false,
    56  			false,
    57  			codec.UnknownFieldSetHandler{},
    58  		},
    59  		kbfscodec.MakeExtraOrBust("blockJournalEntry", t),
    60  	}
    61  	return ef
    62  }
    63  
// TestBlockJournalEntryUnknownFields exercises testStructUnknownFields
// with a fake future-version entry to check blockJournalEntry's
// unknown-field handling.
func TestBlockJournalEntryUnknownFields(t *testing.T) {
	testStructUnknownFields(t, makeFakeBlockJournalEntryFuture(t))
}
    67  
    68  func TestSaturateAdd(t *testing.T) {
    69  	var x int64
    70  	saturateAdd(&x, math.MaxInt64-1)
    71  	require.Equal(t, int64(math.MaxInt64-1), x)
    72  	saturateAdd(&x, math.MaxInt64-1)
    73  	require.Equal(t, int64(math.MaxInt64), x)
    74  	saturateAdd(&x, math.MinInt64+2)
    75  	require.Equal(t, int64(1), x)
    76  	saturateAdd(&x, math.MinInt64)
    77  	require.Equal(t, int64(0), x)
    78  
    79  	x = math.MinInt64
    80  	saturateAdd(&x, math.MinInt64)
    81  	require.Equal(t, int64(0), x)
    82  
    83  	x = -1
    84  	saturateAdd(&x, math.MinInt64)
    85  	require.Equal(t, int64(0), x)
    86  
    87  	x = -1
    88  	saturateAdd(&x, 5)
    89  	require.Equal(t, int64(5), x)
    90  
    91  	x = -1
    92  	saturateAdd(&x, 0)
    93  	require.Equal(t, int64(0), x)
    94  }
    95  
    96  func setupBlockJournalTest(t *testing.T) (
    97  	ctx context.Context, cancel context.CancelFunc, tempdir string,
    98  	log logger.Logger, j *blockJournal) {
    99  	codec := kbfscodec.NewMsgpack()
   100  	log = logger.NewTestLogger(t)
   101  
   102  	tempdir, err := ioutil.TempDir(os.TempDir(), "block_journal")
   103  	require.NoError(t, err)
   104  
   105  	// Clean up the tempdir if the rest of the setup fails.
   106  	setupSucceeded := false
   107  	defer func() {
   108  		if !setupSucceeded {
   109  			err := ioutil.RemoveAll(tempdir)
   110  			assert.NoError(t, err)
   111  		}
   112  	}()
   113  
   114  	ctx, cancel = context.WithTimeout(
   115  		context.Background(), individualTestTimeout)
   116  
   117  	// Clean up the context if the rest of the setup fails.
   118  	defer func() {
   119  		if !setupSucceeded {
   120  			cancel()
   121  		}
   122  	}()
   123  
   124  	j, err = makeBlockJournal(ctx, codec, tempdir, log, libkb.NewVDebugLog(log))
   125  	require.NoError(t, err)
   126  	require.Equal(t, uint64(0), j.length())
   127  
   128  	setupSucceeded = true
   129  	return ctx, cancel, tempdir, log, j
   130  }
   131  
   132  func teardownBlockJournalTest(ctx context.Context, t *testing.T,
   133  	cancel context.CancelFunc, tempdir string, j *blockJournal) {
   134  	cancel()
   135  
   136  	err := j.checkInSyncForTest()
   137  	assert.NoError(t, err)
   138  
   139  	err = ioutil.RemoveAll(tempdir)
   140  	assert.NoError(t, err)
   141  }
   142  
   143  func putBlockData(
   144  	ctx context.Context, t *testing.T, j *blockJournal, data []byte) (
   145  	kbfsblock.ID, kbfsblock.Context, kbfscrypto.BlockCryptKeyServerHalf) {
   146  	oldLength := j.length()
   147  
   148  	bID, err := kbfsblock.MakePermanentID(
   149  		data, kbfscrypto.EncryptionSecretboxWithKeyNonce)
   150  	require.NoError(t, err)
   151  
   152  	uid1 := keybase1.MakeTestUID(1)
   153  	bCtx := kbfsblock.MakeFirstContext(
   154  		uid1.AsUserOrTeam(), keybase1.BlockType_DATA)
   155  	serverHalf, err := kbfscrypto.MakeRandomBlockCryptKeyServerHalf()
   156  	require.NoError(t, err)
   157  
   158  	putData, err := j.putBlockData(ctx, bID, bCtx, data, serverHalf)
   159  	require.NoError(t, err)
   160  	require.True(t, putData)
   161  	err = j.appendBlock(ctx, bID, bCtx, int64(len(data)))
   162  	require.NoError(t, err)
   163  
   164  	require.Equal(t, oldLength+1, j.length())
   165  
   166  	return bID, bCtx, serverHalf
   167  }
   168  
   169  func addBlockRef(
   170  	ctx context.Context, t *testing.T, j *blockJournal,
   171  	bID kbfsblock.ID) kbfsblock.Context {
   172  	oldLength := j.length()
   173  
   174  	nonce, err := kbfsblock.MakeRefNonce()
   175  	require.NoError(t, err)
   176  
   177  	uid1 := keybase1.MakeTestUID(1)
   178  	uid2 := keybase1.MakeTestUID(2)
   179  	bCtx2 := kbfsblock.MakeContext(
   180  		uid1.AsUserOrTeam(), uid2.AsUserOrTeam(), nonce,
   181  		keybase1.BlockType_DATA)
   182  	err = j.addReference(ctx, bID, bCtx2)
   183  	require.NoError(t, err)
   184  	require.Equal(t, oldLength+1, j.length())
   185  	return bCtx2
   186  }
   187  
   188  func getAndCheckBlockData(ctx context.Context, t *testing.T, j *blockJournal,
   189  	bID kbfsblock.ID, bCtx kbfsblock.Context, expectedData []byte,
   190  	expectedServerHalf kbfscrypto.BlockCryptKeyServerHalf) {
   191  	data, serverHalf, err := j.getDataWithContext(ctx, bID, bCtx)
   192  	require.NoError(t, err)
   193  	require.Equal(t, expectedData, data)
   194  	require.Equal(t, expectedServerHalf, serverHalf)
   195  }
   196  
   197  func TestBlockJournalBasic(t *testing.T) {
   198  	ctx, cancel, tempdir, _, j := setupBlockJournalTest(t)
   199  	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)
   200  
   201  	// Put the block.
   202  	data := []byte{1, 2, 3, 4}
   203  	bID, bCtx, serverHalf := putBlockData(ctx, t, j, data)
   204  
   205  	// Make sure we get the same block back.
   206  	getAndCheckBlockData(ctx, t, j, bID, bCtx, data, serverHalf)
   207  
   208  	// Add a reference.
   209  	bCtx2 := addBlockRef(ctx, t, j, bID)
   210  
   211  	// Make sure we get the same block via that reference.
   212  	getAndCheckBlockData(ctx, t, j, bID, bCtx2, data, serverHalf)
   213  
   214  	// Shutdown and restart.
   215  	err := j.checkInSyncForTest()
   216  	require.NoError(t, err)
   217  	j, err = makeBlockJournal(ctx, j.codec, tempdir, j.log, j.vlog)
   218  	require.NoError(t, err)
   219  
   220  	require.Equal(t, uint64(2), j.length())
   221  
   222  	// Make sure we get the same block for both refs.
   223  
   224  	getAndCheckBlockData(ctx, t, j, bID, bCtx, data, serverHalf)
   225  	getAndCheckBlockData(ctx, t, j, bID, bCtx2, data, serverHalf)
   226  }
   227  
// TestBlockJournalDuplicatePut checks that putting the same block
// twice adds a second journal entry but counts the block's bytes and
// files only once in the stored/unflushed accounting.
func TestBlockJournalDuplicatePut(t *testing.T) {
	ctx, cancel, tempdir, _, j := setupBlockJournalTest(t)
	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)

	data := []byte{1, 2, 3, 4}

	oldLength := j.length()

	bID, err := kbfsblock.MakePermanentID(
		data, kbfscrypto.EncryptionSecretboxWithKeyNonce)
	require.NoError(t, err)

	uid1 := keybase1.MakeTestUID(1)
	bCtx := kbfsblock.MakeFirstContext(
		uid1.AsUserOrTeam(), keybase1.BlockType_DATA)
	serverHalf, err := kbfscrypto.MakeRandomBlockCryptKeyServerHalf()
	require.NoError(t, err)

	// First put: the data is new, so putBlockData reports true.
	putData, err := j.putBlockData(ctx, bID, bCtx, data, serverHalf)
	require.NoError(t, err)
	require.True(t, putData)
	err = j.appendBlock(ctx, bID, bCtx, int64(len(data)))
	require.NoError(t, err)

	require.Equal(t, int64(len(data)), j.getStoredBytes())
	require.Equal(t, int64(len(data)), j.getUnflushedBytes())
	require.Equal(t, int64(filesPerBlockMax), j.getStoredFiles())

	// Put a second time.  putBlockData reports false this time, and
	// the entry is appended with a byte count of 0.
	putData, err = j.putBlockData(ctx, bID, bCtx, data, serverHalf)
	require.NoError(t, err)
	require.False(t, putData)
	err = j.appendBlock(ctx, bID, bCtx, 0)
	require.NoError(t, err)

	require.Equal(t, oldLength+2, j.length())

	// Shouldn't count the block twice.
	require.Equal(t, int64(len(data)), j.getStoredBytes())
	require.Equal(t, int64(len(data)), j.getUnflushedBytes())
	require.Equal(t, int64(filesPerBlockMax), j.getStoredFiles())
}
   270  
   271  func TestBlockJournalAddReference(t *testing.T) {
   272  	ctx, cancel, tempdir, _, j := setupBlockJournalTest(t)
   273  	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)
   274  
   275  	data := []byte{1, 2, 3, 4}
   276  	bID, err := kbfsblock.MakePermanentID(
   277  		data, kbfscrypto.EncryptionSecretboxWithKeyNonce)
   278  	require.NoError(t, err)
   279  
   280  	// Add a reference, which should succeed.
   281  	bCtx := addBlockRef(ctx, t, j, bID)
   282  
   283  	// Of course, the block get should still fail.
   284  	_, _, err = j.getDataWithContext(ctx, bID, bCtx)
   285  	require.Equal(t, blockNonExistentError{bID}, err)
   286  }
   287  
   288  func TestBlockJournalArchiveReferences(t *testing.T) {
   289  	ctx, cancel, tempdir, _, j := setupBlockJournalTest(t)
   290  	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)
   291  
   292  	// Put the block.
   293  	data := []byte{1, 2, 3, 4}
   294  	bID, bCtx, serverHalf := putBlockData(ctx, t, j, data)
   295  
   296  	// Add a reference.
   297  	bCtx2 := addBlockRef(ctx, t, j, bID)
   298  
   299  	// Archive references.
   300  	err := j.archiveReferences(
   301  		ctx, kbfsblock.ContextMap{bID: {bCtx, bCtx2}})
   302  	require.NoError(t, err)
   303  	require.Equal(t, uint64(3), j.length())
   304  
   305  	// Get block should still succeed.
   306  	getAndCheckBlockData(ctx, t, j, bID, bCtx, data, serverHalf)
   307  }
   308  
   309  func TestBlockJournalArchiveNonExistentReference(t *testing.T) {
   310  	ctx, cancel, tempdir, _, j := setupBlockJournalTest(t)
   311  	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)
   312  
   313  	uid1 := keybase1.MakeTestUID(1)
   314  
   315  	bCtx := kbfsblock.MakeFirstContext(
   316  		uid1.AsUserOrTeam(), keybase1.BlockType_DATA)
   317  
   318  	data := []byte{1, 2, 3, 4}
   319  	bID, err := kbfsblock.MakePermanentID(
   320  		data, kbfscrypto.EncryptionSecretboxWithKeyNonce)
   321  	require.NoError(t, err)
   322  
   323  	// Archive references.
   324  	err = j.archiveReferences(
   325  		ctx, kbfsblock.ContextMap{bID: {bCtx}})
   326  	require.NoError(t, err)
   327  }
   328  
   329  func TestBlockJournalRemoveReferences(t *testing.T) {
   330  	ctx, cancel, tempdir, _, j := setupBlockJournalTest(t)
   331  	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)
   332  
   333  	// Put the block.
   334  	data := []byte{1, 2, 3, 4}
   335  	bID, bCtx, serverHalf := putBlockData(ctx, t, j, data)
   336  
   337  	// Add a reference.
   338  	bCtx2 := addBlockRef(ctx, t, j, bID)
   339  
   340  	// Remove references.
   341  	liveCounts, err := j.removeReferences(
   342  		ctx, kbfsblock.ContextMap{bID: {bCtx, bCtx2}})
   343  	require.NoError(t, err)
   344  	require.Equal(t, map[kbfsblock.ID]int{bID: 0}, liveCounts)
   345  	require.Equal(t, uint64(3), j.length())
   346  
   347  	// Make sure the block data is inaccessible.
   348  	_, _, err = j.getDataWithContext(ctx, bID, bCtx)
   349  	require.Equal(t, blockNonExistentError{bID}, err)
   350  
   351  	// But the actual data should remain (for flushing).
   352  	buf, half, err := j.getData(ctx, bID)
   353  	require.NoError(t, err)
   354  	require.Equal(t, data, buf)
   355  	require.Equal(t, serverHalf, half)
   356  }
   357  
// TestBlockJournalDuplicateRemove checks that removing the same
// block twice only deducts its bytes and files from the stored
// accounting once.
func TestBlockJournalDuplicateRemove(t *testing.T) {
	ctx, cancel, tempdir, _, j := setupBlockJournalTest(t)
	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)

	// Put the block.
	data := []byte{1, 2, 3, 4}
	dataLen := int64(len(data))
	bID, bCtx, _ := putBlockData(ctx, t, j, data)

	require.Equal(t, dataLen, j.getStoredBytes())
	require.Equal(t, dataLen, j.getUnflushedBytes())
	require.Equal(t, int64(filesPerBlockMax), j.getStoredFiles())

	// Remove the only reference to the block, then remove the
	// block.
	liveCounts, err := j.removeReferences(
		ctx, kbfsblock.ContextMap{bID: {bCtx}})
	require.NoError(t, err)
	require.Equal(t, map[kbfsblock.ID]int{bID: 0}, liveCounts)
	removedBytes, removedFiles, err := j.remove(ctx, bID)
	require.NoError(t, err)
	require.Equal(t, dataLen, removedBytes)
	require.Equal(t, int64(filesPerBlockMax), removedFiles)
	err = j.unstoreBlocks(removedBytes, removedFiles)
	require.NoError(t, err)

	// This violates the invariant that UnflushedBytes <=
	// StoredBytes, but that's because we're manually removing the
	// block -- normally, the block would be flushed first, then
	// removed.
	require.Equal(t, int64(0), j.getStoredBytes())
	require.Equal(t, dataLen, j.getUnflushedBytes())
	require.Equal(t, int64(0), j.getStoredFiles())

	// Remove the block again; the second remove reports zero bytes
	// and files.
	removedBytes, removedFiles, err = j.remove(ctx, bID)
	require.NoError(t, err)
	require.Equal(t, int64(0), removedBytes)
	require.Equal(t, int64(0), removedFiles)

	// Shouldn't account for the block again.
	require.Equal(t, int64(0), j.getStoredBytes())
	require.Equal(t, dataLen, j.getUnflushedBytes())
	require.Equal(t, int64(0), j.getStoredFiles())
}
   403  
   404  func testBlockJournalGCd(t *testing.T, j *blockJournal) {
   405  	// None of these dirs should exist.
   406  	for _, file := range j.blockJournalFiles() {
   407  		_, err := ioutil.Stat(file)
   408  		require.True(t, ioutil.IsNotExist(err))
   409  	}
   410  
   411  	require.True(t, j.empty())
   412  	require.Equal(t, blockAggregateInfo{}, j.aggregateInfo)
   413  }
   414  
   415  func goGCForTest(ctx context.Context, t *testing.T, j *blockJournal) (
   416  	int64, int64) {
   417  	length, earliest, latest, err := j.getDeferredGCRange()
   418  	require.NoError(t, err)
   419  	if length == 0 {
   420  		return 0, 0
   421  	}
   422  	removedBytes, removedFiles, err := j.doGC(ctx, earliest, latest)
   423  	require.NoError(t, err)
   424  	_, _, err = j.clearDeferredGCRange(
   425  		ctx, removedBytes, removedFiles, earliest, latest)
   426  	require.NoError(t, err)
   427  	return removedBytes, removedFiles
   428  }
   429  
// TestBlockJournalFlush does an end-to-end flush test: put a block,
// add and archive references, flush everything to an in-memory block
// server, then remove all references and verify both the server
// contents and that the journal ends up fully garbage-collected.
func TestBlockJournalFlush(t *testing.T) {
	ctx, cancel, tempdir, log, j := setupBlockJournalTest(t)
	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)

	// Put a block.

	data := []byte{1, 2, 3, 4}
	bID, bCtx, serverHalf := putBlockData(ctx, t, j, data)

	// Add some references.

	bCtx2 := addBlockRef(ctx, t, j, bID)
	bCtx3 := addBlockRef(ctx, t, j, bID)

	// Archive one of the references.

	err := j.archiveReferences(
		ctx, kbfsblock.ContextMap{
			bID: {bCtx3},
		})
	require.NoError(t, err)

	blockServer := NewBlockServerMemory(log)

	tlfID := tlf.FakeID(1, tlf.Private)

	bcache := kbfsdata.NewBlockCacheStandard(0, 0)
	reporter := NewReporterSimple(nil, 0)

	// flush drains the whole journal to blockServer and runs a GC
	// pass, returning the flushed and GC-removed byte/file counts.
	flush := func() (flushedBytes, removedBytes, removedFiles int64) {
		end, err := j.end()
		require.NoError(t, err)
		if end == 0 {
			return 0, 0, 0
		}

		// Test that the end parameter is respected.
		var partialEntries blockEntriesToFlush
		var rev kbfsmd.Revision
		if end > firstValidJournalOrdinal+1 {
			partialEntries, _, rev, err = j.getNextEntriesToFlush(
				ctx, end-1, maxJournalBlockFlushBatchSize, kbfsmd.ID{})
			require.NoError(t, err)
			require.Equal(t, rev, kbfsmd.RevisionUninitialized)
		}

		entries, b, rev, err := j.getNextEntriesToFlush(ctx, end,
			maxJournalBlockFlushBatchSize, kbfsmd.ID{})
		require.NoError(t, err)
		require.Equal(t, partialEntries.length()+1, entries.length())
		require.Equal(t, rev, kbfsmd.RevisionUninitialized)

		err = flushBlockEntries(
			ctx, j.log, j.deferLog, blockServer, bcache, reporter,
			tlfID, tlf.CanonicalName("fake TLF"), entries, DiskBlockAnyCache)
		require.NoError(t, err)

		flushedBytes, err = j.removeFlushedEntries(
			ctx, entries, tlfID, reporter)
		require.NoError(t, err)
		require.Equal(t, b, flushedBytes)

		removedBytes, removedFiles = goGCForTest(ctx, t, j)
		return flushedBytes, removedBytes, removedFiles
	}

	// Flushing all the reference adds should flush and remove the
	// (now-unreferenced) block.
	flushedBytes, removedBytes, removedFiles := flush()
	require.Equal(t, int64(len(data)), flushedBytes)
	require.Equal(t, int64(len(data)), removedBytes)
	require.Equal(t, int64(filesPerBlockMax), removedFiles)

	// Check the Put.
	buf, key, err := blockServer.Get(ctx, tlfID, bID, bCtx, DiskBlockAnyCache)
	require.NoError(t, err)
	require.Equal(t, data, buf)
	require.Equal(t, serverHalf, key)

	// Check the AddReference.
	buf, key, err = blockServer.Get(ctx, tlfID, bID, bCtx2, DiskBlockAnyCache)
	require.NoError(t, err)
	require.Equal(t, data, buf)
	require.Equal(t, serverHalf, key)

	// Check the archiving.
	buf, key, err = blockServer.Get(ctx, tlfID, bID, bCtx3, DiskBlockAnyCache)
	require.NoError(t, err)
	require.Equal(t, data, buf)
	require.Equal(t, serverHalf, key)

	// Now remove all the references.
	liveCounts, err := j.removeReferences(
		ctx, kbfsblock.ContextMap{
			bID: {bCtx, bCtx2, bCtx3},
		})
	require.NoError(t, err)
	require.Equal(t, map[kbfsblock.ID]int{bID: 0}, liveCounts)

	// The removals flush no bytes (the data was already flushed).
	flushedBytes, removedBytes, removedFiles = flush()
	require.Equal(t, int64(0), flushedBytes)
	require.Equal(t, int64(0), removedBytes)
	require.Equal(t, int64(0), removedFiles)

	// Check they're all gone.
	_, _, err = blockServer.Get(ctx, tlfID, bID, bCtx, DiskBlockAnyCache)
	require.IsType(t, kbfsblock.ServerErrorBlockNonExistent{}, err)
	_, _, err = blockServer.Get(ctx, tlfID, bID, bCtx2, DiskBlockAnyCache)
	require.IsType(t, kbfsblock.ServerErrorBlockNonExistent{}, err)
	_, _, err = blockServer.Get(ctx, tlfID, bID, bCtx3, DiskBlockAnyCache)
	require.IsType(t, kbfsblock.ServerErrorBlockNonExistent{}, err)

	length := j.length()
	require.Equal(t, uint64(0), length)

	// Make sure the ordinals and blocks are flushed.
	testBlockJournalGCd(t, j)
}
   548  
   549  func flushBlockJournalOne(ctx context.Context, t *testing.T,
   550  	j *blockJournal, blockServer BlockServer,
   551  	bcache kbfsdata.BlockCache, reporter Reporter, tlfID tlf.ID) (
   552  	flushedBytes, removedFiles, removedBytes int64) {
   553  	first, err := j.j.readEarliestOrdinal()
   554  	require.NoError(t, err)
   555  	entries, b, _, err := j.getNextEntriesToFlush(ctx, first+1,
   556  		maxJournalBlockFlushBatchSize, kbfsmd.ID{})
   557  	require.NoError(t, err)
   558  	require.Equal(t, 1, entries.length())
   559  	err = flushBlockEntries(ctx, j.log, j.deferLog, blockServer,
   560  		bcache, reporter, tlfID, tlf.CanonicalName("fake TLF"),
   561  		entries, DiskBlockAnyCache)
   562  	require.NoError(t, err)
   563  	flushedBytes, err = j.removeFlushedEntries(
   564  		ctx, entries, tlfID, reporter)
   565  	require.NoError(t, err)
   566  	require.Equal(t, b, flushedBytes)
   567  
   568  	removedBytes, removedFiles = goGCForTest(ctx, t, j)
   569  	require.NoError(t, err)
   570  
   571  	err = j.checkInSyncForTest()
   572  	require.NoError(t, err)
   573  	return flushedBytes, removedBytes, removedFiles
   574  }
   575  
// TestBlockJournalFlushInterleaved interleaves single-entry flushes
// with reference removals and archivals, checking the block server's
// contents and the flushed/removed byte and file counts after each
// step, and that the journal is fully GCd at the end.
func TestBlockJournalFlushInterleaved(t *testing.T) {
	ctx, cancel, tempdir, log, j := setupBlockJournalTest(t)
	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)

	// Put a block.

	data := []byte{1, 2, 3, 4}
	bID, bCtx, serverHalf := putBlockData(ctx, t, j, data)

	// Add some references.

	bCtx2 := addBlockRef(ctx, t, j, bID)
	bCtx3 := addBlockRef(ctx, t, j, bID)

	// Flush the block put. (Interleave flushes to test
	// checkInSync in intermediate states.)

	blockServer := NewBlockServerMemory(log)

	tlfID := tlf.FakeID(1, tlf.Private)

	bcache := kbfsdata.NewBlockCacheStandard(0, 0)
	reporter := NewReporterSimple(nil, 0)

	// flushOne flushes exactly one journal entry.
	flushOne := func() (int64, int64, int64) {
		return flushBlockJournalOne(
			ctx, t, j, blockServer, bcache, reporter, tlfID)
	}

	flushedBytes, removedBytes, removedFiles := flushOne()
	require.Equal(t, int64(len(data)), flushedBytes)
	require.Equal(t, int64(0), removedBytes)
	require.Equal(t, int64(0), removedFiles)

	buf, key, err := blockServer.Get(ctx, tlfID, bID, bCtx, DiskBlockAnyCache)
	require.NoError(t, err)
	require.Equal(t, data, buf)
	require.Equal(t, serverHalf, key)

	// Remove some references.

	liveCounts, err := j.removeReferences(
		ctx, kbfsblock.ContextMap{
			bID: {bCtx, bCtx2},
		})
	require.NoError(t, err)
	require.Equal(t, map[kbfsblock.ID]int{bID: 1}, liveCounts)

	// Flush the reference adds.

	// flushOneZero flushes one entry that is expected to move and
	// remove zero bytes and files.
	flushOneZero := func() {
		flushedBytes, removedBytes, removedFiles := flushOne()
		require.Equal(t, int64(0), flushedBytes)
		require.Equal(t, int64(0), removedBytes)
		require.Equal(t, int64(0), removedFiles)
	}

	flushOneZero()

	buf, key, err = blockServer.Get(ctx, tlfID, bID, bCtx2, DiskBlockAnyCache)
	require.NoError(t, err)
	require.Equal(t, data, buf)
	require.Equal(t, serverHalf, key)

	// Flushing the last reference add should remove the
	// (now-unreferenced) block.
	flushedBytes, removedBytes, removedFiles = flushOne()
	require.Equal(t, int64(0), flushedBytes)
	require.Equal(t, int64(len(data)), removedBytes)
	require.Equal(t, int64(filesPerBlockMax), removedFiles)

	buf, key, err = blockServer.Get(ctx, tlfID, bID, bCtx3, DiskBlockAnyCache)
	require.NoError(t, err)
	require.Equal(t, data, buf)
	require.Equal(t, serverHalf, key)

	// Archive the rest.

	err = j.archiveReferences(
		ctx, kbfsblock.ContextMap{
			bID: {bCtx3},
		})
	require.NoError(t, err)

	// Flush the reference removals.

	flushOneZero()

	_, _, err = blockServer.Get(ctx, tlfID, bID, bCtx, DiskBlockAnyCache)
	require.IsType(t, kbfsblock.ServerErrorBlockNonExistent{}, err)

	_, _, err = blockServer.Get(ctx, tlfID, bID, bCtx2, DiskBlockAnyCache)
	require.IsType(t, kbfsblock.ServerErrorBlockNonExistent{}, err)

	buf, key, err = blockServer.Get(ctx, tlfID, bID, bCtx3, DiskBlockAnyCache)
	require.NoError(t, err)
	require.Equal(t, data, buf)
	require.Equal(t, serverHalf, key)

	// Remove the archived references.

	liveCounts, err = j.removeReferences(
		ctx, kbfsblock.ContextMap{
			bID: {bCtx3},
		})
	require.NoError(t, err)
	require.Equal(t, map[kbfsblock.ID]int{bID: 0}, liveCounts)

	// Flush the reference archival.

	flushOneZero()

	buf, key, err = blockServer.Get(ctx, tlfID, bID, bCtx3, DiskBlockAnyCache)
	require.NoError(t, err)
	require.Equal(t, data, buf)
	require.Equal(t, serverHalf, key)

	// Flush the last removal.

	flushOneZero()

	_, _, err = blockServer.Get(ctx, tlfID, bID, bCtx3, DiskBlockAnyCache)
	require.IsType(t, kbfsblock.ServerErrorBlockNonExistent{}, err)

	// The journal should now have nothing left to flush.
	end, err := j.end()
	require.NoError(t, err)
	entries, b, _, err := j.getNextEntriesToFlush(ctx, end,
		maxJournalBlockFlushBatchSize, kbfsmd.ID{})
	require.NoError(t, err)
	require.Equal(t, 0, entries.length())
	require.Equal(t, int64(0), b)

	// Make sure the ordinals and blocks are flushed.
	testBlockJournalGCd(t, j)
}
   711  
// TestBlockJournalFlushMDRevMarker checks that getNextEntriesToFlush
// allows flushing through an MD revision marker and reports the
// marker's revision.
func TestBlockJournalFlushMDRevMarker(t *testing.T) {
	ctx, cancel, tempdir, log, j := setupBlockJournalTest(t)
	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)

	// Put a block.

	data := []byte{1, 2, 3, 4}
	putBlockData(ctx, t, j, data)

	// Put a revision marker
	rev := kbfsmd.Revision(10)
	err := j.markMDRevision(ctx, rev, kbfsmd.ID{}, false)
	require.NoError(t, err)

	blockServer := NewBlockServerMemory(log)
	tlfID := tlf.FakeID(1, tlf.Private)
	bcache := kbfsdata.NewBlockCacheStandard(0, 0)
	reporter := NewReporterSimple(nil, 0)

	// Make sure the block journal reports that entries up to `rev`
	// can be flushed.
	last, err := j.j.readLatestOrdinal()
	require.NoError(t, err)
	entries, b, gotRev, err := j.getNextEntriesToFlush(ctx, last+1,
		maxJournalBlockFlushBatchSize, kbfsmd.ID{})
	require.NoError(t, err)
	require.Equal(t, rev, gotRev)
	// Two entries: the block put and the revision marker.
	require.Equal(t, 2, entries.length())
	err = flushBlockEntries(ctx, j.log, j.deferLog, blockServer,
		bcache, reporter, tlfID, tlf.CanonicalName("fake TLF"),
		entries, DiskBlockAnyCache)
	require.NoError(t, err)
	flushedBytes, err := j.removeFlushedEntries(
		ctx, entries, tlfID, reporter)
	require.NoError(t, err)
	require.Equal(t, int64(len(data)), flushedBytes)
	require.Equal(t, b, flushedBytes)
	removedBytes, removedFiles := goGCForTest(ctx, t, j)
	// NOTE(review): err is stale here (last assigned before the GC
	// pass above), so this check is redundant.
	require.NoError(t, err)
	require.Equal(t, int64(len(data)), removedBytes)
	require.Equal(t, int64(filesPerBlockMax), removedFiles)
	err = j.checkInSyncForTest()
	require.NoError(t, err)
}
   756  
// TestBlockJournalFlushMDRevMarkerForPendingLocalSquash checks that
// ignoring blocks and MD revision markers (as for a pending local
// squash) excludes the ignored entries from the next flush while
// still GCing their bytes and files.
func TestBlockJournalFlushMDRevMarkerForPendingLocalSquash(t *testing.T) {
	ctx, cancel, tempdir, log, j := setupBlockJournalTest(t)
	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)

	// Put some blocks.

	data1 := []byte{1, 2, 3, 4}
	_, _, _ = putBlockData(ctx, t, j, data1)
	data2 := []byte{5, 6, 7, 8}
	id2, _, _ := putBlockData(ctx, t, j, data2)

	// Put a revision marker and say it's from a local squash.
	rev := kbfsmd.Revision(10)
	err := j.markMDRevision(ctx, rev, kbfsmd.ID{}, true)
	require.NoError(t, err)

	// Do another, that isn't from a local squash.
	data3 := []byte{9, 10, 11, 12}
	id3, _, _ := putBlockData(ctx, t, j, data3)
	data4 := []byte{13, 14, 15, 16}
	_, _, _ = putBlockData(ctx, t, j, data4)
	rev++
	err = j.markMDRevision(ctx, rev, kbfsmd.ID{}, false)
	require.NoError(t, err)

	// Ignore one block from each revision; only their bytes count
	// as ignored.
	ignoredBytes, err := j.ignoreBlocksAndMDRevMarkers(
		ctx, []kbfsblock.ID{id2, id3}, rev)
	require.NoError(t, err)
	require.Equal(t, int64(len(data2)+len(data3)), ignoredBytes)

	blockServer := NewBlockServerMemory(log)
	tlfID := tlf.FakeID(1, tlf.Private)
	bcache := kbfsdata.NewBlockCacheStandard(0, 0)
	reporter := NewReporterSimple(nil, 0)

	// Make sure the block journal reports that entries up to 10 can
	// be flushed; there should only be two blocks left, and one
	// revision marker (plus 2 ignored blocks and 1 ignored revision
	// marker).
	last, err := j.j.readLatestOrdinal()
	require.NoError(t, err)
	entries, b, gotRev, err := j.getNextEntriesToFlush(ctx, last+1,
		maxJournalBlockFlushBatchSize, kbfsmd.ID{})
	require.NoError(t, err)
	require.Equal(t, rev-1, gotRev)
	require.Equal(t, 6, entries.length())
	require.Equal(t, 2, entries.puts.numBlocks())
	require.Equal(t, 0, entries.adds.numBlocks())
	require.Len(t, entries.other, 4)

	err = flushBlockEntries(ctx, j.log, j.deferLog, blockServer,
		bcache, reporter, tlfID, tlf.CanonicalName("fake TLF"),
		entries, DiskBlockAnyCache)
	require.NoError(t, err)

	// Only the non-ignored blocks' bytes are flushed; GC removes
	// ignored and flushed blocks alike.
	flushedBytes, err := j.removeFlushedEntries(
		ctx, entries, tlfID, reporter)
	require.NoError(t, err)
	require.Equal(t, int64(len(data1)+len(data4)), flushedBytes)
	require.Equal(t, b, flushedBytes)
	removedBytes, removedFiles := goGCForTest(ctx, t, j)
	// NOTE(review): err is stale here (last assigned before the GC
	// pass above), so this check is redundant.
	require.NoError(t, err)
	require.Equal(t, int64(len(data1)+len(data2)+len(data3)+len(data4)),
		removedBytes)
	require.Equal(t, int64(4*filesPerBlockMax), removedFiles)

	err = j.checkInSyncForTest()
	require.NoError(t, err)
}
   826  
   827  func TestBlockJournalIgnoreBlocks(t *testing.T) {
   828  	ctx, cancel, tempdir, log, j := setupBlockJournalTest(t)
   829  	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)
   830  
   831  	// Put a few blocks
   832  	data1 := []byte{1, 2, 3}
   833  	bID1, _, _ := putBlockData(ctx, t, j, data1)
   834  
   835  	// Put a revision marker
   836  	rev := kbfsmd.Revision(9)
   837  	firstRev := rev
   838  	err := j.markMDRevision(ctx, rev, kbfsmd.ID{}, false)
   839  	require.NoError(t, err)
   840  
   841  	data2 := []byte{4, 5, 6, 7}
   842  	bID2, _, _ := putBlockData(ctx, t, j, data2)
   843  
   844  	// Put a revision marker
   845  	rev = kbfsmd.Revision(10)
   846  	err = j.markMDRevision(ctx, rev, kbfsmd.ID{}, false)
   847  	require.NoError(t, err)
   848  
   849  	data3 := []byte{8, 9, 10, 11, 12}
   850  	bID3, _, _ := putBlockData(ctx, t, j, data3)
   851  	data4 := []byte{13, 14, 15, 16, 17, 18}
   852  	bID4, _, _ := putBlockData(ctx, t, j, data4)
   853  
   854  	// Put a revision marker
   855  	rev = kbfsmd.Revision(11)
   856  	err = j.markMDRevision(ctx, rev, kbfsmd.ID{}, false)
   857  	require.NoError(t, err)
   858  
   859  	ignoredBytes, err := j.ignoreBlocksAndMDRevMarkers(
   860  		ctx, []kbfsblock.ID{bID2, bID3}, firstRev)
   861  	require.NoError(t, err)
   862  	require.Equal(t, int64(len(data2)+len(data3)), ignoredBytes)
   863  
   864  	blockServer := NewBlockServerMemory(log)
   865  	tlfID := tlf.FakeID(1, tlf.Private)
   866  	bcache := kbfsdata.NewBlockCacheStandard(0, 0)
   867  	reporter := NewReporterSimple(nil, 0)
   868  
   869  	// Flush and make sure we only flush the non-ignored blocks.
   870  	last, err := j.j.readLatestOrdinal()
   871  	require.NoError(t, err)
   872  	entries, b, gotRev, err := j.getNextEntriesToFlush(ctx, last+1,
   873  		maxJournalBlockFlushBatchSize, kbfsmd.ID{})
   874  	require.NoError(t, err)
   875  	require.Equal(t, kbfsmd.RevisionUninitialized, gotRev)
   876  	require.Equal(t, 7, entries.length())
   877  	require.Equal(t, 2, entries.puts.numBlocks())
   878  	require.Equal(t, 0, entries.adds.numBlocks())
   879  	require.Len(t, entries.other, 5)
   880  	ptrs := entries.puts.Ptrs()
   881  	ids := make([]kbfsblock.ID, len(ptrs))
   882  	for i, ptr := range ptrs {
   883  		ids[i] = ptr.ID
   884  	}
   885  	require.Contains(t, ids, bID1)
   886  	require.Contains(t, ids, bID4)
   887  	err = flushBlockEntries(ctx, j.log, j.deferLog, blockServer,
   888  		bcache, reporter, tlfID, tlf.CanonicalName("fake TLF"),
   889  		entries, DiskBlockAnyCache)
   890  	require.NoError(t, err)
   891  	flushedBytes, err := j.removeFlushedEntries(
   892  		ctx, entries, tlfID, reporter)
   893  	require.NoError(t, err)
   894  	require.Equal(t, int64(len(data1)+len(data4)), flushedBytes)
   895  	require.Equal(t, b, flushedBytes)
   896  
   897  	// Flush everything.
   898  	removedBytes, removedFiles := goGCForTest(ctx, t, j)
   899  	require.NoError(t, err)
   900  	require.Equal(t, int64(len(data1)+len(data2)+len(data3)+len(data4)),
   901  		removedBytes)
   902  	require.Equal(t, int64(4*filesPerBlockMax), removedFiles)
   903  
   904  	err = j.checkInSyncForTest()
   905  	require.NoError(t, err)
   906  }
   907  
   908  func TestBlockJournalSaveUntilMDFlush(t *testing.T) {
   909  	ctx, cancel, tempdir, log, j := setupBlockJournalTest(t)
   910  	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)
   911  
   912  	// Put a few blocks
   913  	data1 := []byte{1, 2, 3, 4}
   914  	bID1, _, _ := putBlockData(ctx, t, j, data1)
   915  	data2 := []byte{5, 6, 7, 8}
   916  	bID2, _, _ := putBlockData(ctx, t, j, data2)
   917  
   918  	// Put a revision marker
   919  	rev := kbfsmd.Revision(10)
   920  	err := j.markMDRevision(ctx, rev, kbfsmd.ID{}, false)
   921  	require.NoError(t, err)
   922  
   923  	data3 := []byte{9, 10, 11, 12}
   924  	bID3, _, _ := putBlockData(ctx, t, j, data3)
   925  	data4 := []byte{13, 14, 15, 16}
   926  	bID4, _, _ := putBlockData(ctx, t, j, data4)
   927  
   928  	// Put a revision marker
   929  	rev = kbfsmd.Revision(11)
   930  	err = j.markMDRevision(ctx, rev, kbfsmd.ID{}, false)
   931  	require.NoError(t, err)
   932  
   933  	savedBlocks := []kbfsblock.ID{bID1, bID2, bID3, bID4}
   934  
   935  	blockServer := NewBlockServerMemory(log)
   936  	tlfID := tlf.FakeID(1, tlf.Private)
   937  	bcache := kbfsdata.NewBlockCacheStandard(0, 0)
   938  	reporter := NewReporterSimple(nil, 0)
   939  
   940  	// Flush all the entries, but they should still remain accessible.
   941  	flushAll := func() int64 {
   942  		last, err := j.j.readLatestOrdinal()
   943  		require.NoError(t, err)
   944  		entries, b, _, err := j.getNextEntriesToFlush(ctx, last+1,
   945  			maxJournalBlockFlushBatchSize, kbfsmd.ID{})
   946  		require.NoError(t, err)
   947  		err = flushBlockEntries(ctx, j.log, j.deferLog, blockServer,
   948  			bcache, reporter, tlfID, tlf.CanonicalName("fake TLF"),
   949  			entries, DiskBlockAnyCache)
   950  		require.NoError(t, err)
   951  		flushedBytes, err := j.removeFlushedEntries(
   952  			ctx, entries, tlfID, reporter)
   953  		require.NoError(t, err)
   954  		require.Equal(t, b, flushedBytes)
   955  		return flushedBytes
   956  	}
   957  	flushedBytes := flushAll()
   958  	require.Equal(t, int64(len(data1)+len(data2)+len(data3)+len(data4)),
   959  		flushedBytes)
   960  
   961  	// The blocks can still be fetched from the journal.
   962  	for _, bid := range savedBlocks {
   963  		ok, err := j.hasData(ctx, bid)
   964  		require.NoError(t, err)
   965  		require.True(t, ok)
   966  	}
   967  
   968  	// No more blocks to flush though.
   969  	end, err := j.end()
   970  	require.NoError(t, err)
   971  	entries, b, gotRev, err := j.getNextEntriesToFlush(ctx, end,
   972  		maxJournalBlockFlushBatchSize, kbfsmd.ID{})
   973  	require.NoError(t, err)
   974  	require.Equal(t, 0, entries.length())
   975  	require.Equal(t, kbfsmd.RevisionUninitialized, gotRev)
   976  	require.Equal(t, int64(0), b)
   977  
   978  	// Add a few more blocks and save those too.
   979  	data5 := []byte{17, 18, 19, 20}
   980  	bID5, _, _ := putBlockData(ctx, t, j, data5)
   981  	data6 := []byte{21, 22, 23, 24}
   982  	bID6, _, _ := putBlockData(ctx, t, j, data6)
   983  	savedBlocks = append(savedBlocks, bID5, bID6)
   984  	flushedBytes = flushAll()
   985  	require.Equal(t, int64(len(data5)+len(data6)), flushedBytes)
   986  
   987  	// Make sure all the blocks still exist, including both the old
   988  	// and the new ones.
   989  	for _, bid := range savedBlocks {
   990  		ok, err := j.hasData(ctx, bid)
   991  		require.NoError(t, err)
   992  		require.True(t, ok)
   993  	}
   994  
   995  	// Now remove all the data.
   996  	var expectedBytes int64
   997  	var expectedFiles int64
   998  	for i := 0; i < len(savedBlocks)-1+2; i++ {
   999  		if i%3 != 2 {
  1000  			expectedBytes += 4
  1001  			expectedFiles += filesPerBlockMax
  1002  		}
  1003  	}
  1004  	expectedBytes += 4
  1005  	expectedFiles += filesPerBlockMax
  1006  
  1007  	removedBytes, removedFiles := goGCForTest(ctx, t, j)
  1008  	require.NoError(t, err)
  1009  	require.Equal(t, expectedBytes, removedBytes)
  1010  	require.Equal(t, expectedFiles, removedFiles)
  1011  
  1012  	ok, err := j.isUnflushed(ctx, bID1)
  1013  	require.NoError(t, err)
  1014  	require.False(t, ok)
  1015  	ok, err = j.isUnflushed(ctx, bID2)
  1016  	require.NoError(t, err)
  1017  	require.False(t, ok)
  1018  	ok, err = j.isUnflushed(ctx, bID3)
  1019  	require.NoError(t, err)
  1020  	require.False(t, ok)
  1021  	ok, err = j.isUnflushed(ctx, bID4)
  1022  	require.NoError(t, err)
  1023  	require.False(t, ok)
  1024  
  1025  	testBlockJournalGCd(t, j)
  1026  }
  1027  
  1028  func TestBlockJournalByteCounters(t *testing.T) {
  1029  	ctx, cancel, tempdir, log, j := setupBlockJournalTest(t)
  1030  	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)
  1031  
  1032  	// In this test, stored bytes and unflushed bytes should
  1033  	// change identically.
  1034  	requireCounts := func(expectedBytes, expectedFiles int) {
  1035  		require.Equal(t, int64(expectedBytes), j.getStoredBytes())
  1036  		require.Equal(t, int64(expectedBytes), j.getUnflushedBytes())
  1037  		require.Equal(t, int64(expectedFiles), j.getStoredFiles())
  1038  		var info blockAggregateInfo
  1039  		err := kbfscodec.DeserializeFromFile(
  1040  			j.codec, aggregateInfoPath(j.dir), &info)
  1041  		if !ioutil.IsNotExist(err) {
  1042  			require.NoError(t, err)
  1043  		}
  1044  		require.Equal(t, int64(expectedBytes), info.StoredBytes)
  1045  		require.Equal(t, int64(expectedBytes), info.UnflushedBytes)
  1046  		require.Equal(t, int64(expectedFiles), info.StoredFiles)
  1047  	}
  1048  
  1049  	// Prime the cache.
  1050  	requireCounts(0, 0)
  1051  
  1052  	data1 := []byte{1, 2, 3, 4}
  1053  	bID1, bCtx1, _ := putBlockData(ctx, t, j, data1)
  1054  
  1055  	requireCounts(len(data1), filesPerBlockMax)
  1056  
  1057  	data2 := []byte{1, 2, 3, 4, 5}
  1058  	bID2, bCtx2, _ := putBlockData(ctx, t, j, data2)
  1059  
  1060  	expectedSize := len(data1) + len(data2)
  1061  	requireCounts(expectedSize, 2*filesPerBlockMax)
  1062  
  1063  	// Adding, archive, or removing references shouldn't change
  1064  	// anything.
  1065  
  1066  	bCtx1b := addBlockRef(ctx, t, j, bID1)
  1067  	requireCounts(expectedSize, 2*filesPerBlockMax)
  1068  
  1069  	data3 := []byte{1, 2, 3}
  1070  	bID3, err := kbfsblock.MakePermanentID(
  1071  		data3, kbfscrypto.EncryptionSecretboxWithKeyNonce)
  1072  	require.NoError(t, err)
  1073  	_ = addBlockRef(ctx, t, j, bID3)
  1074  	require.NoError(t, err)
  1075  
  1076  	err = j.archiveReferences(
  1077  		ctx, kbfsblock.ContextMap{bID2: {bCtx2}})
  1078  	require.NoError(t, err)
  1079  	requireCounts(expectedSize, 2*filesPerBlockMax)
  1080  
  1081  	liveCounts, err := j.removeReferences(
  1082  		ctx, kbfsblock.ContextMap{bID1: {bCtx1, bCtx1b}})
  1083  	require.NoError(t, err)
  1084  	require.Equal(t, map[kbfsblock.ID]int{bID1: 0}, liveCounts)
  1085  	requireCounts(expectedSize, 2*filesPerBlockMax)
  1086  
  1087  	liveCounts, err = j.removeReferences(
  1088  		ctx, kbfsblock.ContextMap{bID2: {bCtx2}})
  1089  	require.NoError(t, err)
  1090  	require.Equal(t, map[kbfsblock.ID]int{bID2: 0}, liveCounts)
  1091  	requireCounts(expectedSize, 2*filesPerBlockMax)
  1092  
  1093  	blockServer := NewBlockServerMemory(log)
  1094  	tlfID := tlf.FakeID(1, tlf.Private)
  1095  	bcache := kbfsdata.NewBlockCacheStandard(0, 0)
  1096  	reporter := NewReporterSimple(nil, 0)
  1097  	flushOne := func() (int64, int64, int64) {
  1098  		return flushBlockJournalOne(
  1099  			ctx, t, j, blockServer, bcache, reporter, tlfID)
  1100  	}
  1101  
  1102  	// Flush the first put. This causes the block to be GCed since
  1103  	// the subsequent ops for that block remove the references.
  1104  	flushedBytes, removedBytes, removedFiles := flushOne()
  1105  	require.Equal(t, int64(len(data1)), flushedBytes)
  1106  	require.Equal(t, int64(len(data1)), removedBytes)
  1107  	require.Equal(t, int64(filesPerBlockMax), removedFiles)
  1108  	expectedSize = len(data2)
  1109  	requireCounts(expectedSize, filesPerBlockMax)
  1110  
  1111  	// Flush the second put.
  1112  	flushedBytes, removedBytes, removedFiles = flushOne()
  1113  	require.Equal(t, int64(len(data2)), flushedBytes)
  1114  	require.Equal(t, int64(len(data2)), removedBytes)
  1115  	require.Equal(t, int64(filesPerBlockMax), removedFiles)
  1116  	requireCounts(0, 0)
  1117  
  1118  	// Flush the first add ref.
  1119  
  1120  	flushOneZero := func() {
  1121  		flushedBytes, removedBytes, removedFiles := flushOne()
  1122  		require.Equal(t, int64(0), flushedBytes)
  1123  		require.Equal(t, int64(0), removedBytes)
  1124  		require.Equal(t, int64(0), removedFiles)
  1125  		requireCounts(0, 0)
  1126  	}
  1127  
  1128  	flushOneZero()
  1129  
  1130  	// Flush the second add ref, but push the block to the server
  1131  	// first.
  1132  
  1133  	uid1 := keybase1.MakeTestUID(1)
  1134  	bCtx3 := kbfsblock.MakeFirstContext(
  1135  		uid1.AsUserOrTeam(), keybase1.BlockType_DATA)
  1136  	serverHalf3, err := kbfscrypto.MakeRandomBlockCryptKeyServerHalf()
  1137  	require.NoError(t, err)
  1138  
  1139  	err = blockServer.Put(
  1140  		context.Background(), tlfID, bID3, bCtx3, data3, serverHalf3,
  1141  		DiskBlockAnyCache)
  1142  	require.NoError(t, err)
  1143  
  1144  	flushOneZero()
  1145  
  1146  	// Flush the add archive.
  1147  	flushOneZero()
  1148  
  1149  	// Flush the first remove.
  1150  	flushOneZero()
  1151  
  1152  	// Flush the second remove.
  1153  	flushOneZero()
  1154  }
  1155  
  1156  func TestBlockJournalUnflushedBytesIgnore(t *testing.T) {
  1157  	ctx, cancel, tempdir, _, j := setupBlockJournalTest(t)
  1158  	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)
  1159  
  1160  	requireCounts := func(expectedStoredBytes, expectedUnflushedBytes,
  1161  		expectedStoredFiles int) {
  1162  		require.Equal(t, int64(expectedStoredBytes), j.getStoredBytes())
  1163  		require.Equal(t, int64(expectedUnflushedBytes),
  1164  			j.getUnflushedBytes())
  1165  		require.Equal(t, int64(expectedStoredFiles), j.getStoredFiles())
  1166  	}
  1167  
  1168  	// Prime the cache.
  1169  	requireCounts(0, 0, 0)
  1170  
  1171  	data1 := []byte{1, 2, 3, 4}
  1172  	bID1, _, _ := putBlockData(ctx, t, j, data1)
  1173  
  1174  	requireCounts(len(data1), len(data1), filesPerBlockMax)
  1175  
  1176  	data2 := []byte{1, 2, 3, 4, 5}
  1177  	_, _, _ = putBlockData(ctx, t, j, data2)
  1178  
  1179  	requireCounts(len(data1)+len(data2), len(data1)+len(data2),
  1180  		2*filesPerBlockMax)
  1181  
  1182  	ignoredBytes, err := j.ignoreBlocksAndMDRevMarkers(
  1183  		ctx, []kbfsblock.ID{bID1}, kbfsmd.Revision(0))
  1184  	require.NoError(t, err)
  1185  	require.Equal(t, int64(len(data1)), ignoredBytes)
  1186  
  1187  	requireCounts(len(data1)+len(data2), len(data2), 2*filesPerBlockMax)
  1188  }
  1189  
  1190  // Regression test for HOTPOT-1553 -- make sure that if there's a
  1191  // crash after resolving an MD conflict but before ignoring the MD
  1192  // markers in the block journal, the journal won't mistakenly flush MD
  1193  // revisions before the resolved blocks have been flushed.
  1194  func TestBlockJournalIgnoreMDRevMarkerByID(t *testing.T) {
  1195  	ctx, cancel, tempdir, _, j := setupBlockJournalTest(t)
  1196  	defer teardownBlockJournalTest(ctx, t, cancel, tempdir, j)
  1197  
  1198  	// Put some blocks.
  1199  	data1 := []byte{1, 2, 3, 4}
  1200  	_, _, _ = putBlockData(ctx, t, j, data1)
  1201  	data2 := []byte{5, 6, 7, 8}
  1202  	_, _, _ = putBlockData(ctx, t, j, data2)
  1203  
  1204  	// Put a revision marker and say it's from a local squash.
  1205  	rev := kbfsmd.Revision(10)
  1206  	journalID1 := kbfsmd.FakeID(1)
  1207  	err := j.markMDRevision(ctx, rev, journalID1, true)
  1208  	require.NoError(t, err)
  1209  
  1210  	// Do another, that isn't from a local squash.
  1211  	data3 := []byte{9, 10, 11, 12}
  1212  	_, _, _ = putBlockData(ctx, t, j, data3)
  1213  	data4 := []byte{13, 14, 15, 16}
  1214  	_, _, _ = putBlockData(ctx, t, j, data4)
  1215  	rev++
  1216  	err = j.markMDRevision(ctx, rev, journalID1, false)
  1217  	require.NoError(t, err)
  1218  
  1219  	t.Log("When the journal ID is current, we may flush the latest MD " +
  1220  		"revision after flushing the blocks")
  1221  	last, err := j.j.readLatestOrdinal()
  1222  	require.NoError(t, err)
  1223  	_, _, gotRev, err := j.getNextEntriesToFlush(
  1224  		ctx, last+1, maxJournalBlockFlushBatchSize, journalID1)
  1225  	require.NoError(t, err)
  1226  	require.Equal(t, rev, gotRev)
  1227  
  1228  	t.Log("When the journal ID has changed, we may not flush any MD revisions")
  1229  	journalID2 := kbfsmd.FakeID(2)
  1230  	_, _, gotRev, err = j.getNextEntriesToFlush(
  1231  		ctx, last+1, maxJournalBlockFlushBatchSize, journalID2)
  1232  	require.NoError(t, err)
  1233  	require.Equal(t, kbfsmd.RevisionUninitialized, gotRev)
  1234  }