github.com/keybase/client/go@v0.0.0-20240309051027-028f7c731f8b/kbfs/libkbfs/folder_block_manager_test.go

// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package libkbfs

import (
	"bytes"
	"os"
	"reflect"
	"testing"
	"time"

	"github.com/keybase/client/go/kbfs/data"
	"github.com/keybase/client/go/kbfs/ioutil"
	"github.com/keybase/client/go/kbfs/kbfsblock"
	"github.com/keybase/client/go/kbfs/kbfsmd"
	"github.com/keybase/client/go/kbfs/test/clocktest"
	"github.com/keybase/client/go/kbfs/tlf"
	kbname "github.com/keybase/client/go/kbun"
	"github.com/keybase/client/go/libkb"
	"github.com/keybase/client/go/protocol/keybase1"
	"github.com/stretchr/testify/require"
	"golang.org/x/net/context"
)

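// totalBlockRefs counts every block reference across all block IDs in
// the map, letting tests compare reference totals before and after
// quota reclamation.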
func totalBlockRefs(m map[kbfsblock.ID]blockRefMap) int {
	n := 0
	for _, refs := range m {
		n += len(refs)
	}
	return n
}

// testQuotaReclamation drives a simple quota reclamation scenario: the
// user makes a few updates, an early reclamation run is verified to
// delete nothing, and then the clock is advanced far enough that a
// later run can delete the historical blocks.  It returns the folder's
// ops and the block refs present just before that later run.
func testQuotaReclamation(ctx context.Context, t *testing.T, config Config,
	userName kbname.NormalizedUsername) (
	ops *folderBranchOps, preBlocks map[kbfsblock.ID]blockRefMap) {
	clock, now := clocktest.NewTestClockAndTimeNow()
	config.SetClock(clock)

	rootNode := GetRootNodeOrBust(
		ctx, t, config, userName.String(), tlf.Private)
	kbfsOps := config.KBFSOps()
	_, _, err := kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
	require.NoError(t, err, "Couldn't create dir: %+v", err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync all: %v", err)
	err = kbfsOps.RemoveDir(ctx, rootNode, testPPS("a"))
	require.NoError(t, err, "Couldn't remove dir: %+v", err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync all: %v", err)
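	// The create/remove cycle above leaves unreferenced blocks in the
	// TLF's history; those are the blocks quota reclamation should
	// eventually delete.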

	// Wait for outstanding archives
	err = kbfsOps.SyncFromServer(ctx,
		rootNode.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %+v", err)

	// Make sure no blocks are deleted before there's a new-enough update.
	bserver := config.BlockServer()
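	// If journaling is enabled, unwrap the journal layer so we can
	// inspect the backing block server directly.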
	if jbserver, ok := bserver.(journalBlockServer); ok {
		bserver = jbserver.BlockServer
	}
	bserverLocal, ok := bserver.(blockServerLocal)
	if !ok {
		t.Fatalf("Bad block server")
	}
	preQR1Blocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	ops = kbfsOps.(*KBFSOpsStandard).getOpsByNode(ctx, rootNode)
	ops.fbm.forceQuotaReclamation()
	err = ops.fbm.waitForQuotaReclamations(ctx)
	require.NoError(t, err, "Couldn't wait for QR: %+v", err)

	postQR1Blocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	if !reflect.DeepEqual(preQR1Blocks, postQR1Blocks) {
		t.Fatalf("Blocks deleted too early (%v vs %v)!",
			preQR1Blocks, postQR1Blocks)
	}

	// Increase the time and make a new revision, but don't run quota
	// reclamation yet.
	clock.Set(now.Add(2 * config.Mode().QuotaReclamationMinUnrefAge()))
	_, _, err = kbfsOps.CreateDir(ctx, rootNode, testPPS("b"))
	require.NoError(t, err, "Couldn't create dir: %+v", err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync all: %v", err)

	preQR2Blocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	return ops, preQR2Blocks
}

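// ensureFewerBlocksPostQR forces one more quota reclamation run and
// asserts that the total number of block references shrank.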
func ensureFewerBlocksPostQR(
	ctx context.Context, t *testing.T, config *ConfigLocal,
	ops *folderBranchOps, preBlocks map[kbfsblock.ID]blockRefMap) {
	ops.fbm.forceQuotaReclamation()
	err := ops.fbm.waitForQuotaReclamations(ctx)
	require.NoError(t, err, "Couldn't wait for QR: %+v", err)

	bserver := config.BlockServer()
	if jbserver, ok := bserver.(journalBlockServer); ok {
		bserver = jbserver.BlockServer
	}
	bserverLocal, ok := bserver.(blockServerLocal)
	require.True(t, ok)

	postBlocks, err := bserverLocal.getAllRefsForTest(ctx, ops.id())
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	pre, post := totalBlockRefs(preBlocks), totalBlockRefs(postBlocks)
	require.True(t, post < pre,
		"Blocks didn't shrink after reclamation: pre: %d, post %d",
		pre, post)
}

func TestQuotaReclamationSimple(t *testing.T) {
	var userName kbname.NormalizedUsername = "test_user"
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, userName)
	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)

	ops, preBlocks := testQuotaReclamation(ctx, t, config, userName)
	ensureFewerBlocksPostQR(ctx, t, config, ops, preBlocks)
}

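// modeTestWithNoTimedQR reports a zero quota reclamation period, so no
// timed QR runs in the background.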
type modeTestWithNoTimedQR struct {
	InitMode
}

func (mtwntq modeTestWithNoTimedQR) QuotaReclamationPeriod() time.Duration {
	return 0
}

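// modeTestWithMaxPtrsLimit constrains block management to handle only
// one block pointer at a time.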
type modeTestWithMaxPtrsLimit struct {
	InitMode
}

func (mtwmpl modeTestWithMaxPtrsLimit) MaxBlockPtrsToManageAtOnce() int {
	return 1
}

func TestQuotaReclamationConstrained(t *testing.T) {
	var userName kbname.NormalizedUsername = "test_user"
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, userName)
	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
	config.SetMode(modeTestWithNoTimedQR{config.Mode()})
	originalMode := config.Mode()
	config.SetMode(modeTestWithMaxPtrsLimit{originalMode})

	ops, preBlocks := testQuotaReclamation(ctx, t, config, userName)

	// Unconstrain it for the final QR.
	config.SetMode(originalMode)
	ensureFewerBlocksPostQR(ctx, t, config, ops, preBlocks)
}

// Just like the simple case, except tests that it unembeds large sets
// of pointers correctly.
func TestQuotaReclamationUnembedded(t *testing.T) {
	var userName kbname.NormalizedUsername = "test_user"
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, userName)
	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)

	config.bsplit.(*data.BlockSplitterSimple).
		SetBlockChangeEmbedMaxSizeForTesting(32)

	ops, preBlocks := testQuotaReclamation(ctx, t, config, userName)
	ensureFewerBlocksPostQR(ctx, t, config, ops, preBlocks)

	// Make sure the MD has an unembedded change block.
	md, err := config.MDOps().GetForTLF(ctx, ops.id(), nil)
	require.NoError(t, err, "Couldn't get MD: %+v", err)
	if md.data.cachedChanges.Info.BlockPointer == data.ZeroPtr {
		t.Fatalf("No unembedded changes for ops %v", md.data.Changes.Ops)
	}
}

// Just like the unembedded case, except with journaling enabled, to
// check that reclamation while the journal is paused doesn't advance
// the latest merged revision.
func TestQuotaReclamationUnembeddedJournal(t *testing.T) {
	var userName kbname.NormalizedUsername = "test_user"
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, userName)
	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)

	tempdir, err := ioutil.TempDir(os.TempDir(), "journal_server")
	require.NoError(t, err)
	defer func() {
		err := ioutil.RemoveAll(tempdir)
		require.NoError(t, err)
	}()

	err = config.EnableDiskLimiter(tempdir)
	require.NoError(t, err)
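	// Enable journaling with background work paused, so journal
	// entries (including any reclamation ops) stay local and are not
	// flushed to the server until the test resumes them.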
	err = config.EnableJournaling(
		ctx, tempdir, TLFJournalBackgroundWorkPaused)
	require.NoError(t, err)

	config.bsplit.(*data.BlockSplitterSimple).
		SetBlockChangeEmbedMaxSizeForTesting(32)

	rootNode := GetRootNodeOrBust(
		ctx, t, config, userName.String(), tlf.Private)
	jManager, err := GetJournalManager(config)
	require.NoError(t, err)
	jManager.PauseBackgroundWork(ctx, rootNode.GetFolderBranch().Tlf)

	ops, _ := testQuotaReclamation(ctx, t, config, userName)

	t.Log("Check that the latest merged revision didn't get updated")
	rev := ops.getLatestMergedRevision(makeFBOLockState())
	require.Equal(t, kbfsmd.RevisionInitial, rev)

	jManager.ResumeBackgroundWork(ctx, ops.id())
	err = jManager.Wait(ctx, ops.id())
	require.NoError(t, err)
}

// Test that a single quota reclamation run doesn't try to reclaim too
// much quota at once.
func TestQuotaReclamationIncrementalReclamation(t *testing.T) {
	var userName kbname.NormalizedUsername = "test_user"
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, userName)
	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)

	now := time.Now()
	var clock clocktest.TestClock
	clock.Set(now)
	config.SetClock(&clock)

	// Allow for big embedded block changes, so they don't confuse our
	// block-checking logic.
	config.bsplit.(*data.BlockSplitterSimple).
		SetBlockChangeEmbedMaxSizeForTesting(16 << 20)

	rootNode := GetRootNodeOrBust(
		ctx, t, config, userName.String(), tlf.Private)
	// Do a bunch of operations.
	kbfsOps := config.KBFSOps()
	testPointersPerGCThreshold := 10
	for i := 0; i < testPointersPerGCThreshold; i++ {
		_, _, err := kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
		require.NoError(t, err, "Couldn't create dir: %+v", err)
		err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
		require.NoError(t, err, "Couldn't sync all: %v", err)
		err = kbfsOps.RemoveDir(ctx, rootNode, testPPS("a"))
		require.NoError(t, err, "Couldn't remove dir: %+v", err)
		err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
		require.NoError(t, err, "Couldn't sync all: %v", err)
	}

	// Increase the time, and make sure that there is still more than
	// one block in the history
	clock.Set(now.Add(2 * config.Mode().QuotaReclamationMinUnrefAge()))

	// Run one reclamation pass; the threshold roughly caps how many
	// pointers a single pass will collect.
	ops := kbfsOps.(*KBFSOpsStandard).getOpsByNode(ctx, rootNode)
	ops.fbm.numPointersPerGCThreshold = testPointersPerGCThreshold
	ops.fbm.forceQuotaReclamation()
	err := ops.fbm.waitForQuotaReclamations(ctx)
	require.NoError(t, err, "Couldn't wait for QR: %+v", err)

	bserverLocal, ok := config.BlockServer().(blockServerLocal)
	if !ok {
		t.Fatalf("Bad block server")
	}
	blocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	b := totalBlockRefs(blocks)
	if b <= 1 {
		t.Errorf("Too few blocks left after first QR: %d", b)
	}

	// Now let it run to completion
	for b > 1 {
		ops.fbm.forceQuotaReclamation()
		err = ops.fbm.waitForQuotaReclamations(ctx)
		require.NoError(t, err, "Couldn't wait for QR: %+v", err)

		blocks, err := bserverLocal.getAllRefsForTest(
			ctx, rootNode.GetFolderBranch().Tlf)
		require.NoError(t, err, "Couldn't get blocks: %+v", err)
		oldB := b
		b = totalBlockRefs(blocks)
		if b >= oldB {
			t.Fatalf("Blocks didn't shrink after reclamation: %d vs. %d",
				b, oldB)
		}
	}
}

// Test that deleted blocks are correctly flushed from the user cache.
func TestQuotaReclamationDeletedBlocks(t *testing.T) {
	var u1, u2 kbname.NormalizedUsername = "u1", "u2"
	config1, _, ctx, cancel := kbfsOpsInitNoMocks(t, u1, u2)
	defer kbfsTestShutdownNoMocks(ctx, t, config1, cancel)

	clock, now := clocktest.NewTestClockAndTimeNow()
	config1.SetClock(clock)

	// Initialize the MD using a different config
	config2 := ConfigAsUser(config1, u2)
	defer CheckConfigAndShutdown(ctx, t, config2)
	config2.SetClock(clock)

	name := u1.String() + "," + u2.String()
	rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, tlf.Private)
	data1 := []byte{1, 2, 3, 4, 5}
	kbfsOps1 := config1.KBFSOps()
	aNode1, _, err := kbfsOps1.CreateFile(
		ctx, rootNode1, testPPS("a"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %+v", err)
	err = kbfsOps1.Write(ctx, aNode1, data1, 0)
	require.NoError(t, err, "Couldn't write file: %+v", err)
	err = kbfsOps1.SyncAll(ctx, aNode1.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %+v", err)

	// Make two more files that share a block, only one of which will
	// be deleted.
	otherData := []byte{5, 4, 3, 2, 1}
	for _, name := range []data.PathPartString{testPPS("b"), testPPS("c")} {
		node, _, err := kbfsOps1.CreateFile(ctx, rootNode1, name, false, NoExcl)
		require.NoError(t, err, "Couldn't create file: %+v", err)
		err = kbfsOps1.Write(ctx, node, otherData, 0)
		require.NoError(t, err, "Couldn't write file: %+v", err)
		err = kbfsOps1.SyncAll(ctx, node.GetFolderBranch())
		require.NoError(t, err, "Couldn't sync file: %+v", err)
	}
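	// Since b and c hold identical data, the block server stores one
	// data block with two references; deleting just b later leaves the
	// other reference live.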

	// u2 reads the file
	rootNode2 := GetRootNodeOrBust(ctx, t, config2, name, tlf.Private)
	kbfsOps2 := config2.KBFSOps()
	aNode2, _, err := kbfsOps2.Lookup(ctx, rootNode2, testPPS("a"))
	require.NoError(t, err, "Couldn't look up file: %+v", err)
	data2 := make([]byte, len(data1))
	_, err = kbfsOps2.Read(ctx, aNode2, data2, 0)
	require.NoError(t, err, "Couldn't read file: %+v", err)
	if !bytes.Equal(data1, data2) {
		t.Fatalf("Read bad data: %v", data2)
	}
	bNode2, _, err := kbfsOps2.Lookup(ctx, rootNode2, testPPS("b"))
	require.NoError(t, err, "Couldn't look up file: %+v", err)
	data2 = make([]byte, len(data1))
	_, err = kbfsOps2.Read(ctx, bNode2, data2, 0)
	require.NoError(t, err, "Couldn't read file: %+v", err)
	if !bytes.Equal(otherData, data2) {
		t.Fatalf("Read bad data: %v", data2)
	}

	// Remove two of the files
	err = kbfsOps1.RemoveEntry(ctx, rootNode1, testPPS("a"))
	require.NoError(t, err, "Couldn't remove file: %+v", err)
	err = kbfsOps1.RemoveEntry(ctx, rootNode1, testPPS("b"))
	require.NoError(t, err, "Couldn't remove file: %+v", err)
	err = kbfsOps1.SyncAll(ctx, rootNode1.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %+v", err)

	// Wait for outstanding archives
	err = kbfsOps1.SyncFromServer(ctx,
		rootNode1.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %+v", err)

	// Get the current set of blocks
	bserverLocal, ok := config1.BlockServer().(blockServerLocal)
	if !ok {
		t.Fatalf("Bad block server")
	}
	preQRBlocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode1.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	clock.Set(now.Add(2 * config1.Mode().QuotaReclamationMinUnrefAge()))
	ops1 := kbfsOps1.(*KBFSOpsStandard).getOpsByNode(ctx, rootNode1)
	ops1.fbm.forceQuotaReclamation()
	err = ops1.fbm.waitForQuotaReclamations(ctx)
	require.NoError(t, err, "Couldn't wait for QR: %+v", err)

	postQRBlocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode1.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	if pre, post := totalBlockRefs(preQRBlocks),
		totalBlockRefs(postQRBlocks); post >= pre {
		t.Errorf("Blocks didn't shrink after reclamation: pre: %d, post %d",
			pre, post)
	}

	// Sync u2
	err = kbfsOps2.SyncFromServer(ctx,
		rootNode2.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %+v", err)

	// Make a file with the other data on node 2, which uses a block
	// for which one reference has been deleted, but the other should
	// still be live.  This will cause one dedup reference, and 3 new
	// blocks (2 from the create, and 1 from the sync).
	dNode, _, err := kbfsOps2.CreateFile(
		ctx, rootNode2, testPPS("d"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %+v", err)
	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %+v", err)
	err = kbfsOps2.Write(ctx, dNode, otherData, 0)
	require.NoError(t, err, "Couldn't write file: %+v", err)
	err = kbfsOps2.SyncAll(ctx, dNode.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %+v", err)
	// Wait for outstanding archives
	err = kbfsOps2.SyncFromServer(ctx,
		rootNode2.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %+v", err)

	// Make the same file on node 2, making sure this doesn't try to
	// reuse the same block (i.e., there are only 2 put calls).
	eNode, _, err := kbfsOps2.CreateFile(
		ctx, rootNode2, testPPS("e"), false, NoExcl)
	require.NoError(t, err, "Couldn't create file: %+v", err)
	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
	require.NoError(t, err, "Couldn't sync file: %+v", err)
	err = kbfsOps2.Write(ctx, eNode, data1, 0)
	require.NoError(t, err, "Couldn't write file: %+v", err)

	// Stall the puts that come as part of the sync call.
	oldBServer := config2.BlockServer()
	defer config2.SetBlockServer(oldBServer)
	onWriteStalledCh, writeUnstallCh, ctxStall := StallBlockOp(
		ctx, config2, StallableBlockPut, 2)

	// Start the sync and wait for it to stall twice only.
	errChan := make(chan error)
	go func() {
		errChan <- kbfsOps2.SyncAll(ctxStall, eNode.GetFolderBranch())
	}()
	<-onWriteStalledCh
	<-onWriteStalledCh
	writeUnstallCh <- struct{}{}
	writeUnstallCh <- struct{}{}
	// Don't close the channel, we want to make sure other Puts get
	// stalled.
	err = <-errChan
	require.NoError(t, err, "Couldn't sync file: %+v", err)

	// Wait for outstanding archives
	err = kbfsOps2.SyncFromServer(ctx,
		rootNode2.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %+v", err)

	// Delete any blocks that happened to be put during an update that
	// failed due to recoverable block errors.
	clock.Set(now.Add(2 * config1.Mode().QuotaReclamationMinUnrefAge()))
	ops1.fbm.forceQuotaReclamation()
	err = ops1.fbm.waitForQuotaReclamations(ctx)
	require.NoError(t, err, "Couldn't wait for QR: %+v", err)

	endBlocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode2.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	// There should be exactly 8 extra block refs (2 for the create and
	// 2 for the write/sync, for each of the two files above) as a
	// result of the operations, and exactly one block should have more
	// than one reference.
	if pre, post := totalBlockRefs(postQRBlocks),
		totalBlockRefs(endBlocks); post != pre+8 {
		t.Errorf("Different number of blocks than expected: pre: %d, post %d",
			pre, post)
	}
	oneDedupFound := false
	for id, refs := range endBlocks {
		areAllRefsArchived := true
		for _, ref := range refs {
			if ref.Status != archivedBlockRef {
				areAllRefsArchived = false
				break
			}
		}
		if areAllRefsArchived {
			continue
		}
		if len(refs) > 2 {
			t.Errorf("Block %v unexpectedly had %d refs %+v", id, len(refs), refs)
		} else if len(refs) == 2 {
			if oneDedupFound {
				t.Errorf("Extra dedup block %v with refs %+v", id, refs)
			} else {
				oneDedupFound = true
			}
		}
	}
	if !oneDedupFound {
		t.Error("No dedup reference found")
	}
}

// Test that quota reclamation doesn't happen while waiting for a
// requested rekey.
func TestQuotaReclamationFailAfterRekeyRequest(t *testing.T) {
	var u1, u2 kbname.NormalizedUsername = "u1", "u2"
	config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2)
	defer kbfsConcurTestShutdown(ctx, t, config1, cancel)
	clock := clocktest.NewTestClockNow()
	config1.SetClock(clock)

	config2 := ConfigAsUser(config1, u2)
	defer CheckConfigAndShutdown(ctx, t, config2)
	session2, err := config2.KBPKI().GetCurrentSession(context.Background())
	if err != nil {
		t.Fatal(err)
	}
	uid2 := session2.UID

	// Create a shared folder.
	name := u1.String() + "," + u2.String()
	rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, tlf.Private)

	config2Dev2 := ConfigAsUser(config1, u2)
	defer CheckConfigAndShutdown(ctx, t, config2Dev2)

	// Now give u2 a new device.  The configs don't share a Keybase
	// Daemon so we have to do it in all places.
	AddDeviceForLocalUserOrBust(t, config1, uid2)
	AddDeviceForLocalUserOrBust(t, config2, uid2)
	devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
	SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)

	// User 2 should be unable to read the data now, since the new
	// device wasn't registered when the folder was originally created.
	_, err = GetRootNodeForTest(ctx, config2Dev2, name, tlf.Private)
	if _, ok := err.(NeedSelfRekeyError); !ok {
		t.Fatalf("Got unexpected error when reading with new key: %+v", err)
	}

	// Request a rekey from the new device, which will only be
	// able to set the rekey bit (copying the root MD).
	kbfsOps2Dev2 := config2Dev2.KBFSOps()
	_, err = RequestRekeyAndWaitForOneFinishEvent(ctx,
		kbfsOps2Dev2, rootNode1.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't rekey: %+v", err)

	// Make sure QR returns an error.
	ops := config2Dev2.KBFSOps().(*KBFSOpsStandard).getOpsByNode(ctx, rootNode1)
	timer := time.NewTimer(config2Dev2.Mode().QuotaReclamationPeriod())
	ops.fbm.reclamationGroup.Add(1)
	err = ops.fbm.doReclamation(timer)
	if _, ok := err.(NeedSelfRekeyError); !ok {
		t.Fatalf("Unexpected rekey error: %+v", err)
	}

	// Rekey from another device.
	kbfsOps1 := config1.KBFSOps()
	err = kbfsOps1.SyncFromServer(ctx,
		rootNode1.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %+v", err)
	_, err = RequestRekeyAndWaitForOneFinishEvent(ctx,
		kbfsOps1, rootNode1.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't rekey: %+v", err)

	// Retry the QR; should work now.
	err = kbfsOps2Dev2.SyncFromServer(ctx,
		rootNode1.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %+v", err)
	ops.fbm.reclamationGroup.Add(1)
	err = ops.fbm.doReclamation(timer)
	require.NoError(t, err, "Unexpected rekey error: %+v", err)
}

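// modeTestWithQR wraps a non-test mode but still reports IsTestMode,
// letting tests re-enable quota reclamation while keeping test
// behavior elsewhere.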
type modeTestWithQR struct {
	InitMode
}

func (mtwqr modeTestWithQR) IsTestMode() bool {
	return true
}

// Test that quota reclamation doesn't run if the current head root
// block can't be fetched.
func TestQuotaReclamationMissingRootBlock(t *testing.T) {
	var u1, u2 kbname.NormalizedUsername = "u1", "u2"
	config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2)
	defer kbfsConcurTestShutdown(ctx, t, config1, cancel)
	clock := clocktest.NewTestClockNow()
	config1.SetClock(clock)
	// Re-enable QR in test mode.
	config1.SetMode(modeTestWithQR{NewInitModeFromType(InitDefault)})

	config2 := ConfigAsUser(config1, u2)
	defer CheckConfigAndShutdown(ctx, t, config2)

	name := u1.String() + "," + u2.String()

	// u1 does the writes, and u2 tries to do the QR.
	rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, tlf.Private)
	kbfsOps1 := config1.KBFSOps()
	_, _, err := kbfsOps1.CreateDir(ctx, rootNode1, testPPS("a"))
	require.NoError(t, err, "Couldn't create dir: %+v", err)
	err = kbfsOps1.RemoveDir(ctx, rootNode1, testPPS("a"))
	require.NoError(t, err, "Couldn't remove dir: %+v", err)

	// Increase the time and make a new revision, and make sure quota
	// reclamation doesn't run.
	clock.Add(2 * config2.Mode().QuotaReclamationMinUnrefAge())
	_, _, err = kbfsOps1.CreateDir(ctx, rootNode1, testPPS("b"))
	require.NoError(t, err, "Couldn't create dir: %+v", err)

	// Wait for outstanding archives
	err = kbfsOps1.SyncFromServer(ctx,
		rootNode1.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %+v", err)
	// Delete the head's root block directly from the bserver, so that
	// it can no longer be fetched.
	md, err := kbfsOps1.GetNodeMetadata(ctx, rootNode1)
	require.NoError(t, err)
	bserverLocal, ok := config1.BlockServer().(blockServerLocal)
	require.True(t, ok)
	ptr := md.BlockInfo.BlockPointer
	contexts := kbfsblock.ContextMap{
		ptr.ID: []kbfsblock.Context{ptr.Context},
	}
	_, err = bserverLocal.RemoveBlockReferences(
		ctx, rootNode1.GetFolderBranch().Tlf, contexts)
	require.NoError(t, err)

	kbfsOps2 := config2.KBFSOps()
	rootNode2 := GetRootNodeOrBust(ctx, t, config2, name, tlf.Private)

	// Increase the time again, so that QR would normally be allowed
	// to run.
	clock.Add(2 * config2.Mode().QuotaReclamationMinHeadAge())

	// Make sure no blocks are deleted while the block can't be fetched.
	preQR1Blocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode2.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	ops := kbfsOps2.(*KBFSOpsStandard).getOpsByNode(ctx, rootNode2)
	ops.fbm.forceQuotaReclamation()
	err = ops.fbm.waitForQuotaReclamations(ctx)
	require.NoError(t, err, "Couldn't wait for QR: %+v", err)

	postQR1Blocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode2.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	if !reflect.DeepEqual(preQR1Blocks, postQR1Blocks) {
		t.Fatalf("Blocks deleted despite error (%v vs %v)!",
			preQR1Blocks, postQR1Blocks)
	}

	// Skip state-checking.
	config1.MDServer().Shutdown()
}

// Test that quota reclamation doesn't run unless the current head is
// at least the minimum needed age.
func TestQuotaReclamationMinHeadAge(t *testing.T) {
	var u1, u2 kbname.NormalizedUsername = "u1", "u2"
	config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2)
	defer kbfsConcurTestShutdown(ctx, t, config1, cancel)
	clock := clocktest.NewTestClockNow()
	config1.SetClock(clock)
	// Re-enable QR in test mode.
	config1.SetMode(modeTestWithQR{NewInitModeFromType(InitDefault)})

	config2 := ConfigAsUser(config1, u2)
	defer CheckConfigAndShutdown(ctx, t, config2)

	name := u1.String() + "," + u2.String()

	// u1 does the writes, and u2 tries to do the QR.
	rootNode1 := GetRootNodeOrBust(ctx, t, config1, name, tlf.Private)
	kbfsOps1 := config1.KBFSOps()
	_, _, err := kbfsOps1.CreateDir(ctx, rootNode1, testPPS("a"))
	require.NoError(t, err, "Couldn't create dir: %+v", err)
	err = kbfsOps1.RemoveDir(ctx, rootNode1, testPPS("a"))
	require.NoError(t, err, "Couldn't remove dir: %+v", err)

	// Increase the time and make a new revision, and make sure quota
	// reclamation doesn't run.
	clock.Add(2 * config2.Mode().QuotaReclamationMinUnrefAge())
	_, _, err = kbfsOps1.CreateDir(ctx, rootNode1, testPPS("b"))
	require.NoError(t, err, "Couldn't create dir: %+v", err)

	// Wait for outstanding archives
	err = kbfsOps1.SyncFromServer(ctx,
		rootNode1.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %+v", err)

	kbfsOps2 := config2.KBFSOps()
	rootNode2 := GetRootNodeOrBust(ctx, t, config2, name, tlf.Private)

	// Make sure no blocks are deleted before there's a new-enough update.
	bserverLocal, ok := config2.BlockServer().(blockServerLocal)
	if !ok {
		t.Fatalf("Bad block server")
	}
	preQR1Blocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode2.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	ops := kbfsOps2.(*KBFSOpsStandard).getOpsByNode(ctx, rootNode2)
	ops.fbm.forceQuotaReclamation()
	err = ops.fbm.waitForQuotaReclamations(ctx)
	require.NoError(t, err, "Couldn't wait for QR: %+v", err)

	postQR1Blocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode2.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	if !reflect.DeepEqual(preQR1Blocks, postQR1Blocks) {
		t.Fatalf("Blocks deleted too early (%v vs %v)!",
			preQR1Blocks, postQR1Blocks)
	}

	// Increase the time again and make sure it does run.
	clock.Add(2 * config2.Mode().QuotaReclamationMinHeadAge())

	preQR2Blocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode2.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	ops.fbm.forceQuotaReclamation()
	err = ops.fbm.waitForQuotaReclamations(ctx)
	require.NoError(t, err, "Couldn't wait for QR: %+v", err)

	postQR2Blocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode2.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	if pre, post := totalBlockRefs(preQR2Blocks),
		totalBlockRefs(postQR2Blocks); post >= pre {
		t.Errorf("Blocks didn't shrink after reclamation: pre: %d, post %d",
			pre, post)
	}

	// If u2 does a write, we don't have to wait the minimum head age.
	_, _, err = kbfsOps2.CreateDir(ctx, rootNode2, testPPS("c"))
	require.NoError(t, err, "Couldn't create dir: %+v", err)

	// Wait for outstanding archives
	err = kbfsOps2.SyncFromServer(ctx,
		rootNode2.GetFolderBranch(), nil)
	require.NoError(t, err, "Couldn't sync from server: %+v", err)

	clock.Add(2 * config2.Mode().QuotaReclamationMinUnrefAge())

	preQR3Blocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode2.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	ops.fbm.forceQuotaReclamation()
	err = ops.fbm.waitForQuotaReclamations(ctx)
	require.NoError(t, err, "Couldn't wait for QR: %+v", err)

	postQR3Blocks, err := bserverLocal.getAllRefsForTest(
		ctx, rootNode2.GetFolderBranch().Tlf)
	require.NoError(t, err, "Couldn't get blocks: %+v", err)

	if pre, post := totalBlockRefs(preQR3Blocks),
		totalBlockRefs(postQR3Blocks); post >= pre {
		t.Errorf("Blocks didn't shrink after reclamation: pre: %d, post %d",
			pre, post)
	}
}

// Test that quota reclamation makes GCOps that account for other
// GCOps, so clients don't waste time scanning over a bunch of old
// GCOps when there is nothing to be done.
func TestQuotaReclamationGCOpsForGCOps(t *testing.T) {
	var userName kbname.NormalizedUsername = "test_user"
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, userName)
	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
	clock := clocktest.NewTestClockNow()
	config.SetClock(clock)

	rootNode := GetRootNodeOrBust(
		ctx, t, config, userName.String(), tlf.Private)
	kbfsOps := config.KBFSOps()
	ops := kbfsOps.(*KBFSOpsStandard).getOpsByNode(ctx, rootNode)
	// This threshold isn't exact; in this case it works out to 3
	// pointers per GC.
	ops.fbm.numPointersPerGCThreshold = 1

	numCycles := 4
	for i := 0; i < numCycles; i++ {
		_, _, err := kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
		require.NoError(t, err, "Couldn't create dir: %+v", err)
		err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
		require.NoError(t, err, "Couldn't sync all: %v", err)
		err = kbfsOps.RemoveDir(ctx, rootNode, testPPS("a"))
		require.NoError(t, err, "Couldn't remove dir: %+v", err)
		err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
		require.NoError(t, err, "Couldn't sync all: %v", err)
	}
	clock.Add(2 * config.Mode().QuotaReclamationMinUnrefAge())

	// Make sure the head has a GCOp that doesn't point to just the
	// previous revision.
	md, err := config.MDOps().GetForTLF(
		ctx, rootNode.GetFolderBranch().Tlf, nil)
	require.NoError(t, err, "Couldn't get MD: %+v", err)

	// Run reclamation until the head doesn't change anymore, which
	// should cover revision #3, as well as the two subsequent GCops.
	lastRev := md.Revision()
	count := 0
	for {
		ops.fbm.forceQuotaReclamation()
		err = ops.fbm.waitForQuotaReclamations(ctx)
		require.NoError(t, err, "Couldn't wait for QR: %+v", err)

		md, err = config.MDOps().GetForTLF(
			ctx, rootNode.GetFolderBranch().Tlf, nil)
		require.NoError(t, err, "Couldn't get MD: %+v", err)

		if md.Revision() == lastRev {
			break
		}
		lastRev = md.Revision()
		count++
		if count == numCycles {
			// Increase the clock so now we can GC all those GCOps.
			clock.Add(2 * config.Mode().QuotaReclamationMinUnrefAge())
		}
	}

	if g, e := count, numCycles+1; g != e {
		t.Fatalf("Wrong number of forced QRs: %d vs %d", g, e)
	}

	if g, e := len(md.data.Changes.Ops), 1; g != e {
		t.Fatalf("Unexpected number of ops: %d vs %d", g, e)
	}

	gcOp, ok := md.data.Changes.Ops[0].(*GCOp)
	if !ok {
		t.Fatalf("No GCOp: %s", md.data.Changes.Ops[0])
	}

	if g, e := gcOp.LatestRev, md.Revision()-1; g != e {
		t.Fatalf("Last GCOp revision was unexpected: %d vs %d", g, e)
	}
}

func TestFolderBlockManagerCleanSyncCache(t *testing.T) {
	tempdir, err := ioutil.TempDir(os.TempDir(), "journal_server")
	require.NoError(t, err)
	defer func() {
		err := ioutil.RemoveAll(tempdir)
		require.NoError(t, err)
	}()

	var userName kbname.NormalizedUsername = "test_user"
	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, userName)
	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
	config.SetVLogLevel(libkb.VLog2String)

	// Test the pointer-constraint logic.
	config.SetMode(modeTestWithMaxPtrsLimit{config.Mode()})

	err = config.EnableDiskLimiter(tempdir)
	require.NoError(t, err)
	err = config.loadSyncedTlfsLocked()
	require.NoError(t, err)
	config.diskCacheMode = DiskCacheModeLocal
	err = config.MakeDiskBlockCacheIfNotExists()
	require.NoError(t, err)
	dbc := config.DiskBlockCache()
	oldBserver := config.BlockServer()
	config.SetBlockServer(bserverPutToDiskCache{config.BlockServer(), dbc})
	defer config.SetBlockServer(oldBserver)
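	// The wrapped block server also stores each put block in the disk
	// block cache, so the sync cache fills up as the test writes data.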

	t.Log("Make a synced private TLF")
	rootNode := GetRootNodeOrBust(
		ctx, t, config, userName.String(), tlf.Private)
	kbfsOps := config.KBFSOps()
	_, err = config.SetTlfSyncState(
		ctx, rootNode.GetFolderBranch().Tlf, FolderSyncConfig{
			Mode: keybase1.FolderSyncMode_ENABLED,
		})
	require.NoError(t, err)
	aNode, _, err := kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
	require.NoError(t, err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err)
	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
	require.NoError(t, err)
	status := dbc.Status(ctx)
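	// 2 blocks == root and a.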
	require.Equal(t, uint64(2), status[syncCacheName].NumBlocks)

	t.Log("Make a second revision that will unref some blocks")
	_, _, err = kbfsOps.CreateDir(ctx, aNode, testPPS("b"))
	require.NoError(t, err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err)

	t.Log("Wait for cleanup")
	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
	require.NoError(t, err)
	// 3 blocks == root, a and b, without the old unref'd blocks.
	status = dbc.Status(ctx)
	require.Equal(t, uint64(3), status[syncCacheName].NumBlocks)

	t.Log("Add two empty files, to cause deduplication")
	_, _, err = kbfsOps.CreateFile(ctx, aNode, testPPS("c"), false, NoExcl)
	require.NoError(t, err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err)
	_, _, err = kbfsOps.CreateFile(ctx, aNode, testPPS("d"), false, NoExcl)
	require.NoError(t, err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err)

	t.Log("Remove one file, but not the other")
	err = kbfsOps.RemoveeEntry(ctx, aNode, testPPS("d"))
	require.NoError(t, err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err)

	t.Log("Wait for cleanup")
	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
	require.NoError(t, err)
	// 4 blocks == root, a, b, and c, without the old unref'd blocks.
	status = dbc.Status(ctx)
	require.Equal(t, uint64(4), status[syncCacheName].NumBlocks)

	t.Log("Test another TLF that isn't synced until after a few revisions")
	rootNode = GetRootNodeOrBust(ctx, t, config, userName.String(), tlf.Public)
	aNode, _, err = kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
	require.NoError(t, err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err)
	bNode, _, err := kbfsOps.CreateDir(ctx, aNode, testPPS("b"))
	require.NoError(t, err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err)
	lastRev, err := dbc.GetLastUnrefRev(
		ctx, rootNode.GetFolderBranch().Tlf, DiskBlockSyncCache)
	require.NoError(t, err)
	require.Equal(t, kbfsmd.RevisionUninitialized, lastRev)

	t.Log("Set new TLF to syncing, and add a new revision")
	_, err = config.SetTlfSyncState(
		ctx, rootNode.GetFolderBranch().Tlf, FolderSyncConfig{
			Mode: keybase1.FolderSyncMode_ENABLED,
		})
	require.NoError(t, err)
	_, _, err = kbfsOps.CreateDir(ctx, bNode, testPPS("c"))
	require.NoError(t, err)
	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
	require.NoError(t, err)
	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
	require.NoError(t, err)
	lastRev, err = dbc.GetLastUnrefRev(
		ctx, rootNode.GetFolderBranch().Tlf, DiskBlockSyncCache)
	require.NoError(t, err)
	require.Equal(t, kbfsmd.Revision(4), lastRev)
}