github.com/keybase/client/go@v0.0.0-20240309051027-028f7c731f8b/kbfs/libkbfs/kbfs_ops_test.go

     1  // Copyright 2016 Keybase Inc. All rights reserved.
     2  // Use of this source code is governed by a BSD
     3  // license that can be found in the LICENSE file.
     4  
     5  package libkbfs
     6  
     7  import (
     8  	"bytes"
     9  	"fmt"
    10  	"math/rand"
    11  	"os"
    12  	"path/filepath"
    13  	"sync"
    14  	"testing"
    15  	"time"
    16  
    17  	"github.com/golang/mock/gomock"
    18  	"github.com/keybase/client/go/kbfs/data"
    19  	"github.com/keybase/client/go/kbfs/env"
    20  	"github.com/keybase/client/go/kbfs/idutil"
    21  	"github.com/keybase/client/go/kbfs/ioutil"
    22  	"github.com/keybase/client/go/kbfs/kbfsblock"
    23  	"github.com/keybase/client/go/kbfs/kbfscodec"
    24  	"github.com/keybase/client/go/kbfs/kbfscrypto"
    25  	"github.com/keybase/client/go/kbfs/kbfshash"
    26  	"github.com/keybase/client/go/kbfs/kbfsmd"
    27  	"github.com/keybase/client/go/kbfs/kbfssync"
    28  	"github.com/keybase/client/go/kbfs/libcontext"
    29  	"github.com/keybase/client/go/kbfs/libkey"
    30  	"github.com/keybase/client/go/kbfs/test/clocktest"
    31  	"github.com/keybase/client/go/kbfs/tlf"
    32  	"github.com/keybase/client/go/kbfs/tlfhandle"
    33  	kbname "github.com/keybase/client/go/kbun"
    34  	"github.com/keybase/client/go/libkb"
    35  	"github.com/keybase/client/go/logger"
    36  	"github.com/keybase/client/go/protocol/keybase1"
    37  	"github.com/pkg/errors"
    38  	"github.com/stretchr/testify/assert"
    39  	"github.com/stretchr/testify/require"
    40  	ldberrors "github.com/syndtr/goleveldb/leveldb/errors"
    41  	"golang.org/x/net/context"
    42  	billy "gopkg.in/src-d/go-billy.v4"
    43  	"gopkg.in/src-d/go-billy.v4/memfs"
    44  )
    45  
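         // CheckBlockOps wraps a BlockOps implementation and reports a test
         // error whenever Ready returns a plaintext size larger than the
         // encoded size.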
    46  type CheckBlockOps struct {
    47  	BlockOps
    48  	tr gomock.TestReporter
    49  }
    50  
    51  var _ BlockOps = (*CheckBlockOps)(nil)
    52  
    53  func (cbo *CheckBlockOps) Ready(ctx context.Context, kmd libkey.KeyMetadata,
    54  	block data.Block) (id kbfsblock.ID, plainSize int, readyBlockData data.ReadyBlockData,
    55  	err error) {
    56  	id, plainSize, readyBlockData, err = cbo.BlockOps.Ready(ctx, kmd, block)
    57  	encodedSize := readyBlockData.GetEncodedSize()
    58  	if plainSize > encodedSize {
    59  		cbo.tr.Errorf("expected plainSize <= encodedSize, got plainSize = %d, "+
    60  			"encodedSize = %d", plainSize, encodedSize)
    61  	}
    62  	return
    63  }
    64  
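         // tCtxIDType is the type of the context key used by kbfsOpsInit to
         // tag test contexts with a random ID, so tests can verify that the
         // context is plumbed through correctly (e.g. to observers).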
    65  type tCtxIDType int
    66  
    67  const (
    68  	tCtxID tCtxIDType = iota
    69  )
    70  
     71  // Time out individual tests after 30 seconds.
     72  var individualTestTimeout = 30 * time.Second
    73  
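         // kbfsOpsInit constructs a mock-backed ConfigMock, a fresh
         // KBFSOpsStandard, and a test context with a timeout. Pair it with
         // kbfsTestShutdown, as the tests below do:
         //
         //	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
         //	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)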
    74  func kbfsOpsInit(t *testing.T) (mockCtrl *gomock.Controller,
    75  	config *ConfigMock, ctx context.Context, cancel context.CancelFunc) {
    76  	ctr := NewSafeTestReporter(t)
    77  	mockCtrl = gomock.NewController(ctr)
    78  	config = NewConfigMock(mockCtrl, ctr)
    79  	config.SetCodec(kbfscodec.NewMsgpack())
    80  	blockops := &CheckBlockOps{config.mockBops, ctr}
    81  	config.SetBlockOps(blockops)
    82  	kbfsops := NewKBFSOpsStandard(env.EmptyAppStateUpdater{}, config, nil)
    83  	config.SetKBFSOps(kbfsops)
    84  	config.SetNotifier(kbfsops)
    85  
    86  	// Use real caches, to avoid the overhead of tracking cache calls.
    87  	// Each test is expected to check the cache for correctness at the
    88  	// end of the test.
    89  	config.SetBlockCache(data.NewBlockCacheStandard(100, 1<<30))
    90  	log := config.MakeLogger("")
    91  	config.SetDirtyBlockCache(data.NewDirtyBlockCacheStandard(
    92  		data.WallClock{}, log, libkb.NewVDebugLog(log), 5<<20, 10<<20, 5<<20))
    93  	config.mockBcache = nil
    94  	config.mockDirtyBcache = nil
    95  
    96  	// These tests don't rely on external notifications at all, so ignore any
    97  	// goroutine attempting to register:
    98  	c := make(chan error, 1)
    99  	config.mockMdserv.EXPECT().RegisterForUpdate(gomock.Any(),
   100  		gomock.Any(), gomock.Any()).AnyTimes().Return(c, nil)
   101  	config.mockMdserv.EXPECT().CancelRegistration(
   102  		gomock.Any(), gomock.Any()).AnyTimes().Return()
   103  	config.mockMdserv.EXPECT().OffsetFromServerTime().
   104  		Return(time.Duration(0), true).AnyTimes()
   105  	// No chat monitoring.
   106  	config.mockChat.EXPECT().GetChannels(gomock.Any(),
   107  		gomock.Any(), gomock.Any(), gomock.Any()).AnyTimes().
   108  		Return(nil, nil, nil)
   109  
   110  	// Don't test implicit teams.
   111  	config.mockKbpki.EXPECT().ResolveImplicitTeam(
   112  		gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any(), gomock.Any()).
   113  		AnyTimes().Return(idutil.ImplicitTeamInfo{}, errors.New("No such team"))
   114  
   115  	// None of these tests depend on time
   116  	config.mockClock.EXPECT().Now().AnyTimes().Return(time.Now())
   117  
   118  	// Ignore Notify calls for now
   119  	config.mockRep.EXPECT().Notify(gomock.Any(), gomock.Any()).AnyTimes()
   120  
   121  	// Ignore MerkleRoot calls for now.
   122  	config.mockKbpki.EXPECT().GetCurrentMerkleRoot(gomock.Any()).
   123  		Return(keybase1.MerkleRootV2{}, time.Time{}, nil).AnyTimes()
   124  
   125  	// Max out MaxPtrsPerBlock
   126  	config.mockBsplit.EXPECT().MaxPtrsPerBlock().
   127  		Return(int((^uint(0)) >> 1)).AnyTimes()
   128  
   129  	// Never split dir blocks.
   130  	config.mockBsplit.EXPECT().SplitDirIfNeeded(gomock.Any()).DoAndReturn(
   131  		func(block *data.DirBlock) ([]*data.DirBlock, *data.StringOffset) {
   132  			return []*data.DirBlock{block}, nil
   133  		}).AnyTimes()
   134  
   135  	// Ignore Archive calls for now
   136  	config.mockBops.EXPECT().Archive(gomock.Any(), gomock.Any(),
   137  		gomock.Any()).AnyTimes().Return(nil)
    141  	// Serve BlockRetriever and Prefetcher calls with a real retrieval queue
   142  	clock := clocktest.NewTestClockNow()
   143  	ctlr := gomock.NewController(t)
   144  	mockPublisher := NewMockSubscriptionManagerPublisher(ctlr)
   145  	mockPublisher.EXPECT().PublishChange(gomock.Any()).AnyTimes()
   146  	brc := &testBlockRetrievalConfig{
   147  		nil, newTestLogMaker(t), config.BlockCache(), nil,
   148  		NewMockBlockServer(ctlr), newTestDiskBlockCacheGetter(t, nil),
   149  		newTestSyncedTlfGetterSetter(), testInitModeGetter{InitDefault}, clock,
   150  		NewReporterSimple(clock, 1), nil, mockPublisher,
   151  	}
   152  	brq := newBlockRetrievalQueue(0, 0, 0, brc, env.EmptyAppStateUpdater{})
   153  	config.mockBops.EXPECT().BlockRetriever().AnyTimes().Return(brq)
   154  	config.mockBops.EXPECT().Prefetcher().AnyTimes().Return(brq.prefetcher)
   155  
   156  	// Ignore favorites
   157  	err := errors.New("Fake error to prevent trying to read favs from disk")
   158  	config.mockKbpki.EXPECT().GetCurrentSession(gomock.Any()).Return(
   159  		idutil.SessionInfo{}, err)
   160  	config.mockRep.EXPECT().
   161  		NotifyFavoritesChanged(gomock.Any()).Return().AnyTimes()
   162  	kbfsops.favs.Initialize(ctx)
   163  	config.mockKbpki.EXPECT().FavoriteList(gomock.Any()).AnyTimes().
   164  		Return(keybase1.FavoritesResult{}, nil)
   165  	config.mockKbs.EXPECT().EncryptFavorites(gomock.Any(), gomock.Any()).
   166  		AnyTimes().Return(nil, nil)
   167  	config.mockKbpki.EXPECT().FavoriteAdd(gomock.Any(), gomock.Any()).
   168  		AnyTimes().Return(nil)
   169  
   170  	interposeDaemonKBPKI(config, "alice", "bob", "charlie")
   171  
   172  	timeoutCtx, cancel := context.WithTimeout(
   173  		context.Background(), individualTestTimeout)
   174  	initSuccess := false
   175  	defer func() {
   176  		if !initSuccess {
   177  			cancel()
   178  		}
   179  	}()
   180  
   181  	// make the context identifiable, to verify that it is passed
   182  	// correctly to the observer
   183  	id := rand.Int()
   184  	ctx, err = libcontext.NewContextWithCancellationDelayer(libcontext.NewContextReplayable(
   185  		timeoutCtx, func(ctx context.Context) context.Context {
   186  			return context.WithValue(ctx, tCtxID, id)
   187  		}))
   188  	if err != nil {
   189  		t.Fatal(err)
   190  	}
   191  
   192  	initSuccess = true
   193  	return mockCtrl, config, ctx, cancel
   194  }
   195  
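         // kbfsTestShutdown checks for mock expectation failures and tears
         // down everything created by kbfsOpsInit: the KBFSOps instance, the
         // dirty block cache, the block retrieval queue, and the test context.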
   196  func kbfsTestShutdown(
   197  	ctx context.Context, t *testing.T, mockCtrl *gomock.Controller,
   198  	config *ConfigMock, cancel context.CancelFunc) {
   199  	config.ctr.CheckForFailures()
   200  	err := config.conflictResolutionDB.Close()
   201  	require.NoError(t, err)
   202  	err = config.KBFSOps().(*KBFSOpsStandard).Shutdown(ctx)
   203  	require.NoError(t, err)
   204  	if config.mockDirtyBcache == nil {
   205  		// Ignore error; some tests intentionally leave around dirty data.
   206  		_ = config.DirtyBlockCache().Shutdown()
   207  	}
   208  	select {
   209  	case <-config.mockBops.BlockRetriever().(*blockRetrievalQueue).Shutdown():
   210  	case <-ctx.Done():
   211  		require.NoError(t, ctx.Err())
   212  	}
   213  	cancel()
   214  	if err := libcontext.CleanupCancellationDelayer(ctx); err != nil {
   215  		panic(err)
   216  	}
   217  	mockCtrl.Finish()
   218  }
   219  
   220  type modeNoHistory struct {
   221  	InitMode
   222  }
   223  
   224  func (mnh modeNoHistory) TLFEditHistoryEnabled() bool {
   225  	return false
   226  }
   227  
   228  func (mnh modeNoHistory) SendEditNotificationsEnabled() bool {
   229  	return false
   230  }
   231  
   232  // kbfsOpsInitNoMocks returns a config that doesn't use any mocks. The
   233  // shutdown call is kbfsTestShutdownNoMocks.
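         //
         // Typical usage, as in the tests below:
         //
         //	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
         //	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)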
   234  func kbfsOpsInitNoMocks(t *testing.T, users ...kbname.NormalizedUsername) (
   235  	*ConfigLocal, keybase1.UID, context.Context, context.CancelFunc) {
   236  	config := MakeTestConfigOrBust(t, users...)
   237  	// Turn off tlf edit history because it messes with the FBO state
   238  	// asynchronously.
   239  	config.SetMode(modeNoHistory{config.Mode()})
   240  	config.SetRekeyWithPromptWaitTime(individualTestTimeout)
   241  
   242  	timeoutCtx, cancel := context.WithTimeout(
   243  		context.Background(), individualTestTimeout)
   244  	initSuccess := false
   245  	defer func() {
   246  		if !initSuccess {
   247  			cancel()
   248  		}
   249  	}()
   250  
   251  	ctx, err := libcontext.NewContextWithCancellationDelayer(libcontext.NewContextReplayable(
   252  		timeoutCtx, func(c context.Context) context.Context {
   253  			return c
   254  		}))
   255  	if err != nil {
   256  		t.Fatal(err)
   257  	}
   258  
   259  	session, err := config.KBPKI().GetCurrentSession(ctx)
   260  	if err != nil {
   261  		t.Fatal(err)
   262  	}
   263  
   264  	initSuccess = true
   265  	return config, session.UID, ctx, cancel
   266  }
   267  
   268  func kbfsTestShutdownNoMocks(
   269  	ctx context.Context, t *testing.T,
   270  	config *ConfigLocal, cancel context.CancelFunc) {
   271  	CheckConfigAndShutdown(ctx, t, config)
   272  	cancel()
   273  	err := libcontext.CleanupCancellationDelayer(ctx)
   274  	require.NoError(t, err)
   275  }
   276  
   277  // TODO: Get rid of all users of this.
   278  func kbfsTestShutdownNoMocksNoCheck(ctx context.Context, t *testing.T,
   279  	config *ConfigLocal, cancel context.CancelFunc) {
   280  	_ = config.Shutdown(ctx)
   281  	cancel()
   282  	err := libcontext.CleanupCancellationDelayer(ctx)
   283  	require.NoError(t, err)
   284  }
   285  
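         // checkBlockCache asserts that, at the end of a test, the clean
         // block cache holds exactly expectedCleanBlocks and the dirty block
         // cache holds exactly the pointers in expectedDirtyBlocks.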
   286  func checkBlockCache(
   287  	ctx context.Context, t *testing.T, config *ConfigMock, id tlf.ID,
   288  	expectedCleanBlocks []kbfsblock.ID,
   289  	expectedDirtyBlocks map[data.BlockPointer]data.BranchName) {
   290  	bcache := config.BlockCache().(*data.BlockCacheStandard)
   291  	// make sure the LRU consists of exactly the right set of clean blocks
   292  	for _, id := range expectedCleanBlocks {
   293  		_, lifetime, err := bcache.GetWithLifetime(data.BlockPointer{ID: id})
   294  		if err != nil {
   295  			t.Errorf("BlockCache missing clean block %v at the end of the test",
   296  				id)
   297  		}
   298  		require.Equal(t, data.TransientEntry, lifetime)
   299  	}
   300  	if bcache.NumCleanTransientBlocks() != len(expectedCleanBlocks) {
   301  		t.Errorf("BlockCache has extra clean blocks at end of test")
   302  	}
   303  
   304  	// make sure the dirty cache consists of exactly the right set of
   305  	// dirty blocks
   306  	dirtyBcache := config.DirtyBlockCache().(*data.DirtyBlockCacheStandard)
   307  	for ptr, branch := range expectedDirtyBlocks {
   308  		_, err := dirtyBcache.Get(ctx, id, ptr, branch)
   309  		if err != nil {
    310  			t.Errorf("DirtyBlockCache missing dirty block %v, branch %s "+
    311  				"at the end of the test: err %+v", ptr, branch, err)
    312  		}
    313  		if !dirtyBcache.IsDirty(id, ptr, branch) {
    314  			t.Errorf("DirtyBlockCache has incorrectly clean block %v, "+
    315  				"branch %s at the end of the test", ptr, branch)
    316  		}
    317  	}
    318  	if dirtyBcache.Size() != len(expectedDirtyBlocks) {
    319  		t.Errorf("DirtyBlockCache has extra dirty blocks at end of test")
    320  	}
   321  }
   322  
   323  // parseTlfHandleOrBust parses the given TLF name, which must be
   324  // canonical, into a TLF handle, failing if there's an error.
   325  func parseTlfHandleOrBust(t logger.TestLogBackend, config Config,
   326  	name string, ty tlf.Type, id tlf.ID) *tlfhandle.Handle {
   327  	ctx := context.Background()
   328  	h, err := tlfhandle.ParseHandle(
   329  		ctx, config.KBPKI(), tlfhandle.ConstIDGetter{ID: id}, nil, name, ty)
   330  	if err != nil {
   331  		t.Fatalf("Couldn't parse %s (type=%s) into a TLF handle: %v",
   332  			name, ty, err)
   333  	}
   334  	return h
   335  }
   336  
   337  func TestKBFSOpsGetFavoritesSuccess(t *testing.T) {
   338  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
   339  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
   340  
   341  	handle1 := parseTlfHandleOrBust(t, config, "alice", tlf.Private, tlf.NullID)
   342  	handle2 := parseTlfHandleOrBust(
   343  		t, config, "alice,bob", tlf.Private, tlf.NullID)
   344  
   345  	// dup for testing
   346  	handles := []*tlfhandle.Handle{handle1, handle2, handle2}
   347  	for _, h := range handles {
   348  		err := config.KeybaseService().FavoriteAdd(
   349  			context.Background(), h.ToFavorite().ToKBFolderHandle(false))
   350  		require.NoError(t, err)
   351  	}
   352  
   353  	// The favorites list contains our own public dir by default, even
   354  	// if KBPKI doesn't return it.
   355  
   356  	handle3 := parseTlfHandleOrBust(t, config, "alice", tlf.Public, tlf.NullID)
   357  	handles = append(handles, handle3)
   358  
   359  	handles2, err := config.KBFSOps().GetFavorites(ctx)
   360  	if err != nil {
   361  		t.Errorf("Got error on favorites: %+v", err)
   362  	}
   363  	if len(handles2) != len(handles)-1 {
   364  		t.Errorf("Got bad handles back: %v", handles2)
   365  	}
   366  }
   367  
   368  func TestKBFSOpsGetFavoritesFail(t *testing.T) {
   369  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
   370  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
   371  
   372  	err := errors.New("Fake fail")
   373  
   374  	// Replace the old one (added in init function)
   375  	config.mockKbpki = NewMockKBPKI(mockCtrl)
   376  	config.SetKBPKI(config.mockKbpki)
   377  
   378  	// expect one call to favorites, and fail it
   379  	config.mockKbpki.EXPECT().FavoriteList(gomock.Any()).Return(keybase1.
   380  		FavoritesResult{}, err)
   381  
   382  	if _, err2 := config.KBFSOps().GetFavorites(ctx); err2 != err {
   383  		t.Errorf("Got bad error on favorites: %+v", err2)
   384  	}
   385  }
   386  
   387  // createNewRMD creates a new RMD for the given name. Returns its ID
   388  // and handle also.
   389  func createNewRMD(t *testing.T, config Config, name string, ty tlf.Type) (
   390  	tlf.ID, *tlfhandle.Handle, *RootMetadata) {
   391  	id := tlf.FakeID(1, ty)
   392  	h := parseTlfHandleOrBust(t, config, name, ty, id)
   393  	h.SetTlfID(id)
   394  	rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
   395  	require.NoError(t, err)
   396  	return id, h, rmd
   397  }
   398  
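         // makeImmutableRMDForTest wraps rmd in an ImmutableRootMetadata
         // signed (or fake-signed, for V2 metadata) with the current
         // session's verifying key.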
   399  func makeImmutableRMDForTest(t *testing.T, config Config, rmd *RootMetadata,
   400  	mdID kbfsmd.ID) ImmutableRootMetadata {
   401  	session, err := config.KBPKI().GetCurrentSession(context.Background())
   402  	require.NoError(t, err)
   403  	// We have to fake out the signature here because most tests
   404  	// in this file modify the returned value, invalidating any
   405  	// real signatures. TODO: Fix all the tests in this file to
   406  	// not do so, and then just use MakeImmutableRootMetadata.
   407  	if brmdv2, ok := rmd.bareMd.(*kbfsmd.RootMetadataV2); ok {
   408  		vk := brmdv2.WriterMetadataSigInfo.VerifyingKey
   409  		require.True(t, vk == (kbfscrypto.VerifyingKey{}) || vk == session.VerifyingKey,
   410  			"Writer signature %s with unexpected non-nil verifying key != %s",
   411  			brmdv2.WriterMetadataSigInfo, session.VerifyingKey)
   412  		brmdv2.WriterMetadataSigInfo = kbfscrypto.SignatureInfo{
   413  			VerifyingKey: session.VerifyingKey,
   414  		}
   415  	}
   416  	return MakeImmutableRootMetadata(
   417  		rmd, session.VerifyingKey, mdID, time.Now(), true)
   418  }
   419  
    420  // injectNewRMD creates a new RMD and sets it as the head of the
    421  // existing ops for its ID.
   422  func injectNewRMD(t *testing.T, config *ConfigMock) (
   423  	keybase1.UserOrTeamID, tlf.ID, *RootMetadata) {
   424  	id, h, rmd := createNewRMD(t, config, "alice", tlf.Private)
   425  	var keyGen kbfsmd.KeyGen
   426  	if id.Type() == tlf.Public {
   427  		keyGen = kbfsmd.PublicKeyGen
   428  	} else {
   429  		keyGen = kbfsmd.FirstValidKeyGen
   430  	}
   431  	rmd.data.Dir = data.DirEntry{
   432  		BlockInfo: data.BlockInfo{
   433  			BlockPointer: data.BlockPointer{
   434  				KeyGen:  keyGen,
   435  				DataVer: 1,
   436  			},
   437  			EncodedSize: 1,
   438  		},
   439  	}
   440  	rmd.fakeInitialRekey()
   441  
   442  	ops := getOps(config, id)
   443  	ops.head = makeImmutableRMDForTest(
   444  		t, config, rmd, kbfsmd.FakeID(tlf.FakeIDByte(id)))
   445  	ops.headStatus = headTrusted
   446  	rmd.SetSerializedPrivateMetadata(make([]byte, 1))
   447  	err := config.Notifier().RegisterForChanges(
   448  		[]data.FolderBranch{{Tlf: id, Branch: data.MasterBranch}},
   449  		config.observer)
   450  	require.NoError(t, err)
   451  	wid := h.FirstResolvedWriter()
   452  	rmd.data.Dir.Creator = wid
   453  	return wid, id, rmd
   454  }
   455  
   456  func TestKBFSOpsGetRootNodeCacheSuccess(t *testing.T) {
   457  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
   458  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
   459  
   460  	_, id, rmd := injectNewRMD(t, config)
   461  	rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
   462  	rmd.data.Dir.Type = data.Dir
   463  
   464  	ops := getOps(config, id)
   465  	assert.False(t, fboIdentityDone(ops))
   466  
   467  	n, ei, h, err := ops.getRootNode(ctx)
   468  	require.NoError(t, err)
   469  	assert.False(t, fboIdentityDone(ops))
   470  
   471  	p := ops.nodeCache.PathFromNode(n)
   472  	assert.Equal(t, id, p.Tlf)
   473  	require.Equal(t, 1, len(p.Path))
   474  	assert.Equal(t, rmd.data.Dir.ID, p.Path[0].ID)
   475  	assert.Equal(t, rmd.data.Dir.EntryInfo, ei)
   476  	assert.Equal(t, rmd.GetTlfHandle(), h)
   477  
   478  	// Trigger identify.
   479  	lState := makeFBOLockState()
   480  	_, err = ops.getMDForRead(ctx, lState, mdReadNeedIdentify)
   481  	require.NoError(t, err)
   482  	assert.True(t, fboIdentityDone(ops))
   483  }
   484  
   485  func TestKBFSOpsGetRootNodeReIdentify(t *testing.T) {
   486  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
   487  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
   488  
   489  	_, id, rmd := injectNewRMD(t, config)
   490  	rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
   491  	rmd.data.Dir.Type = data.Dir
   492  
   493  	ops := getOps(config, id)
   494  	assert.False(t, fboIdentityDone(ops))
   495  
   496  	n, ei, h, err := ops.getRootNode(ctx)
   497  	require.NoError(t, err)
   498  	assert.False(t, fboIdentityDone(ops))
   499  
   500  	p := ops.nodeCache.PathFromNode(n)
   501  	assert.Equal(t, id, p.Tlf)
   502  	require.Equal(t, 1, len(p.Path))
   503  	assert.Equal(t, rmd.data.Dir.ID, p.Path[0].ID)
   504  	assert.Equal(t, rmd.data.Dir.EntryInfo, ei)
   505  	assert.Equal(t, rmd.GetTlfHandle(), h)
   506  
   507  	// Trigger identify.
   508  	lState := makeFBOLockState()
   509  	_, err = ops.getMDForRead(ctx, lState, mdReadNeedIdentify)
   510  	require.NoError(t, err)
   511  	assert.True(t, fboIdentityDone(ops))
   512  
   513  	// Mark everything for reidentifying, and wait for it to finish
   514  	// before checking.
   515  	kop := config.KBFSOps().(*KBFSOpsStandard)
   516  	returnCh := make(chan struct{})
   517  	kop.reIdentifyControlChan <- returnCh
   518  	<-returnCh
   519  	assert.False(t, fboIdentityDone(ops))
   520  
   521  	// Trigger new identify.
   522  	lState = makeFBOLockState()
   523  	_, err = ops.getMDForRead(ctx, lState, mdReadNeedIdentify)
   524  	require.NoError(t, err)
   525  	assert.True(t, fboIdentityDone(ops))
   526  }
   527  
   528  // fboIdentityDone is needed to avoid data races.
   529  func fboIdentityDone(fbo *folderBranchOps) bool {
   530  	fbo.identifyLock.Lock()
   531  	defer fbo.identifyLock.Unlock()
   532  	return fbo.identifyDone
   533  }
   534  
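         // failIdentifyKBPKI wraps a KBPKI and fails every Identify call
         // with identifyErr.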
   535  type failIdentifyKBPKI struct {
   536  	KBPKI
   537  	identifyErr error
   538  }
   539  
   540  func (kbpki failIdentifyKBPKI) Identify(
   541  	ctx context.Context, assertion, reason string,
   542  	_ keybase1.OfflineAvailability) (
   543  	kbname.NormalizedUsername, keybase1.UserOrTeamID, error) {
   544  	return kbname.NormalizedUsername(""), keybase1.UserOrTeamID(""),
   545  		kbpki.identifyErr
   546  }
   547  
   548  func TestKBFSOpsGetRootNodeCacheIdentifyFail(t *testing.T) {
   549  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
   550  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
   551  
   552  	_, id, rmd := injectNewRMD(t, config)
   553  
   554  	rmd.data.Dir.BlockPointer.ID = kbfsblock.FakeID(1)
   555  	rmd.data.Dir.Type = data.Dir
   556  
   557  	ops := getOps(config, id)
   558  
   559  	expectedErr := errors.New("Identify failure")
   560  	config.SetKBPKI(failIdentifyKBPKI{config.KBPKI(), expectedErr})
   561  
   562  	// Trigger identify.
   563  	lState := makeFBOLockState()
   564  	_, err := ops.getMDForRead(ctx, lState, mdReadNeedIdentify)
   565  	assert.Equal(t, expectedErr, err)
   566  	assert.False(t, fboIdentityDone(ops))
   567  }
   568  
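         // expectBlock arranges for the mock BlockOps to serve block (and
         // return err) for a single Get of blockPtr, placing the block in
         // the clean block cache as a side effect.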
   569  func expectBlock(config *ConfigMock, kmd libkey.KeyMetadata, blockPtr data.BlockPointer, block data.Block, err error) {
   570  	config.mockBops.EXPECT().Get(gomock.Any(), kmdMatcher{kmd},
   571  		ptrMatcher{blockPtr}, gomock.Any(), gomock.Any(), gomock.Any()).
   572  		Do(func(ctx context.Context, kmd libkey.KeyMetadata,
   573  			blockPtr data.BlockPointer, getBlock data.Block,
   574  			lifetime data.BlockCacheLifetime, _ data.BranchName) {
   575  			getBlock.Set(block)
   576  			_ = config.BlockCache().Put(
   577  				blockPtr, kmd.TlfID(), getBlock, lifetime, data.DoCacheHash)
   578  		}).Return(err)
   579  }
   580  
   581  // ptrMatcher implements the gomock.Matcher interface to compare
   582  // BlockPointer objects. We don't care about some of the fields in a
   583  // pointer for the purposes of these tests.
   584  type ptrMatcher struct {
   585  	ptr data.BlockPointer
   586  }
   587  
   588  // Matches implements the Matcher interface for ptrMatcher.
   589  func (p ptrMatcher) Matches(x interface{}) bool {
   590  	xPtr, ok := x.(data.BlockPointer)
   591  	if !ok {
   592  		return false
   593  	}
   594  	return (xPtr.ID == p.ptr.ID && xPtr.RefNonce == p.ptr.RefNonce)
   595  }
   596  
   597  // String implements the Matcher interface for ptrMatcher.
   598  func (p ptrMatcher) String() string {
   599  	return fmt.Sprintf("Matches BlockPointer %v", p.ptr)
   600  }
   601  
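         // fillInNewMD gives rmd a fake initial rekey (for non-public TLFs)
         // and a fake root directory entry, mimicking a newly-created TLF.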
   602  func fillInNewMD(t *testing.T, config *ConfigMock, rmd *RootMetadata) {
   603  	if rmd.TypeForKeying() != tlf.PublicKeying {
   604  		rmd.fakeInitialRekey()
   605  	}
   606  	rootPtr := data.BlockPointer{
   607  		ID:      kbfsblock.FakeID(42),
   608  		KeyGen:  kbfsmd.FirstValidKeyGen,
   609  		DataVer: 1,
   610  	}
   611  
   612  	rmd.data.Dir = data.DirEntry{
   613  		BlockInfo: data.BlockInfo{
   614  			BlockPointer: rootPtr,
   615  			EncodedSize:  5,
   616  		},
   617  		EntryInfo: data.EntryInfo{
   618  			Type: data.Dir,
   619  			Size: 3,
   620  		},
   621  	}
   622  }
   623  
   624  func testKBFSOpsGetRootNodeCreateNewSuccess(t *testing.T, ty tlf.Type) {
   625  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
   626  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
   627  
   628  	id, _, rmd := createNewRMD(t, config, "alice", ty)
   629  	fillInNewMD(t, config, rmd)
   630  
   631  	// create a new MD
   632  	config.mockMdops.EXPECT().GetUnmergedForTLF(
   633  		gomock.Any(), id, gomock.Any()).Return(ImmutableRootMetadata{}, nil)
   634  	irmd := makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
   635  	config.mockMdops.EXPECT().GetForTLF(gomock.Any(), id, nil).Return(irmd, nil)
   636  
   637  	ops := getOps(config, id)
   638  	assert.False(t, fboIdentityDone(ops))
   639  	n, ei, h, err := ops.getRootNode(ctx)
   640  	require.NoError(t, err)
   641  	assert.True(t, fboIdentityDone(ops))
   642  
   643  	p := ops.nodeCache.PathFromNode(n)
   644  	require.Equal(t, id, p.Tlf)
   645  	require.Equal(t, 1, len(p.Path))
   646  	require.Equal(t, rmd.data.Dir.ID, p.Path[0].ID)
   647  	require.Equal(t, rmd.data.Dir.EntryInfo, ei)
   648  	require.Equal(t, rmd.GetTlfHandle(), h)
   649  }
   650  
   651  func TestKBFSOpsGetRootNodeCreateNewSuccessPublic(t *testing.T) {
   652  	testKBFSOpsGetRootNodeCreateNewSuccess(t, tlf.Public)
   653  }
   654  
   655  func TestKBFSOpsGetRootNodeCreateNewSuccessPrivate(t *testing.T) {
   656  	testKBFSOpsGetRootNodeCreateNewSuccess(t, tlf.Private)
   657  }
   658  
   659  func TestKBFSOpsGetRootMDForHandleExisting(t *testing.T) {
   660  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
   661  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
   662  
   663  	id, h, rmd := createNewRMD(t, config, "alice", tlf.Private)
   664  	rmd.data.Dir = data.DirEntry{
   665  		BlockInfo: data.BlockInfo{
   666  			BlockPointer: data.BlockPointer{
   667  				ID: kbfsblock.FakeID(1),
   668  			},
   669  			EncodedSize: 15,
   670  		},
   671  		EntryInfo: data.EntryInfo{
   672  			Type:  data.Dir,
   673  			Size:  10,
   674  			Mtime: 1,
   675  			Ctime: 2,
   676  		},
   677  	}
   678  
   679  	ops := getOps(config, id)
   680  	assert.False(t, fboIdentityDone(ops))
   681  
   682  	ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(2))
   683  	ops.headStatus = headTrusted
   684  	n, ei, err :=
   685  		config.KBFSOps().GetOrCreateRootNode(ctx, h, data.MasterBranch)
   686  	require.NoError(t, err)
   687  	assert.True(t, fboIdentityDone(ops))
   688  
   689  	p := ops.nodeCache.PathFromNode(n)
   690  	switch {
   691  	case p.Tlf != id:
   692  		t.Errorf("Got bad dir id back: %v", p.Tlf)
   693  	case len(p.Path) != 1:
   694  		t.Errorf("Got bad MD back: path size %d", len(p.Path))
   695  	case p.Path[0].ID != rmd.data.Dir.ID:
   696  		t.Errorf("Got bad MD back: root ID %v", p.Path[0].ID)
   697  	case ei.Type != data.Dir:
   698  		t.Error("Got bad MD non-dir rootID back")
   699  	case ei.Size != 10:
   700  		t.Errorf("Got bad MD Size back: %d", ei.Size)
   701  	case ei.Mtime != 1:
   702  		t.Errorf("Got bad MD MTime back: %d", ei.Mtime)
   703  	case ei.Ctime != 2:
   704  		t.Errorf("Got bad MD CTime back: %d", ei.Ctime)
   705  	}
   706  }
   707  
    708  // rmd should really be a ReadOnlyRootMetadata or *BareRootMetadata in
    709  // the helper functions below, but all the callers would have to call
    710  // md.ReadOnly(), which doesn't buy us much in tests.
   711  
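         // makeBP builds a test BlockPointer for the given block ID, keyed
         // to kmd's latest key generation and attributed to u as creator.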
   712  func makeBP(id kbfsblock.ID, kmd libkey.KeyMetadata, config Config,
   713  	u keybase1.UserOrTeamID) data.BlockPointer {
   714  	return data.BlockPointer{
   715  		ID:      id,
   716  		KeyGen:  kmd.LatestKeyGeneration(),
   717  		DataVer: data.DefaultNewBlockDataVersion(false),
   718  		Context: kbfsblock.Context{
   719  			Creator: u,
   720  			// Refnonces not needed; explicit refnonce
   721  			// testing happens elsewhere.
   722  		},
   723  	}
   724  }
   725  
   726  func makeIFP(id kbfsblock.ID, kmd libkey.KeyMetadata, config Config,
   727  	u keybase1.UserOrTeamID, encodedSize uint32,
   728  	off data.Int64Offset) data.IndirectFilePtr {
   729  	return data.IndirectFilePtr{
   730  		BlockInfo: data.BlockInfo{
   731  			BlockPointer: makeBP(id, kmd, config, u),
   732  			EncodedSize:  encodedSize,
   733  		},
   734  		Off:   off,
   735  		Holes: false,
   736  	}
   737  }
   738  
   739  func makeBIFromID(id kbfsblock.ID, user keybase1.UserOrTeamID) data.BlockInfo {
   740  	return data.BlockInfo{
   741  		BlockPointer: data.BlockPointer{
   742  			ID: id, KeyGen: kbfsmd.FirstValidKeyGen, DataVer: 1,
   743  			Context: kbfsblock.Context{
   744  				Creator: user,
   745  			},
   746  		},
   747  		EncodedSize: 1,
   748  	}
   749  }
   750  
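         // nodeFromPath populates the node cache with a node for each
         // element of p and returns the node for the final element.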
   751  func nodeFromPath(t *testing.T, ops *folderBranchOps, p data.Path) Node {
   752  	var prevNode Node
   753  	// populate the node cache with all the nodes we'll need
   754  	for _, pathNode := range p.Path {
    755  		n, err := ops.nodeCache.GetOrCreate(pathNode.BlockPointer,
    756  			pathNode.Name, prevNode, data.Dir)
   759  		if err != nil {
   760  			t.Fatal(err)
   761  		}
   762  		prevNode = n
   763  	}
   764  	return prevNode
   765  }
   766  
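         // testPutBlockInCache stores block in the clean block cache and,
         // if a mock cache is in use, registers a matching Get expectation.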
   767  func testPutBlockInCache(
   768  	t *testing.T, config *ConfigMock, ptr data.BlockPointer, id tlf.ID,
   769  	block data.Block) {
   770  	err := config.BlockCache().Put(
   771  		ptr, id, block, data.TransientEntry, data.DoCacheHash)
   772  	require.NoError(t, err)
   773  	if config.mockBcache != nil {
   774  		config.mockBcache.EXPECT().Get(ptr).AnyTimes().Return(block, nil)
   775  	}
   776  }
   777  
   778  func TestKBFSOpsGetBaseDirChildrenHidesFiles(t *testing.T) {
   779  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
   780  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
   781  
   782  	u, id, rmd := injectNewRMD(t, config)
   783  
   784  	rootID := kbfsblock.FakeID(42)
   785  	dirBlock := data.NewDirBlock().(*data.DirBlock)
   786  	dirBlock.Children["a"] = data.DirEntry{EntryInfo: data.EntryInfo{Type: data.File}}
   787  	dirBlock.Children[".kbfs_git"] = data.DirEntry{EntryInfo: data.EntryInfo{Type: data.Dir}}
   788  	blockPtr := makeBP(rootID, rmd, config, u)
   789  	rmd.data.Dir.BlockPointer = blockPtr
   790  	node := data.PathNode{BlockPointer: blockPtr, Name: testPPS("p")}
   791  	p := data.Path{
   792  		FolderBranch: data.FolderBranch{Tlf: id},
   793  		Path:         []data.PathNode{node},
   794  	}
   795  	testPutBlockInCache(t, config, node.BlockPointer, id, dirBlock)
   796  	ops := getOps(config, id)
   797  	n := nodeFromPath(t, ops, p)
   798  
   799  	children, err := config.KBFSOps().GetDirChildren(ctx, n)
   800  	if err != nil {
   801  		t.Errorf("Got error on getdir: %+v", err)
   802  	} else if len(children) != 1 {
   803  		t.Errorf("Got bad children back: %v", children)
   804  	}
   805  	for c, ei := range children {
   806  		if de, ok := dirBlock.Children[c.Plaintext()]; !ok {
   807  			t.Errorf("No such child: %s", c)
   808  		} else if !de.EntryInfo.Eq(ei) {
   809  			t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
   810  		}
   811  	}
   812  }
   813  
   814  func TestKBFSOpsGetBaseDirChildrenCacheSuccess(t *testing.T) {
   815  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
   816  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
   817  
   818  	u, id, rmd := injectNewRMD(t, config)
   819  
   820  	rootID := kbfsblock.FakeID(42)
   821  	dirBlock := data.NewDirBlock().(*data.DirBlock)
   822  	dirBlock.Children["a"] = data.DirEntry{EntryInfo: data.EntryInfo{Type: data.File}}
   823  	dirBlock.Children["b"] = data.DirEntry{EntryInfo: data.EntryInfo{Type: data.Dir}}
   824  	blockPtr := makeBP(rootID, rmd, config, u)
   825  	rmd.data.Dir.BlockPointer = blockPtr
   826  	node := data.PathNode{BlockPointer: blockPtr, Name: testPPS("p")}
   827  	p := data.Path{
   828  		FolderBranch: data.FolderBranch{Tlf: id},
   829  		Path:         []data.PathNode{node},
   830  	}
   831  	testPutBlockInCache(t, config, node.BlockPointer, id, dirBlock)
   832  	ops := getOps(config, id)
   833  	n := nodeFromPath(t, ops, p)
   834  
   835  	children, err := config.KBFSOps().GetDirChildren(ctx, n)
   836  	if err != nil {
   837  		t.Errorf("Got error on getdir: %+v", err)
   838  	} else if len(children) != 2 {
   839  		t.Errorf("Got bad children back: %v", children)
   840  	}
   841  	for c, ei := range children {
   842  		if de, ok := dirBlock.Children[c.Plaintext()]; !ok {
   843  			t.Errorf("No such child: %s", c)
   844  		} else if !de.EntryInfo.Eq(ei) {
   845  			t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
   846  		}
   847  	}
   848  }
   849  
   850  func TestKBFSOpsGetBaseDirChildrenUncachedSuccess(t *testing.T) {
   851  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
   852  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
   853  
   854  	u, id, rmd := injectNewRMD(t, config)
   855  
   856  	rootID := kbfsblock.FakeID(42)
   857  	dirBlock := data.NewDirBlock().(*data.DirBlock)
   858  	blockPtr := makeBP(rootID, rmd, config, u)
   859  	rmd.data.Dir.BlockPointer = blockPtr
   860  	node := data.PathNode{BlockPointer: blockPtr, Name: testPPS("p")}
   861  	p := data.Path{
   862  		FolderBranch: data.FolderBranch{Tlf: id},
   863  		Path:         []data.PathNode{node},
   864  	}
   865  	ops := getOps(config, id)
   866  	n := nodeFromPath(t, ops, p)
   867  
   868  	// cache miss means fetching metadata and getting read key
   869  	expectBlock(config, rmd, blockPtr, dirBlock, nil)
   870  
   871  	if _, err := config.KBFSOps().GetDirChildren(ctx, n); err != nil {
   872  		t.Errorf("Got error on getdir: %+v", err)
   873  	}
   874  }
   875  
   876  func TestKBFSOpsGetBaseDirChildrenUncachedFailNonReader(t *testing.T) {
   877  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
   878  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
   879  
   880  	id := tlf.FakeID(1, tlf.Private)
   881  
   882  	h := parseTlfHandleOrBust(t, config, "bob#alice", tlf.Private, id)
   883  	h.SetTlfID(id)
   884  	// Hack around access check in ParseTlfHandle.
   885  	h.ClearResolvedReaders()
   886  
   887  	rmd, err := makeInitialRootMetadata(config.MetadataVersion(), id, h)
   888  	require.NoError(t, err)
   889  
   890  	session, err := config.KBPKI().GetCurrentSession(ctx)
   891  	if err != nil {
   892  		t.Fatal(err)
   893  	}
   894  
   895  	rootID := kbfsblock.FakeID(42)
   896  	node := data.PathNode{
   897  		BlockPointer: makeBP(rootID, rmd, config, session.UID.AsUserOrTeam()),
   898  		Name:         testPPS("p"),
   899  	}
   900  	p := data.Path{
   901  		FolderBranch: data.FolderBranch{Tlf: id},
   902  		Path:         []data.PathNode{node},
   903  	}
   904  
   905  	// won't even try getting the block if the user isn't a reader
   906  
   907  	ops := getOps(config, id)
   908  	n := nodeFromPath(t, ops, p)
   909  	ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
   910  	ops.headStatus = headTrusted
   911  	expectedErr := tlfhandle.NewReadAccessError(
   912  		h, "alice", "/keybase/private/bob#alice")
   913  
   914  	if _, err := config.KBFSOps().GetDirChildren(ctx, n); err == nil {
   915  		t.Errorf("Got no expected error on getdir")
   916  	} else if err != expectedErr {
   917  		t.Errorf("Got unexpected error on root MD: %+v", err)
   918  	}
   919  }
   920  
   921  func TestKBFSOpsGetBaseDirChildrenUncachedFailMissingBlock(t *testing.T) {
   922  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
   923  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
   924  
   925  	u, id, rmd := injectNewRMD(t, config)
   926  
   927  	rootID := kbfsblock.FakeID(42)
   928  	dirBlock := data.NewDirBlock().(*data.DirBlock)
   929  	blockPtr := makeBP(rootID, rmd, config, u)
   930  	rmd.data.Dir.BlockPointer = blockPtr
   931  	node := data.PathNode{BlockPointer: blockPtr, Name: testPPS("p")}
   932  	p := data.Path{
   933  		FolderBranch: data.FolderBranch{Tlf: id},
   934  		Path:         []data.PathNode{node},
   935  	}
   936  	ops := getOps(config, id)
   937  	n := nodeFromPath(t, ops, p)
   938  
   939  	// cache miss means fetching metadata and getting read key, then
   940  	// fail block fetch
   941  	err := data.NoSuchBlockError{ID: rootID}
   942  	expectBlock(config, rmd, blockPtr, dirBlock, err)
   943  
   944  	if _, err2 := config.KBFSOps().GetDirChildren(ctx, n); err2 == nil {
   945  		t.Errorf("Got no expected error on getdir")
   946  	} else if err2 != err {
    947  		t.Errorf("Got unexpected error on root MD: %+v", err2)
   948  	}
   949  }
   950  
   951  func TestKBFSOpsGetNestedDirChildrenCacheSuccess(t *testing.T) {
   952  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
   953  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
   954  
   955  	id, h, rmd := createNewRMD(t, config, "alice", tlf.Private)
   956  
   957  	ops := getOps(config, id)
   958  	ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
   959  	ops.headStatus = headTrusted
   960  
   961  	u := h.FirstResolvedWriter()
   962  
   963  	rootID := kbfsblock.FakeID(42)
   964  	aID := kbfsblock.FakeID(43)
   965  	bID := kbfsblock.FakeID(44)
   966  	dirBlock := data.NewDirBlock().(*data.DirBlock)
   967  	dirBlock.Children["a"] = data.DirEntry{EntryInfo: data.EntryInfo{Type: data.Exec}}
   968  	dirBlock.Children["b"] = data.DirEntry{EntryInfo: data.EntryInfo{Type: data.Sym}}
   969  	blockPtr := makeBP(rootID, rmd, config, u)
   970  	rmd.data.Dir.BlockPointer = blockPtr
   971  	node := data.PathNode{BlockPointer: blockPtr, Name: testPPS("p")}
   972  	aNode := data.PathNode{
   973  		BlockPointer: makeBP(aID, rmd, config, u), Name: testPPS("a")}
   974  	bNode := data.PathNode{
   975  		BlockPointer: makeBP(bID, rmd, config, u), Name: testPPS("b")}
   976  	p := data.Path{
   977  		FolderBranch: data.FolderBranch{Tlf: id},
   978  		Path:         []data.PathNode{node, aNode, bNode},
   979  	}
   980  	n := nodeFromPath(t, ops, p)
   981  
   982  	testPutBlockInCache(t, config, bNode.BlockPointer, id, dirBlock)
   983  
   984  	children, err := config.KBFSOps().GetDirChildren(ctx, n)
   985  	if err != nil {
   986  		t.Errorf("Got error on getdir: %+v", err)
   987  	} else if len(children) != 2 {
   988  		t.Errorf("Got bad children back: %v", children)
   989  	}
   990  
   991  	for c, ei := range children {
   992  		if de, ok := dirBlock.Children[c.Plaintext()]; !ok {
   993  			t.Errorf("No such child: %s", c)
   994  		} else if !de.EntryInfo.Eq(ei) {
   995  			t.Errorf("Wrong EntryInfo for child %s: %v", c, ei)
   996  		}
   997  	}
   998  }
   999  
  1000  func TestKBFSOpsLookupSuccess(t *testing.T) {
  1001  	t.Skip("Broken test since Go 1.12.4 due to extra pending requests after test termination. Panic: unable to shutdown block ops.")
  1002  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1003  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1004  
  1005  	id, h, rmd := createNewRMD(t, config, "alice", tlf.Private)
  1006  
  1007  	ops := getOps(config, id)
  1008  	ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
  1009  	ops.headStatus = headTrusted
  1010  
  1011  	u := h.FirstResolvedWriter()
  1012  
  1013  	rootID := kbfsblock.FakeID(42)
  1014  	aID := kbfsblock.FakeID(43)
  1015  	bID := kbfsblock.FakeID(44)
  1016  	dirBlock := data.NewDirBlock().(*data.DirBlock)
  1017  	dirBlock.Children["b"] = data.DirEntry{
  1018  		BlockInfo: makeBIFromID(bID, u),
  1019  		EntryInfo: data.EntryInfo{
  1020  			Type: data.Dir,
  1021  		},
  1022  	}
  1023  	node := data.PathNode{
  1024  		BlockPointer: makeBP(rootID, rmd, config, u),
  1025  		Name:         testPPS("p"),
  1026  	}
  1027  	aNode := data.PathNode{
  1028  		BlockPointer: makeBP(aID, rmd, config, u), Name: testPPS("a")}
  1029  	p := data.Path{
  1030  		FolderBranch: data.FolderBranch{Tlf: id},
  1031  		Path:         []data.PathNode{node, aNode},
  1032  	}
  1033  	n := nodeFromPath(t, ops, p)
  1034  
  1035  	testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
  1036  
  1037  	bn, ei, err := config.KBFSOps().Lookup(ctx, n, testPPS("b"))
  1038  	if err != nil {
  1039  		t.Errorf("Error on Lookup: %+v", err)
  1040  	}
  1041  	bPath := ops.nodeCache.PathFromNode(bn)
  1042  	expectedBNode := data.PathNode{
  1043  		BlockPointer: makeBP(bID, rmd, config, u),
  1044  		Name:         testPPS("b"),
  1045  	}
  1046  	expectedBNode.KeyGen = kbfsmd.FirstValidKeyGen
  1047  	if !ei.Eq(dirBlock.Children["b"].EntryInfo) {
  1048  		t.Errorf("Lookup returned a bad entry info: %v vs %v",
  1049  			ei, dirBlock.Children["b"].EntryInfo)
  1050  	} else if bPath.Path[2] != expectedBNode {
  1051  		t.Errorf("Bad path node after lookup: %v vs %v",
  1052  			bPath.Path[2], expectedBNode)
  1053  	}
  1054  }
  1055  
  1056  func TestKBFSOpsLookupSymlinkSuccess(t *testing.T) {
  1057  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1058  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1059  
  1060  	id, h, rmd := createNewRMD(t, config, "alice", tlf.Private)
  1061  
  1062  	ops := getOps(config, id)
  1063  	ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
  1064  	ops.headStatus = headTrusted
  1065  
  1066  	u := h.FirstResolvedWriter()
  1067  	rootID := kbfsblock.FakeID(42)
  1068  	aID := kbfsblock.FakeID(43)
  1069  	bID := kbfsblock.FakeID(44)
  1070  	dirBlock := data.NewDirBlock().(*data.DirBlock)
  1071  	dirBlock.Children["b"] = data.DirEntry{
  1072  		BlockInfo: makeBIFromID(bID, u),
  1073  		EntryInfo: data.EntryInfo{
  1074  			Type: data.Sym,
  1075  		},
  1076  	}
  1077  	node := data.PathNode{
  1078  		BlockPointer: makeBP(rootID, rmd, config, u),
  1079  		Name:         testPPS("p"),
  1080  	}
  1081  	aNode := data.PathNode{
  1082  		BlockPointer: makeBP(aID, rmd, config, u),
  1083  		Name:         testPPS("a"),
  1084  	}
  1085  	p := data.Path{
  1086  		FolderBranch: data.FolderBranch{Tlf: id},
  1087  		Path:         []data.PathNode{node, aNode},
  1088  	}
  1089  	n := nodeFromPath(t, ops, p)
  1090  
  1091  	testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
  1092  
  1093  	bn, ei, err := config.KBFSOps().Lookup(ctx, n, testPPS("b"))
  1094  	if err != nil {
  1095  		t.Errorf("Error on Lookup: %+v", err)
  1096  	}
  1097  	if !ei.Eq(dirBlock.Children["b"].EntryInfo) {
  1098  		t.Errorf("Lookup returned a bad directory entry: %v vs %v",
  1099  			ei, dirBlock.Children["b"].EntryInfo)
  1100  	} else if bn != nil {
  1101  		t.Errorf("Node for symlink is not nil: %v", bn)
  1102  	}
  1103  }
  1104  
  1105  func TestKBFSOpsLookupNoSuchNameFail(t *testing.T) {
  1106  	t.Skip("Broken test since Go 1.12.4 due to extra pending requests after test termination. Panic: unable to shutdown block ops.")
  1107  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1108  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1109  
  1110  	id, h, rmd := createNewRMD(t, config, "alice", tlf.Private)
  1111  
  1112  	ops := getOps(config, id)
  1113  	ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
  1114  	ops.headStatus = headTrusted
  1115  
  1116  	u := h.FirstResolvedWriter()
  1117  	rootID := kbfsblock.FakeID(42)
  1118  	aID := kbfsblock.FakeID(43)
  1119  	bID := kbfsblock.FakeID(44)
  1120  	dirBlock := data.NewDirBlock().(*data.DirBlock)
  1121  	dirBlock.Children["b"] = data.DirEntry{
  1122  		BlockInfo: makeBIFromID(bID, u),
  1123  		EntryInfo: data.EntryInfo{
  1124  			Type: data.Dir,
  1125  		},
  1126  	}
  1127  	node := data.PathNode{
  1128  		BlockPointer: makeBP(rootID, rmd, config, u),
  1129  		Name:         testPPS("p"),
  1130  	}
  1131  	aNode := data.PathNode{
  1132  		BlockPointer: makeBP(aID, rmd, config, u),
  1133  		Name:         testPPS("a"),
  1134  	}
  1135  	p := data.Path{
  1136  		FolderBranch: data.FolderBranch{Tlf: id},
  1137  		Path:         []data.PathNode{node, aNode},
  1138  	}
  1139  	n := nodeFromPath(t, ops, p)
  1140  
  1141  	testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
  1142  
  1143  	expectedErr := idutil.NoSuchNameError{Name: "c"}
  1144  	_, _, err := config.KBFSOps().Lookup(ctx, n, testPPS("c"))
  1145  	if err == nil {
  1146  		t.Error("No error as expected on Lookup")
  1147  	} else if err != expectedErr {
  1148  		t.Errorf("Unexpected error after bad Lookup: %+v", err)
  1149  	}
  1150  }
  1151  
  1152  func TestKBFSOpsReadNewDataVersionFail(t *testing.T) {
  1153  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1154  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1155  
  1156  	id, h, rmd := createNewRMD(t, config, "alice", tlf.Private)
  1157  
  1158  	ops := getOps(config, id)
  1159  	ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
  1160  	ops.headStatus = headTrusted
  1161  
  1162  	u := h.FirstResolvedWriter()
  1163  	rootID := kbfsblock.FakeID(42)
  1164  	aID := kbfsblock.FakeID(43)
  1165  	bID := kbfsblock.FakeID(44)
  1166  	dirBlock := data.NewDirBlock().(*data.DirBlock)
  1167  	bInfo := makeBIFromID(bID, u)
  1168  	bInfo.DataVer = 10
  1169  	dirBlock.Children["b"] = data.DirEntry{
  1170  		BlockInfo: bInfo,
  1171  		EntryInfo: data.EntryInfo{
  1172  			Type: data.Dir,
  1173  		},
  1174  	}
  1175  	node := data.PathNode{
  1176  		BlockPointer: makeBP(rootID, rmd, config, u),
  1177  		Name:         testPPS("p"),
  1178  	}
  1179  	aNode := data.PathNode{
  1180  		BlockPointer: makeBP(aID, rmd, config, u),
  1181  		Name:         testPPS("a"),
  1182  	}
  1183  	bNode := data.PathNode{
  1184  		BlockPointer: makeBP(bID, rmd, config, u),
  1185  		Name:         testPPS("b"),
  1186  	}
  1187  	p := data.Path{
  1188  		FolderBranch: data.FolderBranch{Tlf: id},
  1189  		Path:         []data.PathNode{node, aNode},
  1190  	}
  1191  	n := nodeFromPath(t, ops, p)
  1192  
  1193  	testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
  1194  	expectedErr := &NewDataVersionError{
  1195  		data.Path{
  1196  			FolderBranch: data.FolderBranch{Tlf: id},
  1197  			Path:         []data.PathNode{node, aNode, bNode},
  1198  		},
  1199  		bInfo.DataVer,
  1200  	}
  1201  
  1202  	n, _, err := config.KBFSOps().Lookup(ctx, n, testPPS("b"))
  1203  	if err != nil {
  1204  		t.Error("Unexpected error found on lookup")
  1205  	}
  1206  
  1207  	buf := make([]byte, 1)
  1208  	_, err = config.KBFSOps().Read(ctx, n, buf, 0)
  1209  	if err == nil {
  1210  		t.Error("No expected error found on read")
  1211  	} else if err.Error() != expectedErr.Error() {
  1212  		t.Errorf("Unexpected error after bad read: %+v", err)
  1213  	}
  1214  }
  1215  
  1216  func TestKBFSOpsStatSuccess(t *testing.T) {
  1217  	t.Skip("Broken test since Go 1.12.4 due to extra pending requests after test termination. Panic: unable to shutdown prefetcher.")
  1218  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1219  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1220  
  1221  	id, h, rmd := createNewRMD(t, config, "alice", tlf.Private)
  1222  
  1223  	ops := getOps(config, id)
  1224  	ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
  1225  	ops.headStatus = headTrusted
  1226  
  1227  	u := h.FirstResolvedWriter()
  1228  	rootID := kbfsblock.FakeID(42)
  1229  	aID := kbfsblock.FakeID(43)
  1230  	bID := kbfsblock.FakeID(44)
  1231  	dirBlock := data.NewDirBlock().(*data.DirBlock)
  1232  	dirBlock.Children["b"] = data.DirEntry{
  1233  		BlockInfo: makeBIFromID(bID, u),
  1234  		EntryInfo: data.EntryInfo{
  1235  			Type: data.Dir,
  1236  		},
  1237  	}
  1238  	node := data.PathNode{
  1239  		BlockPointer: makeBP(rootID, rmd, config, u),
  1240  		Name:         testPPS("p"),
  1241  	}
  1242  	aNode := data.PathNode{
  1243  		BlockPointer: makeBP(aID, rmd, config, u),
  1244  		Name:         testPPS("a"),
  1245  	}
  1246  	bNode := data.PathNode{
  1247  		BlockPointer: dirBlock.Children["b"].BlockPointer,
  1248  		Name:         testPPS("b"),
  1249  	}
  1250  	p := data.Path{
  1251  		FolderBranch: data.FolderBranch{Tlf: id},
  1252  		Path:         []data.PathNode{node, aNode, bNode},
  1253  	}
  1254  	n := nodeFromPath(t, ops, p)
  1255  
  1256  	testPutBlockInCache(t, config, aNode.BlockPointer, id, dirBlock)
  1257  
  1258  	ei, err := config.KBFSOps().Stat(ctx, n)
  1259  	if err != nil {
  1260  		t.Errorf("Error on Stat: %+v", err)
  1261  	}
  1262  	if !ei.Eq(dirBlock.Children["b"].EntryInfo) {
  1263  		t.Errorf("Stat returned a bad entry info: %v vs %v",
  1264  			ei, dirBlock.Children["b"].EntryInfo)
  1265  	}
  1266  }
  1267  
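         // getBlockFromCache looks up ptr in the dirty block cache first and
         // falls back to the clean block cache, failing the test if the
         // block is in neither.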
  1268  func getBlockFromCache(
  1269  	ctx context.Context, t *testing.T, config Config, id tlf.ID,
  1270  	ptr data.BlockPointer, branch data.BranchName) data.Block {
  1271  	if block, err := config.DirtyBlockCache().Get(
  1272  		ctx, id, ptr, branch); err == nil {
  1273  		return block
  1274  	}
  1275  	block, err := config.BlockCache().Get(ptr)
  1276  	if err != nil {
  1277  		t.Errorf("Couldn't find block %v, branch %s in the cache after test: "+
  1278  			"%+v", ptr, branch, err)
  1279  		return nil
  1280  	}
  1281  	return block
  1282  }
  1283  
  1284  func getDirBlockFromCache(
  1285  	ctx context.Context, t *testing.T, config Config, id tlf.ID,
  1286  	ptr data.BlockPointer, branch data.BranchName) *data.DirBlock {
  1287  	block := getBlockFromCache(ctx, t, config, id, ptr, branch)
  1288  	dblock, ok := block.(*data.DirBlock)
  1289  	if !ok {
  1290  		t.Errorf("Cached block %v, branch %s was not a DirBlock", ptr, branch)
  1291  	}
  1292  	return dblock
  1293  }
  1294  
  1295  func getFileBlockFromCache(
  1296  	ctx context.Context, t *testing.T, config Config, id tlf.ID,
  1297  	ptr data.BlockPointer, branch data.BranchName) *data.FileBlock {
  1298  	block := getBlockFromCache(ctx, t, config, id, ptr, branch)
  1299  	fblock, ok := block.(*data.FileBlock)
  1300  	if !ok {
  1301  		t.Errorf("Cached block %v, branch %s was not a FileBlock", ptr, branch)
  1302  	}
  1303  	return fblock
  1304  }
  1305  
  1306  func testCreateEntryFailDupName(t *testing.T, isDir bool) {
  1307  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1308  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1309  
  1310  	u, id, rmd := injectNewRMD(t, config)
  1311  
  1312  	rootID := kbfsblock.FakeID(42)
  1313  	aID := kbfsblock.FakeID(43)
  1314  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  1315  	rootBlock.Children["a"] = data.DirEntry{
  1316  		BlockInfo: makeBIFromID(aID, u),
  1317  		EntryInfo: data.EntryInfo{
  1318  			Type: data.Dir,
  1319  		},
  1320  	}
  1321  	node := data.PathNode{
  1322  		BlockPointer: makeBP(rootID, rmd, config, u),
  1323  		Name:         testPPS("p"),
  1324  	}
  1325  	p := data.Path{
  1326  		FolderBranch: data.FolderBranch{Tlf: id},
  1327  		Path:         []data.PathNode{node},
  1328  	}
  1329  	ops := getOps(config, id)
  1330  	n := nodeFromPath(t, ops, p)
  1331  
  1332  	// creating "a", which already exists in the root block
  1333  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  1334  	expectedErr := data.NameExistsError{Name: "a"}
  1335  
  1336  	var err error
  1337  	// dir and link have different checks for dup name
  1338  	if isDir {
  1339  		_, _, err = config.KBFSOps().CreateDir(ctx, n, testPPS("a"))
  1340  	} else {
  1341  		_, err = config.KBFSOps().CreateLink(ctx, n, testPPS("a"), testPPS("b"))
  1342  	}
  1343  	if err == nil {
  1344  		t.Errorf("Got no expected error on create")
  1345  	} else if err != expectedErr {
  1346  		t.Errorf("Got unexpected error on create: %+v", err)
  1347  	}
  1348  }
  1349  
  1350  func TestCreateDirFailDupName(t *testing.T) {
  1351  	t.Skip("Broken test since Go 1.12.4 due to extra pending requests after test termination. Panic: unable to shutdown prefetcher.")
  1352  	testCreateEntryFailDupName(t, true)
  1353  }
  1354  
  1355  func TestCreateLinkFailDupName(t *testing.T) {
  1356  	t.Skip("Broken test since Go 1.12.4 due to extra pending requests after test termination. Panic: unable to shutdown prefetcher.")
  1357  	testCreateEntryFailDupName(t, false)
  1358  }
  1359  
  1360  func testCreateEntryFailNameTooLong(t *testing.T, isDir bool) {
  1361  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1362  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1363  
  1364  	u, id, rmd := injectNewRMD(t, config)
  1365  
  1366  	rootID := kbfsblock.FakeID(42)
  1367  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  1368  	node := data.PathNode{
  1369  		BlockPointer: makeBP(rootID, rmd, config, u),
  1370  		Name:         testPPS("p"),
  1371  	}
  1372  	p := data.Path{
  1373  		FolderBranch: data.FolderBranch{Tlf: id},
  1374  		Path:         []data.PathNode{node},
  1375  	}
  1376  	ops := getOps(config, id)
  1377  	n := nodeFromPath(t, ops, p)
  1378  
  1379  	config.maxNameBytes = 2
  1380  	name := "aaa"
  1381  
  1382  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  1383  	expectedErr := NameTooLongError{name, config.maxNameBytes}
  1384  
  1385  	var err error
   1386  	// dir and link use different create calls
  1387  	if isDir {
  1388  		_, _, err = config.KBFSOps().CreateDir(ctx, n, testPPS(name))
  1389  	} else {
  1390  		_, err = config.KBFSOps().CreateLink(
  1391  			ctx, n, testPPS(name), testPPS("b"))
  1392  	}
  1393  	if err == nil {
  1394  		t.Errorf("Got no expected error on create")
  1395  	} else if err != expectedErr {
  1396  		t.Errorf("Got unexpected error on create: %+v", err)
  1397  	}
  1398  }
  1399  
  1400  func TestCreateDirFailNameTooLong(t *testing.T) {
  1401  	testCreateEntryFailNameTooLong(t, true)
  1402  }
  1403  
  1404  func TestCreateLinkFailNameTooLong(t *testing.T) {
  1405  	testCreateEntryFailNameTooLong(t, false)
  1406  }
  1407  
  1408  func testCreateEntryFailKBFSPrefix(t *testing.T, et data.EntryType) {
  1409  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1410  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1411  
  1412  	u, id, rmd := injectNewRMD(t, config)
  1413  
  1414  	rootID := kbfsblock.FakeID(42)
  1415  	aID := kbfsblock.FakeID(43)
  1416  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  1417  	rootBlock.Children["a"] = data.DirEntry{
  1418  		BlockInfo: makeBIFromID(aID, u),
  1419  		EntryInfo: data.EntryInfo{
  1420  			Type: data.Dir,
  1421  		},
  1422  	}
  1423  	node := data.PathNode{
  1424  		BlockPointer: makeBP(rootID, rmd, config, u),
  1425  		Name:         testPPS("p"),
  1426  	}
  1427  	p := data.Path{
  1428  		FolderBranch: data.FolderBranch{Tlf: id},
  1429  		Path:         []data.PathNode{node},
  1430  	}
  1431  	ops := getOps(config, id)
  1432  	n := nodeFromPath(t, ops, p)
  1433  
  1434  	name := ".kbfs_status"
  1435  	expectedErr := DisallowedPrefixError{testPPS(name), ".kbfs"}
  1436  
  1437  	var err error
   1438  	// each entry type uses a different create call
  1439  	switch et {
  1440  	case data.Dir:
  1441  		_, _, err = config.KBFSOps().CreateDir(ctx, n, testPPS(name))
  1442  	case data.Sym:
  1443  		_, err = config.KBFSOps().CreateLink(
  1444  			ctx, n, testPPS(name), testPPS("a"))
  1445  	case data.Exec:
  1446  		_, _, err = config.KBFSOps().CreateFile(
  1447  			ctx, n, testPPS(name), true, NoExcl)
  1448  	case data.File:
  1449  		_, _, err = config.KBFSOps().CreateFile(
  1450  			ctx, n, testPPS(name), false, NoExcl)
  1451  	}
  1452  	if err == nil {
  1453  		t.Errorf("Got no expected error on create")
  1454  	} else if errors.Cause(err) != expectedErr {
  1455  		t.Errorf("Got unexpected error on create: %+v", err)
  1456  	}
  1457  }
  1458  
  1459  func TestCreateDirFailKBFSPrefix(t *testing.T) {
  1460  	testCreateEntryFailKBFSPrefix(t, data.Dir)
  1461  }
  1462  
  1463  func TestCreateFileFailKBFSPrefix(t *testing.T) {
  1464  	testCreateEntryFailKBFSPrefix(t, data.File)
  1465  }
  1466  
  1467  func TestCreateExecFailKBFSPrefix(t *testing.T) {
  1468  	testCreateEntryFailKBFSPrefix(t, data.Exec)
  1469  }
  1470  
  1471  func TestCreateLinkFailKBFSPrefix(t *testing.T) {
  1472  	testCreateEntryFailKBFSPrefix(t, data.Sym)
  1473  }
  1474  
  1475  // makeDirTree creates a block tree for the given path components and
  1476  // returns the DirEntry for the root block, a path, and the
  1477  // corresponding list of blocks. If n components are given, then the
  1478  // path will have n+1 nodes (one extra for the root node), and there
  1479  // will be n+1 corresponding blocks.
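        // For example, makeDirTree(id, uid, "a", "b") returns a path
        // {root}/a/b with three PathNodes and three corresponding DirBlocks.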
  1480  func makeDirTree(id tlf.ID, uid keybase1.UserOrTeamID, components ...string) (
  1481  	data.DirEntry, data.Path, []*data.DirBlock) {
  1482  	var idCounter byte = 0x10
  1483  	makeBlockID := func() kbfsblock.ID {
  1484  		id := kbfsblock.FakeID(idCounter)
  1485  		idCounter++
  1486  		return id
  1487  	}
  1488  
  1489  	// Handle the first (root) block.
  1490  
  1491  	bid := makeBlockID()
  1492  	bi := makeBIFromID(bid, uid)
  1493  	rootEntry := data.DirEntry{
  1494  		BlockInfo: bi,
  1495  		EntryInfo: data.EntryInfo{
  1496  			Type: data.Dir,
  1497  		},
  1498  	}
  1499  	nodes := []data.PathNode{{
  1500  		BlockPointer: bi.BlockPointer, Name: testPPS("{root}")}}
  1501  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  1502  	rootBlock.SetEncodedSize(bi.EncodedSize)
  1503  	blocks := []*data.DirBlock{rootBlock}
  1504  
  1505  	// Handle the rest.
  1506  
  1507  	parentDirBlock := rootBlock
  1508  	for _, component := range components {
  1509  		bid := makeBlockID()
  1510  		bi := makeBIFromID(bid, uid)
  1511  		parentDirBlock.Children[component] = data.DirEntry{
  1512  			BlockInfo: bi,
  1513  			EntryInfo: data.EntryInfo{
  1514  				Type: data.Dir,
  1515  			},
  1516  		}
  1517  		nodes = append(nodes, data.PathNode{
  1518  			BlockPointer: bi.BlockPointer,
  1519  			Name:         testPPS(component),
  1520  		})
  1521  		dirBlock := data.NewDirBlock().(*data.DirBlock)
  1522  		dirBlock.SetEncodedSize(bi.EncodedSize)
  1523  		blocks = append(blocks, dirBlock)
  1524  
  1525  		parentDirBlock = dirBlock
  1526  	}
  1527  
  1528  	return rootEntry, data.Path{
  1529  		FolderBranch: data.FolderBranch{Tlf: id},
  1530  		Path:         nodes,
  1531  	}, blocks
  1532  }
  1533  
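        // TestRemoveDirFailNonEmpty checks that removing a directory that
        // still has children fails with DirNotEmptyError.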
  1534  func TestRemoveDirFailNonEmpty(t *testing.T) {
  1535  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1536  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1537  
  1538  	uid, id, rmd := injectNewRMD(t, config)
  1539  
  1540  	rootEntry, p, blocks := makeDirTree(
  1541  		id, uid, "a", "b", "c", "d", "e")
  1542  	rmd.data.Dir = rootEntry
  1543  
  1544  	// Prime cache with all blocks.
  1545  	for i, block := range blocks {
  1546  		testPutBlockInCache(
  1547  			t, config, p.Path[i].BlockPointer, id, block)
  1548  	}
  1549  
  1550  	ops := getOps(config, id)
  1551  	n := nodeFromPath(t, ops, *p.ParentPath().ParentPath())
  1552  
  1553  	expectedErr := DirNotEmptyError{p.ParentPath().TailName()}
  1554  	err := config.KBFSOps().RemoveDir(ctx, n, testPPS("d"))
  1555  	require.Equal(t, expectedErr, err)
  1556  }
  1557  
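        // testKBFSOpsRemoveFileMissingBlockSuccess checks that removing an
        // entry still succeeds after its block has been deleted from the
        // server and the caches have been cleared.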
  1558  func testKBFSOpsRemoveFileMissingBlockSuccess(t *testing.T, et data.EntryType) {
  1559  	require.NotEqual(t, et, data.Sym)
  1560  
  1561  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice")
  1562  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
  1563  	config.noBGFlush = true
  1564  
  1565  	// Create an entry of the requested type (dir, file, or exec).
  1566  	rootNode := GetRootNodeOrBust(ctx, t, config, "alice", tlf.Private)
  1567  
  1568  	kbfsOps := config.KBFSOps()
  1569  	var nodeA Node
  1570  	var err error
  1571  	if et == data.Dir {
  1572  		nodeA, _, err = kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
  1573  		require.NoError(t, err)
  1574  		err = kbfsOps.SyncAll(ctx, nodeA.GetFolderBranch())
  1575  		require.NoError(t, err)
  1576  	} else {
  1577  		exec := false
  1578  		if et == data.Exec {
  1579  			exec = true
  1580  		}
  1581  
  1582  		nodeA, _, err = kbfsOps.CreateFile(
  1583  			ctx, rootNode, testPPS("a"), exec, NoExcl)
  1584  		require.NoError(t, err)
  1585  
  1586  		data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  1587  		err = kbfsOps.Write(ctx, nodeA, data, 0)
  1588  		require.NoError(t, err)
  1589  		err = kbfsOps.SyncAll(ctx, nodeA.GetFolderBranch())
  1590  		require.NoError(t, err)
  1591  	}
  1592  
  1593  	ops := getOps(config, rootNode.GetFolderBranch().Tlf)
  1594  	// Remove block from the server directly, and clear caches.
  1595  	_, err = config.BlockOps().Delete(
  1596  		ctx, rootNode.GetFolderBranch().Tlf,
  1597  		[]data.BlockPointer{ops.nodeCache.PathFromNode(nodeA).TailPointer()})
  1598  	require.NoError(t, err)
  1599  	config.ResetCaches()
  1600  
  1601  	err = config.KBFSOps().RemoveEntry(ctx, rootNode, testPPS("a"))
  1602  	require.NoError(t, err)
  1603  	err = config.KBFSOps().SyncAll(ctx, rootNode.GetFolderBranch())
  1604  	require.NoError(t, err)
  1605  
  1606  	// Shut down the mdserver explicitly before the state checker tries
  1607  	// to run, since the sizes will definitely be wrong.
  1608  	defer config.MDServer().Shutdown()
  1609  }
  1610  
  1611  func TestKBFSOpsRemoveFileMissingBlockSuccess(t *testing.T) {
  1612  	testKBFSOpsRemoveFileMissingBlockSuccess(t, data.File)
  1613  }
  1614  
  1615  func TestKBFSOpsRemoveExecMissingBlockSuccess(t *testing.T) {
  1616  	testKBFSOpsRemoveFileMissingBlockSuccess(t, data.Exec)
  1617  }
  1618  
  1619  func TestKBFSOpsRemoveDirMissingBlockSuccess(t *testing.T) {
  1620  	testKBFSOpsRemoveFileMissingBlockSuccess(t, data.Dir)
  1621  }
  1622  
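        // TestRemoveDirFailNoSuchName checks that removing a nonexistent
        // directory entry fails with NoSuchNameError.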
  1623  func TestRemoveDirFailNoSuchName(t *testing.T) {
  1624  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1625  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1626  
  1627  	uid, id, rmd := injectNewRMD(t, config)
  1628  
  1629  	rootEntry, p, blocks := makeDirTree(
  1630  		id, uid, "a", "b", "c", "d", "e")
  1631  	rmd.data.Dir = rootEntry
  1632  
  1633  	// Prime cache with all blocks.
  1634  	for i, block := range blocks {
  1635  		testPutBlockInCache(
  1636  			t, config, p.Path[i].BlockPointer, id, block)
  1637  	}
  1638  
  1639  	ops := getOps(config, id)
  1640  	n := nodeFromPath(t, ops, p)
  1641  
  1642  	expectedErr := idutil.NoSuchNameError{Name: "nonexistent"}
  1643  	err := config.KBFSOps().RemoveDir(ctx, n, testPPS("nonexistent"))
  1644  	require.Equal(t, expectedErr, err)
  1645  }
  1646  
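        // TestRenameFailAcrossTopLevelFolders checks that renaming an entry
        // between two different top-level folders fails with
        // RenameAcrossDirsError.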
  1647  func TestRenameFailAcrossTopLevelFolders(t *testing.T) {
  1648  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1649  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1650  
  1651  	id1 := tlf.FakeID(1, tlf.Private)
  1652  	h1 := parseTlfHandleOrBust(t, config, "alice,bob", tlf.Private, id1)
  1653  	rmd1, err := makeInitialRootMetadata(config.MetadataVersion(), id1, h1)
  1654  	require.NoError(t, err)
  1655  
  1656  	id2 := tlf.FakeID(2, tlf.Private)
  1657  	h2 := parseTlfHandleOrBust(t, config, "alice,bob,charlie", tlf.Private, id2)
  1658  	rmd2, err := makeInitialRootMetadata(config.MetadataVersion(), id2, h2)
  1659  	require.NoError(t, err)
  1660  
  1661  	uid1 := h2.ResolvedWriters()[0]
  1662  	uid2 := h2.ResolvedWriters()[2]
  1663  
  1664  	rootID1 := kbfsblock.FakeID(41)
  1665  	aID1 := kbfsblock.FakeID(42)
  1666  	node1 := data.PathNode{
  1667  		BlockPointer: makeBP(rootID1, rmd1, config, uid1),
  1668  		Name:         testPPS("p"),
  1669  	}
  1670  	aNode1 := data.PathNode{
  1671  		BlockPointer: makeBP(aID1, rmd1, config, uid1),
  1672  		Name:         testPPS("a"),
  1673  	}
  1674  	p1 := data.Path{
  1675  		FolderBranch: data.FolderBranch{Tlf: id1},
  1676  		Path:         []data.PathNode{node1, aNode1},
  1677  	}
  1678  	ops1 := getOps(config, id1)
  1679  	n1 := nodeFromPath(t, ops1, p1)
  1680  
  1681  	rootID2 := kbfsblock.FakeID(38)
  1682  	aID2 := kbfsblock.FakeID(39)
  1683  	node2 := data.PathNode{
  1684  		BlockPointer: makeBP(rootID2, rmd2, config, uid2),
  1685  		Name:         testPPS("p"),
  1686  	}
  1687  	aNode2 := data.PathNode{
  1688  		BlockPointer: makeBP(aID2, rmd2, config, uid2),
  1689  		Name:         testPPS("a"),
  1690  	}
  1691  	p2 := data.Path{
  1692  		FolderBranch: data.FolderBranch{Tlf: id2},
  1693  		Path:         []data.PathNode{node2, aNode2},
  1694  	}
  1695  	ops2 := getOps(config, id2)
  1696  	n2 := nodeFromPath(t, ops2, p2)
  1697  
  1698  	expectedErr := RenameAcrossDirsError{}
  1699  
  1700  	if err := config.KBFSOps().Rename(
  1701  		ctx, n1, testPPS("b"), n2, testPPS("c")); err == nil {
  1702  		t.Errorf("Got no expected error on rename")
  1703  	} else if err.Error() != expectedErr.Error() {
  1704  		t.Errorf("Got unexpected error on rename: %+v", err)
  1705  	}
  1706  }
  1707  
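        // TestKBFSOpsCacheReadFullSuccess checks that a full read of a file
        // whose block is already in the block cache returns its entire
        // contents.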
  1708  func TestKBFSOpsCacheReadFullSuccess(t *testing.T) {
  1709  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1710  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1711  
  1712  	u, id, rmd := injectNewRMD(t, config)
  1713  
  1714  	rootID := kbfsblock.FakeID(42)
  1715  	fileID := kbfsblock.FakeID(43)
  1716  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  1717  	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  1718  	node := data.PathNode{
  1719  		BlockPointer: makeBP(rootID, rmd, config, u),
  1720  		Name:         testPPS("p"),
  1721  	}
  1722  	fileNode := data.PathNode{
  1723  		BlockPointer: makeBP(fileID, rmd, config, u),
  1724  		Name:         testPPS("f"),
  1725  	}
  1726  	p := data.Path{
  1727  		FolderBranch: data.FolderBranch{Tlf: id},
  1728  		Path:         []data.PathNode{node, fileNode},
  1729  	}
  1730  	ops := getOps(config, id)
  1731  	pNode := nodeFromPath(t, ops, p)
  1732  
  1733  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  1734  
  1735  	n := len(fileBlock.Contents)
  1736  	dest := make([]byte, n)
  1737  	if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil { // nolint
  1738  		t.Errorf("Got error on read: %+v", err)
  1739  	} else if n2 != int64(n) {
  1740  		t.Errorf("Read the wrong number of bytes: %d", n2)
  1741  	} else if !bytes.Equal(dest, fileBlock.Contents) {
  1742  		t.Errorf("Read bad contents: %v", dest)
  1743  	}
  1744  }
  1745  
  1746  func TestKBFSOpsCacheReadPartialSuccess(t *testing.T) {
  1747  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1748  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1749  
  1750  	u, id, rmd := injectNewRMD(t, config)
  1751  
  1752  	rootID := kbfsblock.FakeID(42)
  1753  	fileID := kbfsblock.FakeID(43)
  1754  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  1755  	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  1756  	node := data.PathNode{
  1757  		BlockPointer: makeBP(rootID, rmd, config, u),
  1758  		Name:         testPPS("p"),
  1759  	}
  1760  	fileNode := data.PathNode{
  1761  		BlockPointer: makeBP(fileID, rmd, config, u),
  1762  		Name:         testPPS("f"),
  1763  	}
  1764  	p := data.Path{
  1765  		FolderBranch: data.FolderBranch{Tlf: id},
  1766  		Path:         []data.PathNode{node, fileNode},
  1767  	}
  1768  	ops := getOps(config, id)
  1769  	pNode := nodeFromPath(t, ops, p)
  1770  
  1771  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  1772  
  1773  	dest := make([]byte, 4)
  1774  	if n, err := config.KBFSOps().Read(ctx, pNode, dest, 2); err != nil { // nolint
  1775  		t.Errorf("Got error on read: %+v", err)
  1776  	} else if n != 4 {
  1777  		t.Errorf("Read the wrong number of bytes: %d", n)
  1778  	} else if !bytes.Equal(dest, fileBlock.Contents[2:6]) {
  1779  		t.Errorf("Read bad contents: %v", dest)
  1780  	}
  1781  }
  1782  
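        // TestKBFSOpsCacheReadFullMultiBlockSuccess checks that a full read of
        // an indirect file reassembles the contents of all four cached child
        // blocks in order.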
  1783  func TestKBFSOpsCacheReadFullMultiBlockSuccess(t *testing.T) {
  1784  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1785  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1786  
  1787  	u, id, rmd := injectNewRMD(t, config)
  1788  
  1789  	rootID := kbfsblock.FakeID(42)
  1790  	fileID := kbfsblock.FakeID(43)
  1791  	id1 := kbfsblock.FakeID(44)
  1792  	id2 := kbfsblock.FakeID(45)
  1793  	id3 := kbfsblock.FakeID(46)
  1794  	id4 := kbfsblock.FakeID(47)
  1795  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  1796  	fileBlock.IsInd = true
  1797  	fileBlock.IPtrs = []data.IndirectFilePtr{
  1798  		makeIFP(id1, rmd, config, u, 0, 0),
  1799  		makeIFP(id2, rmd, config, u, 6, 5),
  1800  		makeIFP(id3, rmd, config, u, 7, 10),
  1801  		makeIFP(id4, rmd, config, u, 8, 15),
  1802  	}
  1803  	block1 := data.NewFileBlock().(*data.FileBlock)
  1804  	block1.Contents = []byte{5, 4, 3, 2, 1}
  1805  	block2 := data.NewFileBlock().(*data.FileBlock)
  1806  	block2.Contents = []byte{10, 9, 8, 7, 6}
  1807  	block3 := data.NewFileBlock().(*data.FileBlock)
  1808  	block3.Contents = []byte{15, 14, 13, 12, 11}
  1809  	block4 := data.NewFileBlock().(*data.FileBlock)
  1810  	block4.Contents = []byte{20, 19, 18, 17, 16}
  1811  	node := data.PathNode{
  1812  		BlockPointer: makeBP(rootID, rmd, config, u),
  1813  		Name:         testPPS("p"),
  1814  	}
  1815  	fileNode := data.PathNode{
  1816  		BlockPointer: makeBP(fileID, rmd, config, u),
  1817  		Name:         testPPS("a"),
  1818  	}
  1819  	p := data.Path{
  1820  		FolderBranch: data.FolderBranch{Tlf: id},
  1821  		Path:         []data.PathNode{node, fileNode},
  1822  	}
  1823  	ops := getOps(config, id)
  1824  	pNode := nodeFromPath(t, ops, p)
  1825  
  1826  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  1827  	testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
  1828  	testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
  1829  	testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
  1830  	testPutBlockInCache(t, config, fileBlock.IPtrs[3].BlockPointer, id, block4)
  1831  
  1832  	n := 20
  1833  	dest := make([]byte, n)
  1834  	fullContents := block1.Contents
  1835  	fullContents = append(fullContents, block2.Contents...)
  1836  	fullContents = append(fullContents, block3.Contents...)
  1837  	fullContents = append(fullContents, block4.Contents...)
  1838  	if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil { // nolint
  1839  		t.Errorf("Got error on read: %+v", err)
  1840  	} else if n2 != int64(n) {
  1841  		t.Errorf("Read the wrong number of bytes: %d", n2)
  1842  	} else if !bytes.Equal(dest, fullContents) {
  1843  		t.Errorf("Read bad contents: %v", dest)
  1844  	}
  1845  }
  1846  
  1847  func TestKBFSOpsCacheReadPartialMultiBlockSuccess(t *testing.T) {
  1848  	t.Skip("Broken test since Go 1.12.4 due to extra pending requests after test termination. Panic: unable to shutdown prefetcher.")
  1849  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1850  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1851  
  1852  	u, id, rmd := injectNewRMD(t, config)
  1853  
  1854  	rootID := kbfsblock.FakeID(42)
  1855  	fileID := kbfsblock.FakeID(43)
  1856  	id1 := kbfsblock.FakeID(44)
  1857  	id2 := kbfsblock.FakeID(45)
  1858  	id3 := kbfsblock.FakeID(46)
  1859  	id4 := kbfsblock.FakeID(47)
  1860  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  1861  	fileBlock.IsInd = true
  1862  	fileBlock.IPtrs = []data.IndirectFilePtr{
  1863  		makeIFP(id1, rmd, config, u, 0, 0),
  1864  		makeIFP(id2, rmd, config, u, 6, 5),
  1865  		makeIFP(id3, rmd, config, u, 7, 10),
  1866  		makeIFP(id4, rmd, config, u, 8, 15),
  1867  	}
  1868  	block1 := data.NewFileBlock().(*data.FileBlock)
  1869  	block1.Contents = []byte{5, 4, 3, 2, 1}
  1870  	block2 := data.NewFileBlock().(*data.FileBlock)
  1871  	block2.Contents = []byte{10, 9, 8, 7, 6}
  1872  	block3 := data.NewFileBlock().(*data.FileBlock)
  1873  	block3.Contents = []byte{15, 14, 13, 12, 11}
  1874  	block4 := data.NewFileBlock().(*data.FileBlock)
  1875  	block4.Contents = []byte{20, 19, 18, 17, 16}
  1876  	node := data.PathNode{
  1877  		BlockPointer: makeBP(rootID, rmd, config, u),
  1878  		Name:         testPPS("p"),
  1879  	}
  1880  	fileNode := data.PathNode{
  1881  		BlockPointer: makeBP(fileID, rmd, config, u),
  1882  		Name:         testPPS("a"),
  1883  	}
  1884  	p := data.Path{
  1885  		FolderBranch: data.FolderBranch{Tlf: id},
  1886  		Path:         []data.PathNode{node, fileNode},
  1887  	}
  1888  	ops := getOps(config, id)
  1889  	pNode := nodeFromPath(t, ops, p)
  1890  
  1891  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  1892  	testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
  1893  	testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
  1894  	testPutBlockInCache(t, config, fileBlock.IPtrs[2].BlockPointer, id, block3)
  1895  
  1896  	n := 10
  1897  	dest := make([]byte, n)
  1898  	contents := block1.Contents[3:]
  1899  	contents = append(contents, block2.Contents...)
  1900  	contents = append(contents, block3.Contents[:3]...)
  1901  	if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 3); err != nil { // nolint
  1902  		t.Errorf("Got error on read: %+v", err)
  1903  	} else if n2 != int64(n) {
  1904  		t.Errorf("Read the wrong number of bytes: %d", n2)
  1905  	} else if !bytes.Equal(dest, contents) {
  1906  		t.Errorf("Read bad contents: %v", dest)
  1907  	}
  1908  }
  1909  
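        // TestKBFSOpsCacheReadFailPastEnd checks that a read starting at or
        // beyond the end of the file returns zero bytes and no error.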
  1910  func TestKBFSOpsCacheReadFailPastEnd(t *testing.T) {
  1911  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1912  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1913  
  1914  	u, id, rmd := injectNewRMD(t, config)
  1915  
  1916  	rootID := kbfsblock.FakeID(42)
  1917  	fileID := kbfsblock.FakeID(43)
  1918  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  1919  	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  1920  	node := data.PathNode{
  1921  		BlockPointer: makeBP(rootID, rmd, config, u),
  1922  		Name:         testPPS("p"),
  1923  	}
  1924  	fileNode := data.PathNode{
  1925  		BlockPointer: makeBP(fileID, rmd, config, u),
  1926  		Name:         testPPS("f"),
  1927  	}
  1928  	p := data.Path{
  1929  		FolderBranch: data.FolderBranch{Tlf: id},
  1930  		Path:         []data.PathNode{node, fileNode},
  1931  	}
  1932  	ops := getOps(config, id)
  1933  	pNode := nodeFromPath(t, ops, p)
  1934  
  1935  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  1936  
  1937  	dest := make([]byte, 4)
  1938  	if n, err := config.KBFSOps().Read(ctx, pNode, dest, 10); err != nil {
  1939  		t.Errorf("Got error on read: %+v", err)
  1940  	} else if n != 0 {
  1941  		t.Errorf("Read the wrong number of bytes: %d", n)
  1942  	}
  1943  }
  1944  
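        // TestKBFSOpsServerReadFullSuccess checks that a read that misses the
        // block cache fetches the file block (mocked here via expectBlock) and
        // returns its full contents.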
  1945  func TestKBFSOpsServerReadFullSuccess(t *testing.T) {
  1946  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1947  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1948  
  1949  	u, id, rmd := injectNewRMD(t, config)
  1950  
  1951  	rootID := kbfsblock.FakeID(42)
  1952  	fileID := kbfsblock.FakeID(43)
  1953  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  1954  	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  1955  	node := data.PathNode{
  1956  		BlockPointer: makeBP(rootID, rmd, config, u),
  1957  		Name:         testPPS("p"),
  1958  	}
  1959  	fileBlockPtr := makeBP(fileID, rmd, config, u)
  1960  	fileNode := data.PathNode{BlockPointer: fileBlockPtr, Name: testPPS("f")}
  1961  	p := data.Path{
  1962  		FolderBranch: data.FolderBranch{Tlf: id},
  1963  		Path:         []data.PathNode{node, fileNode},
  1964  	}
  1965  	ops := getOps(config, id)
  1966  	pNode := nodeFromPath(t, ops, p)
  1967  
  1968  	// cache miss means fetching metadata and getting read key
  1969  	expectBlock(config, rmd, fileBlockPtr, fileBlock, nil)
  1970  
  1971  	n := len(fileBlock.Contents)
  1972  	dest := make([]byte, n)
  1973  	if n2, err := config.KBFSOps().Read(ctx, pNode, dest, 0); err != nil { // nolint
  1974  		t.Errorf("Got error on read: %+v", err)
  1975  	} else if n2 != int64(n) {
  1976  		t.Errorf("Read the wrong number of bytes: %d", n2)
  1977  	} else if !bytes.Equal(dest, fileBlock.Contents) {
  1978  		t.Errorf("Read bad contents: %v", dest)
  1979  	}
  1980  }
  1981  
  1982  func TestKBFSOpsServerReadFailNoSuchBlock(t *testing.T) {
  1983  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  1984  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  1985  
  1986  	u, id, rmd := injectNewRMD(t, config)
  1987  
  1988  	rootID := kbfsblock.FakeID(42)
  1989  	fileID := kbfsblock.FakeID(43)
  1990  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  1991  	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  1992  	node := data.PathNode{
  1993  		BlockPointer: makeBP(rootID, rmd, config, u),
  1994  		Name:         testPPS("p"),
  1995  	}
  1996  	fileBlockPtr := makeBP(fileID, rmd, config, u)
  1997  	fileNode := data.PathNode{BlockPointer: fileBlockPtr, Name: testPPS("f")}
  1998  	p := data.Path{
  1999  		FolderBranch: data.FolderBranch{Tlf: id},
  2000  		Path:         []data.PathNode{node, fileNode},
  2001  	}
  2002  	ops := getOps(config, id)
  2003  	pNode := nodeFromPath(t, ops, p)
  2004  
  2005  	// cache miss means fetching metadata and getting read key
  2006  	err := data.NoSuchBlockError{ID: rootID}
  2007  	expectBlock(config, rmd, fileBlockPtr, fileBlock, err)
  2008  
  2009  	n := len(fileBlock.Contents)
  2010  	dest := make([]byte, n)
  2011  	if _, err2 := config.KBFSOps().Read(ctx, pNode, dest, 0); err2 == nil {
  2012  		t.Errorf("Got no expected error")
  2013  	} else if err2 != err {
  2014  		t.Errorf("Got unexpected error: %+v", err2)
  2015  	}
  2016  }
  2017  
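        // checkSyncOp verifies that the given syncOp unrefs the expected file
        // pointer and contains exactly the expected write ranges.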
  2018  func checkSyncOp(t *testing.T, codec kbfscodec.Codec,
  2019  	so *syncOp, filePtr data.BlockPointer, writes []WriteRange) {
  2020  	if so == nil {
  2021  		t.Error("No sync info for written file!")
  2022  		return
  2023  	}
  2024  	if so.File.Unref != filePtr {
  2025  		t.Errorf("Unexpected unref file in sync op: %v vs %v",
  2026  			so.File.Unref, filePtr)
  2027  	}
  2028  	if len(so.Writes) != len(writes) {
  2029  		t.Errorf("Unexpected number of writes: %v (expected %v)",
  2030  			len(so.Writes), len(writes))
  2031  	}
  2032  	for i, w := range writes {
  2033  		writeEqual, err := kbfscodec.Equal(codec, so.Writes[i], w)
  2034  		if err != nil {
  2035  			t.Fatal(err)
  2036  		}
  2037  		if !writeEqual {
  2038  			t.Errorf("Unexpected write: %v vs %v", so.Writes[i], w)
  2039  		}
  2040  	}
  2041  }
  2042  
  2043  func checkSyncOpInCache(t *testing.T, codec kbfscodec.Codec,
  2044  	ops *folderBranchOps, filePtr data.BlockPointer, writes []WriteRange) {
  2045  	// check the in-progress syncOp
  2046  	si, ok := ops.blocks.unrefCache[filePtr.Ref()]
  2047  	if !ok {
  2048  		t.Fatal("No sync info for written file!") // avoid a nil deref below
  2049  	}
  2050  	checkSyncOp(t, codec, si.op, filePtr, writes)
  2051  }
  2052  
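        // TestKBFSOpsWriteNewBlockSuccess checks that writing into an empty
        // file dirties the file and root blocks, fires a local change
        // notification, and records the write in the in-progress syncOp.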
  2053  func TestKBFSOpsWriteNewBlockSuccess(t *testing.T) {
  2054  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  2055  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  2056  
  2057  	uid, id, rmd := injectNewRMD(t, config)
  2058  
  2059  	rootID := kbfsblock.FakeID(42)
  2060  	fileID := kbfsblock.FakeID(43)
  2061  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  2062  	rootBlock.Children["f"] = data.DirEntry{
  2063  		BlockInfo: data.BlockInfo{
  2064  			BlockPointer: makeBP(fileID, rmd, config, uid),
  2065  			EncodedSize:  1,
  2066  		},
  2067  		EntryInfo: data.EntryInfo{
  2068  			Type: data.File,
  2069  		},
  2070  	}
  2071  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  2072  	node := data.PathNode{
  2073  		BlockPointer: makeBP(rootID, rmd, config, uid),
  2074  		Name:         testPPS("p"),
  2075  	}
  2076  	fileNode := data.PathNode{
  2077  		BlockPointer: makeBP(fileID, rmd, config, uid),
  2078  		Name:         testPPS("f"),
  2079  	}
  2080  	p := data.Path{
  2081  		FolderBranch: data.FolderBranch{Tlf: id},
  2082  		Path:         []data.PathNode{node, fileNode},
  2083  	}
  2084  	ops := getOps(config, id)
  2085  	n := nodeFromPath(t, ops, p)
  2086  	buf := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  2087  
  2088  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  2089  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  2090  	config.mockBsplit.EXPECT().CopyUntilSplit(
  2091  		gomock.Any(), gomock.Any(), buf, int64(0)).
  2092  		Do(func(block *data.FileBlock, lb bool, data []byte, off int64) {
  2093  			block.Contents = data
  2094  		}).Return(int64(len(buf)))
  2095  
  2096  	if err := config.KBFSOps().Write(ctx, n, buf, 0); err != nil {
  2097  		t.Errorf("Got error on write: %+v", err)
  2098  	}
  2099  
  2100  	newFileBlock := getFileBlockFromCache(
  2101  		ctx, t, config, id, fileNode.BlockPointer,
  2102  		p.Branch)
  2103  	newRootBlock := getDirBlockFromCache(
  2104  		ctx, t, config, id, node.BlockPointer, p.Branch)
  2105  
  2106  	switch {
  2107  	case len(ops.nodeCache.PathFromNode(config.observer.localChange).Path) !=
  2108  		len(p.Path):
  2109  		t.Errorf("Missing or incorrect local update during write: %v",
  2110  			config.observer.localChange)
  2111  	case ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID):
  2112  		t.Errorf("Wrong context value passed in local notify: %v",
  2113  			config.observer.ctx.Value(tCtxID))
  2114  	case !bytes.Equal(buf, newFileBlock.Contents):
  2115  		t.Errorf("Wrote bad contents: %v", buf)
  2116  	case newRootBlock.Children["f"].GetWriter() != uid:
  2117  		t.Errorf("Wrong last writer: %v",
  2118  			newRootBlock.Children["f"].GetWriter())
  2119  	case newRootBlock.Children["f"].Size != uint64(len(buf)):
  2120  		t.Errorf("Wrong size for written file: %d",
  2121  			newRootBlock.Children["f"].Size)
  2122  	}
  2123  	checkBlockCache(
  2124  		ctx, t, config, id, []kbfsblock.ID{rootID, fileID},
  2125  		map[data.BlockPointer]data.BranchName{
  2126  			node.BlockPointer:     p.Branch,
  2127  			fileNode.BlockPointer: p.Branch,
  2128  		})
  2129  	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
  2130  		[]WriteRange{{Off: 0, Len: uint64(len(buf))}})
  2131  }
  2132  
  2133  func TestKBFSOpsWriteExtendSuccess(t *testing.T) {
  2134  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  2135  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  2136  
  2137  	uid, id, rmd := injectNewRMD(t, config)
  2138  
  2139  	rootID := kbfsblock.FakeID(42)
  2140  	fileID := kbfsblock.FakeID(43)
  2141  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  2142  	rootBlock.Children["f"] = data.DirEntry{
  2143  		BlockInfo: data.BlockInfo{
  2144  			BlockPointer: makeBP(fileID, rmd, config, uid),
  2145  			EncodedSize:  1,
  2146  		},
  2147  		EntryInfo: data.EntryInfo{
  2148  			Type: data.File,
  2149  		},
  2150  	}
  2151  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  2152  	fileBlock.Contents = []byte{1, 2, 3, 4, 5}
  2153  	node := data.PathNode{
  2154  		BlockPointer: makeBP(rootID, rmd, config, uid),
  2155  		Name:         testPPS("p"),
  2156  	}
  2157  	fileNode := data.PathNode{
  2158  		BlockPointer: makeBP(fileID, rmd, config, uid),
  2159  		Name:         testPPS("f"),
  2160  	}
  2161  	p := data.Path{
  2162  		FolderBranch: data.FolderBranch{Tlf: id},
  2163  		Path:         []data.PathNode{node, fileNode},
  2164  	}
  2165  	ops := getOps(config, id)
  2166  	n := nodeFromPath(t, ops, p)
  2167  	buf := []byte{6, 7, 8, 9, 10}
  2168  	expectedFullData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  2169  
  2170  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  2171  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  2172  	config.mockBsplit.EXPECT().CopyUntilSplit(
  2173  		gomock.Any(), gomock.Any(), buf, int64(5)).
  2174  		Do(func(block *data.FileBlock, lb bool, data []byte, off int64) {
  2175  			block.Contents = expectedFullData
  2176  		}).Return(int64(len(buf)))
  2177  
  2178  	if err := config.KBFSOps().Write(ctx, n, buf, 5); err != nil {
  2179  		t.Errorf("Got error on write: %+v", err)
  2180  	}
  2181  
  2182  	newFileBlock := getFileBlockFromCache(
  2183  		ctx, t, config, id, fileNode.BlockPointer, p.Branch)
  2184  
  2185  	switch {
  2186  	case len(ops.nodeCache.PathFromNode(config.observer.localChange).Path) !=
  2187  		len(p.Path):
  2188  		t.Errorf("Missing or incorrect local update during write: %v",
  2189  			config.observer.localChange)
  2190  	case ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID):
  2191  		t.Errorf("Wrong context value passed in local notify: %v",
  2192  			config.observer.ctx.Value(tCtxID))
  2193  	case !bytes.Equal(expectedFullData, newFileBlock.Contents):
  2194  		t.Errorf("Wrote bad contents: %v", buf)
  2195  	}
  2196  	checkBlockCache(
  2197  		ctx, t, config, id, []kbfsblock.ID{rootID, fileID},
  2198  		map[data.BlockPointer]data.BranchName{
  2199  			node.BlockPointer:     p.Branch,
  2200  			fileNode.BlockPointer: p.Branch,
  2201  		})
  2202  	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
  2203  		[]WriteRange{{Off: 5, Len: uint64(len(buf))}})
  2204  }
  2205  
  2206  func TestKBFSOpsWritePastEndSuccess(t *testing.T) {
  2207  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  2208  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  2209  
  2210  	uid, id, rmd := injectNewRMD(t, config)
  2211  
  2212  	rootID := kbfsblock.FakeID(42)
  2213  	fileID := kbfsblock.FakeID(43)
  2214  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  2215  	rootBlock.Children["f"] = data.DirEntry{
  2216  		BlockInfo: data.BlockInfo{
  2217  			BlockPointer: makeBP(fileID, rmd, config, uid),
  2218  			EncodedSize:  1,
  2219  		},
  2220  		EntryInfo: data.EntryInfo{
  2221  			Type: data.File,
  2222  		},
  2223  	}
  2224  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  2225  	fileBlock.Contents = []byte{1, 2, 3, 4, 5}
  2226  	node := data.PathNode{
  2227  		BlockPointer: makeBP(rootID, rmd, config, uid),
  2228  		Name:         testPPS("p"),
  2229  	}
  2230  	fileNode := data.PathNode{
  2231  		BlockPointer: makeBP(fileID, rmd, config, uid),
  2232  		Name:         testPPS("f"),
  2233  	}
  2234  	p := data.Path{
  2235  		FolderBranch: data.FolderBranch{Tlf: id},
  2236  		Path:         []data.PathNode{node, fileNode},
  2237  	}
  2238  	ops := getOps(config, id)
  2239  	n := nodeFromPath(t, ops, p)
  2240  	buf := []byte{6, 7, 8, 9, 10}
  2241  	expectedFullData := []byte{1, 2, 3, 4, 5, 0, 0, 6, 7, 8, 9, 10}
  2242  
  2243  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  2244  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  2245  	config.mockBsplit.EXPECT().CopyUntilSplit(
  2246  		gomock.Any(), gomock.Any(), buf, int64(7)).
  2247  		Do(func(block *data.FileBlock, lb bool, data []byte, off int64) {
  2248  			block.Contents = expectedFullData
  2249  		}).Return(int64(len(buf)))
  2250  
  2251  	if err := config.KBFSOps().Write(ctx, n, buf, 7); err != nil {
  2252  		t.Errorf("Got error on write: %+v", err)
  2253  	}
  2254  
  2255  	newFileBlock := getFileBlockFromCache(
  2256  		ctx, t, config, id, fileNode.BlockPointer, p.Branch)
  2257  
  2258  	switch {
  2259  	case len(ops.nodeCache.PathFromNode(config.observer.localChange).Path) !=
  2260  		len(p.Path):
  2261  		t.Errorf("Missing or incorrect local update during write: %v",
  2262  			config.observer.localChange)
  2263  	case ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID):
  2264  		t.Errorf("Wrong context value passed in local notify: %v",
  2265  			config.observer.ctx.Value(tCtxID))
  2266  	case !bytes.Equal(expectedFullData, newFileBlock.Contents):
  2267  		t.Errorf("Wrote bad contents: %v", buf)
  2268  	}
  2269  	checkBlockCache(
  2270  		ctx, t, config, id, []kbfsblock.ID{rootID, fileID},
  2271  		map[data.BlockPointer]data.BranchName{
  2272  			node.BlockPointer:     p.Branch,
  2273  			fileNode.BlockPointer: p.Branch,
  2274  		})
  2275  	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
  2276  		[]WriteRange{{Off: 7, Len: uint64(len(buf))}})
  2277  }
  2278  
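        // TestKBFSOpsWriteCauseSplit checks that a write large enough to make
        // the block splitter stop early turns the file into an indirect block
        // with two child blocks at the right offsets.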
  2279  func TestKBFSOpsWriteCauseSplit(t *testing.T) {
  2280  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  2281  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  2282  
  2283  	uid, id, rmd := injectNewRMD(t, config)
  2284  
  2285  	rootID := kbfsblock.FakeID(42)
  2286  	fileID := kbfsblock.FakeID(43)
  2287  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  2288  	rootBlock.Children["f"] = data.DirEntry{
  2289  		BlockInfo: data.BlockInfo{
  2290  			BlockPointer: makeBP(fileID, rmd, config, uid),
  2291  			EncodedSize:  1,
  2292  		},
  2293  		EntryInfo: data.EntryInfo{
  2294  			Type: data.File,
  2295  		},
  2296  	}
  2297  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  2298  	fileBlock.Contents = []byte{}
  2299  	node := data.PathNode{
  2300  		BlockPointer: makeBP(rootID, rmd, config, uid),
  2301  		Name:         testPPS("p"),
  2302  	}
  2303  	fileNode := data.PathNode{
  2304  		BlockPointer: makeBP(fileID, rmd, config, uid),
  2305  		Name:         testPPS("f"),
  2306  	}
  2307  	p := data.Path{
  2308  		FolderBranch: data.FolderBranch{Tlf: id},
  2309  		Path:         []data.PathNode{node, fileNode},
  2310  	}
  2311  	ops := getOps(config, id)
  2312  	n := nodeFromPath(t, ops, p)
  2313  	newData := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  2314  	expectedFullData := append([]byte{0}, newData...)
  2315  
  2316  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  2317  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  2318  
  2319  	// only copy the first half first
  2320  	config.mockBsplit.EXPECT().CopyUntilSplit(
  2321  		gomock.Any(), gomock.Any(), newData, int64(1)).
  2322  		Do(func(block *data.FileBlock, lb bool, data []byte, off int64) {
  2323  			block.Contents = append([]byte{0}, data[0:5]...)
  2324  		}).Return(int64(5))
  2325  
  2326  	// after the split, the remainder goes into a new right-hand block,
  2327  	// so expect a second copy for the second half
  2328  	config.mockBsplit.EXPECT().CopyUntilSplit(
  2329  		gomock.Any(), gomock.Any(), newData[5:10], int64(0)).
  2330  		Do(func(block *data.FileBlock, lb bool, data []byte, off int64) {
  2331  			block.Contents = data
  2332  		}).Return(int64(5))
  2333  
  2334  	if err := config.KBFSOps().Write(ctx, n, newData, 1); err != nil {
  2335  		t.Errorf("Got error on write: %+v", err)
  2336  	}
  2337  	b, _ := config.DirtyBlockCache().Get(ctx, id, node.BlockPointer, p.Branch)
  2338  	newRootBlock := b.(*data.DirBlock)
  2339  
  2340  	b, _ = config.DirtyBlockCache().Get(
  2341  		ctx, id, fileNode.BlockPointer, p.Branch)
  2342  	pblock := b.(*data.FileBlock)
  2343  	require.Len(t, pblock.IPtrs, 2)
  2344  	id1 := pblock.IPtrs[0].ID
  2345  	id2 := pblock.IPtrs[1].ID
  2346  	b, _ = config.DirtyBlockCache().Get(ctx, id, makeBP(id1, rmd, config, uid),
  2347  		p.Branch)
  2348  	block1 := b.(*data.FileBlock)
  2349  	b, _ = config.DirtyBlockCache().Get(ctx, id, makeBP(id2, rmd, config, uid),
  2350  		p.Branch)
  2351  	block2 := b.(*data.FileBlock)
  2352  
  2353  	switch {
  2354  	case len(ops.nodeCache.PathFromNode(config.observer.localChange).Path) !=
  2355  		len(p.Path):
  2356  		t.Errorf("Missing or incorrect local update during write: %v",
  2357  			config.observer.localChange)
  2358  	case ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID):
  2359  		t.Errorf("Wrong context value passed in local notify: %v",
  2360  			config.observer.ctx.Value(tCtxID))
  2361  	case !bytes.Equal(expectedFullData[0:6], block1.Contents):
  2362  		t.Errorf("Wrote bad contents to block 1: %v", block1.Contents)
  2363  	case !bytes.Equal(expectedFullData[6:11], block2.Contents):
  2364  		t.Errorf("Wrote bad contents to block 2: %v", block2.Contents)
  2365  	case !pblock.IsInd:
  2366  		t.Errorf("Parent block is not indirect!")
  2367  	case pblock.IPtrs[0].Off != 0:
  2368  		t.Errorf("Parent block has wrong offset for block 1: %d",
  2369  			pblock.IPtrs[0].Off)
  2370  	case pblock.IPtrs[1].Off != 6:
  2371  		t.Errorf("Parent block has wrong offset for block 2: %d",
  2372  			pblock.IPtrs[1].Off)
  2373  	case newRootBlock.Children["f"].Size != uint64(11):
  2374  		t.Errorf("Wrong size for written file: %d",
  2375  			newRootBlock.Children["f"].Size)
  2376  	}
  2377  
  2378  	checkBlockCache(
  2379  		ctx, t, config, id, []kbfsblock.ID{rootID, fileID},
  2380  		map[data.BlockPointer]data.BranchName{
  2381  			node.BlockPointer:            p.Branch,
  2382  			fileNode.BlockPointer:        p.Branch,
  2383  			pblock.IPtrs[0].BlockPointer: p.Branch,
  2384  			pblock.IPtrs[1].BlockPointer: p.Branch,
  2385  		})
  2386  	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
  2387  		[]WriteRange{{Off: 1, Len: uint64(len(newData))}})
  2388  }
  2389  
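        // mergeUnrefCache folds the in-progress unref info for the given file
        // into the given RootMetadata, while holding the block lock for
        // reading, so tests can check the unref'd bytes directly.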
  2390  func mergeUnrefCache(
  2391  	ops *folderBranchOps, lState *kbfssync.LockState, file data.Path,
  2392  	md *RootMetadata) {
  2393  	ops.blocks.blockLock.RLock(lState)
  2394  	defer ops.blocks.blockLock.RUnlock(lState)
  2395  	ops.blocks.unrefCache[file.TailPointer().Ref()].mergeUnrefCache(md)
  2396  }
  2397  
  2398  func TestKBFSOpsWriteOverMultipleBlocks(t *testing.T) {
  2399  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  2400  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  2401  
  2402  	uid, id, rmd := injectNewRMD(t, config)
  2403  	rootID := kbfsblock.FakeID(42)
  2404  	fileID := kbfsblock.FakeID(43)
  2405  	id1 := kbfsblock.FakeID(44)
  2406  	id2 := kbfsblock.FakeID(45)
  2407  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  2408  	filePtr := data.BlockPointer{
  2409  		ID: fileID, KeyGen: kbfsmd.FirstValidKeyGen, DataVer: 1,
  2410  		Context: kbfsblock.Context{
  2411  			Creator: uid,
  2412  		},
  2413  	}
  2414  	rootBlock.Children["f"] = data.DirEntry{
  2415  		BlockInfo: data.BlockInfo{
  2416  			BlockPointer: filePtr,
  2417  			EncodedSize:  1,
  2418  		},
  2419  		EntryInfo: data.EntryInfo{
  2420  			Size: 10,
  2421  		},
  2422  	}
  2423  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  2424  	fileBlock.IsInd = true
  2425  	fileBlock.IPtrs = []data.IndirectFilePtr{
  2426  		makeIFP(id1, rmd, config, uid, 5, 0),
  2427  		makeIFP(id2, rmd, config, uid, 6, 5),
  2428  	}
  2429  	block1 := data.NewFileBlock().(*data.FileBlock)
  2430  	block1.Contents = []byte{5, 4, 3, 2, 1}
  2431  	block2 := data.NewFileBlock().(*data.FileBlock)
  2432  	block2.Contents = []byte{10, 9, 8, 7, 6}
  2433  	node := data.PathNode{
  2434  		BlockPointer: makeBP(rootID, rmd, config, uid),
  2435  		Name:         testPPS("p"),
  2436  	}
  2437  	fileNode := data.PathNode{
  2438  		BlockPointer: makeBP(fileID, rmd, config, uid),
  2439  		Name:         testPPS("f"),
  2440  	}
  2441  	p := data.Path{
  2442  		FolderBranch: data.FolderBranch{Tlf: id},
  2443  		Path:         []data.PathNode{node, fileNode},
  2444  	}
  2445  	ops := getOps(config, id)
  2446  	n := nodeFromPath(t, ops, p)
  2447  	buf := []byte{1, 2, 3, 4, 5}
  2448  	expectedFullData := []byte{5, 4, 1, 2, 3, 4, 5, 8, 7, 6}
  2449  	so, err := newSyncOp(filePtr)
  2450  	require.NoError(t, err)
  2451  	rmd.AddOp(so)
  2452  
  2453  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  2454  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  2455  	testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
  2456  	testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
  2457  
  2458  	// only copy the first half first
  2459  	config.mockBsplit.EXPECT().CopyUntilSplit(
  2460  		gomock.Any(), gomock.Any(), []byte{1, 2, 3}, int64(2)).
  2461  		Do(func(block *data.FileBlock, lb bool, data []byte, off int64) {
  2462  			block.Contents = make([]byte, 5)
  2463  			copy(block.Contents, block1.Contents[0:2])
  2464  			copy(block.Contents[2:], data[0:3])
  2465  		}).Return(int64(3))
  2466  
  2467  	// update block 2
  2468  	config.mockBsplit.EXPECT().CopyUntilSplit(
  2469  		gomock.Any(), gomock.Any(), buf[3:], int64(0)).
  2470  		Do(func(block *data.FileBlock, lb bool, data []byte, off int64) {
  2471  			block.Contents = make([]byte, len(data)+len(block2.Contents[2:]))
  2472  			copy(block.Contents, data)
  2473  			copy(block.Contents[len(data):], block2.Contents[2:])
  2474  		}).Return(int64(2))
  2475  
  2476  	if err := config.KBFSOps().Write(ctx, n, buf, 2); err != nil {
  2477  		t.Errorf("Got error on write: %+v", err)
  2478  	}
  2479  
  2480  	newBlock1 := getFileBlockFromCache(
  2481  		ctx, t, config, id, fileBlock.IPtrs[0].BlockPointer, p.Branch)
  2482  	newBlock2 := getFileBlockFromCache(
  2483  		ctx, t, config, id, fileBlock.IPtrs[1].BlockPointer, p.Branch)
  2484  
  2485  	switch {
  2486  	case len(ops.nodeCache.PathFromNode(config.observer.localChange).Path) !=
  2487  		len(p.Path):
  2488  		t.Errorf("Missing or incorrect local update during write: %v",
  2489  			config.observer.localChange)
  2490  	case ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID):
  2491  		t.Errorf("Wrong context value passed in local notify: %v",
  2492  			config.observer.ctx.Value(tCtxID))
  2493  	case !bytes.Equal(expectedFullData[0:5], newBlock1.Contents):
  2494  		t.Errorf("Wrote bad contents to block 1: %v", block1.Contents)
  2495  	case !bytes.Equal(expectedFullData[5:10], newBlock2.Contents):
  2496  		t.Errorf("Wrote bad contents to block 2: %v", block2.Contents)
  2497  	}
  2498  
  2499  	lState := makeFBOLockState()
  2500  
  2501  	// merge the unref cache to make it easy to check for changes
  2502  	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
  2503  		[]WriteRange{{Off: 2, Len: uint64(len(buf))}})
  2504  	mergeUnrefCache(ops, lState, p, rmd)
  2505  	checkBlockCache(
  2506  		ctx, t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
  2507  		map[data.BlockPointer]data.BranchName{
  2508  			node.BlockPointer:               p.Branch,
  2509  			fileNode.BlockPointer:           p.Branch,
  2510  			fileBlock.IPtrs[0].BlockPointer: p.Branch,
  2511  			fileBlock.IPtrs[1].BlockPointer: p.Branch,
  2512  		})
  2513  }
  2514  
  2515  // Read tests check the same error cases, so no need for similar write
  2516  // error tests
  2517  
  2518  func TestKBFSOpsTruncateToZeroSuccess(t *testing.T) {
  2519  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  2520  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  2521  
  2522  	uid, id, rmd := injectNewRMD(t, config)
  2523  
  2524  	rootID := kbfsblock.FakeID(42)
  2525  	fileID := kbfsblock.FakeID(43)
  2526  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  2527  	rootBlock.Children["f"] = data.DirEntry{
  2528  		BlockInfo: data.BlockInfo{
  2529  			BlockPointer: makeBP(fileID, rmd, config, uid),
  2530  			EncodedSize:  1,
  2531  		},
  2532  		EntryInfo: data.EntryInfo{
  2533  			Type: data.File,
  2534  		},
  2535  	}
  2536  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  2537  	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  2538  	node := data.PathNode{
  2539  		BlockPointer: makeBP(rootID, rmd, config, uid),
  2540  		Name:         testPPS("p"),
  2541  	}
  2542  	fileNode := data.PathNode{
  2543  		BlockPointer: makeBP(fileID, rmd, config, uid),
  2544  		Name:         testPPS("f"),
  2545  	}
  2546  	p := data.Path{
  2547  		FolderBranch: data.FolderBranch{Tlf: id},
  2548  		Path:         []data.PathNode{node, fileNode},
  2549  	}
  2550  	ops := getOps(config, id)
  2551  	n := nodeFromPath(t, ops, p)
  2552  
  2553  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  2554  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  2555  
  2556  	buf := []byte{}
  2557  	if err := config.KBFSOps().Truncate(ctx, n, 0); err != nil {
  2558  		t.Errorf("Got error on truncate: %+v", err)
  2559  	}
  2560  
  2561  	newFileBlock := getFileBlockFromCache(
  2562  		ctx, t, config, id, fileNode.BlockPointer, p.Branch)
  2563  	newRootBlock := getDirBlockFromCache(
  2564  		ctx, t, config, id, node.BlockPointer, p.Branch)
  2565  
  2566  	switch {
  2567  	case len(ops.nodeCache.PathFromNode(config.observer.localChange).Path) !=
  2568  		len(p.Path):
  2569  		t.Errorf("Missing or incorrect local update during truncate: %v",
  2570  			config.observer.localChange)
  2571  	case ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID):
  2572  		t.Errorf("Wrong context value passed in local notify: %v",
  2573  			config.observer.ctx.Value(tCtxID))
  2574  	case !bytes.Equal(buf, newFileBlock.Contents):
  2575  		t.Errorf("Wrote bad contents: %v", newFileBlock.Contents)
  2576  	case newRootBlock.Children["f"].GetWriter() != uid:
  2577  		t.Errorf("Wrong last writer: %v",
  2578  			newRootBlock.Children["f"].GetWriter())
  2579  	case newRootBlock.Children["f"].Size != 0:
  2580  		t.Errorf("Wrong size for written file: %d",
  2581  			newRootBlock.Children["f"].Size)
  2582  	}
  2583  	checkBlockCache(
  2584  		ctx, t, config, id, []kbfsblock.ID{rootID, fileID},
  2585  		map[data.BlockPointer]data.BranchName{
  2586  			node.BlockPointer:     p.Branch,
  2587  			fileNode.BlockPointer: p.Branch,
  2588  		})
  2589  	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
  2590  		[]WriteRange{{Off: 0, Len: 0}})
  2591  }
  2592  
  2593  func TestKBFSOpsTruncateSameSize(t *testing.T) {
  2594  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  2595  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  2596  
  2597  	u, id, rmd := injectNewRMD(t, config)
  2598  
  2599  	rootID := kbfsblock.FakeID(42)
  2600  	fileID := kbfsblock.FakeID(43)
  2601  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  2602  	rootBlock.Children["f"] = data.DirEntry{
  2603  		BlockInfo: makeBIFromID(fileID, u),
  2604  		EntryInfo: data.EntryInfo{
  2605  			Type: data.File,
  2606  		},
  2607  	}
  2608  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  2609  	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  2610  	node := data.PathNode{
  2611  		BlockPointer: makeBP(rootID, rmd, config, u),
  2612  		Name:         testPPS("p"),
  2613  	}
  2614  	fileNode := data.PathNode{
  2615  		BlockPointer: makeBP(fileID, rmd, config, u),
  2616  		Name:         testPPS("f"),
  2617  	}
  2618  	p := data.Path{
  2619  		FolderBranch: data.FolderBranch{Tlf: id},
  2620  		Path:         []data.PathNode{node, fileNode},
  2621  	}
  2622  	ops := getOps(config, id)
  2623  	n := nodeFromPath(t, ops, p)
  2624  
  2625  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  2626  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  2627  
  2628  	data := fileBlock.Contents
  2629  	if err := config.KBFSOps().Truncate(ctx, n, 10); err != nil { // nolint
  2630  		t.Errorf("Got error on truncate: %+v", err)
  2631  	} else if config.observer.localChange != nil {
  2632  		t.Errorf("Unexpected local update during truncate: %v",
  2633  			config.observer.localChange)
  2634  	} else if !bytes.Equal(data, fileBlock.Contents) {
  2635  		t.Errorf("Wrote bad contents: %v", data)
  2636  	}
  2637  	checkBlockCache(ctx, t, config, id, []kbfsblock.ID{rootID, fileID}, nil)
  2638  }
  2639  
  2640  func TestKBFSOpsTruncateSmallerSuccess(t *testing.T) {
  2641  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  2642  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  2643  
  2644  	uid, id, rmd := injectNewRMD(t, config)
  2645  
  2646  	rootID := kbfsblock.FakeID(42)
  2647  	fileID := kbfsblock.FakeID(43)
  2648  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  2649  	rootBlock.Children["f"] = data.DirEntry{
  2650  		BlockInfo: data.BlockInfo{
  2651  			BlockPointer: makeBP(fileID, rmd, config, uid),
  2652  			EncodedSize:  1,
  2653  		},
  2654  		EntryInfo: data.EntryInfo{
  2655  			Type: data.File,
  2656  		},
  2657  	}
  2658  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  2659  	fileBlock.Contents = []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  2660  	node := data.PathNode{
  2661  		BlockPointer: makeBP(rootID, rmd, config, uid),
  2662  		Name:         testPPS("p"),
  2663  	}
  2664  	fileNode := data.PathNode{
  2665  		BlockPointer: makeBP(fileID, rmd, config, uid),
  2666  		Name:         testPPS("f"),
  2667  	}
  2668  	p := data.Path{
  2669  		FolderBranch: data.FolderBranch{Tlf: id},
  2670  		Path:         []data.PathNode{node, fileNode},
  2671  	}
  2672  	ops := getOps(config, id)
  2673  	n := nodeFromPath(t, ops, p)
  2674  
  2675  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  2676  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  2677  
  2678  	buf := []byte{1, 2, 3, 4, 5}
  2679  	if err := config.KBFSOps().Truncate(ctx, n, 5); err != nil {
  2680  		t.Errorf("Got error on truncate: %+v", err)
  2681  	}
  2682  
  2683  	newFileBlock := getFileBlockFromCache(
  2684  		ctx, t, config, id, fileNode.BlockPointer, p.Branch)
  2685  
  2686  	switch {
  2687  	case len(ops.nodeCache.PathFromNode(config.observer.localChange).Path) !=
  2688  		len(p.Path):
  2689  		t.Errorf("Missing or incorrect local update during truncate: %v",
  2690  			config.observer.localChange)
  2691  	case ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID):
  2692  		t.Errorf("Wrong context value passed in local notify: %v",
  2693  			config.observer.ctx.Value(tCtxID))
  2694  	case !bytes.Equal(buf, newFileBlock.Contents):
  2695  		t.Errorf("Wrote bad contents: %v", buf)
  2696  	}
  2697  	checkBlockCache(
  2698  		ctx, t, config, id, []kbfsblock.ID{rootID, fileID},
  2699  		map[data.BlockPointer]data.BranchName{
  2700  			node.BlockPointer:     p.Branch,
  2701  			fileNode.BlockPointer: p.Branch,
  2702  		})
  2703  	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
  2704  		[]WriteRange{{Off: 5, Len: 0}})
  2705  }
  2706  
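        // TestKBFSOpsTruncateShortensLastBlock checks that truncating an
        // indirect file partway into its last block shrinks that block in
        // place and leaves both indirect pointers intact.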
  2707  func TestKBFSOpsTruncateShortensLastBlock(t *testing.T) {
  2708  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  2709  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  2710  
  2711  	uid, id, rmd := injectNewRMD(t, config)
  2712  
  2713  	rootID := kbfsblock.FakeID(42)
  2714  	fileID := kbfsblock.FakeID(43)
  2715  	id1 := kbfsblock.FakeID(44)
  2716  	id2 := kbfsblock.FakeID(45)
  2717  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  2718  	fileInfo := makeBIFromID(fileID, uid)
  2719  	rootBlock.Children["f"] = data.DirEntry{
  2720  		BlockInfo: fileInfo,
  2721  		EntryInfo: data.EntryInfo{
  2722  			Size: 10,
  2723  		},
  2724  	}
  2725  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  2726  	fileBlock.IsInd = true
  2727  	fileBlock.IPtrs = []data.IndirectFilePtr{
  2728  		makeIFP(id1, rmd, config, uid, 5, 0),
  2729  		makeIFP(id2, rmd, config, uid, 6, 5),
  2730  	}
  2731  	block1 := data.NewFileBlock().(*data.FileBlock)
  2732  	block1.Contents = []byte{5, 4, 3, 2, 1}
  2733  	block2 := data.NewFileBlock().(*data.FileBlock)
  2734  	block2.Contents = []byte{10, 9, 8, 7, 6}
  2735  	node := data.PathNode{
  2736  		BlockPointer: makeBP(rootID, rmd, config, uid),
  2737  		Name:         testPPS("p"),
  2738  	}
  2739  	fileNode := data.PathNode{
  2740  		BlockPointer: makeBP(fileID, rmd, config, uid),
  2741  		Name:         testPPS("f"),
  2742  	}
  2743  	p := data.Path{
  2744  		FolderBranch: data.FolderBranch{Tlf: id},
  2745  		Path:         []data.PathNode{node, fileNode},
  2746  	}
  2747  	ops := getOps(config, id)
  2748  	n := nodeFromPath(t, ops, p)
  2749  	so, err := newSyncOp(fileInfo.BlockPointer)
  2750  	require.NoError(t, err)
  2751  	rmd.AddOp(so)
  2752  
  2753  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  2754  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  2755  	testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
  2756  	testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
  2757  
  2758  	data2 := []byte{10, 9}
  2759  	if err := config.KBFSOps().Truncate(ctx, n, 7); err != nil {
  2760  		t.Errorf("Got error on truncate: %+v", err)
  2761  	}
  2762  
  2763  	newPBlock := getFileBlockFromCache(
  2764  		ctx, t, config, id, fileNode.BlockPointer, p.Branch)
  2765  	newBlock1 := getFileBlockFromCache(
  2766  		ctx, t, config, id, fileBlock.IPtrs[0].BlockPointer, p.Branch)
  2767  	newBlock2 := getFileBlockFromCache(
  2768  		ctx, t, config, id, fileBlock.IPtrs[1].BlockPointer, p.Branch)
  2769  
  2770  	lState := makeFBOLockState()
  2771  
  2772  	// merge unref changes so we can easily check the block changes
  2773  	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
  2774  		[]WriteRange{{Off: 7, Len: 0}})
  2775  	mergeUnrefCache(ops, lState, p, rmd)
  2776  
  2777  	switch {
  2778  	case len(ops.nodeCache.PathFromNode(config.observer.localChange).Path) !=
  2779  		len(p.Path):
  2780  		t.Errorf("Missing or incorrect local update during truncate: %v",
  2781  			config.observer.localChange)
  2782  	case ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID):
  2783  		t.Errorf("Wrong context value passed in local notify: %v",
  2784  			config.observer.ctx.Value(tCtxID))
  2785  	case !bytes.Equal(block1.Contents, newBlock1.Contents):
  2786  		t.Errorf("Wrote bad contents for block 1: %v", newBlock1.Contents)
  2787  	case !bytes.Equal(data2, newBlock2.Contents):
  2788  		t.Errorf("Wrote bad contents for block 2: %v", newBlock2.Contents)
  2789  	case len(newPBlock.IPtrs) != 2:
  2790  		t.Errorf("Wrong number of indirect pointers: %d", len(newPBlock.IPtrs))
  2791  	case rmd.UnrefBytes() != 0+6:
  2792  		// The file ID and the last block were both modified and marked dirty
  2793  		t.Errorf("Truncated block not correctly unref'd, unrefBytes = %d",
  2794  			rmd.UnrefBytes())
  2795  	}
  2796  	checkBlockCache(
  2797  		ctx, t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
  2798  		map[data.BlockPointer]data.BranchName{
  2799  			node.BlockPointer:               p.Branch,
  2800  			fileNode.BlockPointer:           p.Branch,
  2801  			fileBlock.IPtrs[1].BlockPointer: p.Branch,
  2802  		})
  2803  }
  2804  
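        // TestKBFSOpsTruncateRemovesABlock checks that truncating an indirect
        // file to a point inside its first block drops the second block
        // entirely, leaving a single indirect pointer.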
  2805  func TestKBFSOpsTruncateRemovesABlock(t *testing.T) {
  2806  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  2807  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  2808  
  2809  	uid, id, rmd := injectNewRMD(t, config)
  2810  
  2811  	rootID := kbfsblock.FakeID(42)
  2812  	fileID := kbfsblock.FakeID(43)
  2813  	id1 := kbfsblock.FakeID(44)
  2814  	id2 := kbfsblock.FakeID(45)
  2815  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  2816  	fileInfo := makeBIFromID(fileID, uid)
  2817  	rootBlock.Children["f"] = data.DirEntry{
  2818  		BlockInfo: fileInfo,
  2819  		EntryInfo: data.EntryInfo{
  2820  			Size: 10,
  2821  		},
  2822  	}
  2823  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  2824  	fileBlock.IsInd = true
  2825  	fileBlock.IPtrs = []data.IndirectFilePtr{
  2826  		makeIFP(id1, rmd, config, uid, 5, 0),
  2827  		makeIFP(id2, rmd, config, uid, 6, 5),
  2828  	}
  2829  	block1 := data.NewFileBlock().(*data.FileBlock)
  2830  	block1.Contents = []byte{5, 4, 3, 2, 1}
  2831  	block2 := data.NewFileBlock().(*data.FileBlock)
  2832  	block2.Contents = []byte{10, 9, 8, 7, 6}
  2833  	node := data.PathNode{
  2834  		BlockPointer: makeBP(rootID, rmd, config, uid),
  2835  		Name:         testPPS("p"),
  2836  	}
  2837  	fileNode := data.PathNode{
  2838  		BlockPointer: makeBP(fileID, rmd, config, uid),
  2839  		Name:         testPPS("f"),
  2840  	}
  2841  	p := data.Path{
  2842  		FolderBranch: data.FolderBranch{Tlf: id},
  2843  		Path:         []data.PathNode{node, fileNode},
  2844  	}
  2845  	ops := getOps(config, id)
  2846  	n := nodeFromPath(t, ops, p)
  2847  	so, err := newSyncOp(fileInfo.BlockPointer)
  2848  	require.NoError(t, err)
  2849  	rmd.AddOp(so)
  2850  
  2851  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  2852  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  2853  	testPutBlockInCache(t, config, fileBlock.IPtrs[0].BlockPointer, id, block1)
  2854  	testPutBlockInCache(t, config, fileBlock.IPtrs[1].BlockPointer, id, block2)
  2855  
  2856  	buf := []byte{5, 4, 3, 2}
  2857  	if err := config.KBFSOps().Truncate(ctx, n, 4); err != nil {
  2858  		t.Errorf("Got error on truncate: %+v", err)
  2859  	}
  2860  
  2861  	newPBlock := getFileBlockFromCache(
  2862  		ctx, t, config, id, fileNode.BlockPointer, p.Branch)
  2863  	newBlock1 := getFileBlockFromCache(
  2864  		ctx, t, config, id, fileBlock.IPtrs[0].BlockPointer, p.Branch)
  2865  
  2866  	lState := makeFBOLockState()
  2867  
  2868  	// merge unref changes so we can easily check the block changes
  2869  	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
  2870  		[]WriteRange{{Off: 4, Len: 0}})
  2871  	mergeUnrefCache(ops, lState, p, rmd)
  2872  
  2873  	switch {
  2874  	case len(ops.nodeCache.PathFromNode(config.observer.localChange).Path) !=
  2875  		len(p.Path):
  2876  		t.Errorf("Missing or incorrect local update during truncate: %v",
  2877  			config.observer.localChange)
  2878  	case ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID):
  2879  		t.Errorf("Wrong context value passed in local notify: %v",
  2880  			config.observer.ctx.Value(tCtxID))
  2881  	case !bytes.Equal(buf, newBlock1.Contents):
  2882  		t.Errorf("Wrote bad contents: %v", newBlock1.Contents)
  2883  	case len(newPBlock.IPtrs) != 1:
  2884  		t.Errorf("Wrong number of indirect pointers: %d", len(newPBlock.IPtrs))
  2885  	case rmd.UnrefBytes() != 0+5+6:
  2886  		// The file ID and both blocks were modified and marked dirty
  2887  		t.Errorf("Truncated block not correctly unref'd, unrefBytes = %d",
  2888  			rmd.UnrefBytes())
  2889  	}
  2890  	checkBlockCache(
  2891  		ctx, t, config, id, []kbfsblock.ID{rootID, fileID, id1, id2},
  2892  		map[data.BlockPointer]data.BranchName{
  2893  			node.BlockPointer:               p.Branch,
  2894  			fileNode.BlockPointer:           p.Branch,
  2895  			fileBlock.IPtrs[0].BlockPointer: p.Branch,
  2896  		})
  2897  }
  2898  
  2899  func TestKBFSOpsTruncateBiggerSuccess(t *testing.T) {
  2900  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  2901  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  2902  
  2903  	uid, id, rmd := injectNewRMD(t, config)
  2904  
  2905  	rootID := kbfsblock.FakeID(42)
  2906  	fileID := kbfsblock.FakeID(43)
  2907  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  2908  	rootBlock.Children["f"] = data.DirEntry{
  2909  		BlockInfo: data.BlockInfo{
  2910  			BlockPointer: makeBP(fileID, rmd, config, uid),
  2911  			EncodedSize:  1,
  2912  		},
  2913  		EntryInfo: data.EntryInfo{
  2914  			Type: data.File,
  2915  		},
  2916  	}
  2917  	fileBlock := data.NewFileBlock().(*data.FileBlock)
  2918  	fileBlock.Contents = []byte{1, 2, 3, 4, 5}
  2919  	node := data.PathNode{
  2920  		BlockPointer: makeBP(rootID, rmd, config, uid),
  2921  		Name:         testPPS("p"),
  2922  	}
  2923  	fileNode := data.PathNode{
  2924  		BlockPointer: makeBP(fileID, rmd, config, uid),
  2925  		Name:         testPPS("f"),
  2926  	}
  2927  	p := data.Path{
  2928  		FolderBranch: data.FolderBranch{Tlf: id},
  2929  		Path:         []data.PathNode{node, fileNode},
  2930  	}
  2931  	ops := getOps(config, id)
  2932  	n := nodeFromPath(t, ops, p)
  2933  
  2934  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  2935  	testPutBlockInCache(t, config, fileNode.BlockPointer, id, fileBlock)
  2936  	config.mockBsplit.EXPECT().CopyUntilSplit(
  2937  		gomock.Any(), gomock.Any(), []byte{0, 0, 0, 0, 0}, int64(5)).
  2938  		Do(func(block *data.FileBlock, lb bool, data []byte, off int64) {
  2939  			block.Contents = append(block.Contents, data...)
  2940  		}).Return(int64(5))
  2941  
  2942  	buf := []byte{1, 2, 3, 4, 5, 0, 0, 0, 0, 0}
  2943  	if err := config.KBFSOps().Truncate(ctx, n, 10); err != nil {
  2944  		t.Errorf("Got error on truncate: %+v", err)
  2945  	}
  2946  
  2947  	newFileBlock := getFileBlockFromCache(
  2948  		ctx, t, config, id, fileNode.BlockPointer, p.Branch)
  2949  
  2950  	switch {
  2951  	case len(ops.nodeCache.PathFromNode(config.observer.localChange).Path) !=
  2952  		len(p.Path):
  2953  		t.Errorf("Missing or incorrect local update during truncate: %v",
  2954  			config.observer.localChange)
  2955  	case ctx.Value(tCtxID) != config.observer.ctx.Value(tCtxID):
  2956  		t.Errorf("Wrong context value passed in local notify: %v",
  2957  			config.observer.ctx.Value(tCtxID))
  2958  	case !bytes.Equal(buf, newFileBlock.Contents):
  2959  		t.Errorf("Wrote bad contents: %v", newFileBlock.Contents)
  2960  	}
  2961  	checkBlockCache(
  2962  		ctx, t, config, id, []kbfsblock.ID{rootID, fileID},
  2963  		map[data.BlockPointer]data.BranchName{
  2964  			node.BlockPointer:     p.Branch,
  2965  			fileNode.BlockPointer: p.Branch,
  2966  		})
  2967  	// A truncate past the end of the file actually translates into a
  2968  	// write for the difference (see the sketch after this test).
  2969  	checkSyncOpInCache(t, config.Codec(), ops, fileNode.BlockPointer,
  2970  		[]WriteRange{{Off: 5, Len: 5}})
  2971  }
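
        // TestKBFSOpsTruncateBiggerSketch is a minimal, hypothetical sketch
        // (not part of the original suite) of the same past-EOF behavior,
        // using only helpers that appear elsewhere in this file: truncating a
        // 5-byte file up to 10 bytes is expected to behave like a zero-fill
        // write of the 5-byte difference, so the file reports the extended
        // size afterwards.
        func TestKBFSOpsTruncateBiggerSketch(t *testing.T) {
        	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
        	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
        
        	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)
        	kbfsOps := config.KBFSOps()
        	fileNode, _, err := kbfsOps.CreateFile(
        		ctx, rootNode, testPPS("f"), false, NoExcl)
        	require.NoError(t, err)
        
        	// Write 5 bytes, then truncate past the end of the file.
        	err = kbfsOps.Write(ctx, fileNode, []byte{1, 2, 3, 4, 5}, 0)
        	require.NoError(t, err)
        	err = kbfsOps.Truncate(ctx, fileNode, 10)
        	require.NoError(t, err)
        	err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
        	require.NoError(t, err)
        
        	// The truncate should have extended the file with zeros.
        	ei, err := kbfsOps.Stat(ctx, fileNode)
        	require.NoError(t, err)
        	if ei.Size != 10 {
        		t.Errorf("Stat size %d unexpectedly not 10", ei.Size)
        	}
        }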
  2972  
  2973  func TestSetExFailNoSuchName(t *testing.T) {
  2974  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  2975  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  2976  
  2977  	u, id, rmd := injectNewRMD(t, config)
  2978  
  2979  	rootID := kbfsblock.FakeID(42)
  2980  	rmd.data.Dir.ID = rootID
  2981  	aID := kbfsblock.FakeID(43)
  2982  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  2983  	node := data.PathNode{
  2984  		BlockPointer: makeBP(rootID, rmd, config, u),
  2985  		Name:         testPPS("p"),
  2986  	}
  2987  	aNode := data.PathNode{
  2988  		BlockPointer: makeBP(aID, rmd, config, u),
  2989  		Name:         testPPS("a"),
  2990  	}
  2991  	p := data.Path{
  2992  		FolderBranch: data.FolderBranch{Tlf: id},
  2993  		Path:         []data.PathNode{node, aNode},
  2994  	}
  2995  	ops := getOps(config, id)
  2996  	n := nodeFromPath(t, ops, p)
  2997  
  2998  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  2999  	expectedErr := idutil.NoSuchNameError{Name: p.TailName().Plaintext()}
  3000  
  3001  	// chmod a+x a
  3002  	if err := config.KBFSOps().SetEx(ctx, n, true); err == nil {
  3003  		t.Errorf("Didn't get expected error on setex")
  3004  	} else if err != expectedErr {
  3005  		t.Errorf("Got unexpected error on setex: %+v", err)
  3006  	}
  3007  }
  3008  
  3009  // Other SetEx failure cases are all the same as any other block sync
  3010  
  3011  func TestSetMtimeNull(t *testing.T) {
  3012  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  3013  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  3014  
  3015  	u, id, rmd := injectNewRMD(t, config)
  3016  
  3017  	rootID := kbfsblock.FakeID(42)
  3018  	aID := kbfsblock.FakeID(43)
  3019  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  3020  	oldMtime := time.Now().UnixNano()
  3021  	rootBlock.Children["a"] = data.DirEntry{
  3022  		BlockInfo: makeBIFromID(aID, u),
  3023  		EntryInfo: data.EntryInfo{
  3024  			Type:  data.File,
  3025  			Mtime: oldMtime,
  3026  		},
  3027  	}
  3028  	node := data.PathNode{
  3029  		BlockPointer: makeBP(rootID, rmd, config, u),
  3030  		Name:         testPPS("p"),
  3031  	}
  3032  	aNode := data.PathNode{
  3033  		BlockPointer: makeBP(aID, rmd, config, u),
  3034  		Name:         testPPS("a"),
  3035  	}
  3036  	p := data.Path{
  3037  		FolderBranch: data.FolderBranch{Tlf: id},
  3038  		Path:         []data.PathNode{node, aNode},
  3039  	}
  3040  	ops := getOps(config, id)
  3041  	n := nodeFromPath(t, ops, p)
  3042  
  3043  	if err := config.KBFSOps().SetMtime(ctx, n, nil); err != nil {
  3044  		t.Errorf("Got unexpected error on null setmtime: %+v", err)
  3045  	}
  3046  	newP := ops.nodeCache.PathFromNode(n)
  3047  	if rootBlock.Children["a"].Mtime != oldMtime {
  3048  		t.Errorf("a has wrong mtime: %v", rootBlock.Children["a"].Mtime)
  3049  	} else if newP.Path[0].ID != p.Path[0].ID {
  3050  		t.Errorf("Got back a changed path for null setmtime test: %v", newP)
  3051  	}
  3052  	checkBlockCache(ctx, t, config, id, nil, nil)
  3053  }
  3054  
  3055  func TestMtimeFailNoSuchName(t *testing.T) {
  3056  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  3057  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  3058  
  3059  	u, id, rmd := injectNewRMD(t, config)
  3060  
  3061  	rootID := kbfsblock.FakeID(42)
  3062  	rmd.data.Dir.ID = rootID
  3063  	aID := kbfsblock.FakeID(43)
  3064  	rootBlock := data.NewDirBlock().(*data.DirBlock)
  3065  	node := data.PathNode{
  3066  		BlockPointer: makeBP(rootID, rmd, config, u),
  3067  		Name:         testPPS("p"),
  3068  	}
  3069  	aNode := data.PathNode{
  3070  		BlockPointer: makeBP(aID, rmd, config, u),
  3071  		Name:         testPPS("a"),
  3072  	}
  3073  	p := data.Path{
  3074  		FolderBranch: data.FolderBranch{Tlf: id},
  3075  		Path:         []data.PathNode{node, aNode},
  3076  	}
  3077  	ops := getOps(config, id)
  3078  	n := nodeFromPath(t, ops, p)
  3079  
  3080  	testPutBlockInCache(t, config, node.BlockPointer, id, rootBlock)
  3081  	expectedErr := idutil.NoSuchNameError{Name: p.TailName().Plaintext()}
  3082  
  3083  	newMtime := time.Now()
  3084  	if err := config.KBFSOps().SetMtime(ctx, n, &newMtime); err == nil {
  3085  		t.Errorf("Didn't get expected error on setmtime")
  3086  	} else if err != expectedErr {
  3087  		t.Errorf("Got unexpected error on setmtime: %+v", err)
  3088  	}
  3089  }
  3090  
  3091  // SetMtime failure cases are all the same as any other block sync
  3092  
  3093  func TestSyncCleanSuccess(t *testing.T) {
  3094  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  3095  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  3096  
  3097  	u, id, rmd := injectNewRMD(t, config)
  3098  
  3099  	rootID := kbfsblock.FakeID(42)
  3100  	rmd.data.Dir.ID = rootID
  3101  	aID := kbfsblock.FakeID(43)
  3102  	node := data.PathNode{
  3103  		BlockPointer: makeBP(rootID, rmd, config, u),
  3104  		Name:         testPPS("p"),
  3105  	}
  3106  	aNode := data.PathNode{
  3107  		BlockPointer: makeBP(aID, rmd, config, u),
  3108  		Name:         testPPS("a"),
  3109  	}
  3110  	p := data.Path{
  3111  		FolderBranch: data.FolderBranch{Tlf: id},
  3112  		Path:         []data.PathNode{node, aNode},
  3113  	}
  3114  	ops := getOps(config, id)
  3115  	n := nodeFromPath(t, ops, p)
  3116  
  3117  	// fsync a
  3118  	if err := config.KBFSOps().SyncAll(ctx, n.GetFolderBranch()); err != nil {
  3119  		t.Errorf("Got unexpected error on sync: %+v", err)
  3120  	}
  3121  	newP := ops.nodeCache.PathFromNode(n)
  3122  	if len(newP.Path) != len(p.Path) {
  3123  		// should be the exact same path back
  3124  		t.Errorf("Got a different length path back: %v", newP)
  3125  	} else {
  3126  		for i, n := range newP.Path {
  3127  			if n != p.Path[i] {
  3128  				t.Errorf("Node %d differed: %v", i, n)
  3129  			}
  3130  		}
  3131  	}
  3132  	checkBlockCache(ctx, t, config, id, nil, nil)
  3133  }
  3134  
  3135  func TestKBFSOpsStatRootSuccess(t *testing.T) {
  3136  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  3137  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  3138  
  3139  	id, h, rmd := createNewRMD(t, config, "alice", tlf.Private)
  3140  
  3141  	ops := getOps(config, id)
  3142  	ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
  3143  	ops.headStatus = headTrusted
  3144  
  3145  	u := h.FirstResolvedWriter()
  3146  	rootID := kbfsblock.FakeID(42)
  3147  	node := data.PathNode{
  3148  		BlockPointer: makeBP(rootID, rmd, config, u),
  3149  		Name:         testPPS("p"),
  3150  	}
  3151  	p := data.Path{
  3152  		FolderBranch: data.FolderBranch{Tlf: id},
  3153  		Path:         []data.PathNode{node},
  3154  	}
  3155  	n := nodeFromPath(t, ops, p)
  3156  
  3157  	_, err := config.KBFSOps().Stat(ctx, n)
  3158  	if err != nil {
  3159  		t.Errorf("Error on Stat: %+v", err)
  3160  	}
  3161  }
  3162  
  3163  func TestKBFSOpsFailingRootOps(t *testing.T) {
  3164  	mockCtrl, config, ctx, cancel := kbfsOpsInit(t)
  3165  	defer kbfsTestShutdown(ctx, t, mockCtrl, config, cancel)
  3166  
  3167  	id, h, rmd := createNewRMD(t, config, "alice", tlf.Private)
  3168  
  3169  	ops := getOps(config, id)
  3170  	ops.head = makeImmutableRMDForTest(t, config, rmd, kbfsmd.FakeID(1))
  3171  	ops.headStatus = headTrusted
  3172  
  3173  	u := h.FirstResolvedWriter()
  3174  	rootID := kbfsblock.FakeID(42)
  3175  	rmd.data.Dir.BlockPointer = makeBP(rootID, rmd, config, u)
  3176  	node := data.PathNode{
  3177  		BlockPointer: rmd.data.Dir.BlockPointer,
  3178  		Name:         testPPS("p"),
  3179  	}
  3180  	p := data.Path{
  3181  		FolderBranch: data.FolderBranch{Tlf: id},
  3182  		Path:         []data.PathNode{node},
  3183  	}
  3184  	n := nodeFromPath(t, ops, p)
  3185  
  3186  	// TODO: Make sure Read, Write, and Truncate fail also with
  3187  	// InvalidPathError{}.
  3188  
  3189  	err := config.KBFSOps().SetEx(ctx, n, true)
  3190  	if _, ok := err.(InvalidParentPathError); !ok {
  3191  		t.Errorf("Unexpected error on SetEx: %+v", err)
  3192  	}
  3193  
  3194  	err = config.KBFSOps().SetMtime(ctx, n, &time.Time{})
  3195  	if _, ok := err.(InvalidParentPathError); !ok {
  3196  		t.Errorf("Unexpected error on SetMtime: %+v", err)
  3197  	}
  3198  
  3199  	// TODO: Sync succeeds, but it should fail. Fix this!
  3200  }
  3201  
  3202  // Tests that the background flusher will sync a dirty file if the
  3203  // application does not.
  3204  func TestKBFSOpsBackgroundFlush(t *testing.T) {
  3205  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
  3206  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
  3207  	config.noBGFlush = true
  3208  
  3209  	// create a file.
  3210  	rootNode := GetRootNodeOrBust(ctx, t, config, "alice,bob", tlf.Private)
  3211  
  3212  	kbfsOps := config.KBFSOps()
  3213  	nodeA, _, err := kbfsOps.CreateFile(
  3214  		ctx, rootNode, testPPS("a"), false, NoExcl)
  3215  	if err != nil {
  3216  		t.Fatalf("Couldn't create file: %+v", err)
  3217  	}
  3218  
  3219  	ops := getOps(config, rootNode.GetFolderBranch().Tlf)
  3220  	oldPtr := ops.nodeCache.PathFromNode(nodeA).TailPointer()
  3221  
  3222  	staller := NewNaïveStaller(config)
  3223  	staller.StallMDOp(StallableMDAfterPut, 1, false)
  3224  
  3225  	// start the background flusher
  3226  	config.SetBGFlushPeriod(1 * time.Millisecond)
  3227  	ops.goTracked(ops.backgroundFlusher)
  3228  
  3229  	// Wait for the stall to know the background work is done.
  3230  	staller.WaitForStallMDOp(StallableMDAfterPut)
  3231  	staller.UnstallOneMDOp(StallableMDAfterPut)
  3232  
  3233  	// Do our own SyncAll now to ensure we wait for the bg flusher to
  3234  	// finish.
  3235  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  3236  	if err != nil {
  3237  		t.Fatalf("Couldn't sync all: %+v", err)
  3238  	}
  3239  
  3240  	newPtr := ops.nodeCache.PathFromNode(nodeA).TailPointer()
  3241  	if oldPtr == newPtr {
  3242  		t.Fatalf("Background sync didn't update pointers")
  3243  	}
  3244  }
  3245  
  3246  func TestKBFSOpsWriteRenameStat(t *testing.T) {
  3247  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
  3248  	// TODO: Use kbfsTestShutdownNoMocks.
  3249  	defer kbfsTestShutdownNoMocksNoCheck(ctx, t, config, cancel)
  3250  
  3251  	// create a file.
  3252  	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)
  3253  
  3254  	kbfsOps := config.KBFSOps()
  3255  	fileNode, _, err := kbfsOps.CreateFile(
  3256  		ctx, rootNode, testPPS("a"), false, NoExcl)
  3257  	if err != nil {
  3258  		t.Fatalf("Couldn't create file: %+v", err)
  3259  	}
  3260  
  3261  	// Write to it.
  3262  	data := []byte{1}
  3263  	err = kbfsOps.Write(ctx, fileNode, data, 0)
  3264  	if err != nil {
  3265  		t.Fatalf("Couldn't write to file: %+v", err)
  3266  	}
  3267  
  3268  	// Stat it.
  3269  	ei, err := kbfsOps.Stat(ctx, fileNode)
  3270  	if err != nil {
  3271  		t.Fatalf("Couldn't stat file: %+v", err)
  3272  	}
  3273  	if ei.Size != 1 {
  3274  		t.Errorf("Stat size %d unexpectedly not 1", ei.Size)
  3275  	}
  3276  
  3277  	// Rename it.
  3278  	err = kbfsOps.Rename(ctx, rootNode, testPPS("a"), rootNode, testPPS("b"))
  3279  	if err != nil {
  3280  		t.Fatalf("Couldn't rename: %+v", err)
  3281  	}
  3282  
  3283  	// Stat it again.
  3284  	newEi, err := kbfsOps.Stat(ctx, fileNode)
  3285  	if err != nil {
  3286  		t.Fatalf("Couldn't stat file: %+v", err)
  3287  	}
  3288  	// CTime is allowed to change after a rename, but nothing else.
  3289  	if ei.Type != newEi.Type || ei.Size != newEi.Size ||
  3290  		ei.Mtime != newEi.Mtime {
  3291  		t.Errorf("Entry info unexpectedly changed from %+v to %+v", ei, newEi)
  3292  	}
  3293  }
  3294  
  3295  func TestKBFSOpsWriteRenameGetDirChildren(t *testing.T) {
  3296  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
  3297  	// TODO: Use kbfsTestShutdownNoMocks.
  3298  	defer kbfsTestShutdownNoMocksNoCheck(ctx, t, config, cancel)
  3299  
  3300  	// create a file.
  3301  	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)
  3302  
  3303  	kbfsOps := config.KBFSOps()
  3304  	fileNode, _, err := kbfsOps.CreateFile(
  3305  		ctx, rootNode, testPPS("a"), false, NoExcl)
  3306  	if err != nil {
  3307  		t.Fatalf("Couldn't create file: %+v", err)
  3308  	}
  3309  
  3310  	// Write to it.
  3311  	data := []byte{1}
  3312  	err = kbfsOps.Write(ctx, fileNode, data, 0)
  3313  	if err != nil {
  3314  		t.Fatalf("Couldn't write to file: %+v", err)
  3315  	}
  3316  
  3317  	// Stat it.
  3318  	ei, err := kbfsOps.Stat(ctx, fileNode)
  3319  	if err != nil {
  3320  		t.Fatalf("Couldn't stat file: %+v", err)
  3321  	}
  3322  	if ei.Size != 1 {
  3323  		t.Errorf("Stat size %d unexpectedly not 1", ei.Size)
  3324  	}
  3325  
  3326  	// Rename it.
  3327  	err = kbfsOps.Rename(ctx, rootNode, testPPS("a"), rootNode, testPPS("b"))
  3328  	if err != nil {
  3329  		t.Fatalf("Couldn't rename: %+v", err)
  3330  	}
  3331  
  3332  	// Get the stats via GetDirChildren.
  3333  	eis, err := kbfsOps.GetDirChildren(ctx, rootNode)
  3334  	if err != nil {
  3335  		t.Fatalf("Couldn't stat file: %+v", err)
  3336  	}
  3337  	// CTime is allowed to change after a rename, but nothing else.
  3338  	if newEi := eis[rootNode.ChildName("b")]; ei.Type != newEi.Type ||
  3339  		ei.Size != newEi.Size || ei.Mtime != newEi.Mtime {
  3340  		t.Errorf("Entry info unexpectedly changed from %+v to %+v",
  3341  			ei, eis[rootNode.ChildName("b")])
  3342  	}
  3343  }
  3344  
  3345  func TestKBFSOpsCreateFileWithArchivedBlock(t *testing.T) {
  3346  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
  3347  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
  3348  
  3349  	// create a file.
  3350  	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)
  3351  
  3352  	kbfsOps := config.KBFSOps()
  3353  	_, _, err := kbfsOps.CreateFile(ctx, rootNode, testPPS("a"), false, NoExcl)
  3354  	if err != nil {
  3355  		t.Fatalf("Couldn't create file: %+v", err)
  3356  	}
  3357  
  3358  	// Remove the file, which will archive the block
  3359  	err = kbfsOps.RemoveEntry(ctx, rootNode, testPPS("a"))
  3360  	if err != nil {
  3361  		t.Fatalf("Couldn't remove file: %+v", err)
  3362  	}
  3363  
  3364  	// Wait for the archiving to finish
  3365  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  3366  	if err != nil {
  3367  		t.Fatalf("Couldn't sync from server: %+v", err)
  3368  	}
  3369  
  3370  	// Create a second file, which will use the same initial block ID
  3371  	// from the cache, even though it's been archived, and will be
  3372  	// forced to try again.
  3373  	_, _, err = kbfsOps.CreateFile(ctx, rootNode, testPPS("b"), false, NoExcl)
  3374  	if err != nil {
  3375  		t.Fatalf("Couldn't create second file: %+v", err)
  3376  	}
  3377  }
  3378  
  3379  func TestKBFSOpsMultiBlockSyncWithArchivedBlock(t *testing.T) {
  3380  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
  3381  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
  3382  
  3383  	// Make the blocks small, with multiple levels of indirection, but
  3384  	// make the unembedded size large, so we don't create thousands of
  3385  	// unembedded block change blocks.
  3386  	blockSize := int64(5)
  3387  	bsplit, err := data.NewBlockSplitterSimpleExact(blockSize, 2, 100*1024)
  3388  	require.NoError(t, err)
  3389  	config.SetBlockSplitter(bsplit)
  3390  
  3391  	// create a file.
  3392  	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)
  3393  
  3394  	kbfsOps := config.KBFSOps()
  3395  	fileNode, _, err := kbfsOps.CreateFile(
  3396  		ctx, rootNode, testPPS("a"), false, NoExcl)
  3397  	if err != nil {
  3398  		t.Fatalf("Couldn't create file: %+v", err)
  3399  	}
  3400  
  3401  	// Write a few blocks
  3402  	data := []byte{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
  3403  	err = kbfsOps.Write(ctx, fileNode, data, 0)
  3404  	if err != nil {
  3405  		t.Fatalf("Couldn't write file: %+v", err)
  3406  	}
  3407  
  3408  	err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
  3409  	if err != nil {
  3410  		t.Fatalf("Couldn't sync file: %+v", err)
  3411  	}
  3412  
  3413  	// Now overwrite those blocks to archive them
  3414  	newData := []byte{11, 12, 13, 14, 15, 16, 17, 18, 19, 20}
  3415  	err = kbfsOps.Write(ctx, fileNode, newData, 0)
  3416  	if err != nil {
  3417  		t.Fatalf("Couldn't write file: %+v", err)
  3418  	}
  3419  
  3420  	err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
  3421  	if err != nil {
  3422  		t.Fatalf("Couldn't sync file: %+v", err)
  3423  	}
  3424  
  3425  	// Wait for the archiving to finish
  3426  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  3427  	if err != nil {
  3428  		t.Fatalf("Couldn't sync from server: %+v", err)
  3429  	}
  3430  
  3431  	// Now write the original first block, which has been archived,
  3432  	// and make sure it works.
  3433  	err = kbfsOps.Write(ctx, fileNode, data[0:blockSize], 0)
  3434  	if err != nil {
  3435  		t.Fatalf("Couldn't write file: %+v", err)
  3436  	}
  3437  
  3438  	err = kbfsOps.SyncAll(ctx, fileNode.GetFolderBranch())
  3439  	if err != nil {
  3440  		t.Fatalf("Couldn't sync file: %+v", err)
  3441  	}
  3442  }
  3443  
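        // corruptBlockServer wraps a BlockServer and appends an extra byte to
        // every block it returns, so the data no longer matches the block's
        // hash and reads of such blocks must fail verification.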
  3444  type corruptBlockServer struct {
  3445  	BlockServer
  3446  }
  3447  
  3448  func (cbs corruptBlockServer) Get(
  3449  	ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
  3450  	context kbfsblock.Context, cacheType DiskBlockCacheType) (
  3451  	[]byte, kbfscrypto.BlockCryptKeyServerHalf, error) {
  3452  	data, keyServerHalf, err := cbs.BlockServer.Get(
  3453  		ctx, tlfID, id, context, cacheType)
  3454  	if err != nil {
  3455  		return nil, kbfscrypto.BlockCryptKeyServerHalf{}, err
  3456  	}
  3457  	return append(data, 0), keyServerHalf, nil
  3458  }
  3459  
  3460  func TestKBFSOpsFailToReadUnverifiableBlock(t *testing.T) {
  3461  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
  3462  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
  3463  	config.SetBlockServer(&corruptBlockServer{
  3464  		BlockServer: config.BlockServer(),
  3465  	})
  3466  
  3467  	// create a file.
  3468  	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)
  3469  
  3470  	kbfsOps := config.KBFSOps()
  3471  	_, _, err := kbfsOps.CreateFile(ctx, rootNode, testPPS("a"), false, NoExcl)
  3472  	require.NoError(t, err)
  3476  
  3477  	// Read using a different "device"
  3478  	config2 := ConfigAsUser(config, "test_user")
  3479  	defer CheckConfigAndShutdown(ctx, t, config2)
  3480  	// Shutdown the mdserver explicitly before the state checker tries to run
  3481  	defer config2.MDServer().Shutdown()
  3482  
  3483  	rootNode2, err := GetRootNodeForTest(ctx, config2, "test_user", tlf.Private)
  3484  	require.NoError(t, err)
  3485  	_, err = config2.KBFSOps().GetDirChildren(ctx, rootNode2)
  3486  	require.IsType(t, kbfshash.HashMismatchError{}, errors.Cause(err))
  3487  }
  3488  
  3489  // Test that the size of a single empty block doesn't change.  If this
  3490  // test ever fails, consult max or strib before merging.
  3491  func TestKBFSOpsEmptyTlfSize(t *testing.T) {
  3492  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
  3493  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
  3494  
  3495  	// Create a TLF.
  3496  	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)
  3497  	status, _, err := config.KBFSOps().FolderStatus(ctx,
  3498  		rootNode.GetFolderBranch())
  3499  	if err != nil {
  3500  		t.Fatalf("Couldn't get folder status: %+v", err)
  3501  	}
  3502  	if status.DiskUsage != 313 {
  3503  		t.Fatalf("Disk usage of an empty TLF is no longer 313.  " +
  3504  			"Talk to max or strib about why this matters.")
  3505  	}
  3506  }
  3507  
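        // cryptoFixedTlf wraps a Crypto implementation but always returns a
        // fixed TLF ID from MakeRandomTlfID, so a second user can "create" a
        // TLF that collides with an existing one.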
  3508  type cryptoFixedTlf struct {
  3509  	Crypto
  3510  	tlf tlf.ID
  3511  }
  3512  
  3513  func (c cryptoFixedTlf) MakeRandomTlfID(t tlf.Type) (tlf.ID, error) {
  3514  	return c.tlf, nil
  3515  }
  3516  
  3517  // TestKBFSOpsMaliciousMDServerRange tries to trick KBFSOps into
  3518  // accepting bad MDs.
  3519  func TestKBFSOpsMaliciousMDServerRange(t *testing.T) {
  3520  	config1, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "mallory")
  3521  	// TODO: Use kbfsTestShutdownNoMocks.
  3522  	defer kbfsTestShutdownNoMocksNoCheck(ctx, t, config1, cancel)
  3523  	// Turn off tlf edit history because it messes with the FBO state
  3524  	// asynchronously.
  3525  	config1.SetMode(modeNoHistory{config1.Mode()})
  3526  
  3527  	// Create alice's TLF.
  3528  	rootNode1 := GetRootNodeOrBust(ctx, t, config1, "alice", tlf.Private)
  3529  	fb1 := rootNode1.GetFolderBranch()
  3530  
  3531  	kbfsOps1 := config1.KBFSOps()
  3532  
  3533  	_, _, err := kbfsOps1.CreateFile(
  3534  		ctx, rootNode1, testPPS("dummy.txt"), false, NoExcl)
  3535  	require.NoError(t, err)
  3536  	err = kbfsOps1.SyncAll(ctx, rootNode1.GetFolderBranch())
  3537  	require.NoError(t, err)
  3538  
  3539  	// Create mallory's fake TLF using the same TLF ID as alice's.
  3540  	config2 := ConfigAsUser(config1, "mallory")
  3541  	defer func() { _ = config2.Shutdown(ctx) }()
  3542  	config2.SetMode(modeNoHistory{config2.Mode()})
  3543  	crypto2 := cryptoFixedTlf{config2.Crypto(), fb1.Tlf}
  3544  	config2.SetCrypto(crypto2)
  3545  	mdserver2, err := NewMDServerMemory(mdServerLocalConfigAdapter{config2})
  3546  	require.NoError(t, err)
  3547  	config2.MDServer().Shutdown()
  3548  	config2.SetMDServer(mdserver2)
  3549  	config2.SetMDCache(NewMDCacheStandard(1))
  3550  
  3551  	rootNode2 := GetRootNodeOrBust(
  3552  		ctx, t, config2, "alice,mallory", tlf.Private)
  3553  	require.Equal(t, fb1.Tlf, rootNode2.GetFolderBranch().Tlf)
  3554  
  3555  	kbfsOps2 := config2.KBFSOps()
  3556  
  3557  	// Add some operations to get mallory's TLF to have a higher
  3558  	// MetadataVersion.
  3559  	_, _, err = kbfsOps2.CreateFile(
  3560  		ctx, rootNode2, testPPS("dummy.txt"), false, NoExcl)
  3561  	require.NoError(t, err)
  3562  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  3563  	require.NoError(t, err)
  3564  	err = kbfsOps2.RemoveEntry(ctx, rootNode2, testPPS("dummy.txt"))
  3565  	require.NoError(t, err)
  3566  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  3567  	require.NoError(t, err)
  3568  
  3569  	// Now route alice's TLF to mallory's MD server.
  3570  	config1.SetMDServer(mdserver2.copy(mdServerLocalConfigAdapter{config1}))
  3571  
  3572  	// Simulate the server triggering alice to update.
  3573  	config1.SetKeyCache(NewKeyCacheStandard(1))
  3574  	err = kbfsOps1.SyncFromServer(ctx, fb1, nil)
  3575  	// TODO: We can actually fake out the PrevRoot pointer, too
  3576  	// and then we'll be caught by the handle check. But when we
  3577  	// have MDOps do the handle check, that'll trigger first.
  3578  	require.IsType(t, kbfsmd.MDPrevRootMismatch{}, err)
  3579  }
  3580  
  3581  // TODO: Test malicious mdserver and rekey flow against wrong
  3582  // TLFs being introduced upon rekey.
  3583  
  3584  // Test that if GetTLFCryptKeys fails to create a TLF, the second
  3585  // attempt will also fail with the same error.  Regression test for
  3586  // KBFS-1929.
  3587  func TestGetTLFCryptKeysAfterFirstError(t *testing.T) {
  3588  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice")
  3589  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
  3590  
  3591  	createErr := errors.New("Cannot create this TLF")
  3592  	mdserver := &shimMDServer{
  3593  		MDServer: config.MDServer(),
  3594  		nextErr:  createErr,
  3595  	}
  3596  	config.SetMDServer(mdserver)
  3597  
  3598  	id := tlf.FakeID(1, tlf.Private)
  3599  	h := parseTlfHandleOrBust(t, config, "alice", tlf.Private, id)
  3600  
  3601  	_, _, err := config.KBFSOps().GetTLFCryptKeys(ctx, h)
  3602  	if err != createErr {
  3603  		t.Fatalf("Got unexpected error when creating TLF: %+v", err)
  3604  	}
  3605  
  3606  	// Reset the error.
  3607  	mdserver.nextErr = createErr
  3608  	// Should get the same error, otherwise something's wrong.
  3609  	_, _, err = config.KBFSOps().GetTLFCryptKeys(ctx, h)
  3610  	if err != createErr {
  3611  		t.Fatalf("Got unexpected error when creating TLF: %+v", err)
  3612  	}
  3613  }
  3614  
  3615  func TestForceFastForwardOnEmptyTLF(t *testing.T) {
  3616  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "alice", "bob")
  3617  	// TODO: Use kbfsTestShutdownNoMocks.
  3618  	defer kbfsTestShutdownNoMocksNoCheck(ctx, t, config, cancel)
  3619  
  3620  	// Look up bob's public folder.
  3621  	h, err := tlfhandle.ParseHandle(
  3622  		ctx, config.KBPKI(), config.MDOps(), nil, "bob", tlf.Public)
  3623  	require.NoError(t, err)
  3624  	_, _, err = config.KBFSOps().GetOrCreateRootNode(ctx, h, data.MasterBranch)
  3625  	if _, ok := err.(tlfhandle.WriteAccessError); !ok {
  3626  		t.Fatalf("Unexpected err reading a public TLF: %+v", err)
  3627  	}
  3628  
  3629  	// There's only one folder at this point.
  3630  	kbfsOps := config.KBFSOps().(*KBFSOpsStandard)
  3631  	kbfsOps.opsLock.RLock()
  3632  	var ops *folderBranchOps
  3633  	for _, fbo := range kbfsOps.ops {
  3634  		ops = fbo
  3635  		break
  3636  	}
  3637  	kbfsOps.opsLock.RUnlock()
  3638  
  3639  	// FastForward shouldn't do anything, since the TLF hasn't been
  3640  	// cleared yet.
  3641  	config.KBFSOps().ForceFastForward(ctx)
  3642  	err = ops.forcedFastForwards.Wait(ctx)
  3643  	if err != nil {
  3644  		t.Fatalf("Couldn't wait for fast forward: %+v", err)
  3645  	}
  3646  }
  3647  
  3648  // Regression test for KBFS-2161.
  3649  func TestDirtyPathsAfterRemoveDir(t *testing.T) {
  3650  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
  3651  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
  3652  
  3653  	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)
  3654  	kbfsOps := config.KBFSOps()
  3655  
  3656  	// Don't let the prefetcher bring the block back into the cache.
  3657  	config.BlockOps().Prefetcher().Shutdown()
  3658  
  3659  	// Create a/b/c.
  3660  	nodeA, _, err := kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
  3661  	require.NoError(t, err)
  3662  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  3663  	require.NoError(t, err)
  3664  	nodeB, _, err := kbfsOps.CreateDir(ctx, nodeA, testPPS("b"))
  3665  	require.NoError(t, err)
  3666  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  3667  	require.NoError(t, err)
  3668  	nodeC, _, err := kbfsOps.CreateFile(ctx, nodeB, testPPS("c"), false, NoExcl)
  3669  	require.NoError(t, err)
  3670  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  3671  	require.NoError(t, err)
  3672  
  3673  	// Remove node c from the block cache, to guarantee it's not
  3674  	// needed during the removal.
  3675  	ops := getOps(config, rootNode.GetFolderBranch().Tlf)
  3676  	ptrC := ops.nodeCache.PathFromNode(nodeC).TailPointer()
  3677  	err = config.BlockCache().DeleteTransient(
  3678  		ptrC.ID, rootNode.GetFolderBranch().Tlf)
  3679  	require.NoError(t, err)
  3680  
  3681  	// Remove c.
  3682  	err = kbfsOps.RemoveEntry(ctx, nodeB, testPPS("c"))
  3683  	require.NoError(t, err)
  3684  
  3685  	// Now a/b should be dirty.
  3686  	status, _, err := kbfsOps.FolderStatus(ctx, rootNode.GetFolderBranch())
  3687  	require.NoError(t, err)
  3688  	require.Len(t, status.DirtyPaths, 1)
  3689  	require.Equal(t, "test_user/a/b", status.DirtyPaths[0])
  3690  
  3691  	// Now remove b, and make sure a/b is no longer dirty.
  3692  	err = kbfsOps.RemoveDir(ctx, nodeA, testPPS("b"))
  3693  	require.NoError(t, err)
  3694  	status, _, err = kbfsOps.FolderStatus(ctx, rootNode.GetFolderBranch())
  3695  	require.NoError(t, err)
  3696  	require.Len(t, status.DirtyPaths, 1)
  3697  	require.Equal(t, "test_user/a", status.DirtyPaths[0])
  3698  
  3699  	// Also make sure we can no longer create anything in the removed
  3700  	// directory.
  3701  	_, _, err = kbfsOps.CreateDir(ctx, nodeB, testPPS("d"))
  3702  	require.IsType(t, UnsupportedOpInUnlinkedDirError{}, errors.Cause(err))
  3703  
  3704  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  3705  	require.NoError(t, err)
  3706  	status, _, err = kbfsOps.FolderStatus(ctx, rootNode.GetFolderBranch())
  3707  	require.NoError(t, err)
  3708  	require.Len(t, status.DirtyPaths, 0)
  3709  
  3710  	// If the block made it back into the cache, we have a problem.
  3711  	// It shouldn't be needed for removal.
  3712  	_, err = config.BlockCache().Get(ptrC)
  3713  	require.NotNil(t, err)
  3714  }
  3715  
  3716  func TestKBFSOpsBasicTeamTLF(t *testing.T) {
  3717  	var u1, u2, u3 kbname.NormalizedUsername = "u1", "u2", "u3"
  3718  	config1, uid1, ctx, cancel := kbfsOpsInitNoMocks(t, u1, u2, u3)
  3719  	defer kbfsTestShutdownNoMocks(ctx, t, config1, cancel)
  3720  
  3721  	config2 := ConfigAsUser(config1, u2)
  3722  	defer CheckConfigAndShutdown(ctx, t, config2)
  3723  	session2, err := config2.KBPKI().GetCurrentSession(ctx)
  3724  	if err != nil {
  3725  		t.Fatal(err)
  3726  	}
  3727  	uid2 := session2.UID
  3728  
  3729  	config3 := ConfigAsUser(config1, u3)
  3730  	defer CheckConfigAndShutdown(ctx, t, config3)
  3731  	session3, err := config3.KBPKI().GetCurrentSession(ctx)
  3732  	if err != nil {
  3733  		t.Fatal(err)
  3734  	}
  3735  	uid3 := session3.UID
  3736  
  3737  	// These are deterministic, and should add the same TeamInfos for
  3738  	// all three user configs.
  3739  	t.Log("Add teams")
  3740  	name := kbname.NormalizedUsername("t1")
  3741  	teamInfos := AddEmptyTeamsForTestOrBust(t, config1, name)
  3742  	_ = AddEmptyTeamsForTestOrBust(t, config2, name)
  3743  	_ = AddEmptyTeamsForTestOrBust(t, config3, name)
  3744  	tid := teamInfos[0].TID
  3745  	AddTeamWriterForTestOrBust(t, config1, tid, uid1)
  3746  	AddTeamWriterForTestOrBust(t, config2, tid, uid1)
  3747  	AddTeamWriterForTestOrBust(t, config3, tid, uid1)
  3748  	AddTeamWriterForTestOrBust(t, config1, tid, uid2)
  3749  	AddTeamWriterForTestOrBust(t, config2, tid, uid2)
  3750  	AddTeamWriterForTestOrBust(t, config3, tid, uid2)
  3751  	AddTeamReaderForTestOrBust(t, config1, tid, uid3)
  3752  	AddTeamReaderForTestOrBust(t, config2, tid, uid3)
  3753  	AddTeamReaderForTestOrBust(t, config3, tid, uid3)
  3754  
  3755  	t.Log("Look up the team's folder.")
  3756  	h, err := tlfhandle.ParseHandle(
  3757  		ctx, config1.KBPKI(), config1.MDOps(), nil, string(name),
  3758  		tlf.SingleTeam)
  3759  	require.NoError(t, err)
  3760  	kbfsOps1 := config1.KBFSOps()
  3761  	rootNode1, _, err := kbfsOps1.GetOrCreateRootNode(
  3762  		ctx, h, data.MasterBranch)
  3763  	require.NoError(t, err)
  3764  
  3765  	t.Log("Create a small file.")
  3766  	nodeA1, _, err := kbfsOps1.CreateFile(
  3767  		ctx, rootNode1, testPPS("a"), false, NoExcl)
  3768  	require.NoError(t, err)
  3769  	buf := []byte{1}
  3770  	err = kbfsOps1.Write(ctx, nodeA1, buf, 0)
  3771  	require.NoError(t, err)
  3772  	err = kbfsOps1.SyncAll(ctx, rootNode1.GetFolderBranch())
  3773  	require.NoError(t, err)
  3774  
  3775  	t.Log("The other writer should be able to read it.")
  3776  	kbfsOps2 := config2.KBFSOps()
  3777  	rootNode2, _, err := kbfsOps2.GetOrCreateRootNode(
  3778  		ctx, h, data.MasterBranch)
  3779  	require.NoError(t, err)
  3780  	nodeA2, _, err := kbfsOps2.Lookup(ctx, rootNode2, testPPS("a"))
  3781  	require.NoError(t, err)
  3782  	gotData2 := make([]byte, len(buf))
  3783  	_, err = kbfsOps2.Read(ctx, nodeA2, gotData2, 0)
  3784  	require.NoError(t, err)
  3785  	require.True(t, bytes.Equal(buf, gotData2))
  3786  	t.Log("And also should be able to write.")
  3787  	_, _, err = kbfsOps2.CreateFile(ctx, rootNode2, testPPS("b"), false, NoExcl)
  3788  	require.NoError(t, err)
  3789  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  3790  	require.NoError(t, err)
  3791  
  3792  	t.Log("The reader should be able to read it.")
  3793  	kbfsOps3 := config3.KBFSOps()
  3794  	rootNode3, _, err := kbfsOps3.GetOrCreateRootNode(
  3795  		ctx, h, data.MasterBranch)
  3796  	require.NoError(t, err)
  3797  	nodeA3, _, err := kbfsOps3.Lookup(ctx, rootNode3, testPPS("a"))
  3798  	require.NoError(t, err)
  3799  	gotData3 := make([]byte, len(buf))
  3800  	_, err = kbfsOps3.Read(ctx, nodeA3, gotData3, 0)
  3801  	require.NoError(t, err)
  3802  	require.True(t, bytes.Equal(buf, gotData3))
  3803  	_, _, err = kbfsOps3.CreateFile(ctx, rootNode3, testPPS("c"), false, NoExcl)
  3804  	require.IsType(t, tlfhandle.WriteAccessError{}, errors.Cause(err))
  3805  
  3806  	// Verify that "a" has the correct writer.
  3807  	ei, err := kbfsOps3.GetNodeMetadata(ctx, nodeA3)
  3808  	require.NoError(t, err)
  3809  	require.Equal(t, u1, ei.LastWriterUnverified)
  3810  }
  3811  
  3812  type wrappedReadonlyTestIDType int
  3813  
  3814  const wrappedReadonlyTestID wrappedReadonlyTestIDType = 1
  3815  
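        // wrappedReadonlyNode reports itself (and, via WrapChild, all of its
        // children) as read-only whenever the context carries
        // wrappedReadonlyTestID, letting tests toggle read-only behavior per
        // call.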
  3816  type wrappedReadonlyNode struct {
  3817  	Node
  3818  }
  3819  
  3820  func (wrn wrappedReadonlyNode) Readonly(ctx context.Context) bool {
  3821  	return ctx.Value(wrappedReadonlyTestID) != nil
  3822  }
  3823  
  3824  func (wrn wrappedReadonlyNode) WrapChild(child Node) Node {
  3825  	return wrappedReadonlyNode{wrn.Node.WrapChild(child)}
  3826  }
  3827  
  3828  func TestKBFSOpsReadonlyNodes(t *testing.T) {
  3829  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
  3830  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
  3831  
  3832  	config.AddRootNodeWrapper(func(root Node) Node {
  3833  		return wrappedReadonlyNode{root}
  3834  	})
  3835  
  3836  	// Not read-only, should work.
  3837  	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)
  3838  	kbfsOps := config.KBFSOps()
  3839  	_, _, err := kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
  3840  	require.NoError(t, err)
  3841  
  3842  	// Read-only, shouldn't work.
  3843  	readonlyCtx := context.WithValue(ctx, wrappedReadonlyTestID, 1)
  3844  	_, _, err = kbfsOps.CreateDir(readonlyCtx, rootNode, testPPS("b"))
  3845  	require.IsType(t, WriteToReadonlyNodeError{}, errors.Cause(err))
  3846  }
  3847  
  3848  type fakeFileInfo struct {
  3849  	et data.EntryType
  3850  }
  3851  
  3852  var _ os.FileInfo = (*fakeFileInfo)(nil)
  3853  
  3854  func (fi *fakeFileInfo) Name() string {
  3855  	return ""
  3856  }
  3857  
  3858  func (fi *fakeFileInfo) Size() int64 {
  3859  	return 0
  3860  }
  3861  
  3862  func (fi *fakeFileInfo) Mode() os.FileMode {
  3863  	if fi.et == data.Dir || fi.et == data.Exec {
  3864  		return 0700
  3865  	}
  3866  	return 0600
  3867  }
  3868  
  3869  func (fi *fakeFileInfo) ModTime() time.Time {
  3870  	return time.Time{}
  3871  }
  3872  
  3873  func (fi *fakeFileInfo) IsDir() bool {
  3874  	return fi.et == data.Dir
  3875  }
  3876  
  3877  func (fi *fakeFileInfo) Sys() interface{} {
  3878  	return nil
  3879  }
  3880  
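        // wrappedAutocreateNode answers every missed lookup by asking KBFS to
        // auto-create an entry of the configured type (using sympath when the
        // type is a symlink).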
  3881  type wrappedAutocreateNode struct {
  3882  	Node
  3883  	et      data.EntryType
  3884  	sympath data.PathPartString
  3885  }
  3886  
  3887  func (wan wrappedAutocreateNode) ShouldCreateMissedLookup(
  3888  	ctx context.Context, _ data.PathPartString) (
  3889  	bool, context.Context, data.EntryType, os.FileInfo, data.PathPartString,
  3890  	data.BlockPointer) {
  3891  	return true, ctx, wan.et, &fakeFileInfo{wan.et}, wan.sympath, data.ZeroPtr
  3892  }
  3893  
  3894  func testKBFSOpsAutocreateNodes(
  3895  	t *testing.T, et data.EntryType, sympath data.PathPartString) {
  3896  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
  3897  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
  3898  
  3899  	config.AddRootNodeWrapper(func(root Node) Node {
  3900  		return wrappedAutocreateNode{root, et, sympath}
  3901  	})
  3902  
  3903  	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)
  3904  	kbfsOps := config.KBFSOps()
  3905  	n, ei, err := kbfsOps.Lookup(ctx, rootNode, testPPS("a"))
  3906  	require.NoError(t, err)
  3907  	if et != data.Sym {
  3908  		require.NotNil(t, n)
  3909  	} else {
  3910  		require.Equal(t, sympath.Plaintext(), ei.SymPath)
  3911  	}
  3912  	require.Equal(t, et, ei.Type)
  3913  }
  3914  
  3915  func TestKBFSOpsAutocreateNodesFile(t *testing.T) {
  3916  	testKBFSOpsAutocreateNodes(t, data.File, testPPS(""))
  3917  }
  3918  
  3919  func TestKBFSOpsAutocreateNodesExec(t *testing.T) {
  3920  	testKBFSOpsAutocreateNodes(t, data.Exec, testPPS(""))
  3921  }
  3922  
  3923  func TestKBFSOpsAutocreateNodesDir(t *testing.T) {
  3924  	testKBFSOpsAutocreateNodes(t, data.Dir, testPPS(""))
  3925  }
  3926  
  3927  func TestKBFSOpsAutocreateNodesSym(t *testing.T) {
  3928  	testKBFSOpsAutocreateNodes(t, data.Sym, testPPS("sympath"))
  3929  }
  3930  
  3931  func testKBFSOpsMigrateToImplicitTeam(
  3932  	t *testing.T, ty tlf.Type, name string, initialMDVer kbfsmd.MetadataVer) {
  3933  	var u1, u2 kbname.NormalizedUsername = "u1", "u2"
  3934  	config1, _, ctx, cancel := kbfsOpsConcurInit(t, u1, u2)
  3935  	defer kbfsConcurTestShutdown(ctx, t, config1, cancel)
  3936  	config1.SetMetadataVersion(initialMDVer)
  3937  
  3938  	config2 := ConfigAsUser(config1, u2)
  3939  	defer CheckConfigAndShutdown(ctx, t, config2)
  3940  	config2.SetMetadataVersion(initialMDVer)
  3941  
  3942  	t.Log("Create the folder before implicit teams are enabled.")
  3943  	h, err := tlfhandle.ParseHandle(
  3944  		ctx, config1.KBPKI(), config1.MDOps(), nil, name, ty)
  3945  	require.NoError(t, err)
  3946  	require.False(t, h.IsBackedByTeam())
  3947  	kbfsOps1 := config1.KBFSOps()
  3948  	rootNode1, _, err := kbfsOps1.GetOrCreateRootNode(
  3949  		ctx, h, data.MasterBranch)
  3950  	require.NoError(t, err)
  3951  	_, _, err = kbfsOps1.CreateDir(ctx, rootNode1, testPPS("a"))
  3952  	require.NoError(t, err)
  3953  	err = kbfsOps1.SyncAll(ctx, rootNode1.GetFolderBranch())
  3954  	require.NoError(t, err)
  3955  
  3956  	t.Log("Load the folder for u2.")
  3957  	kbfsOps2 := config2.KBFSOps()
  3958  	rootNode2, _, err := kbfsOps2.GetOrCreateRootNode(
  3959  		ctx, h, data.MasterBranch)
  3960  	require.NoError(t, err)
  3961  	eis, err := kbfsOps2.GetDirChildren(ctx, rootNode2)
  3962  	require.NoError(t, err)
  3963  	require.Len(t, eis, 1)
  3964  	_, ok := eis[rootNode2.ChildName("a")]
  3965  	require.True(t, ok)
  3966  
  3967  	// These are deterministic, and should add the same
  3968  	// ImplicitTeamInfos for both user configs.
  3969  	err = EnableImplicitTeamsForTest(config1)
  3970  	require.NoError(t, err)
  3971  	teamID := AddImplicitTeamForTestOrBust(t, config1, name, "", 1, ty)
  3972  	_ = AddImplicitTeamForTestOrBust(t, config2, name, "", 1, ty)
  3973  	// The service should be adding the team TLF ID to the iteam's
  3974  	// sigchain before they call `StartMigration`.
  3975  	err = config1.KBPKI().CreateTeamTLF(ctx, teamID, h.TlfID())
  3976  	require.NoError(t, err)
  3977  	err = config2.KBPKI().CreateTeamTLF(ctx, teamID, h.TlfID())
  3978  	require.NoError(t, err)
  3979  	config1.SetMetadataVersion(kbfsmd.ImplicitTeamsVer)
  3980  	config2.SetMetadataVersion(kbfsmd.ImplicitTeamsVer)
  3981  
  3982  	t.Log("Starting migration to implicit team")
  3983  	err = kbfsOps1.MigrateToImplicitTeam(ctx, h.TlfID())
  3984  	require.NoError(t, err)
  3985  	_, _, err = kbfsOps1.CreateDir(ctx, rootNode1, testPPS("b"))
  3986  	require.NoError(t, err)
  3987  	err = kbfsOps1.SyncAll(ctx, rootNode1.GetFolderBranch())
  3988  	require.NoError(t, err)
  3989  
  3990  	t.Log("Check migration from other client")
  3991  	err = kbfsOps2.SyncFromServer(ctx, rootNode2.GetFolderBranch(), nil)
  3992  	require.NoError(t, err)
  3993  	eis, err = kbfsOps2.GetDirChildren(ctx, rootNode2)
  3994  	require.NoError(t, err)
  3995  	require.Len(t, eis, 2)
  3996  	_, ok = eis[rootNode2.ChildName("a")]
  3997  	require.True(t, ok)
  3998  	_, ok = eis[rootNode2.ChildName("b")]
  3999  	require.True(t, ok)
  4000  
  4001  	t.Log("Make sure the new MD really is keyed for the implicit team")
  4002  	ops1 := getOps(config1, rootNode1.GetFolderBranch().Tlf)
  4003  	lState := makeFBOLockState()
  4004  	md, err := ops1.getMDForRead(ctx, lState, mdReadNeedIdentify)
  4005  	require.NoError(t, err)
  4006  	require.Equal(t, tlf.TeamKeying, md.TypeForKeying())
  4007  	require.Equal(t, kbfsmd.ImplicitTeamsVer, md.Version())
  4008  }
  4009  
  4010  func TestKBFSOpsMigratePrivateToImplicitTeam(t *testing.T) {
  4011  	testKBFSOpsMigrateToImplicitTeam(
  4012  		t, tlf.Private, "u1,u2", kbfsmd.SegregatedKeyBundlesVer)
  4013  }
  4014  
  4015  func TestKBFSOpsMigratePrivateWithReaderToImplicitTeam(t *testing.T) {
  4016  	testKBFSOpsMigrateToImplicitTeam(
  4017  		t, tlf.Private, "u1#u2", kbfsmd.SegregatedKeyBundlesVer)
  4018  }
  4019  
  4020  func TestKBFSOpsMigratePrivateWithSBSToImplicitTeam(t *testing.T) {
  4021  	testKBFSOpsMigrateToImplicitTeam(
  4022  		t, tlf.Private, "u1,u2,zzz@twitter", kbfsmd.SegregatedKeyBundlesVer)
  4023  }
  4024  
  4025  func TestKBFSOpsMigratePublicToImplicitTeam(t *testing.T) {
  4026  	testKBFSOpsMigrateToImplicitTeam(
  4027  		t, tlf.Public, "u1,u2", kbfsmd.SegregatedKeyBundlesVer)
  4028  }
  4029  
  4030  func TestKBFSOpsMigratePrivateV2ToImplicitTeam(t *testing.T) {
  4031  	testKBFSOpsMigrateToImplicitTeam(
  4032  		t, tlf.Private, "u1,u2", kbfsmd.InitialExtraMetadataVer)
  4033  }
  4034  
  4035  func TestKBFSOpsMigratePublicV2ToImplicitTeam(t *testing.T) {
  4036  	testKBFSOpsMigrateToImplicitTeam(
  4037  		t, tlf.Public, "u1,u2", kbfsmd.InitialExtraMetadataVer)
  4038  }
  4039  
  4040  func TestKBFSOpsArchiveBranchType(t *testing.T) {
  4041  	var u1 kbname.NormalizedUsername = "u1"
  4042  	config, _, ctx, cancel := kbfsOpsConcurInit(t, u1)
  4043  	defer kbfsConcurTestShutdown(ctx, t, config, cancel)
  4044  
  4045  	t.Log("Create a private folder for the master branch.")
  4046  	name := "u1"
  4047  	h, err := tlfhandle.ParseHandle(
  4048  		ctx, config.KBPKI(), config.MDOps(), nil, name, tlf.Private)
  4049  	require.NoError(t, err)
  4050  	kbfsOps := config.KBFSOps()
  4051  	rootNode, _, err := kbfsOps.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4052  	require.NoError(t, err)
  4053  	require.False(t, rootNode.Readonly(ctx))
  4054  	fb := rootNode.GetFolderBranch()
  4055  
  4056  	t.Log("Make a new revision")
  4057  	_, _, err = kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
  4058  	require.NoError(t, err)
  4059  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  4060  	require.NoError(t, err)
  4061  
  4062  	t.Log("Create an archived version for the same TLF.")
  4063  	rootNodeArchived, _, err := kbfsOps.GetRootNode(
  4064  		ctx, h, data.MakeRevBranchName(1))
  4065  	require.NoError(t, err)
  4066  
  4067  	eis, err := kbfsOps.GetDirChildren(ctx, rootNodeArchived)
  4068  	require.NoError(t, err)
  4069  	require.Len(t, eis, 0)
  4070  
  4071  	eis, err = kbfsOps.GetDirChildren(ctx, rootNode)
  4072  	require.NoError(t, err)
  4073  	require.Len(t, eis, 1)
  4074  
  4075  	archiveFB := data.FolderBranch{
  4076  		Tlf:    fb.Tlf,
  4077  		Branch: data.MakeRevBranchName(1),
  4078  	}
  4079  	require.Equal(t, archiveFB, rootNodeArchived.GetFolderBranch())
  4080  	require.True(t, rootNodeArchived.Readonly(ctx))
  4081  }
  4082  
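        // The next few types graft a billy in-memory filesystem into a TLF
        // under the name "memfs": testKBFSOpsRootNode auto-creates "memfs" as
        // a fake, read-only directory, testKBFSOpsMemFSNode exposes each
        // directory via GetFS, and testKBFSOpsMemFileNode exposes each file
        // via GetFile.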
  4083  type testKBFSOpsMemFileNode struct {
  4084  	Node
  4085  	fs   billy.Filesystem
  4086  	name string
  4087  }
  4088  
  4089  func (n testKBFSOpsMemFileNode) GetFile(_ context.Context) billy.File {
  4090  	f, err := n.fs.Open(n.name)
  4091  	if err != nil {
  4092  		return nil
  4093  	}
  4094  	return f
  4095  }
  4096  
  4097  type testKBFSOpsMemFSNode struct {
  4098  	Node
  4099  	fs billy.Filesystem
  4100  }
  4101  
  4102  func (n testKBFSOpsMemFSNode) GetFS(_ context.Context) NodeFSReadOnly {
  4103  	return n.fs
  4104  }
  4105  
  4106  func (n testKBFSOpsMemFSNode) WrapChild(child Node) Node {
  4107  	child = n.Node.WrapChild(child)
  4108  	name := child.GetBasename()
  4109  	fi, err := n.fs.Lstat(name.Plaintext())
  4110  	if err != nil {
  4111  		return child
  4112  	}
  4113  	if fi.IsDir() {
  4114  		childFS, err := n.fs.Chroot(name.Plaintext())
  4115  		if err != nil {
  4116  			return child
  4117  		}
  4118  		return &testKBFSOpsMemFSNode{
  4119  			Node: child,
  4120  			fs:   childFS,
  4121  		}
  4122  	}
  4123  	return &testKBFSOpsMemFileNode{
  4124  		Node: child,
  4125  		fs:   n.fs,
  4126  		name: name.Plaintext(),
  4127  	}
  4128  }
  4129  
  4130  type testKBFSOpsRootNode struct {
  4131  	Node
  4132  	fs billy.Filesystem
  4133  }
  4134  
  4135  func (n testKBFSOpsRootNode) ShouldCreateMissedLookup(
  4136  	ctx context.Context, name data.PathPartString) (
  4137  	bool, context.Context, data.EntryType, os.FileInfo, data.PathPartString,
  4138  	data.BlockPointer) {
  4139  	if name.Plaintext() == "memfs" {
  4140  		return true, ctx, data.FakeDir, nil, testPPS(""), data.ZeroPtr
  4141  	}
  4142  	return n.Node.ShouldCreateMissedLookup(ctx, name)
  4143  }
  4144  
  4145  func (n testKBFSOpsRootNode) WrapChild(child Node) Node {
  4146  	child = n.Node.WrapChild(child)
  4147  	if child.GetBasename().Plaintext() == "memfs" {
  4148  		return &testKBFSOpsMemFSNode{
  4149  			Node: &ReadonlyNode{Node: child},
  4150  			fs:   n.fs,
  4151  		}
  4152  	}
  4153  	return child
  4154  }
  4155  
  4156  type testKBFSOpsRootWrapper struct {
  4157  	fs billy.Filesystem
  4158  }
  4159  
  4160  func (w testKBFSOpsRootWrapper) wrap(node Node) Node {
  4161  	return &testKBFSOpsRootNode{node, w.fs}
  4162  }
  4163  
  4164  func TestKBFSOpsReadonlyFSNodes(t *testing.T) {
  4165  	var u1 kbname.NormalizedUsername = "u1"
  4166  	config, _, ctx, cancel := kbfsOpsConcurInit(t, u1)
  4167  	defer kbfsConcurTestShutdown(ctx, t, config, cancel)
  4168  
  4169  	fs := memfs.New()
  4170  	rw := testKBFSOpsRootWrapper{fs}
  4171  	config.AddRootNodeWrapper(rw.wrap)
  4172  
  4173  	t.Log("Populate a memory file system with a few dirs and files")
  4174  	err := fs.MkdirAll("a/b", 0700)
  4175  	require.NoError(t, err)
  4176  	c, err := fs.Create("a/b/c")
  4177  	require.NoError(t, err)
  4178  	_, err = c.Write([]byte("cdata"))
  4179  	require.NoError(t, err)
  4180  	err = c.Close()
  4181  	require.NoError(t, err)
  4182  	d, err := fs.Create("d")
  4183  	require.NoError(t, err)
  4184  	_, err = d.Write([]byte("ddata"))
  4185  	require.NoError(t, err)
  4186  	err = d.Close()
  4187  	require.NoError(t, err)
  4188  
  4189  	name := "u1"
  4190  	h, err := tlfhandle.ParseHandle(
  4191  		ctx, config.KBPKI(), config.MDOps(), nil, name, tlf.Private)
  4192  	require.NoError(t, err)
  4193  	kbfsOps := config.KBFSOps()
  4194  	rootNode, _, err := kbfsOps.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4195  	require.NoError(t, err)
  4196  
  4197  	fsNode, _, err := kbfsOps.Lookup(ctx, rootNode, testPPS("memfs"))
  4198  	require.NoError(t, err)
  4199  	children, err := kbfsOps.GetDirChildren(ctx, fsNode)
  4200  	require.NoError(t, err)
  4201  	require.Len(t, children, 2)
  4202  
  4203  	aNode, _, err := kbfsOps.Lookup(ctx, fsNode, testPPS("a"))
  4204  	require.NoError(t, err)
  4205  	children, err = kbfsOps.GetDirChildren(ctx, aNode)
  4206  	require.NoError(t, err)
  4207  	require.Len(t, children, 1)
  4208  
  4209  	bNode, _, err := kbfsOps.Lookup(ctx, aNode, testPPS("b"))
  4210  	require.NoError(t, err)
  4211  	children, err = kbfsOps.GetDirChildren(ctx, bNode)
  4212  	require.NoError(t, err)
  4213  	require.Len(t, children, 1)
  4214  
  4215  	cNode, _, err := kbfsOps.Lookup(ctx, bNode, testPPS("c"))
  4216  	require.NoError(t, err)
  4217  	data := make([]byte, 5)
  4218  	n, err := kbfsOps.Read(ctx, cNode, data, 0)
  4219  	require.NoError(t, err)
  4220  	require.Equal(t, int64(5), n)
  4221  	require.Equal(t, "cdata", string(data))
  4222  
  4223  	dNode, _, err := kbfsOps.Lookup(ctx, fsNode, testPPS("d"))
  4224  	require.NoError(t, err)
  4225  	data = make([]byte, 5)
  4226  	n, err = kbfsOps.Read(ctx, dNode, data, 0)
  4227  	require.NoError(t, err)
  4228  	require.Equal(t, int64(5), n)
  4229  	require.Equal(t, "ddata", string(data))
  4230  }
  4231  
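        // mdServerShutdownOverride lets a test pretend the MD server is shut
        // down (via the override flag) without actually shutting it down.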
  4232  type mdServerShutdownOverride struct {
  4233  	mdServerLocal
  4234  	override bool
  4235  }
  4236  
  4237  func (md mdServerShutdownOverride) isShutdown() bool {
  4238  	if md.override {
  4239  		return true
  4240  	}
  4241  	return md.mdServerLocal.isShutdown()
  4242  }
  4243  
  4244  func TestKBFSOpsReset(t *testing.T) {
  4245  	var u1 kbname.NormalizedUsername = "u1"
  4246  	config, _, ctx, cancel := kbfsOpsConcurInit(t, u1)
  4247  	defer kbfsConcurTestShutdown(ctx, t, config, cancel)
  4248  	md := &mdServerShutdownOverride{config.MDServer().(mdServerLocal), false}
  4249  	config.SetMDServer(md)
  4250  
  4251  	err := EnableImplicitTeamsForTest(config)
  4252  	require.NoError(t, err)
  4253  
  4254  	t.Log("Create a private folder.")
  4255  	name := "u1"
  4256  	h, err := tlfhandle.ParseHandle(
  4257  		ctx, config.KBPKI(), config.MDOps(), nil, name, tlf.Private)
  4258  	require.NoError(t, err)
  4259  	kbfsOps := config.KBFSOps()
  4260  	rootNode, _, err := kbfsOps.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4261  	require.NoError(t, err)
  4262  
  4263  	oldID := h.TlfID()
  4264  	t.Logf("Make a new revision for TLF ID %s", oldID)
  4265  	_, _, err = kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
  4266  	require.NoError(t, err)
  4267  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  4268  	require.NoError(t, err)
  4269  
  4270  	t.Log("Reset the TLF")
  4271  	// Pretend the mdserver is shut down, to avoid checking merged
  4272  	// state when shutting down the FBO (which causes a deadlock).
  4273  	md.override = true
  4274  	err = kbfsOps.Reset(ctx, h, nil)
  4275  	require.NoError(t, err)
  4276  	require.NotEqual(t, oldID, h.TlfID())
  4277  	md.override = false
  4278  
  4279  	t.Logf("Make a new revision for new TLF ID %s", h.TlfID())
  4280  	rootNode, _, err = kbfsOps.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4281  	require.NoError(t, err)
  4282  	children, err := kbfsOps.GetDirChildren(ctx, rootNode)
  4283  	require.NoError(t, err)
  4284  	require.Len(t, children, 0)
  4285  	_, _, err = kbfsOps.CreateDir(ctx, rootNode, testPPS("b"))
  4286  	require.NoError(t, err)
  4287  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  4288  	require.NoError(t, err)
  4289  	children, err = kbfsOps.GetDirChildren(ctx, rootNode)
  4290  	require.NoError(t, err)
  4291  	require.Len(t, children, 1)
  4292  
  4293  	t.Log("Reset it back")
  4294  	md.override = true
  4295  	err = kbfsOps.Reset(ctx, h, &oldID)
  4296  	require.NoError(t, err)
  4297  	require.Equal(t, oldID, h.TlfID())
  4298  	md.override = false
  4299  
  4300  	t.Log("Check that the old revision is back")
  4301  	rootNode, _, err = kbfsOps.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4302  	require.NoError(t, err)
  4303  	children, err = kbfsOps.GetDirChildren(ctx, rootNode)
  4304  	require.NoError(t, err)
  4305  	require.Len(t, children, 1)
  4306  	require.Contains(t, children, testPPS("a"))
  4307  }
  4308  
  4309  // diskMDCacheWithCommitChan notifies a channel, at most once per revision, whenever an MD is committed.
  4310  type diskMDCacheWithCommitChan struct {
  4311  	DiskMDCache
  4312  	commitCh chan<- kbfsmd.Revision
  4313  
  4314  	lock sync.Mutex
  4315  	seen map[kbfsmd.Revision]bool
  4316  }
  4317  
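        // newDiskMDCacheWithCommitChan wraps an existing DiskMDCache so that
        // each newly committed revision is sent (at most once) on commitCh.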
  4318  func newDiskMDCacheWithCommitChan(
  4319  	dmc DiskMDCache, commitCh chan<- kbfsmd.Revision) DiskMDCache {
  4320  	return &diskMDCacheWithCommitChan{
  4321  		DiskMDCache: dmc,
  4322  		commitCh:    commitCh,
  4323  		seen:        make(map[kbfsmd.Revision]bool),
  4324  	}
  4325  }
  4326  
  4327  func (dmc *diskMDCacheWithCommitChan) Commit(
  4328  	ctx context.Context, tlfID tlf.ID, rev kbfsmd.Revision) error {
  4329  	err := dmc.DiskMDCache.Commit(ctx, tlfID, rev)
  4330  	if err != nil {
  4331  		return err
  4332  	}
  4333  	dmc.lock.Lock()
  4334  	defer dmc.lock.Unlock()
  4335  	if !dmc.seen[rev] {
  4336  		dmc.commitCh <- rev
  4337  		dmc.seen[rev] = true
  4338  	}
  4339  	return nil
  4340  }
  4341  
  4342  func TestKBFSOpsUnsyncedMDCommit(t *testing.T) {
  4343  	var u1 kbname.NormalizedUsername = "u1"
  4344  	config, _, ctx, cancel := kbfsOpsConcurInit(t, u1)
  4345  	defer kbfsConcurTestShutdown(ctx, t, config, cancel)
  4346  
  4347  	dmcLocal, tempdir := newDiskMDCacheLocalForTest(t)
  4348  	defer shutdownDiskMDCacheTest(dmcLocal, tempdir)
  4349  	commitCh := make(chan kbfsmd.Revision)
  4350  	dmc := newDiskMDCacheWithCommitChan(dmcLocal, commitCh)
  4351  	config.diskMDCache = dmc
  4352  
  4353  	t.Log("Create a private, unsynced TLF and make sure updates are committed")
  4354  	name := "u1"
  4355  	h, err := tlfhandle.ParseHandle(
  4356  		ctx, config.KBPKI(), config.MDOps(), nil, name, tlf.Private)
  4357  	require.NoError(t, err)
  4358  	kbfsOps := config.KBFSOps()
  4359  	rootNode, _, err := kbfsOps.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4360  	require.NoError(t, err)
  4361  	select {
  4362  	case rev := <-commitCh:
  4363  		require.Equal(t, kbfsmd.Revision(1), rev)
  4364  	case <-ctx.Done():
  4365  		t.Fatal(ctx.Err())
  4366  	}
  4367  
  4368  	_, _, err = kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
  4369  	require.NoError(t, err)
  4370  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  4371  	require.NoError(t, err)
  4372  	select {
  4373  	case rev := <-commitCh:
  4374  		require.Equal(t, kbfsmd.Revision(2), rev)
  4375  	case <-ctx.Done():
  4376  		t.Fatal(ctx.Err())
  4377  	}
  4378  
  4379  	t.Log("Write using a different device")
  4380  	config2 := ConfigAsUser(config, u1)
  4381  	defer CheckConfigAndShutdown(ctx, t, config2)
  4382  	kbfsOps2 := config2.KBFSOps()
  4383  	rootNode2, _, err := kbfsOps2.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4384  	require.NoError(t, err)
  4385  	_, _, err = kbfsOps2.CreateDir(ctx, rootNode2, testPPS("b"))
  4386  	require.NoError(t, err)
  4387  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  4388  	require.NoError(t, err)
  4389  
  4390  	t.Log("Sync the first device")
  4391  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4392  	require.NoError(t, err)
  4393  
  4394  	select {
  4395  	case rev := <-commitCh:
  4396  		require.Equal(t, kbfsmd.Revision(3), rev)
  4397  	case <-ctx.Done():
  4398  		t.Fatal(ctx.Err())
  4399  	}
  4400  }
  4401  
  4402  // bserverPutToDiskCache is a simple shim over a block server that also
  4403  // adds any block it gets or puts to the disk cache.
  4404  type bserverPutToDiskCache struct {
  4405  	BlockServer
  4406  	dbc DiskBlockCache
  4407  }
  4408  
  4409  func (b bserverPutToDiskCache) Get(
  4410  	ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
  4411  	context kbfsblock.Context, cacheType DiskBlockCacheType) (
  4412  	buf []byte, serverHalf kbfscrypto.BlockCryptKeyServerHalf, err error) {
  4413  	buf, serverHalf, err = b.BlockServer.Get(
  4414  		ctx, tlfID, id, context, cacheType)
  4415  	if err != nil {
  4416  		return buf, serverHalf, err
  4417  	}
  4418  
  4419  	_ = b.dbc.Put(ctx, tlfID, id, buf, serverHalf, cacheType)
  4420  	return buf, serverHalf, nil
  4421  }
  4422  
  4423  func (b bserverPutToDiskCache) Put(
  4424  	ctx context.Context, tlfID tlf.ID, id kbfsblock.ID,
  4425  	context kbfsblock.Context, buf []byte,
  4426  	serverHalf kbfscrypto.BlockCryptKeyServerHalf,
  4427  	cacheType DiskBlockCacheType) (err error) {
  4428  	err = b.BlockServer.Put(ctx, tlfID, id, context, buf, serverHalf, cacheType)
  4429  	if err != nil {
  4430  		return err
  4431  	}
  4432  
  4433  	_ = b.dbc.Put(ctx, tlfID, id, buf, serverHalf, cacheType)
  4434  	return nil
  4435  }
  4436  
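        // enableDiskCacheForTest attaches a wrapped disk block cache to the
        // given config, waits for its working-set and sync caches to start,
        // enables the disk limiter under tempdir, and loads the synced-TLF
        // list.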
  4437  func enableDiskCacheForTest(
  4438  	t *testing.T, config *ConfigLocal, tempdir string) *diskBlockCacheWrapped {
  4439  	dbc, err := newDiskBlockCacheWrapped(config, "", config.Mode())
  4440  	require.NoError(t, err)
  4441  	config.diskBlockCache = dbc
  4442  	err = dbc.workingSetCache.WaitUntilStarted()
  4443  	require.NoError(t, err)
  4444  	err = dbc.syncCache.WaitUntilStarted()
  4445  	require.NoError(t, err)
  4446  	err = config.EnableDiskLimiter(tempdir)
  4447  	require.NoError(t, err)
  4448  	err = config.loadSyncedTlfsLocked()
  4449  	require.NoError(t, err)
  4450  	return dbc
  4451  }
  4452  
  4453  func TestKBFSOpsSyncedMDCommit(t *testing.T) {
  4454  	var u1 kbname.NormalizedUsername = "u1"
  4455  	config, _, ctx, cancel := kbfsOpsConcurInit(t, u1)
  4456  	defer kbfsConcurTestShutdown(ctx, t, config, cancel)
  4457  
  4458  	config2 := ConfigAsUser(config, u1)
  4459  	defer CheckConfigAndShutdown(ctx, t, config2)
  4460  
  4461  	dmcLocal, tempdir := newDiskMDCacheLocalForTest(t)
  4462  	defer shutdownDiskMDCacheTest(dmcLocal, tempdir)
  4463  	commitCh := make(chan kbfsmd.Revision)
  4464  	dmc := newDiskMDCacheWithCommitChan(dmcLocal, commitCh)
  4465  	config.diskMDCache = dmc
  4466  
  4467  	dbc := enableDiskCacheForTest(t, config, tempdir)
  4468  
  4469  	t.Log("Create a private, synced TLF")
  4470  	config.SetBlockServer(bserverPutToDiskCache{config.BlockServer(), dbc})
  4471  	name := "u1"
  4472  	h, err := tlfhandle.ParseHandle(
  4473  		ctx, config.KBPKI(), config.MDOps(), nil, name, tlf.Private)
  4474  	require.NoError(t, err)
  4475  	kbfsOps := config.KBFSOps()
  4476  	rootNode, _, err := kbfsOps.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4477  	require.NoError(t, err)
  4478  	select {
  4479  	case rev := <-commitCh:
  4480  		require.Equal(t, kbfsmd.Revision(1), rev)
  4481  	case <-ctx.Done():
  4482  		t.Fatal(ctx.Err())
  4483  	}
  4484  	_, err = config.SetTlfSyncState(
  4485  		ctx, rootNode.GetFolderBranch().Tlf, FolderSyncConfig{
  4486  			Mode: keybase1.FolderSyncMode_ENABLED,
  4487  		})
  4488  	require.NoError(t, err)
  4489  	_, _, err = kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
  4490  	require.NoError(t, err)
  4491  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  4492  	require.NoError(t, err)
  4493  	select {
  4494  	case rev := <-commitCh:
  4495  		require.Equal(t, kbfsmd.Revision(2), rev)
  4496  	case <-ctx.Done():
  4497  		t.Fatal(ctx.Err())
  4498  	}
  4499  
  4500  	t.Log("Stall any block gets from the device")
  4501  	staller := NewNaïveStaller(config)
  4502  	staller.StallBlockOp(StallableBlockGet, 4)
  4503  	defer staller.UndoStallBlockOp(StallableBlockGet)
  4504  
  4505  	go func() {
  4506  		// Let the first (root fetch) block op through, but not the second.
  4507  		staller.WaitForStallBlockOp(StallableBlockGet)
  4508  		staller.UnstallOneBlockOp(StallableBlockGet)
  4509  	}()
  4510  
  4511  	t.Log("Write using a different device")
  4512  	kbfsOps2 := config2.KBFSOps()
  4513  	rootNode2, _, err := kbfsOps2.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4514  	require.NoError(t, err)
  4515  	_, _, err = kbfsOps2.CreateDir(ctx, rootNode2, testPPS("b"))
  4516  	require.NoError(t, err)
  4517  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  4518  	require.NoError(t, err)
  4519  
  4520  	t.Log("Sync the new revision, but ensure no MD commits yet")
  4521  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4522  	require.NoError(t, err)
  4523  	staller.WaitForStallBlockOp(StallableBlockGet)
  4524  	select {
  4525  	case rev := <-commitCh:
  4526  		t.Fatalf("No commit expected; rev=%d", rev)
  4527  	default:
  4528  	}
  4529  
  4530  	t.Log("Unstall the final block get, and the commit should finish")
  4531  	staller.UnstallOneBlockOp(StallableBlockGet)
  4532  	select {
  4533  	case rev := <-commitCh:
  4534  		require.Equal(t, kbfsmd.Revision(3), rev)
  4535  	case <-ctx.Done():
  4536  		t.Fatal(ctx.Err())
  4537  	}
  4538  
  4539  	go func() {
  4540  		// Let the first (root fetch) block op through, but not the second.
  4541  		staller.WaitForStallBlockOp(StallableBlockGet)
  4542  		staller.UnstallOneBlockOp(StallableBlockGet)
  4543  	}()
  4544  
  4545  	t.Log("Write again, and this time read the MD to force a commit")
  4546  	_, _, err = kbfsOps2.CreateDir(ctx, rootNode2, testPPS("c"))
  4547  	require.NoError(t, err)
  4548  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  4549  	require.NoError(t, err)
  4550  
  4551  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4552  	require.NoError(t, err)
  4553  	staller.WaitForStallBlockOp(StallableBlockGet)
  4554  	select {
  4555  	case rev := <-commitCh:
  4556  		t.Fatalf("No commit expected; rev=%d", rev)
  4557  	default:
  4558  	}
  4559  
  4560  	_, err = kbfsOps.GetDirChildren(ctx, rootNode)
  4561  	require.NoError(t, err)
  4562  	// Since we read the MD, it should be committed even before the
  4563  	// prefetch completes.
  4564  	select {
  4565  	case rev := <-commitCh:
  4566  		require.Equal(t, kbfsmd.Revision(4), rev)
  4567  	case <-ctx.Done():
  4568  		t.Fatal(ctx.Err())
  4569  	}
  4570  	staller.UnstallOneBlockOp(StallableBlockGet)
  4571  }
  4572  
  4573  func TestKBFSOpsPartialSyncConfig(t *testing.T) {
  4574  	var u1 kbname.NormalizedUsername = "u1"
  4575  	config, _, ctx, cancel := kbfsOpsConcurInit(t, u1)
  4576  	defer kbfsConcurTestShutdown(ctx, t, config, cancel)
  4577  
  4578  	name := "u1"
  4579  	h, err := tlfhandle.ParseHandle(
  4580  		ctx, config.KBPKI(), config.MDOps(), nil, name, tlf.Private)
  4581  	require.NoError(t, err)
  4582  	kbfsOps := config.KBFSOps()
  4583  
  4584  	tempdir, err := ioutil.TempDir(os.TempDir(), "disk_cache")
  4585  	require.NoError(t, err)
  4586  	defer func() {
  4587  		err := ioutil.RemoveAll(tempdir)
  4588  		require.NoError(t, err)
  4589  	}()
  4590  	_ = enableDiskCacheForTest(t, config, tempdir)
  4591  
  4592  	t.Log("Sync should start off as disabled.")
  4593  	syncConfig, err := kbfsOps.GetSyncConfig(ctx, h.TlfID())
  4594  	require.NoError(t, err)
  4595  	require.Equal(t, keybase1.FolderSyncMode_DISABLED, syncConfig.Mode)
  4596  
  4597  	t.Log("Expect an error before the TLF is initialized")
  4598  	syncConfig.Mode = keybase1.FolderSyncMode_PARTIAL
  4599  	pathsMap := map[string]bool{
  4600  		"a/b/c": true,
  4601  		"d/e/f": true,
  4602  	}
  4603  	syncConfig.Paths = make([]string, 0, 2)
  4604  	for p := range pathsMap {
  4605  		syncConfig.Paths = append(syncConfig.Paths, p)
  4606  	}
  4607  	_, err = kbfsOps.SetSyncConfig(ctx, h.TlfID(), syncConfig)
  4608  	require.Error(t, err)
  4609  
  4610  	t.Log("Initialize the TLF")
  4611  	rootNode, _, err := kbfsOps.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4612  	require.NoError(t, err)
  4613  	_, _, err = kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
  4614  	require.NoError(t, err)
  4615  
  4616  	t.Log("Set a partial sync config")
  4617  	_, err = kbfsOps.SetSyncConfig(ctx, h.TlfID(), syncConfig)
  4618  	require.NoError(t, err)
  4619  
  4620  	t.Log("Make sure the lower-level config is encrypted")
  4621  	lowLevelConfig := config.GetTlfSyncState(h.TlfID())
  4622  	require.Equal(t, keybase1.FolderSyncMode_PARTIAL, lowLevelConfig.Mode)
  4623  	require.NotEqual(t, data.ZeroPtr, lowLevelConfig.Paths.Ptr)
  4624  	var zeroBytes [32]byte
  4625  	require.False(t,
  4626  		bytes.Equal(zeroBytes[:], lowLevelConfig.Paths.ServerHalf.Bytes()))
  4627  
  4628  	t.Log("Read it back out unencrypted")
  4629  	config.ResetCaches()
  4630  	syncConfig, err = kbfsOps.GetSyncConfig(ctx, h.TlfID())
  4631  	require.NoError(t, err)
  4632  	require.Equal(t, keybase1.FolderSyncMode_PARTIAL, syncConfig.Mode)
  4633  	require.Len(t, syncConfig.Paths, len(pathsMap))
  4634  	for _, p := range syncConfig.Paths {
  4635  		require.True(t, pathsMap[p])
  4636  		delete(pathsMap, p)
  4637  	}
  4638  
  4639  	t.Log("Test some failure scenarios")
  4640  	syncConfig.Paths = []string{"a/b/c", "a/b/c"}
  4641  	_, err = kbfsOps.SetSyncConfig(ctx, h.TlfID(), syncConfig)
  4642  	require.Error(t, err)
  4643  	syncConfig.Paths = []string{"/a/b/c", "d/e/f"}
  4644  	_, err = kbfsOps.SetSyncConfig(ctx, h.TlfID(), syncConfig)
  4645  	require.Error(t, err)
  4646  	syncConfig.Paths = []string{"a/../a/b/c", "a/b/c"}
  4647  	_, err = kbfsOps.SetSyncConfig(ctx, h.TlfID(), syncConfig)
  4648  	require.Error(t, err)
  4649  	syncConfig.Paths = []string{"a/../../a/b/c"}
  4650  	_, err = kbfsOps.SetSyncConfig(ctx, h.TlfID(), syncConfig)
  4651  	require.Error(t, err)
  4652  
  4653  	t.Log("Make sure the paths are cleaned and ToSlash'd")
  4654  	pathsMap = map[string]bool{
  4655  		"a/b/c": true,
  4656  		"d/e/f": true,
  4657  	}
  4658  	syncConfig.Paths = []string{"a/../a/b/c", filepath.Join("d", "e", "f")}
  4659  	_, err = kbfsOps.SetSyncConfig(ctx, h.TlfID(), syncConfig)
  4660  	require.NoError(t, err)
  4661  	syncConfig, err = kbfsOps.GetSyncConfig(ctx, h.TlfID())
  4662  	require.NoError(t, err)
  4663  	require.Equal(t, keybase1.FolderSyncMode_PARTIAL, syncConfig.Mode)
  4664  	require.Len(t, syncConfig.Paths, len(pathsMap))
  4665  	for _, p := range syncConfig.Paths {
  4666  		require.True(t, pathsMap[p])
  4667  		delete(pathsMap, p)
  4668  	}
  4669  
  4670  	t.Log("Make sure the TLF path is correctly marked as synced")
  4671  	tlfPath := fmt.Sprintf("/keybase/private/%s", name)
  4672  	require.True(t, config.IsSyncedTlfPath(tlfPath))
  4673  }
  4674  
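        // waitForPrefetchInTest blocks until the prefetch rooted at node's
        // top block completes, or the context is canceled.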
  4675  func waitForPrefetchInTest(
  4676  	ctx context.Context, t *testing.T, config Config, node Node) {
  4677  	t.Helper()
  4678  	md, err := config.KBFSOps().GetNodeMetadata(ctx, node)
  4679  	require.NoError(t, err)
  4680  	ch, err := config.BlockOps().Prefetcher().WaitChannelForBlockPrefetch(
  4681  		ctx, md.BlockInfo.BlockPointer)
  4682  	require.NoError(t, err)
  4683  	select {
  4684  	case <-ch:
  4685  	case <-ctx.Done():
  4686  		require.FailNow(t, ctx.Err().Error())
  4687  	}
  4688  }
  4689  
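        // waitForIndirectPtrBlocksInTest fetches every child block pointed to
        // by node's top block (if that block is indirect), so the blocks are
        // all cached before any cache counts are checked.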
  4690  func waitForIndirectPtrBlocksInTest(
  4691  	ctx context.Context, t *testing.T, config Config, node Node,
  4692  	kmd libkey.KeyMetadata) {
  4693  	t.Helper()
  4694  	md, err := config.KBFSOps().GetNodeMetadata(ctx, node)
  4695  	require.NoError(t, err)
  4696  	block, err := config.BlockCache().Get(md.BlockInfo.BlockPointer)
  4697  	require.NoError(t, err)
  4698  	if !block.IsIndirect() {
  4699  		return
  4700  	}
  4701  	b := block.(data.BlockWithPtrs)
  4702  	require.NotNil(t, b)
  4703  	for i := 0; i < b.NumIndirectPtrs(); i++ {
  4704  		info, _ := b.IndirectPtr(i)
  4705  		newBlock := block.NewEmpty()
  4706  		t.Logf("Waiting for block %s", info.BlockPointer)
  4707  		err := config.BlockOps().Get(
  4708  			ctx, kmd, info.BlockPointer, newBlock, data.TransientEntry,
  4709  			data.MasterBranch)
  4710  		require.NoError(t, err)
  4711  	}
  4712  }
  4713  
  4714  func TestKBFSOpsPartialSync(t *testing.T) {
  4715  	var u1 kbname.NormalizedUsername = "u1"
  4716  	config, _, ctx, cancel := kbfsOpsConcurInit(t, u1)
  4717  	defer kbfsConcurTestShutdown(ctx, t, config, cancel)
  4718  	config.SetVLogLevel(libkb.VLog2String)
  4719  
  4720  	name := "u1"
  4721  	h, err := tlfhandle.ParseHandle(
  4722  		ctx, config.KBPKI(), config.MDOps(), nil, name, tlf.Private)
  4723  	require.NoError(t, err)
  4724  	kbfsOps := config.KBFSOps()
  4725  
  4726  	tempdir, err := ioutil.TempDir(os.TempDir(), "disk_cache")
  4727  	require.NoError(t, err)
  4728  	defer func() {
  4729  		err := ioutil.RemoveAll(tempdir)
  4730  		require.NoError(t, err)
  4731  	}()
  4732  	dbc := enableDiskCacheForTest(t, config, tempdir)
  4733  
  4734  	// config2 is the writer.
  4735  	config2 := ConfigAsUser(config, u1)
  4736  	defer CheckConfigAndShutdown(ctx, t, config2)
  4737  	kbfsOps2 := config2.KBFSOps()
  4738  	// Turn the directories into indirect blocks when they have more
  4739  	// than one entry, to make sure we sync the entire parent
  4740  	// directories on partial paths.
  4741  	config2.BlockSplitter().(*data.BlockSplitterSimple).
  4742  		SetMaxDirEntriesPerBlockForTesting(1)
  4743  
  4744  	t.Log("Initialize the TLF")
  4745  	rootNode2, _, err := kbfsOps2.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4746  	require.NoError(t, err)
  4747  	aNode, _, err := kbfsOps2.CreateDir(ctx, rootNode2, testPPS("a"))
  4748  	require.NoError(t, err)
  4749  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  4750  	require.NoError(t, err)
  4751  
  4752  	t.Log("Set the sync config on first device")
  4753  	config.SetBlockServer(bserverPutToDiskCache{config.BlockServer(), dbc})
  4754  	rootNode, _, err := kbfsOps.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4755  	require.NoError(t, err)
  4756  	syncConfig := keybase1.FolderSyncConfig{
  4757  		Mode:  keybase1.FolderSyncMode_PARTIAL,
  4758  		Paths: []string{"a/b/c"},
  4759  	}
  4760  	_, err = kbfsOps.SetSyncConfig(ctx, h.TlfID(), syncConfig)
  4761  	require.NoError(t, err)
  4762  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4763  	require.NoError(t, err)
  4764  
  4765  	ops := getOps(config, rootNode.GetFolderBranch().Tlf)
  4766  	kmd := ops.head
  4767  
  4768  	t.Log("Root block and 'a' block should be synced")
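        	// checkSyncCache waits for the given nodes' prefetches (plus the
        	// indirect pointers of the root and `a` blocks) to finish, then
        	// asserts that the sync cache holds exactly expectedBlocks blocks.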
  4769  	checkSyncCache := func(expectedBlocks uint64, nodesToWaitOn ...Node) {
  4770  		for _, node := range nodesToWaitOn {
  4771  			waitForPrefetchInTest(ctx, t, config, node)
  4772  		}
  4773  
  4774  		// We can't wait for root and `a` to be prefetched, because
  4775  		// `a/b2` will not be prefetched, so those node prefetches
  4776  		// won't necessarily complete in this test.  Instead, wait for
  4777  		// all their indirect pointers to be retrieved and cached, so
  4778  		// the sync cache counts will be correct.
  4779  		waitForIndirectPtrBlocksInTest(ctx, t, config, rootNode, kmd)
  4780  		waitForIndirectPtrBlocksInTest(ctx, t, config, aNode, kmd)
  4781  
  4782  		syncStatusMap := dbc.syncCache.Status(ctx)
  4783  		require.Len(t, syncStatusMap, 1)
  4784  		syncStatus, ok := syncStatusMap[syncCacheName]
  4785  		require.True(t, ok)
  4786  		require.Equal(t, expectedBlocks, syncStatus.NumBlocks)
  4787  	}
  4788  	checkSyncCache(2, rootNode, aNode)
  4789  
  4790  	t.Log("Writer completes the synced path, along with other directories")
  4791  	bNode, _, err := kbfsOps2.CreateDir(ctx, aNode, testPPS("b"))
  4792  	require.NoError(t, err)
  4793  	b2Node, _, err := kbfsOps2.CreateDir(ctx, aNode, testPPS("b2"))
  4794  	require.NoError(t, err)
  4795  	cNode, _, err := kbfsOps2.CreateDir(ctx, bNode, testPPS("c"))
  4796  	require.NoError(t, err)
  4797  	_, _, err = kbfsOps2.CreateDir(ctx, b2Node, testPPS("c2"))
  4798  	require.NoError(t, err)
  4799  	dNode, _, err := kbfsOps2.CreateDir(ctx, rootNode2, testPPS("d"))
  4800  	require.NoError(t, err)
  4801  	c, err := DisableUpdatesForTesting(config, rootNode.GetFolderBranch())
  4802  	require.NoError(t, err)
  4803  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  4804  	require.NoError(t, err)
  4805  	err = kbfsOps2.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4806  	require.NoError(t, err)
  4807  
  4808  	t.Log("Blocks 'b' and 'c' should be synced, nothing else")
  4809  	c <- struct{}{}
  4810  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4811  	require.NoError(t, err)
  4812  
  4813  	// 8 blocks: root node (1 indirect, 2 direct), `a` node (1
  4814  	// indirect, 2 direct), `b` node, `c` node (and the old archived
  4815  	// ones have been GC'd from the sync cache).
  4816  	checkSyncCache(8, bNode, cNode)
  4817  
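        	// checkStatus reads the prefetch status for node's top block
        	// directly from the sync cache; a missing entry counts as
        	// NoPrefetch.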
  4818  	checkStatus := func(node Node, expectedStatus PrefetchStatus) {
  4819  		t.Helper()
  4820  		md, err := kbfsOps.GetNodeMetadata(ctx, node)
  4821  		require.NoError(t, err)
  4822  		// Get the prefetch status directly from the sync cache.
  4823  		dmd, err := config.DiskBlockCache().(*diskBlockCacheWrapped).syncCache.
  4824  			GetMetadata(ctx, md.BlockInfo.ID)
  4825  		var ps PrefetchStatus
  4826  		if errors.Cause(err) == ldberrors.ErrNotFound {
  4827  			ps = NoPrefetch
  4828  		} else {
  4829  			require.NoError(t, err)
  4830  			ps = dmd.PrefetchStatus()
  4831  		}
  4832  		require.Equal(t, expectedStatus, ps)
  4833  	}
  4834  	// Note that we're deliberately passing Nodes created by kbfsOps2
  4835  	// into kbfsOps here.  That's necessary to avoid triggering
  4836  	// prefetches on the normal path when kbfsOps does the lookups it
  4837  	// would otherwise need to make those nodes.
  4838  	checkStatus(cNode, FinishedPrefetch)
  4839  
  4840  	t.Log("Add more data under prefetched path")
  4841  	eNode, _, err := kbfsOps2.CreateDir(ctx, cNode, testPPS("e"))
  4842  	require.NoError(t, err)
  4843  	fNode, _, err := kbfsOps2.CreateFile(
  4844  		ctx, eNode, testPPS("f"), false, NoExcl)
  4845  	require.NoError(t, err)
  4846  	err = kbfsOps2.Write(ctx, fNode, []byte("fdata"), 0)
  4847  	require.NoError(t, err)
  4848  	c, err = DisableUpdatesForTesting(config, rootNode.GetFolderBranch())
  4849  	require.NoError(t, err)
  4850  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  4851  	require.NoError(t, err)
  4852  	err = kbfsOps2.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4853  	require.NoError(t, err)
  4854  
  4855  	t.Log("Check that two new blocks are synced")
  4856  	c <- struct{}{}
  4857  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4858  	require.NoError(t, err)
  4859  
  4860  	checkSyncCache(10, cNode)
  4861  	checkStatus(cNode, FinishedPrefetch)
  4862  	checkStatus(eNode, FinishedPrefetch)
  4863  	checkStatus(fNode, FinishedPrefetch)
  4864  
  4865  	t.Log("Add something that's not synced")
  4866  	gNode, _, err := kbfsOps2.CreateDir(ctx, dNode, testPPS("g"))
  4867  	require.NoError(t, err)
  4868  	c, err = DisableUpdatesForTesting(config, rootNode.GetFolderBranch())
  4869  	require.NoError(t, err)
  4870  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  4871  	require.NoError(t, err)
  4872  	err = kbfsOps2.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4873  	require.NoError(t, err)
  4874  
  4875  	t.Log("Check that the updated root block is synced, but nothing new")
  4876  	c <- struct{}{}
  4877  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4878  	require.NoError(t, err)
  4879  
  4880  	checkSyncCache(10, cNode)
  4881  	checkStatus(cNode, FinishedPrefetch)
  4882  	checkStatus(eNode, FinishedPrefetch)
  4883  	checkStatus(fNode, FinishedPrefetch)
  4884  
  4885  	t.Log("Sync the new path")
  4886  	syncConfig.Paths = append(syncConfig.Paths, "d")
  4887  	_, err = kbfsOps.SetSyncConfig(ctx, h.TlfID(), syncConfig)
  4888  	require.NoError(t, err)
  4889  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4890  	require.NoError(t, err)
  4891  
  4892  	checkSyncCache(12, cNode, dNode)
  4893  	checkStatus(cNode, FinishedPrefetch)
  4894  	checkStatus(eNode, FinishedPrefetch)
  4895  	checkStatus(fNode, FinishedPrefetch)
  4896  	checkStatus(dNode, FinishedPrefetch)
  4897  	checkStatus(gNode, FinishedPrefetch)
  4898  
  4899  	t.Log("Remove a synced path")
  4900  	syncConfig.Paths = syncConfig.Paths[:len(syncConfig.Paths)-1]
  4901  	_, err = kbfsOps.SetSyncConfig(ctx, h.TlfID(), syncConfig)
  4902  	require.NoError(t, err)
  4903  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4904  	require.NoError(t, err)
  4905  
  4906  	checkSyncCache(10, cNode)
  4907  	checkStatus(cNode, FinishedPrefetch)
  4908  	checkStatus(eNode, FinishedPrefetch)
  4909  	checkStatus(fNode, FinishedPrefetch)
  4910  	checkStatus(dNode, NoPrefetch)
  4911  	checkStatus(gNode, NoPrefetch)
  4912  
  4913  	t.Log("Move a synced subdirectory somewhere else")
  4914  	err = kbfsOps2.Rename(ctx, cNode, testPPS("e"), dNode, testPPS("e"))
  4915  	require.NoError(t, err)
  4916  	c, err = DisableUpdatesForTesting(config, rootNode.GetFolderBranch())
  4917  	require.NoError(t, err)
  4918  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  4919  	require.NoError(t, err)
  4920  	err = kbfsOps2.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4921  	require.NoError(t, err)
  4922  	c <- struct{}{}
  4923  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4924  	require.NoError(t, err)
  4925  
  4926  	t.Log("Trigger a mark-and-sweep right away, to simulate the timer")
  4927  	ops.triggerMarkAndSweepLocked()
  4928  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4929  	require.NoError(t, err)
  4930  
  4931  	checkSyncCache(8, cNode)
  4932  	checkStatus(cNode, FinishedPrefetch)
  4933  	checkStatus(eNode, NoPrefetch)
  4934  	checkStatus(fNode, NoPrefetch)
  4935  }
  4936  
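        // modeTestWithPrefetch is a modeTest that also reports edit-history
        // prefetching as enabled.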
  4937  type modeTestWithPrefetch struct {
  4938  	modeTest
  4939  }
  4940  
  4941  func (mtwp modeTestWithPrefetch) EditHistoryPrefetchingEnabled() bool {
  4942  	return true
  4943  }
  4944  
  4945  func TestKBFSOpsRecentHistorySync(t *testing.T) {
  4946  	var u1 kbname.NormalizedUsername = "u1"
  4947  	config, _, ctx, cancel := kbfsOpsConcurInit(t, u1)
  4948  	defer kbfsConcurTestShutdown(ctx, t, config, cancel)
  4949  	// kbfsOpsConcurInit turns off notifications, so turn them back on, with edit-history prefetching enabled.
  4950  	config.SetMode(
  4951  		modeTestWithPrefetch{modeTest{NewInitModeFromType(InitDefault)}})
  4952  	config.SetVLogLevel(libkb.VLog2String)
  4953  
  4954  	name := "u1"
  4955  	h, err := tlfhandle.ParseHandle(
  4956  		ctx, config.KBPKI(), config.MDOps(), nil, name, tlf.Private)
  4957  	require.NoError(t, err)
  4958  	kbfsOps := config.KBFSOps()
  4959  
  4960  	tempdir, err := ioutil.TempDir(os.TempDir(), "disk_cache")
  4961  	require.NoError(t, err)
  4962  	defer func() {
  4963  		err := ioutil.RemoveAll(tempdir)
  4964  		require.NoError(t, err)
  4965  	}()
  4966  	dbc := enableDiskCacheForTest(t, config, tempdir)
  4967  
  4968  	// config2 is the writer.
  4969  	config2 := ConfigAsUser(config, u1)
  4970  	defer CheckConfigAndShutdown(ctx, t, config2)
  4971  	config2.SetMode(
  4972  		modeTestWithPrefetch{modeTest{NewInitModeFromType(InitDefault)}})
  4973  	kbfsOps2 := config2.KBFSOps()
  4974  
  4975  	config.SetBlockServer(bserverPutToDiskCache{config.BlockServer(), dbc})
  4976  
  4977  	t.Log("Initialize the TLF")
  4978  	rootNode2, _, err := kbfsOps2.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4979  	require.NoError(t, err)
  4980  	aNode, _, err := kbfsOps2.CreateDir(ctx, rootNode2, testPPS("a"))
  4981  	require.NoError(t, err)
  4982  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  4983  	require.NoError(t, err)
  4984  
  4985  	t.Log("No files were edited, but fetching the root block will prefetch a")
  4986  	rootNode, _, err := kbfsOps.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  4987  	require.NoError(t, err)
  4988  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  4989  	require.NoError(t, err)
  4990  
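        	// checkWorkingSetCache waits for the root and `a` prefetches to
        	// finish, then asserts that the working-set cache holds exactly
        	// expectedBlocks blocks.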
  4991  	checkWorkingSetCache := func(expectedBlocks uint64) {
  4992  		waitForPrefetchInTest(ctx, t, config, rootNode)
  4993  		waitForPrefetchInTest(ctx, t, config, aNode)
  4994  
  4995  		statusMap := dbc.workingSetCache.Status(ctx)
  4996  		require.Len(t, statusMap, 1)
  4997  		status, ok := statusMap[workingSetCacheName]
  4998  		require.True(t, ok)
  4999  		require.Equal(t, expectedBlocks, status.NumBlocks)
  5000  	}
  5001  	checkWorkingSetCache(2)
  5002  
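        	// checkStatus asserts the prefetch status reported in node's
        	// metadata.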
  5003  	checkStatus := func(node Node, expectedStatus PrefetchStatus) {
  5004  		md, err := kbfsOps.GetNodeMetadata(ctx, node)
  5005  		require.NoError(t, err)
  5006  		require.Equal(t, expectedStatus, md.PrefetchStatus)
  5007  	}
  5008  	checkStatus(rootNode, FinishedPrefetch)
  5009  	checkStatus(aNode, FinishedPrefetch)
  5010  
  5011  	t.Log("Writer adds a file, which gets prefetched")
  5012  	bNode, _, err := kbfsOps2.CreateFile(
  5013  		ctx, aNode, testPPS("b"), false, NoExcl)
  5014  	require.NoError(t, err)
  5015  	err = kbfsOps2.Write(ctx, bNode, []byte("bdata"), 0)
  5016  	require.NoError(t, err)
  5017  	err = kbfsOps2.SyncAll(ctx, rootNode2.GetFolderBranch())
  5018  	require.NoError(t, err)
  5019  	err = kbfsOps2.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  5020  	require.NoError(t, err)
  5021  
  5022  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  5023  	require.NoError(t, err)
  5024  	checkWorkingSetCache(3)
  5025  	checkStatus(bNode, FinishedPrefetch)
  5026  }
  5027  
  5028  // Regression test for HOTPOT-1612.
  5029  func TestDirtyAfterTruncateNoop(t *testing.T) {
  5030  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, "test_user")
  5031  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
  5032  
  5033  	rootNode := GetRootNodeOrBust(ctx, t, config, "test_user", tlf.Private)
  5034  	kbfsOps := config.KBFSOps()
  5035  
  5036  	t.Log("Create 0-byte file")
  5037  	nodeA, _, err := kbfsOps.CreateFile(
  5038  		ctx, rootNode, testPPS("a"), false, NoExcl)
  5039  	require.NoError(t, err)
  5040  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  5041  	require.NoError(t, err)
  5042  
  5043  	t.Log("Truncate the file to 0 bytes, which should be a no-op")
  5044  	err = kbfsOps.Truncate(ctx, nodeA, 0)
  5045  	require.NoError(t, err)
  5046  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  5047  	require.NoError(t, err)
  5048  
  5049  	t.Log("Nothing should actually be dirty")
  5050  	ops := getOps(config, rootNode.GetFolderBranch().Tlf)
  5051  	lState := makeFBOLockState()
  5052  	require.Equal(t, cleanState, ops.blocks.GetState(lState))
  5053  	status, _, err := kbfsOps.FolderStatus(ctx, rootNode.GetFolderBranch())
  5054  	require.NoError(t, err)
  5055  	require.Len(t, status.DirtyPaths, 0)
  5056  }
  5057  
  5058  func TestKBFSOpsCancelUploads(t *testing.T) {
  5059  	var userName kbname.NormalizedUsername = "u1"
  5060  	config, _, ctx, cancel := kbfsOpsInitNoMocks(t, userName)
  5061  	defer kbfsTestShutdownNoMocks(ctx, t, config, cancel)
  5062  
  5063  	tempdir, err := ioutil.TempDir(os.TempDir(), "kbfs_ops_test")
  5064  	require.NoError(t, err)
  5065  	defer func() {
  5066  		err := ioutil.RemoveAll(tempdir)
  5067  		require.NoError(t, err)
  5068  	}()
  5069  
  5070  	err = config.EnableDiskLimiter(tempdir)
  5071  	require.NoError(t, err)
  5072  	err = config.EnableJournaling(ctx, tempdir, TLFJournalBackgroundWorkEnabled)
  5073  	require.NoError(t, err)
  5074  
  5075  	name := "u1"
  5076  	h, err := tlfhandle.ParseHandle(
  5077  		ctx, config.KBPKI(), config.MDOps(), nil, name, tlf.Private)
  5078  	require.NoError(t, err)
  5079  	kbfsOps := config.KBFSOps()
  5080  
  5081  	t.Log("Create an initial directory")
  5082  	rootNode, _, err := kbfsOps.GetOrCreateRootNode(ctx, h, data.MasterBranch)
  5083  	require.NoError(t, err)
  5084  	aNode, _, err := kbfsOps.CreateDir(ctx, rootNode, testPPS("a"))
  5085  	require.NoError(t, err)
  5086  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  5087  	require.NoError(t, err)
  5088  	err = kbfsOps.SyncFromServer(ctx, rootNode.GetFolderBranch(), nil)
  5089  	require.NoError(t, err)
  5090  
  5091  	t.Log("Pause the journal to queue uploads")
  5092  	jManager, err := GetJournalManager(config)
  5093  	require.NoError(t, err)
  5094  	jManager.PauseBackgroundWork(ctx, rootNode.GetFolderBranch().Tlf)
  5095  
  5096  	t.Log("Add a few files")
  5097  	bNode, _, err := kbfsOps.CreateFile(ctx, aNode, testPPS("b"), false, NoExcl)
  5098  	require.NoError(t, err)
  5099  	err = kbfsOps.Write(ctx, bNode, []byte("bdata"), 0)
  5100  	require.NoError(t, err)
  5101  	cNode, _, err := kbfsOps.CreateFile(ctx, aNode, testPPS("c"), false, NoExcl)
  5102  	require.NoError(t, err)
  5103  	err = kbfsOps.Write(ctx, cNode, []byte("cdata"), 0)
  5104  	require.NoError(t, err)
  5105  	err = kbfsOps.SyncAll(ctx, rootNode.GetFolderBranch())
  5106  	require.NoError(t, err)
  5107  
  5108  	t.Log("Cancel the uploads and make sure we reverted")
  5109  	err = kbfsOps.CancelUploads(ctx, rootNode.GetFolderBranch())
  5110  	require.NoError(t, err)
  5111  	children, err := kbfsOps.GetDirChildren(ctx, aNode)
  5112  	require.NoError(t, err)
  5113  	require.Len(t, children, 0)
  5114  }