github.com/keybase/client/go@v0.0.0-20241007131713-f10651d043c8/kbfs/libkbfs/folder_block_ops.go

     1  // Copyright 2016 Keybase Inc. All rights reserved.
     2  // Use of this source code is governed by a BSD
     3  // license that can be found in the LICENSE file.
     4  
     5  package libkbfs
     6  
     7  import (
     8  	"fmt"
     9  	pathlib "path"
    10  	"time"
    11  
    12  	"github.com/keybase/client/go/kbfs/data"
    13  	"github.com/keybase/client/go/kbfs/idutil"
    14  	"github.com/keybase/client/go/kbfs/kbfscodec"
    15  	"github.com/keybase/client/go/kbfs/kbfssync"
    16  	"github.com/keybase/client/go/kbfs/libkey"
    17  	"github.com/keybase/client/go/kbfs/tlf"
    18  	"github.com/keybase/client/go/kbfs/tlfhandle"
    19  	"github.com/keybase/client/go/libkb"
    20  	"github.com/keybase/client/go/logger"
    21  	"github.com/keybase/client/go/protocol/keybase1"
    22  	"github.com/pkg/errors"
    23  	"golang.org/x/net/context"
    24  	"golang.org/x/sync/errgroup"
    25  )
    26  
    27  type overallBlockState int
    28  
    29  const (
    30  	// cleanState: no outstanding local writes.
    31  	cleanState overallBlockState = iota
    32  	// dirtyState: there are outstanding local writes that haven't yet been
    33  	// synced.
    34  	dirtyState
    35  )
    36  
    37  const (
    38  	// numBlockSizeWorkersMax is the max number of workers to use when
    39  	// fetching a set of block sizes.
    40  	numBlockSizeWorkersMax = 50
     41  	// How many pointers to fetch in a single block size call.
    42  	numBlockSizesPerChunk = 20
     43  	// truncateExtendCutoffPoint is the amount of data, in bytes, that an
     44  	// extending truncate must add to trigger the extend-with-a-hole algorithm.
    45  	truncateExtendCutoffPoint = 128 * 1024
    46  )
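
         // An illustrative note (not from the original source) on how these two
         // constants combine in GetCleanEncodedBlocksSizeSum below: for, say,
         // 110 pointers, ceil(110/20) = 6 chunks are queued, so
         // min(6, numBlockSizeWorkersMax) = 6 workers are spawned; only with
         // 1000 or more pointers would all 50 workers be used.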
    47  
    48  type mdToCleanIfUnused struct {
    49  	md  ReadOnlyRootMetadata
    50  	bps blockPutStateCopiable
    51  }
    52  
    53  type syncInfo struct {
    54  	oldInfo         data.BlockInfo
    55  	op              *syncOp
    56  	unrefs          []data.BlockInfo
    57  	bps             blockPutStateCopiable
    58  	refBytes        uint64
    59  	unrefBytes      uint64
    60  	toCleanIfUnused []mdToCleanIfUnused
    61  }
    62  
    63  func (si *syncInfo) DeepCopy(
    64  	ctx context.Context, codec kbfscodec.Codec) (newSi *syncInfo, err error) {
    65  	newSi = &syncInfo{
    66  		oldInfo:    si.oldInfo,
    67  		refBytes:   si.refBytes,
    68  		unrefBytes: si.unrefBytes,
    69  	}
    70  	newSi.unrefs = make([]data.BlockInfo, len(si.unrefs))
    71  	copy(newSi.unrefs, si.unrefs)
    72  	if si.bps != nil {
    73  		newSi.bps, err = si.bps.deepCopy(ctx)
    74  		if err != nil {
    75  			return nil, err
    76  		}
    77  	}
    78  	if si.op != nil {
    79  		err := kbfscodec.Update(codec, &newSi.op, si.op)
    80  		if err != nil {
    81  			return nil, err
    82  		}
    83  	}
    84  	newSi.toCleanIfUnused = make([]mdToCleanIfUnused, len(si.toCleanIfUnused))
    85  	for i, toClean := range si.toCleanIfUnused {
    86  		// It might be overkill to deep-copy these MDs and bpses,
    87  		// which are probably immutable, but for now let's do the safe
    88  		// thing.
    89  		copyMd, err := toClean.md.deepCopy(codec)
    90  		if err != nil {
    91  			return nil, err
    92  		}
    93  		newSi.toCleanIfUnused[i].md = copyMd.ReadOnly()
    94  		newSi.toCleanIfUnused[i].bps, err = toClean.bps.deepCopy(ctx)
    95  		if err != nil {
    96  			return nil, err
    97  		}
    98  	}
    99  	return newSi, nil
   100  }
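
         // A minimal usage sketch (assumes a caller with a valid ctx and a
         // Config exposing its Codec, as elsewhere in libkbfs):
         //
         //	newSi, err := si.DeepCopy(ctx, fbo.config.Codec())
         //	if err != nil {
         //		return err
         //	}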
   101  
   102  func (si *syncInfo) removeReplacedBlock(ctx context.Context,
   103  	log logger.Logger, ptr data.BlockPointer) {
   104  	for i, ref := range si.op.RefBlocks {
   105  		if ref == ptr {
   106  			log.CDebugf(ctx, "Replacing old ref %v", ptr)
   107  			si.op.RefBlocks = append(si.op.RefBlocks[:i],
   108  				si.op.RefBlocks[i+1:]...)
   109  			for j, unref := range si.unrefs {
   110  				if unref.BlockPointer == ptr {
   111  					si.unrefs = append(si.unrefs[:j], si.unrefs[j+1:]...)
   112  				}
   113  			}
   114  			break
   115  		}
   116  	}
   117  }
   118  
   119  func (si *syncInfo) mergeUnrefCache(md *RootMetadata) {
   120  	for _, info := range si.unrefs {
    121  	// It's OK to push the same ptr.ID/RefNonce multiple times,
   122  		// because the subsequent ones should have a QuotaSize of 0.
   123  		md.AddUnrefBlock(info)
   124  	}
   125  }
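
         // A sketch of the intended flow (hypothetical call sites): unref
         // infos accumulate on the syncInfo as writes replace blocks, and are
         // folded into the MD once, at sync time:
         //
         //	si.unrefs = append(si.unrefs, oldInfo) // during a write/truncate
         //	si.mergeUnrefCache(md)                 // later, when preparing the sync MD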
   126  
   127  type deferredState struct {
   128  	// Writes and truncates for blocks that were being sync'd, and
   129  	// need to be replayed after the sync finishes on top of the new
   130  	// versions of the blocks.
   131  	writes []func(
   132  		context.Context, *kbfssync.LockState, KeyMetadataWithRootDirEntry,
   133  		data.Path) error
   134  	// Blocks that need to be deleted from the dirty cache before any
   135  	// deferred writes are replayed.
   136  	dirtyDeletes []data.BlockPointer
   137  	waitBytes    int64
   138  }
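
         // A deferred write is captured as a closure matching the `writes`
         // element type above and replayed against the post-sync path.  A
         // minimal sketch (replayWrite is a hypothetical helper):
         //
         //	ds.writes = append(ds.writes, func(
         //		ctx context.Context, lState *kbfssync.LockState,
         //		kmd KeyMetadataWithRootDirEntry, f data.Path) error {
         //		return replayWrite(ctx, lState, kmd, f)
         //	})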
   139  
   140  // folderBlockOps contains all the fields that must be synchronized by
   141  // blockLock. It will eventually also contain all the methods that
   142  // must be synchronized by blockLock, so that folderBranchOps will
   143  // have no knowledge of blockLock.
   144  //
   145  // -- And now, a primer on tracking dirty bytes --
   146  //
   147  // The DirtyBlockCache tracks the number of bytes that are dirtied
    148  // system-wide, both as the number of bytes that haven't yet been
    149  // synced ("unsynced") and as the number of bytes that haven't yet been
    150  // resolved because the overall file Sync hasn't finished ("total").
   151  // This data helps us decide when we need to block incoming Writes, in
   152  // order to keep memory usage from exploding.
   153  //
   154  // It's the responsibility of folderBlockOps (and its helper struct
   155  // dirtyFile) to update these totals in DirtyBlockCache for the
   156  // individual files within this TLF.  This is complicated by a few things:
   157  //   - New writes to a file are "deferred" while a Sync is happening, and
   158  //     are replayed after the Sync finishes.
   159  //   - Syncs can be canceled or error out halfway through syncing the blocks,
   160  //     leaving the file in a dirty state until the next Sync.
   161  //   - Syncs can fail with a /recoverable/ error, in which case they get
   162  //     retried automatically by folderBranchOps.  In that case, the retried
   163  //     Sync also sucks in any outstanding deferred writes.
   164  //
   165  // With all that in mind, here is the rough breakdown of how this
   166  // bytes-tracking is implemented:
   167  //   - On a Write/Truncate to a block, folderBranchOps counts all the
   168  //     newly-dirtied bytes in a file as "unsynced".  That is, if the block was
   169  //     already in the dirty cache (and not already being synced), only
   170  //     extensions to the block count as "unsynced" bytes.
   171  //   - When a Sync starts, dirtyFile remembers the total of bytes being synced,
   172  //     and the size of each block being synced.
   173  //   - When each block put finishes successfully, dirtyFile subtracts the size
   174  //     of that block from "unsynced".
   175  //   - When a Sync finishes successfully, the total sum of bytes in that sync
   176  //     are subtracted from the "total" dirty bytes outstanding.
   177  //   - If a Sync fails, but some blocks were put successfully, those blocks
   178  //     are "re-dirtied", which means they count as unsynced bytes again.
   179  //     dirtyFile handles this.
   180  //   - When a Write/Truncate is deferred due to an ongoing Sync, its bytes
   181  //     still count towards the "unsynced" total.  In fact, this essentially
    182  //     creates a new copy of those blocks, and the whole size of each block
    183  //     (not just the newly-dirtied bytes) counts toward the total.  However,
   184  //     when the write gets replayed, folderBlockOps first subtracts those bytes
   185  //     from the system-wide numbers, since they are about to be replayed.
   186  //   - When a Sync is retried after a recoverable failure, dirtyFile adds
   187  //     the newly-dirtied deferred bytes to the system-wide numbers, since they
   188  //     are now being assimilated into this Sync.
   189  //   - dirtyFile also exposes a concept of "orphaned" blocks.  These are child
   190  //     blocks being synced that are now referenced via a new, permanent block
    191  //     ID from the parent indirect block.  This matters when hard failures
    192  //     occur during a Sync -- the blocks will no longer be accessible under
    193  //     their old pointers, and so dirtyFile needs to know that their old
   194  //     bytes can be cleaned up now.
   195  type folderBlockOps struct {
   196  	config       Config
   197  	log          logger.Logger
   198  	vlog         *libkb.VDebugLog
   199  	folderBranch data.FolderBranch
   200  	observers    *observerList
   201  
   202  	// forceSyncChan can be sent on to trigger an immediate
   203  	// Sync().  It is a blocking channel.
   204  	forceSyncChan chan<- struct{}
   205  
   206  	// protects access to blocks in this folder and all fields
   207  	// below.
   208  	blockLock blockLock
   209  
   210  	// Which files are currently dirty and have dirty blocks that are either
   211  	// currently syncing, or waiting to be sync'd.
   212  	dirtyFiles map[data.BlockPointer]*data.DirtyFile
   213  
   214  	// For writes and truncates, track the unsynced to-be-unref'd
   215  	// block infos, per-path.
   216  	unrefCache map[data.BlockRef]*syncInfo
   217  
    218  	// dirtyDirs tracks which directories are currently dirty in this
   219  	// TLF.
   220  	dirtyDirs          map[data.BlockPointer][]data.BlockInfo
   221  	dirtyDirsSyncing   bool
   222  	deferredDirUpdates []func(lState *kbfssync.LockState) error
   223  
   224  	// dirtyRootDirEntry is a DirEntry representing the root of the
   225  	// TLF (to be copied into the RootMetadata on a sync).
   226  	dirtyRootDirEntry *data.DirEntry
   227  
   228  	chargedTo keybase1.UserOrTeamID
   229  
   230  	// Track deferred operations on a per-file basis.
   231  	deferred map[data.BlockRef]deferredState
   232  
   233  	// set to true if this write or truncate should be deferred
   234  	doDeferWrite bool
   235  
   236  	// While this channel is non-nil and non-closed, writes get blocked.
   237  	holdNewWritesCh <-chan struct{}
   238  
   239  	// nodeCache itself is goroutine-safe, but write/truncate must
   240  	// call PathFromNode() only under blockLock (see nodeCache
   241  	// comments in folder_branch_ops.go).
   242  	nodeCache NodeCache
   243  }
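
         // To make the primer above concrete (an illustrative example, not
         // from the original source): a 100-byte write to a clean block counts
         // 100 "unsynced" bytes; when a Sync puts that block, "unsynced" drops
         // by the block's size; if a deferred write re-dirties the block
         // mid-Sync, the whole block size is counted again, and those bytes
         // are subtracted from the system-wide totals just before the deferred
         // write is replayed.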
   244  
   245  // Only exported methods of folderBlockOps should be used outside of this
   246  // file.
   247  //
   248  // Although, temporarily, folderBranchOps is allowed to reach in and
   249  // manipulate folderBlockOps fields and methods directly.
   250  
   251  func (fbo *folderBlockOps) id() tlf.ID {
   252  	return fbo.folderBranch.Tlf
   253  }
   254  
   255  func (fbo *folderBlockOps) branch() data.BranchName {
   256  	return fbo.folderBranch.Branch
   257  }
   258  
   259  func (fbo *folderBlockOps) isSyncedTlf() bool {
   260  	return fbo.branch() == data.MasterBranch && fbo.config.IsSyncedTlf(fbo.id())
   261  }
   262  
   263  // GetState returns the overall block state of this TLF.
   264  func (fbo *folderBlockOps) GetState(
   265  	lState *kbfssync.LockState) overallBlockState {
   266  	fbo.blockLock.RLock(lState)
   267  	defer fbo.blockLock.RUnlock(lState)
   268  	if len(fbo.dirtyFiles) == 0 && len(fbo.dirtyDirs) == 0 &&
   269  		fbo.dirtyRootDirEntry == nil {
   270  		return cleanState
   271  	}
   272  	return dirtyState
   273  }
   274  
   275  // getCleanEncodedBlockSizesLocked retrieves the encoded sizes and
    276  // block statuses of the clean blocks pointed to by each of the block
   277  // pointers in `ptrs`, which must be valid, either from the cache or
   278  // from the server.  If `rtype` is `blockReadParallel`, it's assumed
   279  // that some coordinating goroutine is holding the correct locks, and
   280  // in that case `lState` must be `nil`.
   281  func (fbo *folderBlockOps) getCleanEncodedBlockSizesLocked(ctx context.Context,
   282  	lState *kbfssync.LockState, kmd libkey.KeyMetadata,
   283  	ptrs []data.BlockPointer, branch data.BranchName,
   284  	rtype data.BlockReqType, assumeCacheIsLive bool) (
   285  	sizes []uint32, statuses []keybase1.BlockStatus, err error) {
   286  	if rtype != data.BlockReadParallel {
   287  		if rtype == data.BlockWrite {
   288  			panic("Cannot get the size of a block for writing")
   289  		}
   290  		fbo.blockLock.AssertAnyLocked(lState)
   291  	} else if lState != nil {
   292  		panic("Non-nil lState passed to getCleanEncodedBlockSizeLocked " +
   293  			"with blockReadParallel")
   294  	}
   295  
   296  	sizes = make([]uint32, len(ptrs))
   297  	statuses = make([]keybase1.BlockStatus, len(ptrs))
   298  	var toFetchIndices []int
   299  	var ptrsToFetch []data.BlockPointer
   300  	for i, ptr := range ptrs {
   301  		if !ptr.IsValid() {
   302  			return nil, nil, InvalidBlockRefError{ptr.Ref()}
   303  		}
   304  
   305  		if assumeCacheIsLive {
   306  			// If we're assuming all blocks in the cache are live, we just
   307  			// need to get the block size, which we can do from either one
   308  			// of the caches.
   309  			if block, err := fbo.config.BlockCache().Get(ptr); err == nil {
   310  				sizes[i] = block.GetEncodedSize()
   311  				statuses[i] = keybase1.BlockStatus_LIVE
   312  				continue
   313  			}
   314  			if diskBCache := fbo.config.DiskBlockCache(); diskBCache != nil {
   315  				cacheType := DiskBlockAnyCache
   316  				if fbo.isSyncedTlf() {
   317  					cacheType = DiskBlockSyncCache
   318  				}
   319  				if buf, _, _, err := diskBCache.Get(
   320  					ctx, fbo.id(), ptr.ID, cacheType); err == nil {
   321  					sizes[i] = uint32(len(buf))
   322  					statuses[i] = keybase1.BlockStatus_LIVE
   323  					continue
   324  				}
   325  			}
   326  		}
   327  
   328  		if err := checkDataVersion(fbo.config, data.Path{}, ptr); err != nil {
   329  			return nil, nil, err
   330  		}
   331  
   332  		// Fetch this block from the server.
   333  		ptrsToFetch = append(ptrsToFetch, ptr)
   334  		toFetchIndices = append(toFetchIndices, i)
   335  	}
   336  
   337  	defer func() {
   338  		fbo.vlog.CLogf(
   339  			ctx, libkb.VLog1, "GetEncodedSizes ptrs=%v sizes=%d statuses=%s: "+
   340  				"%+v", ptrs, sizes, statuses, err)
   341  		if err != nil {
   342  			return
   343  		}
   344  
   345  		// In certain testing situations, a block might be represented
   346  		// with a 0 size in our journal or be missing from our local
   347  		// data stores, and we need to reconstruct the size using the
   348  		// cache in order to make the accounting work out for the test.
   349  		for i, ptr := range ptrs {
   350  			if sizes[i] == 0 {
   351  				if block, cerr := fbo.config.BlockCache().Get(
   352  					ptr); cerr == nil {
   353  					fbo.vlog.CLogf(
   354  						ctx, libkb.VLog1,
   355  						"Fixing encoded size of %v with cached copy", ptr)
   356  					sizes[i] = block.GetEncodedSize()
   357  				}
   358  			}
   359  		}
   360  	}()
   361  
   362  	// Unlock the blockLock while we wait for the network, only if
   363  	// it's locked for reading by a single goroutine.  If it's locked
   364  	// for writing, that indicates we are performing an atomic write
   365  	// operation, and we need to ensure that nothing else comes in and
   366  	// modifies the blocks, so don't unlock.
   367  	//
   368  	// If there may be multiple goroutines fetching blocks under the
   369  	// same lState, we can't safely unlock since some of the other
   370  	// goroutines may be operating on the data assuming they have the
   371  	// lock.
   372  	bops := fbo.config.BlockOps()
   373  	var fetchedSizes []uint32
   374  	var fetchedStatuses []keybase1.BlockStatus
   375  	if rtype != data.BlockReadParallel && rtype != data.BlockLookup {
   376  		fbo.blockLock.DoRUnlockedIfPossible(lState, func(*kbfssync.LockState) {
   377  			fetchedSizes, fetchedStatuses, err = bops.GetEncodedSizes(
   378  				ctx, kmd, ptrsToFetch)
   379  		})
   380  	} else {
   381  		fetchedSizes, fetchedStatuses, err = bops.GetEncodedSizes(
   382  			ctx, kmd, ptrsToFetch)
   383  	}
   384  	if err != nil {
   385  		return nil, nil, err
   386  	}
   387  
   388  	for i, j := range toFetchIndices {
   389  		sizes[j] = fetchedSizes[i]
   390  		statuses[j] = fetchedStatuses[i]
   391  	}
   392  
   393  	return sizes, statuses, nil
   394  }
   395  
   396  // getBlockHelperLocked retrieves the block pointed to by ptr, which
   397  // must be valid, either from the cache or from the server. If
   398  // notifyPath is valid and the block isn't cached, trigger a read
   399  // notification.  If `rtype` is `blockReadParallel`, it's assumed that
   400  // some coordinating goroutine is holding the correct locks, and
   401  // in that case `lState` must be `nil`.
   402  //
   403  // This must be called only by get{File,Dir}BlockHelperLocked().
   404  func (fbo *folderBlockOps) getBlockHelperLocked(ctx context.Context,
   405  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
   406  	branch data.BranchName, newBlock makeNewBlock, lifetime data.BlockCacheLifetime,
   407  	notifyPath data.Path, rtype data.BlockReqType) (data.Block, error) {
   408  	if rtype != data.BlockReadParallel {
   409  		fbo.blockLock.AssertAnyLocked(lState)
   410  	} else if lState != nil {
   411  		panic("Non-nil lState passed to getBlockHelperLocked " +
   412  			"with blockReadParallel")
   413  	}
   414  
   415  	if !ptr.IsValid() {
   416  		return nil, InvalidBlockRefError{ptr.Ref()}
   417  	}
   418  
   419  	if block, err := fbo.config.DirtyBlockCache().Get(
   420  		ctx, fbo.id(), ptr, branch); err == nil {
   421  		return block, nil
   422  	}
   423  
   424  	if block, lifetime, err := fbo.config.BlockCache().GetWithLifetime(ptr); err == nil {
   425  		if lifetime != data.PermanentEntry {
   426  			// If the block was cached in the past, and is not a permanent
   427  			// block (i.e., currently being written by the user), we need
   428  			// to handle it as if it's an on-demand request so that its
   429  			// downstream prefetches are triggered correctly according to
   430  			// the new on-demand fetch priority.
   431  			action := fbo.config.Mode().DefaultBlockRequestAction()
   432  			if fbo.isSyncedTlf() {
   433  				action = action.AddSync()
   434  			}
   435  			prefetchStatus := fbo.config.PrefetchStatus(ctx, fbo.id(), ptr)
   436  			fbo.config.BlockOps().Prefetcher().ProcessBlockForPrefetch(ctx, ptr,
   437  				block, kmd, defaultOnDemandRequestPriority-1, lifetime,
   438  				prefetchStatus, action)
   439  		}
   440  		return block, nil
   441  	}
   442  
   443  	if err := checkDataVersion(fbo.config, notifyPath, ptr); err != nil {
   444  		return nil, err
   445  	}
   446  
   447  	if notifyPath.IsValidForNotification() {
   448  		fbo.config.Reporter().Notify(ctx, readNotification(notifyPath, false))
   449  		defer fbo.config.Reporter().Notify(ctx,
   450  			readNotification(notifyPath, true))
   451  	}
   452  
   453  	// Unlock the blockLock while we wait for the network, only if
   454  	// it's locked for reading by a single goroutine.  If it's locked
   455  	// for writing, that indicates we are performing an atomic write
   456  	// operation, and we need to ensure that nothing else comes in and
   457  	// modifies the blocks, so don't unlock.
   458  	//
   459  	// If there may be multiple goroutines fetching blocks under the
   460  	// same lState, we can't safely unlock since some of the other
   461  	// goroutines may be operating on the data assuming they have the
   462  	// lock.
    463  	// Fetch the block from the server, and add it to the cache.
   464  	block := newBlock()
   465  	bops := fbo.config.BlockOps()
   466  	var err error
   467  	if rtype != data.BlockReadParallel && rtype != data.BlockLookup {
   468  		fbo.blockLock.DoRUnlockedIfPossible(lState, func(*kbfssync.LockState) {
   469  			err = bops.Get(ctx, kmd, ptr, block, lifetime, fbo.branch())
   470  		})
   471  	} else {
   472  		err = bops.Get(ctx, kmd, ptr, block, lifetime, fbo.branch())
   473  	}
   474  	if err != nil {
   475  		return nil, err
   476  	}
   477  
   478  	return block, nil
   479  }
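
         // The unlock-during-fetch idiom above generalizes to any slow call
         // made under a read lock; a minimal sketch (doSlowFetch is a
         // hypothetical network call):
         //
         //	var err error
         //	fbo.blockLock.DoRUnlockedIfPossible(lState, func(*kbfssync.LockState) {
         //		err = doSlowFetch(ctx)
         //	})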
   480  
   481  // getFileBlockHelperLocked retrieves the block pointed to by ptr,
   482  // which must be valid, either from an internal cache, the block
   483  // cache, or from the server. An error is returned if the retrieved
   484  // block is not a file block.  If `rtype` is `blockReadParallel`, it's
   485  // assumed that some coordinating goroutine is holding the correct
   486  // locks, and in that case `lState` must be `nil`.
   487  //
   488  // This must be called only by GetFileBlockForReading(),
   489  // getFileBlockLocked(), and getFileLocked().
   490  //
   491  // p is used only when reporting errors and sending read
   492  // notifications, and can be empty.
   493  func (fbo *folderBlockOps) getFileBlockHelperLocked(ctx context.Context,
   494  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
   495  	branch data.BranchName, p data.Path, rtype data.BlockReqType) (
   496  	*data.FileBlock, error) {
   497  	if rtype != data.BlockReadParallel {
   498  		fbo.blockLock.AssertAnyLocked(lState)
   499  	} else if lState != nil {
   500  		panic("Non-nil lState passed to getFileBlockHelperLocked " +
   501  			"with blockReadParallel")
   502  	}
   503  
   504  	block, err := fbo.getBlockHelperLocked(
   505  		ctx, lState, kmd, ptr, branch, data.NewFileBlock, data.TransientEntry, p, rtype)
   506  	if err != nil {
   507  		return nil, err
   508  	}
   509  
   510  	fblock, ok := block.(*data.FileBlock)
   511  	if !ok {
   512  		return nil, NotFileBlockError{ptr, branch, p}
   513  	}
   514  
   515  	return fblock, nil
   516  }
   517  
   518  // GetCleanEncodedBlocksSizeSum retrieves the sum of the encoded sizes
   519  // of the blocks pointed to by ptrs, all of which must be valid,
   520  // either from the cache or from the server.
   521  //
   522  // The caller can specify a set of pointers using
   523  // `ignoreRecoverableForRemovalErrors` for which "recoverable" fetch
   524  // errors are tolerated.  In that case, the returned sum will not
   525  // include the size for any pointers in the
   526  // `ignoreRecoverableForRemovalErrors` set that hit such an error.
   527  //
   528  // This should be called for "internal" operations, like conflict
   529  // resolution and state checking, which don't know what kind of block
   530  // the pointers refer to.  Any downloaded blocks will not be cached,
   531  // if they weren't in the cache already.
   532  //
    533  // If `onlyCountIfLive` is true, the sum includes only blocks that the
    534  // bserver thinks are currently reachable from the merged branch
    535  // (i.e., not archived).
   536  func (fbo *folderBlockOps) GetCleanEncodedBlocksSizeSum(ctx context.Context,
   537  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptrs []data.BlockPointer,
   538  	ignoreRecoverableForRemovalErrors map[data.BlockPointer]bool,
   539  	branch data.BranchName, onlyCountIfLive bool) (uint64, error) {
   540  	fbo.blockLock.RLock(lState)
   541  	defer fbo.blockLock.RUnlock(lState)
   542  
   543  	ptrCh := make(chan []data.BlockPointer, len(ptrs))
   544  	sumCh := make(chan uint32, len(ptrs))
   545  
   546  	numChunks := (len(ptrs) + numBlockSizesPerChunk - 1) /
   547  		numBlockSizesPerChunk
   548  	numWorkers := numBlockSizeWorkersMax
   549  	if numChunks < numWorkers {
   550  		numWorkers = numChunks
   551  	}
   552  
   553  	currChunk := make([]data.BlockPointer, 0, numBlockSizesPerChunk)
   554  	for _, ptr := range ptrs {
   555  		currChunk = append(currChunk, ptr)
   556  		if len(currChunk) == numBlockSizesPerChunk {
   557  			ptrCh <- currChunk
   558  			currChunk = make([]data.BlockPointer, 0, numBlockSizesPerChunk)
   559  		}
   560  	}
   561  	if len(currChunk) > 0 {
   562  		ptrCh <- currChunk
   563  	}
   564  
   565  	// If we don't care if something's live or not, there's no reason
   566  	// not to use the cached block.
   567  	assumeCacheIsLive := !onlyCountIfLive
   568  	eg, groupCtx := errgroup.WithContext(ctx)
   569  	for i := 0; i < numWorkers; i++ {
   570  		eg.Go(func() error {
   571  			for ptrs := range ptrCh {
   572  				sizes, statuses, err := fbo.getCleanEncodedBlockSizesLocked(
   573  					groupCtx, nil, kmd, ptrs, branch,
   574  					data.BlockReadParallel, assumeCacheIsLive)
   575  				for i, ptr := range ptrs {
   576  					// TODO: we might be able to recover the size of the
   577  					// top-most block of a removed file using the merged
   578  					// directory entry, the same way we do in
   579  					// `folderBranchOps.unrefEntry`.
   580  					if isRecoverableBlockErrorForRemoval(err) &&
   581  						ignoreRecoverableForRemovalErrors[ptr] {
   582  						fbo.log.CDebugf(
   583  							groupCtx, "Hit an ignorable, recoverable "+
   584  								"error for block %v: %v", ptr, err)
   585  						continue
   586  					}
   587  					if err != nil {
   588  						return err
   589  					}
   590  
   591  					if onlyCountIfLive &&
   592  						statuses[i] != keybase1.BlockStatus_LIVE {
   593  						sumCh <- 0
   594  					} else {
   595  						sumCh <- sizes[i]
   596  					}
   597  				}
   598  			}
   599  			return nil
   600  		})
   601  	}
   602  	close(ptrCh)
   603  
   604  	if err := eg.Wait(); err != nil {
   605  		return 0, err
   606  	}
   607  	close(sumCh)
   608  
   609  	var sum uint64
   610  	for size := range sumCh {
   611  		sum += uint64(size)
   612  	}
   613  	return sum, nil
   614  }
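
         // A hypothetical caller that sums only live bytes while tolerating
         // recoverable errors for a block being removed (removedPtr is an
         // assumed value):
         //
         //	sum, err := fbo.GetCleanEncodedBlocksSizeSum(
         //		ctx, lState, kmd, ptrs,
         //		map[data.BlockPointer]bool{removedPtr: true},
         //		fbo.branch(), true /* onlyCountIfLive */)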
   615  
   616  // getDirBlockHelperLocked retrieves the block pointed to by ptr, which
   617  // must be valid, either from the cache or from the server. An error
   618  // is returned if the retrieved block is not a dir block.
   619  //
   620  // This must be called only by GetDirBlockForReading() and
   621  // getDirLocked().
   622  //
   623  // p is used only when reporting errors, and can be empty.
   624  func (fbo *folderBlockOps) getDirBlockHelperLocked(ctx context.Context,
   625  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
   626  	branch data.BranchName, p data.Path, rtype data.BlockReqType) (*data.DirBlock, error) {
   627  	if rtype != data.BlockReadParallel {
   628  		fbo.blockLock.AssertAnyLocked(lState)
   629  	}
   630  
   631  	// Check data version explicitly here, with the right path, since
   632  	// we pass an empty path below.
   633  	if err := checkDataVersion(fbo.config, p, ptr); err != nil {
   634  		return nil, err
   635  	}
   636  
   637  	// Pass in an empty notify path because notifications should only
   638  	// trigger for file reads.
   639  	block, err := fbo.getBlockHelperLocked(
   640  		ctx, lState, kmd, ptr, branch, data.NewDirBlock, data.TransientEntry,
   641  		data.Path{}, rtype)
   642  	if err != nil {
   643  		return nil, err
   644  	}
   645  
   646  	dblock, ok := block.(*data.DirBlock)
   647  	if !ok {
   648  		return nil, NotDirBlockError{ptr, branch, p}
   649  	}
   650  
   651  	return dblock, nil
   652  }
   653  
   654  // GetFileBlockForReading retrieves the block pointed to by ptr, which
   655  // must be valid, either from the cache or from the server. An error
   656  // is returned if the retrieved block is not a file block.
   657  //
   658  // This should be called for "internal" operations, like conflict
   659  // resolution and state checking. "Real" operations should use
   660  // getFileBlockLocked() and getFileLocked() instead.
   661  //
   662  // p is used only when reporting errors, and can be empty.
   663  func (fbo *folderBlockOps) GetFileBlockForReading(ctx context.Context,
   664  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
   665  	branch data.BranchName, p data.Path) (*data.FileBlock, error) {
   666  	fbo.blockLock.RLock(lState)
   667  	defer fbo.blockLock.RUnlock(lState)
   668  	return fbo.getFileBlockHelperLocked(
   669  		ctx, lState, kmd, ptr, branch, p, data.BlockRead)
   670  }
   671  
   672  // GetDirBlockForReading retrieves the block pointed to by ptr, which
   673  // must be valid, either from the cache or from the server. An error
   674  // is returned if the retrieved block is not a dir block.
   675  //
   676  // This should be called for "internal" operations, like conflict
   677  // resolution and state checking. "Real" operations should use
   678  // getDirLocked() instead.
   679  //
   680  // p is used only when reporting errors, and can be empty.
   681  func (fbo *folderBlockOps) GetDirBlockForReading(ctx context.Context,
   682  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
   683  	branch data.BranchName, p data.Path) (*data.DirBlock, error) {
   684  	fbo.blockLock.RLock(lState)
   685  	defer fbo.blockLock.RUnlock(lState)
   686  	return fbo.getDirBlockHelperLocked(
   687  		ctx, lState, kmd, ptr, branch, p, data.BlockRead)
   688  }
   689  
   690  // getFileBlockLocked retrieves the block pointed to by ptr, which
   691  // must be valid, either from the cache or from the server. An error
   692  // is returned if the retrieved block is not a file block.
   693  //
   694  // The given path must be valid, and the given pointer must be its
   695  // tail pointer or an indirect pointer from it. A read notification is
   696  // triggered for the given path only if the block isn't in the cache.
   697  //
   698  // This shouldn't be called for "internal" operations, like conflict
   699  // resolution and state checking -- use GetFileBlockForReading() for
   700  // those instead.
   701  //
   702  // When rtype == blockWrite and the cached version of the block is
   703  // currently clean, or the block is currently being synced, this
   704  // method makes a copy of the file block and returns it.  If this
   705  // method might be called again for the same block within a single
   706  // operation, it is the caller's responsibility to write that block
   707  // back to the cache as dirty.
   708  //
    709  // Note that blockLock must be locked for writing exactly when rtype ==
   710  // blockWrite, and must be r-locked when rtype == blockRead.  (This
   711  // differs from getDirLocked.)  This is because a write operation
   712  // (like write, truncate and sync which lock blockLock) fetching a
   713  // file block will almost always need to modify that block, and so
   714  // will pass in blockWrite.  If rtype == blockReadParallel, it's
   715  // assumed that some coordinating goroutine is holding the correct
   716  // locks, and in that case `lState` must be `nil`.
   717  //
   718  // file is used only when reporting errors and sending read
   719  // notifications, and can be empty except that file.Branch must be set
   720  // correctly.
   721  //
   722  // This method also returns whether the block was already dirty.
   723  func (fbo *folderBlockOps) getFileBlockLocked(ctx context.Context,
   724  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer,
   725  	file data.Path, rtype data.BlockReqType) (
   726  	fblock *data.FileBlock, wasDirty bool, err error) {
   727  	switch rtype {
   728  	case data.BlockRead:
   729  		fbo.blockLock.AssertRLocked(lState)
   730  	case data.BlockWrite:
   731  		fbo.blockLock.AssertLocked(lState)
   732  	case data.BlockReadParallel:
   733  		// This goroutine might not be the official lock holder, so
   734  		// don't make any assertions.
   735  		if lState != nil {
   736  			panic("Non-nil lState passed to getFileBlockLocked " +
   737  				"with blockReadParallel")
   738  		}
   739  	case data.BlockLookup:
   740  		panic("blockLookup should only be used for directory blocks")
   741  	default:
   742  		panic(fmt.Sprintf("Unknown block req type: %d", rtype))
   743  	}
   744  
   745  	fblock, err = fbo.getFileBlockHelperLocked(
   746  		ctx, lState, kmd, ptr, file.Branch, file, rtype)
   747  	if err != nil {
   748  		return nil, false, err
   749  	}
   750  
   751  	wasDirty = fbo.config.DirtyBlockCache().IsDirty(fbo.id(), ptr, file.Branch)
   752  	if rtype == data.BlockWrite {
   753  		// Copy the block if it's for writing, and either the
   754  		// block is not yet dirty or the block is currently
   755  		// being sync'd and needs a copy even though it's
   756  		// already dirty.
   757  		df := fbo.dirtyFiles[file.TailPointer()]
   758  		if !wasDirty || (df != nil && df.BlockNeedsCopy(ptr)) {
   759  			fblock = fblock.DeepCopy()
   760  		}
   761  	}
   762  	return fblock, wasDirty, nil
   763  }
   764  
    765  // getFileLocked is getFileBlockLocked called with file.TailPointer().
   766  func (fbo *folderBlockOps) getFileLocked(ctx context.Context,
   767  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path,
   768  	rtype data.BlockReqType) (*data.FileBlock, error) {
   769  	// Callers should have already done this check, but it doesn't
   770  	// hurt to do it again.
   771  	if !file.IsValid() {
   772  		return nil, errors.WithStack(InvalidPathError{file})
   773  	}
   774  	fblock, _, err := fbo.getFileBlockLocked(
   775  		ctx, lState, kmd, file.TailPointer(), file, rtype)
   776  	return fblock, err
   777  }
   778  
   779  func (fbo *folderBlockOps) getIndirectFileBlockInfosLocked(
   780  	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
   781  	file data.Path) ([]data.BlockInfo, error) {
   782  	fbo.blockLock.AssertRLocked(lState)
   783  	var id keybase1.UserOrTeamID // Data reads don't depend on the id.
   784  	fd := fbo.newFileData(lState, file, id, kmd)
   785  	return fd.GetIndirectFileBlockInfos(ctx)
   786  }
   787  
   788  // GetIndirectFileBlockInfos returns a list of BlockInfos for all
   789  // indirect blocks of the given file. If the returned error is a
   790  // recoverable one (as determined by
   791  // isRecoverableBlockErrorForRemoval), the returned list may still be
   792  // non-empty, and holds all the BlockInfos for all found indirect
   793  // blocks.
   794  func (fbo *folderBlockOps) GetIndirectFileBlockInfos(ctx context.Context,
   795  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path) (
   796  	[]data.BlockInfo, error) {
   797  	fbo.blockLock.RLock(lState)
   798  	defer fbo.blockLock.RUnlock(lState)
   799  	return fbo.getIndirectFileBlockInfosLocked(ctx, lState, kmd, file)
   800  }
   801  
   802  // GetIndirectDirBlockInfos returns a list of BlockInfos for all
   803  // indirect blocks of the given directory. If the returned error is a
   804  // recoverable one (as determined by
   805  // isRecoverableBlockErrorForRemoval), the returned list may still be
   806  // non-empty, and holds all the BlockInfos for all found indirect
   807  // blocks.
   808  func (fbo *folderBlockOps) GetIndirectDirBlockInfos(
   809  	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
   810  	dir data.Path) ([]data.BlockInfo, error) {
   811  	fbo.blockLock.RLock(lState)
   812  	defer fbo.blockLock.RUnlock(lState)
   813  	var id keybase1.UserOrTeamID // Data reads don't depend on the id.
   814  	fd := fbo.newDirDataLocked(lState, dir, id, kmd)
   815  	return fd.GetIndirectDirBlockInfos(ctx)
   816  }
   817  
   818  // GetIndirectFileBlockInfosWithTopBlock returns a list of BlockInfos
   819  // for all indirect blocks of the given file, starting from the given
   820  // top-most block. If the returned error is a recoverable one (as
   821  // determined by isRecoverableBlockErrorForRemoval), the returned list
   822  // may still be non-empty, and holds all the BlockInfos for all found
   823  // indirect blocks. (This will be relevant when we handle multiple
   824  // levels of indirection.)
   825  func (fbo *folderBlockOps) GetIndirectFileBlockInfosWithTopBlock(
   826  	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path,
   827  	topBlock *data.FileBlock) (
   828  	[]data.BlockInfo, error) {
   829  	fbo.blockLock.RLock(lState)
   830  	defer fbo.blockLock.RUnlock(lState)
   831  	var id keybase1.UserOrTeamID // Data reads don't depend on the id.
   832  	fd := fbo.newFileData(lState, file, id, kmd)
   833  	return fd.GetIndirectFileBlockInfosWithTopBlock(ctx, topBlock)
   834  }
   835  
   836  func (fbo *folderBlockOps) getChargedToLocked(
   837  	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata) (
   838  	keybase1.UserOrTeamID, error) {
   839  	fbo.blockLock.AssertAnyLocked(lState)
   840  	if !fbo.chargedTo.IsNil() {
   841  		return fbo.chargedTo, nil
   842  	}
   843  	chargedTo, err := chargedToForTLF(
   844  		ctx, fbo.config.KBPKI(), fbo.config.KBPKI(), fbo.config,
   845  		kmd.GetTlfHandle())
   846  	if err != nil {
   847  		return keybase1.UserOrTeamID(""), err
   848  	}
   849  	fbo.chargedTo = chargedTo
   850  	return chargedTo, nil
   851  }
   852  
   853  // ClearChargedTo clears out the cached chargedTo UID for this FBO.
   854  func (fbo *folderBlockOps) ClearChargedTo(lState *kbfssync.LockState) {
   855  	fbo.blockLock.Lock(lState)
   856  	defer fbo.blockLock.Unlock(lState)
   857  	fbo.chargedTo = keybase1.UserOrTeamID("")
   858  }
   859  
    860  // deepCopyFileLocked makes a complete copy of the given file, deduping leaf
   861  // blocks and making new random BlockPointers for all indirect blocks.
   862  // It returns the new top pointer of the copy, and all the new child
   863  // pointers in the copy.  It takes a custom DirtyBlockCache, which
   864  // directs where the resulting block copies are stored.
   865  func (fbo *folderBlockOps) deepCopyFileLocked(
   866  	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path,
   867  	dirtyBcache data.DirtyBlockCacheSimple, dataVer data.Ver) (
   868  	newTopPtr data.BlockPointer, allChildPtrs []data.BlockPointer, err error) {
    869  	// Deep copying doesn't alter any data in use; it only makes a copy,
   870  	// so only a read lock is needed.
   871  	fbo.blockLock.AssertRLocked(lState)
   872  	chargedTo, err := chargedToForTLF(
   873  		ctx, fbo.config.KBPKI(), fbo.config.KBPKI(), fbo.config,
   874  		kmd.GetTlfHandle())
   875  	if err != nil {
   876  		return data.BlockPointer{}, nil, err
   877  	}
   878  	fd := fbo.newFileDataWithCache(
   879  		lState, file, chargedTo, kmd, dirtyBcache)
   880  	return fd.DeepCopy(ctx, dataVer)
   881  }
   882  
   883  func (fbo *folderBlockOps) cacheHashBehavior() data.BlockCacheHashBehavior {
   884  	return cacheHashBehavior(fbo.config, fbo.config, fbo.id())
   885  }
   886  
   887  func (fbo *folderBlockOps) UndupChildrenInCopy(ctx context.Context,
   888  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path, bps blockPutState,
   889  	dirtyBcache data.DirtyBlockCacheSimple, topBlock *data.FileBlock) (
   890  	[]data.BlockInfo, error) {
   891  	fbo.blockLock.Lock(lState)
   892  	defer fbo.blockLock.Unlock(lState)
   893  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
   894  	if err != nil {
   895  		return nil, err
   896  	}
   897  	fd := fbo.newFileDataWithCache(
   898  		lState, file, chargedTo, kmd, dirtyBcache)
   899  	return fd.UndupChildrenInCopy(ctx, fbo.config.BlockCache(),
   900  		fbo.config.BlockOps(), bps, topBlock, fbo.cacheHashBehavior())
   901  }
   902  
   903  func (fbo *folderBlockOps) ReadyNonLeafBlocksInCopy(ctx context.Context,
   904  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, file data.Path, bps blockPutState,
   905  	dirtyBcache data.DirtyBlockCacheSimple, topBlock *data.FileBlock) (
   906  	[]data.BlockInfo, error) {
   907  	fbo.blockLock.RLock(lState)
   908  	defer fbo.blockLock.RUnlock(lState)
   909  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
   910  	if err != nil {
   911  		return nil, err
   912  	}
   913  
   914  	fd := fbo.newFileDataWithCache(
   915  		lState, file, chargedTo, kmd, dirtyBcache)
   916  	return fd.ReadyNonLeafBlocksInCopy(ctx, fbo.config.BlockCache(),
   917  		fbo.config.BlockOps(), bps, topBlock, fbo.cacheHashBehavior())
   918  }
   919  
   920  // getDirLocked retrieves the block pointed to by the tail pointer of
   921  // the given path, which must be valid, either from the cache or from
   922  // the server. An error is returned if the retrieved block is not a
   923  // dir block.
   924  //
   925  // This shouldn't be called for "internal" operations, like conflict
   926  // resolution and state checking -- use GetDirBlockForReading() for
   927  // those instead.
   928  //
   929  // When rtype == blockWrite and the cached version of the block is
   930  // currently clean, this method makes a copy of the directory block
   931  // and returns it.  If this method might be called again for the same
   932  // block within a single operation, it is the caller's responsibility
   933  // to write that block back to the cache as dirty.
   934  //
    935  // Note that blockLock must be either r-locked or locked,
   936  // independently of rtype. (This differs from getFileLocked and
   937  // getFileBlockLocked.) File write operations (which lock blockLock)
   938  // don't need a copy of parent dir blocks, and non-file write
   939  // operations do need to copy dir blocks for modifications.
   940  func (fbo *folderBlockOps) getDirLocked(ctx context.Context,
   941  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, ptr data.BlockPointer, dir data.Path,
   942  	rtype data.BlockReqType) (*data.DirBlock, bool, error) {
   943  	switch rtype {
   944  	case data.BlockRead, data.BlockWrite, data.BlockLookup:
   945  		fbo.blockLock.AssertAnyLocked(lState)
   946  	case data.BlockReadParallel:
   947  		// This goroutine might not be the official lock holder, so
   948  		// don't make any assertions.
   949  		if lState != nil {
   950  			panic("Non-nil lState passed to getFileBlockLocked " +
   951  				"with blockReadParallel")
   952  		}
   953  	default:
   954  		panic(fmt.Sprintf("Unknown block req type: %d", rtype))
   955  	}
   956  
   957  	// Callers should have already done this check, but it doesn't
   958  	// hurt to do it again.
   959  	if !dir.IsValid() {
   960  		return nil, false, errors.WithStack(InvalidPathError{dir})
   961  	}
   962  
   963  	// Get the block for the last element in the path.
   964  	dblock, err := fbo.getDirBlockHelperLocked(
   965  		ctx, lState, kmd, ptr, dir.Branch, dir, rtype)
   966  	if err != nil {
   967  		return nil, false, err
   968  	}
   969  
   970  	wasDirty := fbo.config.DirtyBlockCache().IsDirty(fbo.id(), ptr, dir.Branch)
   971  	if rtype == data.BlockWrite && !wasDirty {
   972  		// Copy the block if it's for writing and the block is
   973  		// not yet dirty.
   974  		dblock = dblock.DeepCopy()
   975  	}
   976  	return dblock, wasDirty, nil
   977  }
   978  
   979  // GetDir retrieves the block pointed to by the tail pointer of the
   980  // given path, which must be valid, either from the cache or from the
   981  // server. An error is returned if the retrieved block is not a dir
   982  // block.
   983  //
   984  // This shouldn't be called for "internal" operations, like conflict
   985  // resolution and state checking -- use GetDirBlockForReading() for
   986  // those instead.
   987  //
   988  // When rtype == blockWrite and the cached version of the block is
   989  // currently clean, this method makes a copy of the directory block
   990  // and returns it.  If this method might be called again for the same
   991  // block within a single operation, it is the caller's responsibility
   992  // to write that block back to the cache as dirty.
   993  func (fbo *folderBlockOps) GetDir(
   994  	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, dir data.Path,
   995  	rtype data.BlockReqType) (*data.DirBlock, error) {
   996  	fbo.blockLock.RLock(lState)
   997  	defer fbo.blockLock.RUnlock(lState)
   998  	dblock, _, err := fbo.getDirLocked(
   999  		ctx, lState, kmd, dir.TailPointer(), dir, rtype)
  1000  	return dblock, err
  1001  }
  1002  
  1003  type dirCacheUndoFn func(lState *kbfssync.LockState)
  1004  
  1005  func (fbo *folderBlockOps) wrapWithBlockLock(fn func()) dirCacheUndoFn {
  1006  	return func(lState *kbfssync.LockState) {
  1007  		if fn == nil {
  1008  			return
  1009  		}
  1010  		fbo.blockLock.Lock(lState)
  1011  		defer fbo.blockLock.Unlock(lState)
  1012  		fn()
  1013  	}
  1014  }
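
         // wrapWithBlockLock lets undo closures built under blockLock be
         // invoked later without the lock held.  A usage sketch (the failure
         // condition is hypothetical):
         //
         //	undo, err := fbo.AddDirEntryInCache(ctx, lState, kmd, dir, name, de)
         //	if err != nil {
         //		return err
         //	}
         //	if mdUpdateFailed {
         //		undo(lState) // re-acquires blockLock and reverts the cached entry
         //	}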
  1015  
  1016  func (fbo *folderBlockOps) newDirDataLocked(lState *kbfssync.LockState,
  1017  	dir data.Path, chargedTo keybase1.UserOrTeamID, kmd libkey.KeyMetadata) *data.DirData {
  1018  	fbo.blockLock.AssertAnyLocked(lState)
  1019  	return data.NewDirData(dir, chargedTo, fbo.config.BlockSplitter(), kmd,
  1020  		func(ctx context.Context, kmd libkey.KeyMetadata, ptr data.BlockPointer,
  1021  			dir data.Path, rtype data.BlockReqType) (*data.DirBlock, bool, error) {
  1022  			lState := lState
  1023  			if rtype == data.BlockReadParallel {
  1024  				lState = nil
  1025  			}
  1026  			return fbo.getDirLocked(
  1027  				ctx, lState, kmd, ptr, dir, rtype)
  1028  		},
  1029  		func(ctx context.Context, ptr data.BlockPointer, block data.Block) error {
  1030  			return fbo.config.DirtyBlockCache().Put(
  1031  				ctx, fbo.id(), ptr, dir.Branch, block)
  1032  		}, fbo.log, fbo.vlog)
  1033  }
  1034  
  1035  // newDirDataWithDBMLocked creates a new `dirData` that reads from and
  1036  // puts into a local dir block cache.  If it reads a block out from
  1037  // anything but the `dbm`, it makes a copy of it before inserting it
  1038  // into the `dbm`.
  1039  func (fbo *folderBlockOps) newDirDataWithDBMLocked(lState *kbfssync.LockState,
  1040  	dir data.Path, chargedTo keybase1.UserOrTeamID, kmd libkey.KeyMetadata,
  1041  	dbm dirBlockMap) *data.DirData {
  1042  	fbo.blockLock.AssertRLocked(lState)
  1043  	return data.NewDirData(dir, chargedTo, fbo.config.BlockSplitter(), kmd,
  1044  		func(ctx context.Context, kmd libkey.KeyMetadata, ptr data.BlockPointer,
  1045  			dir data.Path, rtype data.BlockReqType) (*data.DirBlock, bool, error) {
  1046  			hasBlock, err := dbm.hasBlock(ctx, ptr)
  1047  			if err != nil {
  1048  				return nil, false, err
  1049  			}
  1050  			if hasBlock {
  1051  				block, err := dbm.getBlock(ctx, ptr)
  1052  				if err != nil {
  1053  					return nil, false, err
  1054  				}
  1055  				return block, true, nil
  1056  			}
  1057  
  1058  			localLState := lState
  1059  			getRtype := rtype
  1060  			switch rtype {
  1061  			case data.BlockReadParallel:
  1062  				localLState = nil
  1063  			case data.BlockWrite:
  1064  				getRtype = data.BlockRead
  1065  			}
  1066  
  1067  			block, wasDirty, err := fbo.getDirLocked(
  1068  				ctx, localLState, kmd, ptr, dir, getRtype)
  1069  			if err != nil {
  1070  				return nil, false, err
  1071  			}
  1072  
  1073  			if rtype == data.BlockWrite {
  1074  				// Make a copy before we stick it in the local block cache.
  1075  				block = block.DeepCopy()
  1076  				err = dbm.putBlock(ctx, ptr, block)
  1077  				if err != nil {
  1078  					return nil, false, err
  1079  				}
  1080  			}
  1081  			return block, wasDirty, nil
  1082  		},
  1083  		func(ctx context.Context, ptr data.BlockPointer, block data.Block) error {
  1084  			return dbm.putBlock(ctx, ptr, block.(*data.DirBlock))
  1085  		}, fbo.log, fbo.vlog)
  1086  }
  1087  
  1088  // newDirDataWithDBM is like `newDirDataWithDBMLocked`, but it must be
  1089  // called with `blockLock` unlocked, and the returned function must be
  1090  // called when the returned `dirData` is no longer in use.
  1091  func (fbo *folderBlockOps) newDirDataWithDBM(
  1092  	lState *kbfssync.LockState, dir data.Path, chargedTo keybase1.UserOrTeamID,
  1093  	kmd libkey.KeyMetadata, dbm dirBlockMap) (*data.DirData, func()) {
   1094  	// Lock and fetch for reading only; we want any dirty
  1095  	// blocks to go into the dbm.
  1096  	fbo.blockLock.RLock(lState)
  1097  	cleanupFn := func() { fbo.blockLock.RUnlock(lState) }
  1098  	return fbo.newDirDataWithDBMLocked(lState, dir, chargedTo, kmd, dbm),
  1099  		cleanupFn
  1100  }
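
         // The returned cleanup function releases the read lock, so a typical
         // caller defers it for the lifetime of the dirData (a sketch with
         // assumed arguments):
         //
         //	dd, cleanup := fbo.newDirDataWithDBM(lState, dir, chargedTo, kmd, dbm)
         //	defer cleanup()
         //	de, err := dd.Lookup(ctx, name)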
  1101  
  1102  func (fbo *folderBlockOps) makeDirDirtyLocked(
  1103  	lState *kbfssync.LockState, ptr data.BlockPointer, unrefs []data.BlockInfo) func() {
  1104  	fbo.blockLock.AssertLocked(lState)
  1105  	oldUnrefs, wasDirty := fbo.dirtyDirs[ptr]
  1106  	oldLen := len(oldUnrefs)
  1107  	fbo.dirtyDirs[ptr] = append(oldUnrefs, unrefs...)
  1108  	return func() {
  1109  		dirtyBcache := fbo.config.DirtyBlockCache()
  1110  		if wasDirty {
  1111  			fbo.dirtyDirs[ptr] = oldUnrefs[:oldLen:oldLen]
  1112  		} else {
  1113  			_ = dirtyBcache.Delete(fbo.id(), ptr, fbo.branch())
  1114  			delete(fbo.dirtyDirs, ptr)
  1115  		}
  1116  		for _, unref := range unrefs {
  1117  			_ = dirtyBcache.Delete(fbo.id(), unref.BlockPointer, fbo.branch())
  1118  		}
  1119  	}
  1120  }
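
         // A note on the undo closure above (not from the original source):
         // the full slice expression oldUnrefs[:oldLen:oldLen] restores both
         // the old length and the old capacity, so a later append to
         // fbo.dirtyDirs[ptr] must reallocate and cannot clobber entries that
         // were added after the snapshot was taken.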
  1121  
  1122  func (fbo *folderBlockOps) updateParentDirEntryLocked(
  1123  	ctx context.Context, lState *kbfssync.LockState, dir data.Path,
  1124  	kmd KeyMetadataWithRootDirEntry, setMtime, setCtime bool) (func(), error) {
  1125  	fbo.blockLock.AssertLocked(lState)
  1126  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
  1127  	if err != nil {
  1128  		return nil, err
  1129  	}
  1130  	now := fbo.nowUnixNano()
  1131  	pp := *dir.ParentPath()
  1132  	if pp.IsValid() {
  1133  		dd := fbo.newDirDataLocked(lState, pp, chargedTo, kmd)
  1134  		de, err := dd.Lookup(ctx, dir.TailName())
  1135  		if err != nil {
  1136  			return nil, err
  1137  		}
  1138  		newDe := de
  1139  		if setMtime {
  1140  			newDe.Mtime = now
  1141  		}
  1142  		if setCtime {
  1143  			newDe.Ctime = now
  1144  		}
  1145  		unrefs, err := dd.UpdateEntry(ctx, dir.TailName(), newDe)
  1146  		if err != nil {
  1147  			return nil, err
  1148  		}
  1149  		undoDirtyFn := fbo.makeDirDirtyLocked(lState, pp.TailPointer(), unrefs)
  1150  		return func() {
  1151  			_, _ = dd.UpdateEntry(ctx, dir.TailName(), de)
  1152  			undoDirtyFn()
  1153  		}, nil
  1154  	}
  1155  
  1156  	// If the parent isn't a valid path, we need to update the root entry.
  1157  	var de *data.DirEntry
  1158  	if fbo.dirtyRootDirEntry == nil {
  1159  		deCopy := kmd.GetRootDirEntry()
  1160  		fbo.dirtyRootDirEntry = &deCopy
  1161  	} else {
  1162  		deCopy := *fbo.dirtyRootDirEntry
  1163  		de = &deCopy
  1164  	}
  1165  	if setMtime {
  1166  		fbo.dirtyRootDirEntry.Mtime = now
  1167  	}
  1168  	if setCtime {
  1169  		fbo.dirtyRootDirEntry.Ctime = now
  1170  	}
  1171  	return func() {
  1172  		fbo.dirtyRootDirEntry = de
  1173  	}, nil
  1174  }
  1175  
  1176  func (fbo *folderBlockOps) addDirEntryInCacheLocked(
  1177  	ctx context.Context, lState *kbfssync.LockState,
  1178  	kmd KeyMetadataWithRootDirEntry, dir data.Path, newName data.PathPartString,
  1179  	newDe data.DirEntry) (func(), error) {
  1180  	fbo.blockLock.AssertLocked(lState)
  1181  
  1182  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
  1183  	if err != nil {
  1184  		return nil, err
  1185  	}
  1186  	dd := fbo.newDirDataLocked(lState, dir, chargedTo, kmd)
  1187  	unrefs, err := dd.AddEntry(ctx, newName, newDe)
  1188  	if err != nil {
  1189  		return nil, err
  1190  	}
  1191  	parentUndo, err := fbo.updateParentDirEntryLocked(
  1192  		ctx, lState, dir, kmd, true, true)
  1193  	if err != nil {
  1194  		_, _ = dd.RemoveEntry(ctx, newName)
  1195  		return nil, err
  1196  	}
  1197  
  1198  	undoDirtyFn := fbo.makeDirDirtyLocked(lState, dir.TailPointer(), unrefs)
  1199  	return func() {
  1200  		_, _ = dd.RemoveEntry(ctx, newName)
  1201  		undoDirtyFn()
  1202  		parentUndo()
  1203  	}, nil
  1204  }
  1205  
  1206  // AddDirEntryInCache adds a brand new entry to the given directory
  1207  // and updates the directory's own mtime and ctime.  It returns a
  1208  // function that can be called if the change needs to be undone.
  1209  func (fbo *folderBlockOps) AddDirEntryInCache(
  1210  	ctx context.Context, lState *kbfssync.LockState,
  1211  	kmd KeyMetadataWithRootDirEntry, dir data.Path, newName data.PathPartString,
  1212  	newDe data.DirEntry) (dirCacheUndoFn, error) {
  1213  	fbo.blockLock.Lock(lState)
  1214  	defer fbo.blockLock.Unlock(lState)
  1215  	fn, err := fbo.addDirEntryInCacheLocked(
  1216  		ctx, lState, kmd, dir, newName, newDe)
  1217  	if err != nil {
  1218  		return nil, err
  1219  	}
  1220  	return fbo.wrapWithBlockLock(fn), nil
  1221  }
  1222  
  1223  func (fbo *folderBlockOps) removeDirEntryInCacheLocked(
  1224  	ctx context.Context, lState *kbfssync.LockState,
  1225  	kmd KeyMetadataWithRootDirEntry, dir data.Path, oldName data.PathPartString,
  1226  	oldDe data.DirEntry) (func(), error) {
  1227  	fbo.blockLock.AssertLocked(lState)
  1228  
  1229  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
  1230  	if err != nil {
  1231  		return nil, err
  1232  	}
  1233  	dd := fbo.newDirDataLocked(lState, dir, chargedTo, kmd)
  1234  	unrefs, err := dd.RemoveEntry(ctx, oldName)
  1235  	if err != nil {
  1236  		return nil, err
  1237  	}
  1238  	if oldDe.Type == data.Dir {
  1239  		// The parent dir inherits any dirty unrefs from the removed
  1240  		// directory.
  1241  		if childUnrefs, ok := fbo.dirtyDirs[oldDe.BlockPointer]; ok {
  1242  			unrefs = append(unrefs, childUnrefs...)
  1243  		}
  1244  	}
  1245  
  1246  	unlinkUndoFn := fbo.nodeCache.Unlink(
  1247  		oldDe.Ref(), dir.ChildPath(
  1248  			oldName, oldDe.BlockPointer, fbo.nodeCache.ObfuscatorMaker()()),
  1249  		oldDe)
  1250  
  1251  	parentUndo, err := fbo.updateParentDirEntryLocked(
  1252  		ctx, lState, dir, kmd, true, true)
  1253  	if err != nil {
  1254  		if unlinkUndoFn != nil {
  1255  			unlinkUndoFn()
  1256  		}
  1257  		_, _ = dd.AddEntry(ctx, oldName, oldDe)
  1258  		return nil, err
  1259  	}
  1260  
  1261  	undoDirtyFn := fbo.makeDirDirtyLocked(lState, dir.TailPointer(), unrefs)
  1262  	return func() {
  1263  		_, _ = dd.AddEntry(ctx, oldName, oldDe)
  1264  		if undoDirtyFn != nil {
  1265  			undoDirtyFn()
  1266  		}
  1267  		if parentUndo != nil {
  1268  			parentUndo()
  1269  		}
  1270  		if unlinkUndoFn != nil {
  1271  			unlinkUndoFn()
  1272  		}
  1273  	}, nil
  1274  }
  1275  
   1276  // RemoveDirEntryInCache removes an entry from the given directory
  1277  // and updates the directory's own mtime and ctime.  It returns a
  1278  // function that can be called if the change needs to be undone.
  1279  func (fbo *folderBlockOps) RemoveDirEntryInCache(
  1280  	ctx context.Context, lState *kbfssync.LockState,
  1281  	kmd KeyMetadataWithRootDirEntry, dir data.Path, oldName data.PathPartString,
  1282  	oldDe data.DirEntry) (dirCacheUndoFn, error) {
  1283  	fbo.blockLock.Lock(lState)
  1284  	defer fbo.blockLock.Unlock(lState)
  1285  	fn, err := fbo.removeDirEntryInCacheLocked(
  1286  		ctx, lState, kmd, dir, oldName, oldDe)
  1287  	if err != nil {
  1288  		return nil, err
  1289  	}
  1290  	return fbo.wrapWithBlockLock(fn), nil
  1291  }
  1292  
  1293  // RenameDirEntryInCache updates the entries of both the old and new
  1294  // parent dirs for the given target dir atomically (with respect to
  1295  // blockLock).  It also updates the cache entry for the target, which
  1296  // would have its Ctime changed. The updates will get applied to the
  1297  // dirty blocks on subsequent fetches.
  1298  //
   1299  // The returned undo function can be called to roll back the cached
   1300  // changes when the effects of the operation are no longer needed.
   1301  // (It may be nil if the rename was a no-op.)
  1302  func (fbo *folderBlockOps) RenameDirEntryInCache(
  1303  	ctx context.Context, lState *kbfssync.LockState,
  1304  	kmd KeyMetadataWithRootDirEntry, oldParent data.Path,
  1305  	oldName data.PathPartString, newParent data.Path,
  1306  	newName data.PathPartString, newDe data.DirEntry,
  1307  	replacedDe data.DirEntry) (undo dirCacheUndoFn, err error) {
  1308  	fbo.blockLock.Lock(lState)
  1309  	defer fbo.blockLock.Unlock(lState)
  1310  	if newParent.TailPointer() == oldParent.TailPointer() &&
  1311  		oldName == newName {
  1312  		// Noop
  1313  		return nil, nil
  1314  	}
  1315  
  1316  	var undoReplace func()
  1317  	if replacedDe.IsInitialized() {
  1318  		undoReplace, err = fbo.removeDirEntryInCacheLocked(
  1319  			ctx, lState, kmd, newParent, newName, replacedDe)
  1320  		if err != nil {
  1321  			return nil, err
  1322  		}
  1323  	}
  1324  	defer func() {
  1325  		if err != nil && undoReplace != nil {
  1326  			undoReplace()
  1327  		}
  1328  	}()
  1329  
  1330  	undoAdd, err := fbo.addDirEntryInCacheLocked(
  1331  		ctx, lState, kmd, newParent, newName, newDe)
  1332  	if err != nil {
  1333  		return nil, err
  1334  	}
  1335  	defer func() {
  1336  		if err != nil && undoAdd != nil {
  1337  			undoAdd()
  1338  		}
  1339  	}()
  1340  
  1341  	undoRm, err := fbo.removeDirEntryInCacheLocked(
  1342  		ctx, lState, kmd, oldParent, oldName, data.DirEntry{})
  1343  	if err != nil {
  1344  		return nil, err
  1345  	}
  1346  	defer func() {
  1347  		if err != nil && undoRm != nil {
  1348  			undoRm()
  1349  		}
  1350  	}()
  1351  
  1352  	newParentNode := fbo.nodeCache.Get(newParent.TailRef())
  1353  	undoMove, err := fbo.nodeCache.Move(newDe.Ref(), newParentNode, newName)
  1354  	if err != nil {
  1355  		return nil, err
  1356  	}
  1357  
  1358  	return fbo.wrapWithBlockLock(func() {
  1359  		if undoMove != nil {
  1360  			undoMove()
  1361  		}
  1362  		if undoRm != nil {
  1363  			undoRm()
  1364  		}
  1365  		if undoAdd != nil {
  1366  			undoAdd()
  1367  		}
  1368  		if undoReplace != nil {
  1369  			undoReplace()
  1370  		}
  1371  	}), nil
  1372  }
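
        // Note that the composite undo returned above runs the individual
        // undo steps in reverse (LIFO) order -- move, then remove, then add,
        // then replace -- mirroring the order in which the steps were
        // applied, so each undo observes the state its counterpart produced.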
  1373  
  1374  func (fbo *folderBlockOps) setCachedAttrLocked(
  1375  	ctx context.Context, lState *kbfssync.LockState,
  1376  	kmd KeyMetadataWithRootDirEntry, dir data.Path, name data.PathPartString,
  1377  	attr attrChange, realEntry data.DirEntry) (dirCacheUndoFn, error) {
  1378  	fbo.blockLock.AssertLocked(lState)
  1379  
  1380  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
  1381  	if err != nil {
  1382  		return nil, err
  1383  	}
  1384  
  1385  	if !dir.IsValid() {
  1386  		// Can't set attrs directly on the root entry, primarily
  1387  		// because there's no way to indicate it's dirty.  TODO: allow
  1388  		// mtime-setting on the root dir?
  1389  		return nil, InvalidParentPathError{dir}
  1390  	}
  1391  	var de data.DirEntry
  1392  	var unlinkedNode Node
  1393  
  1394  	dd := fbo.newDirDataLocked(lState, dir, chargedTo, kmd)
  1395  	de, err = dd.Lookup(ctx, name)
  1396  	if _, noExist := errors.Cause(err).(idutil.NoSuchNameError); noExist {
  1397  		// The node may be unlinked.
  1398  		unlinkedNode = fbo.nodeCache.Get(realEntry.Ref())
  1399  		if unlinkedNode != nil && !fbo.nodeCache.IsUnlinked(unlinkedNode) {
  1400  			unlinkedNode = nil
  1401  		}
  1402  		if unlinkedNode != nil {
  1403  			de = fbo.nodeCache.UnlinkedDirEntry(unlinkedNode)
  1404  		} else {
  1405  			return nil, err
  1406  		}
  1407  	} else if err != nil {
  1408  		return nil, err
  1409  	}
  1410  
  1411  	oldDe := de
  1412  	switch attr {
  1413  	case exAttr:
  1414  		de.Type = realEntry.Type
  1415  	case mtimeAttr:
  1416  		de.Mtime = realEntry.Mtime
  1417  	}
  1418  	de.Ctime = realEntry.Ctime
  1419  
  1420  	var undoDirtyFn func()
  1421  	if unlinkedNode != nil {
  1422  		fbo.nodeCache.UpdateUnlinkedDirEntry(unlinkedNode, de)
  1423  	} else {
  1424  		unrefs, err := dd.UpdateEntry(ctx, name, de)
  1425  		if err != nil {
  1426  			return nil, err
  1427  		}
  1428  		undoDirtyFn = fbo.makeDirDirtyLocked(lState, dir.TailPointer(), unrefs)
  1429  	}
  1430  
  1431  	return fbo.wrapWithBlockLock(func() {
  1432  		if unlinkedNode != nil {
  1433  			fbo.nodeCache.UpdateUnlinkedDirEntry(unlinkedNode, oldDe)
  1434  		} else {
  1435  			_, _ = dd.UpdateEntry(ctx, name, oldDe)
  1436  			undoDirtyFn()
  1437  		}
  1438  	}), nil
  1439  }
  1440  
  1441  // SetAttrInDirEntryInCache updates the entry for the given path in its parent dir.
  1442  func (fbo *folderBlockOps) SetAttrInDirEntryInCache(
  1443  	ctx context.Context, lState *kbfssync.LockState,
  1444  	kmd KeyMetadataWithRootDirEntry, p data.Path, newDe data.DirEntry,
  1445  	attr attrChange) (dirCacheUndoFn, error) {
  1446  	fbo.blockLock.Lock(lState)
  1447  	defer fbo.blockLock.Unlock(lState)
  1448  	return fbo.setCachedAttrLocked(
  1449  		ctx, lState, kmd, *p.ParentPath(), p.TailName(), attr, newDe)
  1450  }
  1451  
  1452  // getDirtyDirLocked returns the possibly-dirty block of the given
  1453  // directory, via getDirLocked. Note that a dirty dir means that it
  1454  // has entries possibly pointing to dirty files, and/or that its
  1455  // children list is dirty.
  1456  func (fbo *folderBlockOps) getDirtyDirLocked(ctx context.Context,
  1457  	lState *kbfssync.LockState, kmd libkey.KeyMetadata, dir data.Path, rtype data.BlockReqType) (
  1458  	*data.DirBlock, error) {
  1459  	fbo.blockLock.AssertAnyLocked(lState)
  1460  
  1461  	dblock, _, err := fbo.getDirLocked(
  1462  		ctx, lState, kmd, dir.TailPointer(), dir, rtype)
  1463  	if err != nil {
  1464  		return nil, err
  1465  	}
  1466  	return dblock, nil
  1467  }
  1468  
  1469  // GetDirtyDirCopy returns a deep copy of the directory block for a
  1470  // dirty directory, while under lock, updated with all cached dirty
  1471  // entries.
  1472  func (fbo *folderBlockOps) GetDirtyDirCopy(
  1473  	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, dir data.Path,
  1474  	rtype data.BlockReqType) (*data.DirBlock, error) {
  1475  	fbo.blockLock.RLock(lState)
  1476  	defer fbo.blockLock.RUnlock(lState)
  1477  	dblock, err := fbo.getDirtyDirLocked(ctx, lState, kmd, dir, rtype)
  1478  	if err != nil {
  1479  		return nil, err
  1480  	}
  1481  	// Copy it while under lock.  Otherwise, another operation like
  1482  	// `Write` can modify it while the caller is trying to copy it,
  1483  	// leading to a panic like in KBFS-3407.
  1484  	return dblock.DeepCopy(), nil
  1485  }
  1486  
  1487  // GetChildren returns a map of EntryInfos for the (possibly dirty)
  1488  // children entries of the given directory.
  1489  func (fbo *folderBlockOps) GetChildren(
  1490  	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
  1491  	dir data.Path) (map[data.PathPartString]data.EntryInfo, error) {
  1492  	fbo.blockLock.RLock(lState)
  1493  	defer fbo.blockLock.RUnlock(lState)
  1494  	dd := fbo.newDirDataLocked(lState, dir, keybase1.UserOrTeamID(""), kmd)
  1495  	return dd.GetChildren(ctx)
  1496  }
  1497  
  1498  // GetEntries returns a map of DirEntries for the (possibly dirty)
  1499  // children entries of the given directory.
  1500  func (fbo *folderBlockOps) GetEntries(
  1501  	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
  1502  	dir data.Path) (map[data.PathPartString]data.DirEntry, error) {
  1503  	fbo.blockLock.RLock(lState)
  1504  	defer fbo.blockLock.RUnlock(lState)
  1505  	dd := fbo.newDirDataLocked(lState, dir, keybase1.UserOrTeamID(""), kmd)
  1506  	return dd.GetEntries(ctx)
  1507  }
  1508  
  1509  func (fbo *folderBlockOps) getEntryLocked(ctx context.Context,
  1510  	lState *kbfssync.LockState, kmd KeyMetadataWithRootDirEntry, file data.Path,
  1511  	includeDeleted bool) (de data.DirEntry, err error) {
  1512  	fbo.blockLock.AssertAnyLocked(lState)
  1513  
  1514  	// See if this is the root.
  1515  	if !file.HasValidParent() {
  1516  		if fbo.dirtyRootDirEntry != nil {
  1517  			return *fbo.dirtyRootDirEntry, nil
  1518  		}
  1519  		return kmd.GetRootDirEntry(), nil
  1520  	}
  1521  
  1522  	dd := fbo.newDirDataLocked(
  1523  		lState, *file.ParentPath(), keybase1.UserOrTeamID(""), kmd)
  1524  	de, err = dd.Lookup(ctx, file.TailName())
  1525  	_, noExist := errors.Cause(err).(idutil.NoSuchNameError)
  1526  	if includeDeleted && (noExist || de.BlockPointer != file.TailPointer()) {
  1527  		unlinkedNode := fbo.nodeCache.Get(file.TailPointer().Ref())
  1528  		if unlinkedNode != nil && fbo.nodeCache.IsUnlinked(unlinkedNode) {
  1529  			return fbo.nodeCache.UnlinkedDirEntry(unlinkedNode), nil
  1530  		}
  1531  		return data.DirEntry{}, err
  1532  	} else if err != nil {
  1533  		return data.DirEntry{}, err
  1534  	}
  1535  	return de, nil
  1536  }
  1537  
  1538  // file must have a valid parent.
  1539  func (fbo *folderBlockOps) updateEntryLocked(ctx context.Context,
  1540  	lState *kbfssync.LockState, kmd KeyMetadataWithRootDirEntry, file data.Path,
  1541  	de data.DirEntry, includeDeleted bool) error {
  1542  	fbo.blockLock.AssertAnyLocked(lState)
  1543  
  1544  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
  1545  	if err != nil {
  1546  		return err
  1547  	}
  1548  	parentPath := *file.ParentPath()
  1549  	dd := fbo.newDirDataLocked(lState, parentPath, chargedTo, kmd)
  1550  	unrefs, err := dd.UpdateEntry(ctx, file.TailName(), de)
  1551  	_, noExist := errors.Cause(err).(idutil.NoSuchNameError)
  1552  	switch {
  1553  	case noExist && includeDeleted:
  1554  		unlinkedNode := fbo.nodeCache.Get(file.TailPointer().Ref())
  1555  		if unlinkedNode != nil && fbo.nodeCache.IsUnlinked(unlinkedNode) {
  1556  			fbo.nodeCache.UpdateUnlinkedDirEntry(unlinkedNode, de)
  1557  			return nil
  1558  		}
  1559  		return err
  1560  	case err != nil:
  1561  		return err
  1562  	default:
  1563  		_ = fbo.makeDirDirtyLocked(lState, parentPath.TailPointer(), unrefs)
  1564  	}
  1565  
  1566  	// If we're in the middle of syncing the directories, but the
  1567  	// current file is not yet being synced, we need to re-apply this
  1568  	// update after the sync is done, so it doesn't get lost after the
  1569  	// syncing directory block is readied.  This only applies to dir
  1570  	// updates being caused by file changes; other types of dir writes
  1571  	// are protected by `folderBranchOps.syncLock`, which is held
  1572  	// during `SyncAll`.
  1573  	if fbo.dirtyDirsSyncing && !fbo.doDeferWrite {
  1574  		fbo.log.CDebugf(ctx, "Deferring update entry during sync")
  1575  		n := fbo.nodeCache.Get(file.TailRef())
  1576  		fbo.deferredDirUpdates = append(
  1577  			fbo.deferredDirUpdates, func(lState *kbfssync.LockState) error {
  1578  				file := fbo.nodeCache.PathFromNode(n)
  1579  				de.BlockPointer = file.TailPointer()
  1580  				return fbo.updateEntryLocked(
  1581  					ctx, lState, kmd, file, de, includeDeleted)
  1582  			})
  1583  	}
  1584  
  1585  	return nil
  1586  }
  1587  
  1588  // GetEntry returns the possibly-dirty DirEntry of the given file in
  1589  // its parent DirBlock. file must have a valid parent.
  1590  func (fbo *folderBlockOps) GetEntry(
  1591  	ctx context.Context, lState *kbfssync.LockState,
  1592  	kmd KeyMetadataWithRootDirEntry, file data.Path) (data.DirEntry, error) {
  1593  	fbo.blockLock.RLock(lState)
  1594  	defer fbo.blockLock.RUnlock(lState)
  1595  	return fbo.getEntryLocked(ctx, lState, kmd, file, false)
  1596  }
  1597  
  1598  // GetEntryEvenIfDeleted returns the possibly-dirty DirEntry of the
  1599  // given file in its parent DirBlock, even if the file has been
  1600  // deleted. file must have a valid parent.
  1601  func (fbo *folderBlockOps) GetEntryEvenIfDeleted(
  1602  	ctx context.Context, lState *kbfssync.LockState,
  1603  	kmd KeyMetadataWithRootDirEntry, file data.Path) (data.DirEntry, error) {
  1604  	fbo.blockLock.RLock(lState)
  1605  	defer fbo.blockLock.RUnlock(lState)
  1606  	return fbo.getEntryLocked(ctx, lState, kmd, file, true)
  1607  }
  1608  
  1609  func (fbo *folderBlockOps) getChildNodeLocked(
  1610  	lState *kbfssync.LockState, dir Node, name data.PathPartString,
  1611  	de data.DirEntry) (Node, error) {
  1612  	fbo.blockLock.AssertRLocked(lState)
  1613  
  1614  	if de.Type == data.Sym {
  1615  		return nil, nil
  1616  	}
  1617  
  1618  	return fbo.nodeCache.GetOrCreate(de.BlockPointer, name, dir, de.Type)
  1619  }
  1620  
  1621  func (fbo *folderBlockOps) GetChildNode(
  1622  	lState *kbfssync.LockState, dir Node, name data.PathPartString,
  1623  	de data.DirEntry) (Node, error) {
  1624  	fbo.blockLock.RLock(lState)
  1625  	defer fbo.blockLock.RUnlock(lState)
  1626  	return fbo.getChildNodeLocked(lState, dir, name, de)
  1627  }
  1628  
  1629  // Lookup returns the possibly-dirty DirEntry of the given file in its
  1630  // parent DirBlock, and a Node for the file if it exists.  It has to
  1631  // do all of this under the block lock to avoid races with
  1632  // UpdatePointers.
  1633  func (fbo *folderBlockOps) Lookup(
  1634  	ctx context.Context, lState *kbfssync.LockState,
  1635  	kmd KeyMetadataWithRootDirEntry, dir Node, name data.PathPartString) (
  1636  	Node, data.DirEntry, error) {
  1637  	fbo.blockLock.RLock(lState)
  1638  	defer fbo.blockLock.RUnlock(lState)
  1639  
  1640  	// Protect against non-dir nodes being passed in by mistake.
  1641  	// TODO: we should make this a more specific error probably, but
  1642  	// then we need to update some places that check for
  1643  	// `NoSuchNameError` to check for this one as well.
  1644  	if dir.EntryType() != data.Dir {
  1645  		fbo.log.CDebugf(
  1646  			ctx, "Got unexpected node type when looking up %s: %s",
  1647  			name, dir.EntryType())
  1648  		return nil, data.DirEntry{}, idutil.NoSuchNameError{Name: name.String()}
  1649  	}
  1650  
  1651  	dirPath := fbo.nodeCache.PathFromNode(dir)
  1652  	if !dirPath.IsValid() {
  1653  		return nil, data.DirEntry{}, errors.WithStack(InvalidPathError{dirPath})
  1654  	}
  1655  
  1656  	childPath := dirPath.ChildPathNoPtr(name, fbo.nodeCache.ObfuscatorMaker()())
  1657  	de, err := fbo.getEntryLocked(ctx, lState, kmd, childPath, false)
  1658  	if err != nil {
  1659  		return nil, data.DirEntry{}, err
  1660  	}
  1661  
  1662  	node, err := fbo.getChildNodeLocked(lState, dir, name, de)
  1663  	if err != nil {
  1664  		return nil, data.DirEntry{}, err
  1665  	}
  1666  	return node, de, nil
  1667  }
  1668  
  1669  func (fbo *folderBlockOps) getOrCreateDirtyFileLocked(
  1670  	lState *kbfssync.LockState, file data.Path) *data.DirtyFile {
  1671  	fbo.blockLock.AssertLocked(lState)
  1672  	ptr := file.TailPointer()
  1673  	df := fbo.dirtyFiles[ptr]
  1674  	if df == nil {
  1675  		df = data.NewDirtyFile(file, fbo.config.DirtyBlockCache())
  1676  		fbo.dirtyFiles[ptr] = df
  1677  	}
  1678  	return df
  1679  }
  1680  
  1681  // cacheBlockIfNotYetDirtyLocked puts a block into the cache, but only
  1682  // does so if the block isn't already marked as dirty in the cache.
  1683  // This is useful when operating on a dirty copy of a block that may
  1684  // already be in the cache.
  1685  func (fbo *folderBlockOps) cacheBlockIfNotYetDirtyLocked(
  1686  	ctx context.Context, lState *kbfssync.LockState, ptr data.BlockPointer,
  1687  	file data.Path, block data.Block) error {
  1688  	fbo.blockLock.AssertLocked(lState)
  1689  	df := fbo.getOrCreateDirtyFileLocked(lState, file)
  1690  	needsCaching, isSyncing := df.SetBlockDirty(ptr)
  1691  
  1692  	if needsCaching {
  1693  		err := fbo.config.DirtyBlockCache().Put(
  1694  			ctx, fbo.id(), ptr, file.Branch, block)
  1695  		if err != nil {
  1696  			return err
  1697  		}
  1698  	}
  1699  
  1700  	if isSyncing {
  1701  		fbo.doDeferWrite = true
  1702  	}
  1703  	return nil
  1704  }
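
        // Note on the isSyncing case above: if the dirtied block belongs to
        // a file that is currently being synced, fbo.doDeferWrite is set so
        // that Write and Truncate (below) know to queue a replay of the
        // operation in fbo.deferred, to be re-run against the file's new
        // path once the sync completes.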
  1705  
  1706  func (fbo *folderBlockOps) getOrCreateSyncInfoLocked(
  1707  	lState *kbfssync.LockState, de data.DirEntry) (*syncInfo, error) {
  1708  	fbo.blockLock.AssertLocked(lState)
  1709  	ref := de.Ref()
  1710  	si, ok := fbo.unrefCache[ref]
  1711  	if !ok {
  1712  		so, err := newSyncOp(de.BlockPointer)
  1713  		if err != nil {
  1714  			return nil, err
  1715  		}
  1716  		si = &syncInfo{
  1717  			oldInfo: de.BlockInfo,
  1718  			op:      so,
  1719  		}
  1720  		fbo.unrefCache[ref] = si
  1721  	}
  1722  	return si, nil
  1723  }
  1724  
  1725  // GetDirtyFileBlockRefs returns a list of references of all known dirty
  1726  // files.
  1727  func (fbo *folderBlockOps) GetDirtyFileBlockRefs(
  1728  	lState *kbfssync.LockState) []data.BlockRef {
  1729  	fbo.blockLock.RLock(lState)
  1730  	defer fbo.blockLock.RUnlock(lState)
  1731  	var dirtyRefs []data.BlockRef
  1732  	for ref := range fbo.unrefCache {
  1733  		dirtyRefs = append(dirtyRefs, ref)
  1734  	}
  1735  	return dirtyRefs
  1736  }
  1737  
  1738  // GetDirtyDirBlockRefs returns a list of references of all known
  1739  // dirty directories.  Also returns a channel that, while it is open,
  1740  // all future writes will be blocked until it is closed -- this lets
  1741  // the caller ensure that the directory entries will remain stable
  1742  // (not updated with new file sizes by the writes) until all of the
  1743  // directory blocks have been safely copied.  The caller *must* close
  1744  // this channel once they are done processing the dirty directory
  1745  // blocks.
  1746  func (fbo *folderBlockOps) GetDirtyDirBlockRefs(
  1747  	lState *kbfssync.LockState) ([]data.BlockRef, chan<- struct{}) {
  1748  	fbo.blockLock.Lock(lState)
  1749  	defer fbo.blockLock.Unlock(lState)
  1750  	var dirtyRefs []data.BlockRef
  1751  	for ptr := range fbo.dirtyDirs {
  1752  		dirtyRefs = append(dirtyRefs, ptr.Ref())
  1753  	}
  1754  	if fbo.dirtyDirsSyncing {
  1755  		panic("GetDirtyDirBlockRefs() called twice")
  1756  	}
  1757  	fbo.dirtyDirsSyncing = true
  1758  	ch := make(chan struct{})
  1759  	fbo.holdNewWritesCh = ch
  1760  	return dirtyRefs, ch
  1761  }
  1762  
  1763  // GetDirtyDirBlockRefsDone is called to indicate the caller is done
  1764  // with the data previously returned from `GetDirtyDirBlockRefs()`.
  1765  func (fbo *folderBlockOps) GetDirtyDirBlockRefsDone(
  1766  	lState *kbfssync.LockState) {
  1767  	fbo.blockLock.Lock(lState)
  1768  	defer fbo.blockLock.Unlock(lState)
  1769  	fbo.dirtyDirsSyncing = false
  1770  	fbo.deferredDirUpdates = nil
  1771  	fbo.holdNewWritesCh = nil
  1772  }
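
        // A sketch of the expected lifecycle of the two calls above
        // (hypothetical caller, error handling elided):
        //
        //	refs, writesCh := fbo.GetDirtyDirBlockRefs(lState)
        //	// ... safely copy all the dirty directory blocks ...
        //	close(writesCh) // unblock any held writes
        //	// ... finish processing the sync ...
        //	fbo.GetDirtyDirBlockRefsDone(lState)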
  1773  
  1774  // getDirtyDirUnrefsLocked returns a list of block infos that need to be
  1775  // unreferenced for the given directory.
  1776  func (fbo *folderBlockOps) getDirtyDirUnrefsLocked(
  1777  	lState *kbfssync.LockState, ptr data.BlockPointer) []data.BlockInfo {
  1778  	fbo.blockLock.AssertRLocked(lState)
  1779  	return fbo.dirtyDirs[ptr]
  1780  }
  1781  
  1782  // fixChildBlocksAfterRecoverableErrorLocked should be called when a sync
  1783  // failed with a recoverable block error on a multi-block file.  It
  1784  // makes sure that any outstanding dirty versions of the file are
  1785  // fixed up to reflect the fact that some of the indirect pointers now
  1786  // need to change.
  1787  func (fbo *folderBlockOps) fixChildBlocksAfterRecoverableErrorLocked(
  1788  	ctx context.Context, lState *kbfssync.LockState, file data.Path, kmd libkey.KeyMetadata,
  1789  	redirtyOnRecoverableError map[data.BlockPointer]data.BlockPointer) {
  1790  	fbo.blockLock.AssertLocked(lState)
  1791  
  1792  	defer func() {
  1793  		// Below, this function can end up writing dirty blocks back
  1794  		// to the cache, which will set `doDeferWrite` to `true`.
  1795  		// This leads to future writes being unnecessarily deferred
  1796  		// when a Sync is not happening, and can lead to dirty data
  1797  		// being synced twice and sticking around for longer than
  1798  		// needed.  So just reset `doDeferWrite` once we're
  1799  		// done. We're under `blockLock`, so this is safe.
  1800  		fbo.doDeferWrite = false
  1801  	}()
  1802  
  1803  	df := fbo.dirtyFiles[file.TailPointer()]
  1804  	if df != nil {
  1805  		// Un-orphan old blocks, since we are reverting back to the
  1806  		// previous state.
  1807  		for _, oldPtr := range redirtyOnRecoverableError {
  1808  			fbo.vlog.CLogf(ctx, libkb.VLog1, "Un-orphaning %v", oldPtr)
  1809  			df.SetBlockOrphaned(oldPtr, false)
  1810  		}
  1811  	}
  1812  
  1813  	dirtyBcache := fbo.config.DirtyBlockCache()
  1814  	topBlock, err := dirtyBcache.Get(
  1815  		ctx, fbo.id(), file.TailPointer(), fbo.branch())
  1816  	fblock, ok := topBlock.(*data.FileBlock)
  1817  	if err != nil || !ok {
  1818  		fbo.log.CWarningf(ctx, "Couldn't find dirtied "+
  1819  			"top-block for %v: %v", file.TailPointer(), err)
  1820  		return
  1821  	}
  1822  
  1823  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
  1824  	if err != nil {
  1825  		fbo.log.CWarningf(ctx, "Couldn't find uid during recovery: %v", err)
  1826  		return
  1827  	}
  1828  	fd := fbo.newFileData(lState, file, chargedTo, kmd)
  1829  
  1830  	// If a copy of the top indirect block was made, we need to
  1831  	// redirty all the sync'd blocks under their new IDs, so that
  1832  	// future syncs will know they failed.
  1833  	newPtrs := make(map[data.BlockPointer]bool, len(redirtyOnRecoverableError))
  1834  	for newPtr := range redirtyOnRecoverableError {
  1835  		newPtrs[newPtr] = true
  1836  	}
  1837  	found, err := fd.FindIPtrsAndClearSize(ctx, fblock, newPtrs)
  1838  	if err != nil {
  1839  		fbo.log.CWarningf(
  1840  			ctx, "Couldn't find and clear iptrs during recovery: %v", err)
  1841  		return
  1842  	}
  1843  	for newPtr, oldPtr := range redirtyOnRecoverableError {
  1844  		if !found[newPtr] {
  1845  			continue
  1846  		}
  1847  
  1848  		fbo.vlog.CLogf(
  1849  			ctx, libkb.VLog1, "Re-dirtying %v (and deleting dirty block %v)",
  1850  			newPtr, oldPtr)
  1851  		// These blocks would have been permanent, so they're
  1852  		// definitely still in the cache.
  1853  		b, err := fbo.config.BlockCache().Get(newPtr)
  1854  		if err != nil {
  1855  			fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
  1856  			continue
  1857  		}
  1858  		if err = fbo.cacheBlockIfNotYetDirtyLocked(
  1859  			ctx, lState, newPtr, file, b); err != nil {
  1860  			fbo.log.CWarningf(ctx, "Couldn't re-dirty %v: %v", newPtr, err)
  1861  		}
  1862  		fbo.vlog.CLogf(
  1863  			ctx, libkb.VLog1, "Deleting dirty ptr %v after recoverable error",
  1864  			oldPtr)
  1865  		err = dirtyBcache.Delete(fbo.id(), oldPtr, fbo.branch())
  1866  		if err != nil {
  1867  			fbo.vlog.CLogf(
  1868  				ctx, libkb.VLog1, "Couldn't del-dirty %v: %v", oldPtr, err)
  1869  		}
  1870  	}
  1871  }
  1872  
  1873  func (fbo *folderBlockOps) nowUnixNano() int64 {
  1874  	return fbo.config.Clock().Now().UnixNano()
  1875  }
  1876  
  1877  // PrepRename prepares the given rename operation. It returns what is
  1878  // to be the new DirEntry, the DirEntry of any entry being replaced at
  1879  // the destination (zero-valued if none), and the new renameOp.
  1880  func (fbo *folderBlockOps) PrepRename(
  1881  	ctx context.Context, lState *kbfssync.LockState,
  1882  	kmd KeyMetadataWithRootDirEntry, oldParent data.Path,
  1883  	oldName data.PathPartString, newParent data.Path,
  1884  	newName data.PathPartString) (
  1885  	newDe, replacedDe data.DirEntry, ro *renameOp, err error) {
  1886  	fbo.blockLock.RLock(lState)
  1887  	defer fbo.blockLock.RUnlock(lState)
  1888  
  1889  	// Look up in the old path. Won't be modified, so only fetch for reading.
  1890  	newDe, err = fbo.getEntryLocked(
  1891  		ctx, lState, kmd, oldParent.ChildPathNoPtr(oldName, nil), false)
  1892  	if err != nil {
  1893  		return data.DirEntry{}, data.DirEntry{}, nil, err
  1894  	}
  1895  
  1896  	oldParentPtr := oldParent.TailPointer()
  1897  	newParentPtr := newParent.TailPointer()
  1898  	ro, err = newRenameOp(
  1899  		oldName.Plaintext(), oldParentPtr, newName.Plaintext(), newParentPtr,
  1900  		newDe.BlockPointer, newDe.Type)
  1901  	if err != nil {
  1902  		return data.DirEntry{}, data.DirEntry{}, nil, err
  1903  	}
  1904  	ro.AddUpdate(oldParentPtr, oldParentPtr)
  1905  	ro.setFinalPath(newParent)
  1906  	ro.oldFinalPath = oldParent
  1907  	if oldParentPtr.ID != newParentPtr.ID {
  1908  		ro.AddUpdate(newParentPtr, newParentPtr)
  1909  	}
  1910  
  1911  	replacedDe, err = fbo.getEntryLocked(
  1912  		ctx, lState, kmd, newParent.ChildPathNoPtr(newName, nil), false)
  1913  	if _, notExists := errors.Cause(err).(idutil.NoSuchNameError); notExists {
  1914  		return newDe, data.DirEntry{}, ro, nil
  1915  	} else if err != nil {
  1916  		return data.DirEntry{}, data.DirEntry{}, nil, err
  1917  	}
  1918  
  1919  	return newDe, replacedDe, ro, nil
  1920  }
  1921  
  1922  func (fbo *folderBlockOps) newFileData(lState *kbfssync.LockState,
  1923  	file data.Path, chargedTo keybase1.UserOrTeamID, kmd libkey.KeyMetadata) *data.FileData {
  1924  	fbo.blockLock.AssertAnyLocked(lState)
  1925  	return data.NewFileData(file, chargedTo, fbo.config.BlockSplitter(), kmd,
  1926  		func(ctx context.Context, kmd libkey.KeyMetadata, ptr data.BlockPointer,
  1927  			file data.Path, rtype data.BlockReqType) (*data.FileBlock, bool, error) {
  1928  			lState := lState
  1929  			if rtype == data.BlockReadParallel {
  1930  				lState = nil
  1931  			}
  1932  			return fbo.getFileBlockLocked(
  1933  				ctx, lState, kmd, ptr, file, rtype)
  1934  		},
  1935  		func(ctx context.Context, ptr data.BlockPointer, block data.Block) error {
  1936  			return fbo.cacheBlockIfNotYetDirtyLocked(
  1937  				ctx, lState, ptr, file, block)
  1938  		}, fbo.log, fbo.vlog)
  1939  }
  1940  
  1941  func (fbo *folderBlockOps) newFileDataWithCache(lState *kbfssync.LockState,
  1942  	file data.Path, chargedTo keybase1.UserOrTeamID, kmd libkey.KeyMetadata,
  1943  	dirtyBcache data.DirtyBlockCacheSimple) *data.FileData {
  1944  	fbo.blockLock.AssertAnyLocked(lState)
  1945  	return data.NewFileData(file, chargedTo, fbo.config.BlockSplitter(), kmd,
  1946  		func(ctx context.Context, kmd libkey.KeyMetadata, ptr data.BlockPointer,
  1947  			file data.Path, rtype data.BlockReqType) (*data.FileBlock, bool, error) {
  1948  			block, err := dirtyBcache.Get(ctx, file.Tlf, ptr, file.Branch)
  1949  			if fblock, ok := block.(*data.FileBlock); ok && err == nil {
  1950  				return fblock, true, nil
  1951  			}
  1952  			lState := lState
  1953  			if rtype == data.BlockReadParallel {
  1954  				lState = nil
  1955  			}
  1956  			return fbo.getFileBlockLocked(
  1957  				ctx, lState, kmd, ptr, file, rtype)
  1958  		},
  1959  		func(ctx context.Context, ptr data.BlockPointer, block data.Block) error {
  1960  			return dirtyBcache.Put(ctx, file.Tlf, ptr, file.Branch, block)
  1961  		}, fbo.log, fbo.vlog)
  1962  }
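
        // Both constructors above wire fbo's caches and lock discipline into
        // the generic data.FileData machinery via getter/setter closures, so
        // that code never touches fbo state directly.  The getters pass a
        // nil lock state for data.BlockReadParallel requests, presumably
        // because such reads can run on goroutines that don't hold blockLock
        // and so must skip its lock assertions.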
  1963  
  1964  // Read reads from the given file into the given buffer at the given
  1965  // offset. It returns the number of bytes read and nil, or 0 and the
  1966  // error if there was one.
  1967  func (fbo *folderBlockOps) Read(
  1968  	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata, file Node,
  1969  	dest []byte, off int64) (int64, error) {
  1970  	fbo.blockLock.RLock(lState)
  1971  	defer fbo.blockLock.RUnlock(lState)
  1972  
  1973  	filePath := fbo.nodeCache.PathFromNode(file)
  1974  
  1975  	fbo.vlog.CLogf(ctx, libkb.VLog1, "Reading from %v", filePath.TailPointer())
  1976  
  1977  	var id keybase1.UserOrTeamID // Data reads don't depend on the id.
  1978  	fd := fbo.newFileData(lState, filePath, id, kmd)
  1979  	return fd.Read(ctx, dest, data.Int64Offset(off))
  1980  }
  1981  
  1982  func (fbo *folderBlockOps) maybeWaitOnDeferredWrites(
  1983  	ctx context.Context, lState *kbfssync.LockState, file Node,
  1984  	c data.DirtyPermChan) error {
  1985  	var errListener chan error
  1986  	registerErr := func() error {
  1987  		fbo.blockLock.Lock(lState)
  1988  		defer fbo.blockLock.Unlock(lState)
  1989  		filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
  1990  		if err != nil {
  1991  			return err
  1992  		}
  1993  		df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
  1994  		errListener = make(chan error, 1)
  1995  		df.AddErrListener(errListener)
  1996  		return nil
  1997  	}
  1998  	err := registerErr()
  1999  	if err != nil {
  2000  		return err
  2001  	}
  2002  
  2003  	logTimer := time.After(100 * time.Millisecond)
  2004  	doLogUnblocked := false
  2005  	for {
  2006  		var err error
  2007  	outerSelect:
  2008  		select {
  2009  		case <-c:
  2010  			if doLogUnblocked {
  2011  				fbo.vlog.CLogf(ctx, libkb.VLog1, "Write unblocked")
  2012  			}
  2013  			// Make sure there aren't any queued errors.
  2014  			select {
  2015  			case err = <-errListener:
  2016  				// Break the select to check the cause of the error below.
  2017  				break outerSelect
  2018  			default:
  2019  			}
  2020  			return nil
  2021  		case <-logTimer:
  2022  			// Print a log message once if it's taking too long.
  2023  			fbo.log.CDebugf(ctx,
  2024  				"Blocking a write because of a full dirty buffer")
  2025  			doLogUnblocked = true
  2026  		case <-ctx.Done():
  2027  			return ctx.Err()
  2028  		case err = <-errListener:
  2029  			// Fall through to check the cause of the error below.
  2030  		}
  2031  		// Context errors are safe to ignore, since they are likely to
  2032  		// be specific to a previous sync (e.g., a user hit ctrl-c
  2033  		// during an fsync, or a sync timed out, or a test was
  2034  		// provoking an error specifically [KBFS-2164]).
  2035  		cause := errors.Cause(err)
  2036  		if cause == context.Canceled || cause == context.DeadlineExceeded {
  2037  			fbo.vlog.CLogf(ctx, libkb.VLog1, "Ignoring sync err: %+v", err)
  2038  			err := registerErr()
  2039  			if err != nil {
  2040  				return err
  2041  			}
  2042  			continue
  2043  		} else if err != nil {
  2044  			// Treat other errors as fatal to this write -- e.g., the
  2045  			// user's quota is full, the local journal is broken,
  2046  			// etc. XXX: should we ignore errors that are specific
  2047  			// only to some other file being sync'd (e.g.,
  2048  			// "recoverable" block errors from which we couldn't
  2049  			// recover)?
  2050  			return err
  2051  		}
  2052  	}
  2053  }
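
        // Note on the loop above: context-cancellation errors from a
        // previous sync are swallowed, but the error listener is registered
        // anew each time, so a subsequent, genuine failure is still
        // observed.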
  2054  
  2055  func (fbo *folderBlockOps) pathFromNodeForBlockWriteLocked(
  2056  	lState *kbfssync.LockState, n Node) (data.Path, error) {
  2057  	fbo.blockLock.AssertLocked(lState)
  2058  	p := fbo.nodeCache.PathFromNode(n)
  2059  	if !p.IsValid() {
  2060  		return data.Path{}, errors.WithStack(InvalidPathError{p})
  2061  	}
  2062  	return p, nil
  2063  }
  2064  
  2065  // writeGetFileLocked checks write permissions explicitly for
  2066  // writeDataLocked, truncateLocked etc, and returns the file's top block.
  2067  func (fbo *folderBlockOps) writeGetFileLocked(
  2068  	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
  2069  	file data.Path) (*data.FileBlock, error) {
  2070  	fbo.blockLock.AssertLocked(lState)
  2071  
  2072  	session, err := fbo.config.KBPKI().GetCurrentSession(ctx)
  2073  	if err != nil {
  2074  		return nil, err
  2075  	}
  2076  	isWriter, err := kmd.IsWriter(
  2077  		ctx, fbo.config.KBPKI(), fbo.config, session.UID, session.VerifyingKey)
  2078  	if err != nil {
  2079  		return nil, err
  2080  	}
  2081  	if !isWriter {
  2082  		return nil, tlfhandle.NewWriteAccessError(kmd.GetTlfHandle(),
  2083  			session.Name, file.String())
  2084  	}
  2085  	fblock, err := fbo.getFileLocked(ctx, lState, kmd, file, data.BlockWrite)
  2086  	if err != nil {
  2087  		return nil, err
  2088  	}
  2089  	return fblock, nil
  2090  }
  2091  
  2092  // Returns the set of blocks dirtied during this write that might need
  2093  // to be cleaned up if the write is deferred.
  2094  func (fbo *folderBlockOps) writeDataLocked(
  2095  	ctx context.Context, lState *kbfssync.LockState,
  2096  	kmd KeyMetadataWithRootDirEntry, file data.Path, buf []byte, off int64) (
  2097  	latestWrite WriteRange, dirtyPtrs []data.BlockPointer,
  2098  	newlyDirtiedChildBytes int64, err error) {
  2099  	_, wasAlreadyUnref := fbo.unrefCache[file.TailPointer().Ref()]
  2100  	defer func() {
  2101  		// if the write didn't succeed, and the file wasn't already
  2102  		// being cached, clear out any cached state.
  2103  		if err != nil && !wasAlreadyUnref {
  2104  			_ = fbo.clearCacheInfoLocked(lState, file)
  2105  		}
  2106  	}()
  2107  
  2108  	if jManager, err := GetJournalManager(fbo.config); err == nil {
  2109  		jManager.dirtyOpStart(fbo.id())
  2110  		defer jManager.dirtyOpEnd(fbo.id())
  2111  	}
  2112  
  2113  	fbo.blockLock.AssertLocked(lState)
  2114  	fbo.vlog.CLogf(ctx, libkb.VLog1, "writeDataLocked on file pointer %v",
  2115  		file.TailPointer())
  2116  	defer func() {
  2117  		fbo.vlog.CLogf(ctx, libkb.VLog1, "writeDataLocked done: %v", err)
  2118  	}()
  2119  
  2120  	fblock, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
  2121  	if err != nil {
  2122  		return WriteRange{}, nil, 0, err
  2123  	}
  2124  
  2125  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
  2126  	if err != nil {
  2127  		return WriteRange{}, nil, 0, err
  2128  	}
  2129  
  2130  	fd := fbo.newFileData(lState, file, chargedTo, kmd)
  2131  
  2132  	dirtyBcache := fbo.config.DirtyBlockCache()
  2133  	df := fbo.getOrCreateDirtyFileLocked(lState, file)
  2134  	defer func() {
  2135  		// Always update unsynced bytes and potentially force a sync,
  2136  		// even on an error, since the previously-dirty bytes stay in
  2137  		// the cache.
  2138  		df.UpdateNotYetSyncingBytes(newlyDirtiedChildBytes)
  2139  		if dirtyBcache.ShouldForceSync(fbo.id()) {
  2140  			select {
  2141  			// If we can't send on the channel, that means a sync is
  2142  			// already in progress.
  2143  			case fbo.forceSyncChan <- struct{}{}:
  2144  				fbo.vlog.CLogf(
  2145  					ctx, libkb.VLog1, "Forcing a sync due to full buffer")
  2146  			default:
  2147  			}
  2148  		}
  2149  	}()
  2150  
  2151  	de, err := fbo.getEntryLocked(ctx, lState, kmd, file, true)
  2152  	if err != nil {
  2153  		return WriteRange{}, nil, 0, err
  2154  	}
  2155  	if de.BlockPointer != file.TailPointer() {
  2156  		fbo.log.CDebugf(ctx, "DirEntry and file tail pointer don't match: "+
  2157  			"%v vs %v, parent=%s", de.BlockPointer, file.TailPointer(),
  2158  			file.ParentPath().TailPointer())
  2159  	}
  2160  
  2161  	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
  2162  	if err != nil {
  2163  		return WriteRange{}, nil, 0, err
  2164  	}
  2165  
  2166  	newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, bytesExtended, err :=
  2167  		fd.Write(ctx, buf, data.Int64Offset(off), fblock, de, df)
  2168  	// Record the unrefs before checking the error so we remember the
  2169  	// state of newly dirtied blocks.
  2170  	si.unrefs = append(si.unrefs, unrefs...)
  2171  	if err != nil {
  2172  		return WriteRange{}, nil, newlyDirtiedChildBytes, err
  2173  	}
  2174  
  2175  	// Update the file's directory entry.
  2176  	now := fbo.nowUnixNano()
  2177  	newDe.Mtime = now
  2178  	newDe.Ctime = now
  2179  	err = fbo.updateEntryLocked(ctx, lState, kmd, file, newDe, true)
  2180  	if err != nil {
  2181  		return WriteRange{}, nil, newlyDirtiedChildBytes, err
  2182  	}
  2183  
  2184  	if fbo.doDeferWrite {
  2185  		df.AddDeferredNewBytes(bytesExtended)
  2186  	}
  2187  
  2188  	latestWrite = si.op.addWrite(uint64(off), uint64(len(buf)))
  2189  
  2190  	return latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
  2191  }
  2192  
  2193  func (fbo *folderBlockOps) holdWritesLocked(
  2194  	ctx context.Context, lState *kbfssync.LockState) error {
  2195  	fbo.blockLock.AssertLocked(lState)
  2196  
  2197  	// Loop until either the hold channel is nil, or it has been
  2198  	// closed.  However, we can't hold the lock while we're waiting
  2199  	// for it to close, as that will cause deadlocks.  So we need to
  2200  	// verify that it's the _same_ channel that was closed after we
  2201  	// re-take the lock; otherwise, we need to wait again on the new
  2202  	// channel.
  2203  	for fbo.holdNewWritesCh != nil {
  2204  		ch := fbo.holdNewWritesCh
  2205  		fbo.blockLock.Unlock(lState)
  2206  		fbo.vlog.CLogf(ctx, libkb.VLog1, "Blocking write on hold channel")
  2207  		select {
  2208  		case <-ch:
  2209  			fbo.blockLock.Lock(lState)
  2210  			// If the channel hasn't changed since we checked it
  2211  			// outside of the lock, we are good to proceed.
  2212  			if ch == fbo.holdNewWritesCh {
  2213  				fbo.vlog.CLogf(
  2214  					ctx, libkb.VLog1, "Unblocking write on hold channel")
  2215  				return nil
  2216  			}
  2217  		case <-ctx.Done():
  2218  			fbo.blockLock.Lock(lState)
  2219  			return ctx.Err()
  2220  		}
  2221  	}
  2222  	return nil
  2223  }
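
        // The loop above is an instance of a general pattern: snapshot the
        // channel under the lock, unlock, wait, then relock and re-check
        // identity.  A minimal sketch with hypothetical names (mu guards
        // state; state.ch may be closed and/or replaced by other
        // goroutines):
        //
        //	for state.ch != nil {
        //		ch := state.ch // snapshot under the lock
        //		mu.Unlock()
        //		<-ch // wait without holding the lock
        //		mu.Lock()
        //		if ch == state.ch {
        //			break // the current channel was closed; proceed
        //		}
        //		// A new channel was installed; wait on it too.
        //	}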
  2224  
  2225  // Write writes the given data to the given file. May block if there
  2226  // is too much unflushed data; in that case, it will be unblocked by a
  2227  // future sync.
  2228  func (fbo *folderBlockOps) Write(
  2229  	ctx context.Context, lState *kbfssync.LockState,
  2230  	kmd KeyMetadataWithRootDirEntry, file Node, buf []byte, off int64) error {
  2231  	// If there is too much unflushed data, we should wait until some
  2232  	// of it gets flushed so our memory usage doesn't grow without
  2233  	// bound.
  2234  	c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
  2235  		fbo.id(), int64(len(buf)))
  2236  	if err != nil {
  2237  		return err
  2238  	}
  2239  	defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
  2240  		-int64(len(buf)), false)
  2241  	err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
  2242  	if err != nil {
  2243  		return err
  2244  	}
  2245  
  2246  	fbo.blockLock.Lock(lState)
  2247  	defer fbo.blockLock.Unlock(lState)
  2248  
  2249  	err = fbo.holdWritesLocked(ctx, lState)
  2250  	if err != nil {
  2251  		return err
  2252  	}
  2253  
  2254  	filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
  2255  	if err != nil {
  2256  		return err
  2257  	}
  2258  
  2259  	defer func() {
  2260  		fbo.doDeferWrite = false
  2261  	}()
  2262  
  2263  	latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.writeDataLocked(
  2264  		ctx, lState, kmd, filePath, buf, off)
  2265  	if err != nil {
  2266  		return err
  2267  	}
  2268  
  2269  	fbo.observers.localChange(ctx, file, latestWrite)
  2270  
  2271  	if fbo.doDeferWrite {
  2272  		// There's an ongoing sync, and this write altered dirty
  2273  		// blocks that are in the process of syncing.  So, we have to
  2274  		// redo this write once the sync is complete, using the new
  2275  		// file path.
  2276  		//
  2277  		// There is probably a less terrible way of doing this that
  2278  		// doesn't involve so much copying and rewriting, but this is
  2279  		// the most obviously correct way.
  2280  		bufCopy := make([]byte, len(buf))
  2281  		copy(bufCopy, buf)
  2282  		fbo.vlog.CLogf(
  2283  			ctx, libkb.VLog1, "Deferring a write to file %v off=%d len=%d",
  2284  			filePath.TailPointer(), off, len(buf))
  2285  		ds := fbo.deferred[filePath.TailRef()]
  2286  		ds.dirtyDeletes = append(ds.dirtyDeletes, dirtyPtrs...)
  2287  		ds.writes = append(ds.writes,
  2288  			func(ctx context.Context, lState *kbfssync.LockState,
  2289  				kmd KeyMetadataWithRootDirEntry, f data.Path) error {
  2290  				// We are about to re-dirty these bytes, so mark that
  2291  				// they will no longer be synced via the old file.
  2292  				df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
  2293  				df.UpdateNotYetSyncingBytes(-newlyDirtiedChildBytes)
  2294  
  2295  				// Write the data again.  We know this won't be
  2296  				// deferred, so no need to check the new ptrs.
  2297  				_, _, _, err := fbo.writeDataLocked(
  2298  					ctx, lState, kmd, f, bufCopy, off)
  2299  				return err
  2300  			})
  2301  		ds.waitBytes += newlyDirtiedChildBytes
  2302  		fbo.deferred[filePath.TailRef()] = ds
  2303  	}
  2304  
  2305  	return nil
  2306  }
  2307  
  2308  // truncateExtendLocked is called by truncateLocked to extend a file,
  2309  // creating a hole.
  2310  func (fbo *folderBlockOps) truncateExtendLocked(
  2311  	ctx context.Context, lState *kbfssync.LockState,
  2312  	kmd KeyMetadataWithRootDirEntry, file data.Path, size uint64,
  2313  	parentBlocks []data.ParentBlockAndChildIndex) (
  2314  	WriteRange, []data.BlockPointer, error) {
  2315  	fblock, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
  2316  	if err != nil {
  2317  		return WriteRange{}, nil, err
  2318  	}
  2319  
  2320  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
  2321  	if err != nil {
  2322  		return WriteRange{}, nil, err
  2323  	}
  2324  
  2325  	fd := fbo.newFileData(lState, file, chargedTo, kmd)
  2326  
  2327  	de, err := fbo.getEntryLocked(ctx, lState, kmd, file, true)
  2328  	if err != nil {
  2329  		return WriteRange{}, nil, err
  2330  	}
  2331  	df := fbo.getOrCreateDirtyFileLocked(lState, file)
  2332  	newDe, dirtyPtrs, err := fd.TruncateExtend(
  2333  		ctx, size, fblock, parentBlocks, de, df)
  2334  	if err != nil {
  2335  		return WriteRange{}, nil, err
  2336  	}
  2337  
  2338  	now := fbo.nowUnixNano()
  2339  	newDe.Mtime = now
  2340  	newDe.Ctime = now
  2341  	err = fbo.updateEntryLocked(ctx, lState, kmd, file, newDe, true)
  2342  	if err != nil {
  2343  		return WriteRange{}, nil, err
  2344  	}
  2345  
  2346  	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
  2347  	if err != nil {
  2348  		return WriteRange{}, nil, err
  2349  	}
  2350  	latestWrite := si.op.addTruncate(size)
  2351  
  2352  	if fbo.config.DirtyBlockCache().ShouldForceSync(fbo.id()) {
  2353  		select {
  2354  		// If we can't send on the channel, that means a sync is
  2355  		// already in progress
  2356  		case fbo.forceSyncChan <- struct{}{}:
  2357  			fbo.vlog.CLogf(
  2358  				ctx, libkb.VLog1, "Forcing a sync due to full buffer")
  2359  		default:
  2360  		}
  2361  	}
  2362  
  2363  	fbo.vlog.CLogf(ctx, libkb.VLog1, "truncateExtendLocked: done")
  2364  	return latestWrite, dirtyPtrs, nil
  2365  }
  2366  
  2367  // Returns the set of newly-ID'd blocks created during this truncate
  2368  // that might need to be cleaned up if the truncate is deferred.
  2369  func (fbo *folderBlockOps) truncateLocked(
  2370  	ctx context.Context, lState *kbfssync.LockState,
  2371  	kmd KeyMetadataWithRootDirEntry, file data.Path, size uint64) (
  2372  	wr *WriteRange, ptrs []data.BlockPointer, dirtyBytes int64, err error) {
  2373  	_, wasAlreadyUnref := fbo.unrefCache[file.TailPointer().Ref()]
  2374  	defer func() {
  2375  		// if the truncate didn't succeed, and the file wasn't already
  2376  		// being cached, clear out any cached state.
  2377  		if err != nil && !wasAlreadyUnref {
  2378  			_ = fbo.clearCacheInfoLocked(lState, file)
  2379  		}
  2380  	}()
  2381  
  2382  	if jManager, err := GetJournalManager(fbo.config); err == nil {
  2383  		jManager.dirtyOpStart(fbo.id())
  2384  		defer jManager.dirtyOpEnd(fbo.id())
  2385  	}
  2386  
  2387  	fblock, err := fbo.writeGetFileLocked(ctx, lState, kmd, file)
  2388  	if err != nil {
  2389  		return &WriteRange{}, nil, 0, err
  2390  	}
  2391  
  2392  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
  2393  	if err != nil {
  2394  		return &WriteRange{}, nil, 0, err
  2395  	}
  2396  
  2397  	fd := fbo.newFileData(lState, file, chargedTo, kmd)
  2398  
  2399  	// find the block where the file should now end
  2400  	iSize := int64(size) // TODO: deal with overflow
  2401  	_, parentBlocks, block, nextBlockOff, startOff, _, err :=
  2402  		fd.GetFileBlockAtOffset(
  2403  			ctx, fblock, data.Int64Offset(iSize), data.BlockWrite)
  2404  	if err != nil {
  2405  		return &WriteRange{}, nil, 0, err
  2406  	}
  2407  
  2408  	currLen := int64(startOff) + int64(len(block.Contents))
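
        	// The switch below picks a strategy based on how the requested
        	// size compares to the current end of the file: extending far
        	// past the last block (by more than truncateExtendCutoffPoint)
        	// punches a hole via truncateExtendLocked; extending by a
        	// smaller amount zero-fills the gap via writeDataLocked; a
        	// same-size truncate on the last block is a no-op.  Anything
        	// else falls through to the shrink logic below the switch.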
  2409  	switch {
  2410  	case currLen+truncateExtendCutoffPoint < iSize:
  2411  		latestWrite, dirtyPtrs, err := fbo.truncateExtendLocked(
  2412  			ctx, lState, kmd, file, uint64(iSize), parentBlocks)
  2413  		// The success and error paths return identical values, so a
  2414  		// single return suffices here; the caller inspects err as
  2415  		// usual.
  2416  		return &latestWrite, dirtyPtrs, 0, err
  2417  	case currLen < iSize:
  2418  		moreNeeded := iSize - currLen
  2419  		latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err :=
  2420  			fbo.writeDataLocked(
  2421  				ctx, lState, kmd, file, make([]byte, moreNeeded), currLen)
  2422  		// As above, the success and error paths return the same
  2423  		// values, so no separate error branch is needed; the caller
  2424  		// inspects err.
  2425  		return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err
  2426  	case currLen == iSize && nextBlockOff < 0:
  2427  		// same size!
  2428  		if !wasAlreadyUnref {
  2429  			_ = fbo.clearCacheInfoLocked(lState, file)
  2430  		}
  2431  		return nil, nil, 0, nil
  2432  	}
  2433  
  2434  	// update the local entry size
  2435  	de, err := fbo.getEntryLocked(ctx, lState, kmd, file, true)
  2436  	if err != nil {
  2437  		return nil, nil, 0, err
  2438  	}
  2439  
  2440  	si, err := fbo.getOrCreateSyncInfoLocked(lState, de)
  2441  	if err != nil {
  2442  		return nil, nil, 0, err
  2443  	}
  2444  
  2445  	newDe, dirtyPtrs, unrefs, newlyDirtiedChildBytes, err := fd.TruncateShrink(
  2446  		ctx, size, fblock, de)
  2447  	// Record the unrefs before checking the error so we remember the
  2448  	// state of newly dirtied blocks.
  2449  	si.unrefs = append(si.unrefs, unrefs...)
  2450  	if err != nil {
  2451  		return nil, nil, newlyDirtiedChildBytes, err
  2452  	}
  2453  
  2454  	// Update dirtied bytes and unrefs regardless of error.
  2455  	df := fbo.getOrCreateDirtyFileLocked(lState, file)
  2456  	df.UpdateNotYetSyncingBytes(newlyDirtiedChildBytes)
  2457  
  2458  	latestWrite := si.op.addTruncate(size)
  2459  	now := fbo.nowUnixNano()
  2460  	newDe.Mtime = now
  2461  	newDe.Ctime = now
  2462  	err = fbo.updateEntryLocked(ctx, lState, kmd, file, newDe, true)
  2463  	if err != nil {
  2464  		return nil, nil, newlyDirtiedChildBytes, err
  2465  	}
  2466  
  2467  	return &latestWrite, dirtyPtrs, newlyDirtiedChildBytes, nil
  2468  }
  2469  
  2470  // Truncate truncates or extends the given file to the given size.
  2471  // May block if there is too much unflushed data; in that case, it
  2472  // will be unblocked by a future sync.
  2473  func (fbo *folderBlockOps) Truncate(
  2474  	ctx context.Context, lState *kbfssync.LockState,
  2475  	kmd KeyMetadataWithRootDirEntry, file Node, size uint64) error {
  2476  	// If there is too much unflushed data, we should wait until some
  2477  	// of it gets flushed so our memory usage doesn't grow without
  2478  	// bound.
  2479  	//
  2480  	// Assume the whole remaining file will be dirty after this
  2481  	// truncate.  TODO: try to figure out how many bytes actually will
  2482  	// be dirtied ahead of time?
  2483  	c, err := fbo.config.DirtyBlockCache().RequestPermissionToDirty(ctx,
  2484  		fbo.id(), int64(size))
  2485  	if err != nil {
  2486  		return err
  2487  	}
  2488  	defer fbo.config.DirtyBlockCache().UpdateUnsyncedBytes(fbo.id(),
  2489  		-int64(size), false)
  2490  	err = fbo.maybeWaitOnDeferredWrites(ctx, lState, file, c)
  2491  	if err != nil {
  2492  		return err
  2493  	}
  2494  
  2495  	fbo.blockLock.Lock(lState)
  2496  	defer fbo.blockLock.Unlock(lState)
  2497  
  2498  	err = fbo.holdWritesLocked(ctx, lState)
  2499  	if err != nil {
  2500  		return err
  2501  	}
  2502  
  2503  	filePath, err := fbo.pathFromNodeForBlockWriteLocked(lState, file)
  2504  	if err != nil {
  2505  		return err
  2506  	}
  2507  
  2508  	defer func() {
  2509  		fbo.doDeferWrite = false
  2510  	}()
  2511  
  2512  	latestWrite, dirtyPtrs, newlyDirtiedChildBytes, err := fbo.truncateLocked(
  2513  		ctx, lState, kmd, filePath, size)
  2514  	if err != nil {
  2515  		return err
  2516  	}
  2517  
  2518  	if latestWrite != nil {
  2519  		fbo.observers.localChange(ctx, file, *latestWrite)
  2520  	}
  2521  
  2522  	if fbo.doDeferWrite {
  2523  		// There's an ongoing sync, and this truncate altered
  2524  		// dirty blocks that are in the process of syncing.  So,
  2525  		// we have to redo this truncate once the sync is complete,
  2526  		// using the new file path.
  2527  		fbo.vlog.CLogf(
  2528  			ctx, libkb.VLog1, "Deferring a truncate to file %v",
  2529  			filePath.TailPointer())
  2530  		ds := fbo.deferred[filePath.TailRef()]
  2531  		ds.dirtyDeletes = append(ds.dirtyDeletes, dirtyPtrs...)
  2532  		ds.writes = append(ds.writes,
  2533  			func(ctx context.Context, lState *kbfssync.LockState,
  2534  				kmd KeyMetadataWithRootDirEntry, f data.Path) error {
  2535  				// We are about to re-dirty these bytes, so mark that
  2536  				// they will no longer be synced via the old file.
  2537  				df := fbo.getOrCreateDirtyFileLocked(lState, filePath)
  2538  				df.UpdateNotYetSyncingBytes(-newlyDirtiedChildBytes)
  2539  
  2540  				// Truncate the file again.  We know this won't be
  2541  				// deferred, so no need to check the new ptrs.
  2542  				_, _, _, err := fbo.truncateLocked(
  2543  					ctx, lState, kmd, f, size)
  2544  				return err
  2545  			})
  2546  		ds.waitBytes += newlyDirtiedChildBytes
  2547  		fbo.deferred[filePath.TailRef()] = ds
  2548  	}
  2549  
  2550  	return nil
  2551  }
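
        // Note that Write and Truncate share the same deferral contract:
        // when fbo.doDeferWrite was set while applying the operation, a
        // replay closure is queued in fbo.deferred (keyed by the file's
        // tail ref) and re-run against the file's new path once the
        // in-flight sync finishes.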
  2552  
  2553  // IsDirty returns whether the given file is dirty; if false is
  2554  // returned, then the file doesn't need to be synced.
  2555  func (fbo *folderBlockOps) IsDirty(lState *kbfssync.LockState, file data.Path) bool {
  2556  	fbo.blockLock.RLock(lState)
  2557  	defer fbo.blockLock.RUnlock(lState)
  2558  	// A dirty file should probably match all three of these, but
  2559  	// check them individually just in case.
  2560  	if fbo.config.DirtyBlockCache().IsDirty(
  2561  		fbo.id(), file.TailPointer(), file.Branch) {
  2562  		return true
  2563  	}
  2564  
  2565  	if _, ok := fbo.dirtyFiles[file.TailPointer()]; ok {
  2566  		return ok
  2567  	}
  2568  
  2569  	_, ok := fbo.unrefCache[file.TailRef()]
  2570  	return ok
  2571  }
  2572  
  2573  func (fbo *folderBlockOps) clearCacheInfoLocked(lState *kbfssync.LockState,
  2574  	file data.Path) error {
  2575  	fbo.blockLock.AssertLocked(lState)
  2576  	ref := file.TailRef()
  2577  	delete(fbo.unrefCache, ref)
  2578  	df := fbo.dirtyFiles[file.TailPointer()]
  2579  	if df != nil {
  2580  		err := df.FinishSync()
  2581  		if err != nil {
  2582  			return err
  2583  		}
  2584  		delete(fbo.dirtyFiles, file.TailPointer())
  2585  	}
  2586  	return nil
  2587  }
  2588  
  2589  func (fbo *folderBlockOps) clearAllDirtyDirsLocked(
  2590  	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata) {
  2591  	fbo.blockLock.AssertLocked(lState)
  2592  	dirtyBCache := fbo.config.DirtyBlockCache()
  2593  	for ptr := range fbo.dirtyDirs {
  2594  		dir := data.Path{
  2595  			FolderBranch: fbo.folderBranch,
  2596  			Path: []data.PathNode{
  2597  				{BlockPointer: ptr,
  2598  					Name: data.NewPathPartString(ptr.String(), nil),
  2599  				},
  2600  			},
  2601  		}
  2602  		dd := fbo.newDirDataLocked(lState, dir, keybase1.UserOrTeamID(""), kmd)
  2603  		childPtrs, err := dd.GetDirtyChildPtrs(ctx, dirtyBCache)
  2604  		if err != nil {
  2605  			fbo.log.CDebugf(ctx, "Failed to get child ptrs for %v: %+v",
  2606  				ptr, err)
  2607  		}
  2608  		for childPtr := range childPtrs {
  2609  			err := dirtyBCache.Delete(fbo.id(), childPtr, fbo.branch())
  2610  			if err != nil {
  2611  				fbo.log.CDebugf(
  2612  				ctx, "Failed to delete %v from dirty cache: %+v",
  2613  					childPtr, err)
  2614  			}
  2615  		}
  2616  
  2617  		err = dirtyBCache.Delete(fbo.id(), ptr, fbo.branch())
  2618  		if err != nil {
  2619  			fbo.log.CDebugf(ctx, "Failed to delete %v from dirty cache: %+v",
  2620  				ptr, err)
  2621  		}
  2622  	}
  2623  	fbo.dirtyDirs = make(map[data.BlockPointer][]data.BlockInfo)
  2624  	fbo.dirtyRootDirEntry = nil
  2625  	fbo.dirtyDirsSyncing = false
  2626  	deferredDirUpdates := fbo.deferredDirUpdates
  2627  	fbo.deferredDirUpdates = nil
  2628  	// Re-apply any deferred directory updates related to files that
  2629  	// weren't synced as part of this batch.
  2630  	for _, f := range deferredDirUpdates {
  2631  		err := f(lState)
  2632  		if err != nil {
  2633  			fbo.log.CWarningf(ctx, "Deferred entry update failed: %+v", err)
  2634  		}
  2635  	}
  2636  }
  2637  
  2638  // ClearCacheInfo removes any cached info for the given file.
  2639  func (fbo *folderBlockOps) ClearCacheInfo(
  2640  	lState *kbfssync.LockState, file data.Path) error {
  2641  	fbo.blockLock.Lock(lState)
  2642  	defer fbo.blockLock.Unlock(lState)
  2643  	return fbo.clearCacheInfoLocked(lState, file)
  2644  }
  2645  
  2646  // revertSyncInfoAfterRecoverableError updates the saved sync info to
  2647  // include all the blocks from before the error, except for those that
  2648  // have encountered recoverable block errors themselves.
  2649  func (fbo *folderBlockOps) revertSyncInfoAfterRecoverableError(
  2650  	ctx context.Context, blocksToRemove []data.BlockPointer, result fileSyncState) {
  2651  	si := result.si
  2652  	savedSi := result.savedSi
  2653  
  2654  	// Save the blocks we need to clean up on the next attempt.
  2655  	toClean := si.toCleanIfUnused
  2656  
  2657  	newIndirect := make(map[data.BlockPointer]bool)
  2658  	for _, ptr := range result.newIndirectFileBlockPtrs {
  2659  		newIndirect[ptr] = true
  2660  	}
  2661  
  2662  	// Propagate all unrefs forward, except those that belong to new
  2663  	// blocks that were created during the sync.
  2664  	unrefs := make([]data.BlockInfo, 0, len(si.unrefs))
  2665  	for _, unref := range si.unrefs {
  2666  		if newIndirect[unref.BlockPointer] {
  2667  			fbo.vlog.CLogf(ctx, libkb.VLog1, "Dropping unref %v", unref)
  2668  			continue
  2669  		}
  2670  		unrefs = append(unrefs, unref)
  2671  	}
  2672  
  2673  	// This sync will be retried and needs new blocks, so
  2674  	// reset everything in the sync info.
  2675  	*si = *savedSi
  2676  	si.toCleanIfUnused = toClean
  2677  	si.unrefs = unrefs
  2678  	if si.bps == nil {
  2679  		return
  2680  	}
  2681  
  2682  	// Mark any bad pointers so they get skipped next time.
  2683  	blocksToRemoveSet := make(map[data.BlockPointer]bool)
  2684  	for _, ptr := range blocksToRemove {
  2685  		blocksToRemoveSet[ptr] = true
  2686  	}
  2687  
  2688  	newBps, err := savedSi.bps.deepCopyWithBlacklist(ctx, blocksToRemoveSet)
  2689  	if err != nil {
  2690  		return
  2691  	}
  2692  	si.bps = newBps
  2693  }
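
        // Note on the tail of the function above: pointers named in
        // blocksToRemove are dropped from the saved block-put state via
        // deepCopyWithBlacklist, so the retried sync skips blocks that
        // themselves hit recoverable errors.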
  2694  
  2695  // fileSyncState holds state for a sync operation for a single
  2696  // file.
  2697  type fileSyncState struct {
  2698  	// If fblock is non-nil, the (dirty, indirect, cached) block
  2699  	// it points to will be set to savedFblock on a recoverable
  2700  	// error.
  2701  	fblock, savedFblock *data.FileBlock
  2702  
  2703  	// redirtyOnRecoverableError, which is non-nil only when fblock is
  2704  	// non-nil, contains pointers that need to be re-dirtied if the
  2705  	// top block gets copied during the sync, and a recoverable error
  2706  	// happens.  Maps to the old block pointer for the block, which
  2707  	// would need a DirtyBlockCache.Delete.
  2708  	redirtyOnRecoverableError map[data.BlockPointer]data.BlockPointer
  2709  
  2710  	// If si is non-nil, its updated state will be reset on
  2711  	// error. Also, if the error is recoverable, it will be
  2712  	// reverted to savedSi.
  2713  	//
  2714  	// TODO: Working with si in this way is racy, since si is a
  2715  	// member of unrefCache.
  2716  	si, savedSi *syncInfo
  2717  
  2718  	// oldFileBlockPtrs is a list of transient entries in the
  2719  	// block cache for the file, which should be removed when the
  2720  	// sync finishes.
  2721  	oldFileBlockPtrs []data.BlockPointer
  2722  
  2723  	// newIndirectFileBlockPtrs is a list of permanent entries
  2724  	// added to the block cache for the file, which should be
  2725  	// removed after the blocks have been sent to the server.
  2726  	// They are not removed on an error, because in that case the
  2727  	// file is still dirty locally and may get another chance to
  2728  	// be sync'd.
  2729  	//
  2730  	// TODO: This can be a list of IDs instead.
  2731  	newIndirectFileBlockPtrs []data.BlockPointer
  2732  }
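
// Illustrative sketch (mirroring, not replacing, the real logic in
// CleanupSyncState and revertSyncInfoAfterRecoverableError below) of
// how the saved state above is used after a recoverable error:
//
//	result.fblock.Set(result.savedFblock) // restore the top block
//	*result.si = *result.savedSi          // revert the sync info
//	// ...then re-dirty the copied children recorded in
//	// redirtyOnRecoverableError, deleting their old pointers from
//	// the dirty block cache.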
  2733  
  2734  // startSyncWrite contains the portion of StartSync() that's done
  2735  // while write-locking blockLock.  If there is no dirty de cache
  2736  // entry, dirtyDe will be nil.
  2737  func (fbo *folderBlockOps) startSyncWrite(ctx context.Context,
  2738  	lState *kbfssync.LockState, md *RootMetadata, file data.Path) (
  2739  	fblock *data.FileBlock, bps blockPutStateCopiable, syncState fileSyncState,
  2740  	dirtyDe *data.DirEntry, err error) {
  2741  	fbo.blockLock.Lock(lState)
  2742  	defer fbo.blockLock.Unlock(lState)
  2743  
  2744  	// update the parent directories, and write all the new blocks out
  2745  	// to disk
  2746  	fblock, err = fbo.getFileLocked(ctx, lState, md.ReadOnly(), file, data.BlockWrite)
  2747  	if err != nil {
  2748  		return nil, nil, syncState, nil, err
  2749  	}
  2750  
  2751  	fileRef := file.TailRef()
  2752  	si, ok := fbo.unrefCache[fileRef]
  2753  	if !ok {
  2754  		return nil, nil, syncState, nil,
  2755  			fmt.Errorf("No syncOp found for file ref %v", fileRef)
  2756  	}
  2757  
  2758  	// Collapse the write range to reduce the size of the sync op.
  2759  	si.op.Writes = si.op.collapseWriteRange(nil)
   2760  	// If this function returns successfully, we need to make sure the op
  2761  	// in `md` is not the same variable as the op in `unrefCache`,
  2762  	// because the latter could get updated still by local writes
  2763  	// before `md` is flushed to the server.  We don't copy it here
  2764  	// because code below still needs to modify it (and by extension,
  2765  	// the one stored in `syncState.si`).
  2766  	si.op.setFinalPath(file)
  2767  	md.AddOp(si.op)
  2768  
  2769  	// Fill in syncState.
  2770  	if fblock.IsInd {
  2771  		fblockCopy := fblock.DeepCopy()
  2772  		syncState.fblock = fblock
  2773  		syncState.savedFblock = fblockCopy
  2774  		syncState.redirtyOnRecoverableError = make(map[data.BlockPointer]data.BlockPointer)
  2775  	}
  2776  	syncState.si = si
  2777  	syncState.savedSi, err = si.DeepCopy(ctx, fbo.config.Codec())
  2778  	if err != nil {
  2779  		return nil, nil, syncState, nil, err
  2780  	}
  2781  
  2782  	if si.bps == nil {
  2783  		si.bps = newBlockPutStateMemory(1)
  2784  	} else {
  2785  		// reinstate byte accounting from the previous Sync
  2786  		md.SetRefBytes(si.refBytes)
  2787  		md.AddDiskUsage(si.refBytes)
  2788  		md.SetUnrefBytes(si.unrefBytes)
  2789  		md.SetMDRefBytes(0) // this will be calculated anew
  2790  		md.SetDiskUsage(md.DiskUsage() - si.unrefBytes)
  2791  		syncState.newIndirectFileBlockPtrs = append(
  2792  			syncState.newIndirectFileBlockPtrs, si.op.Refs()...)
  2793  	}
  2794  	defer func() {
  2795  		si.refBytes = md.RefBytes()
  2796  		si.unrefBytes = md.UnrefBytes()
  2797  	}()
  2798  
  2799  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, md)
  2800  	if err != nil {
  2801  		return nil, nil, syncState, nil, err
  2802  	}
  2803  
  2804  	dirtyBcache := fbo.config.DirtyBlockCache()
  2805  	df := fbo.getOrCreateDirtyFileLocked(lState, file)
  2806  	fd := fbo.newFileData(lState, file, chargedTo, md.ReadOnly())
  2807  
  2808  	// Note: below we add possibly updated file blocks as "unref" and
  2809  	// "ref" blocks.  This is fine, since conflict resolution or
  2810  	// notifications will never happen within a file.
  2811  
  2812  	// If needed, split the children blocks up along new boundaries
  2813  	// (e.g., if using a fingerprint-based block splitter).
  2814  	unrefs, err := fd.Split(ctx, fbo.id(), dirtyBcache, fblock, df)
  2815  	// Preserve any unrefs before checking the error.
  2816  	for _, unref := range unrefs {
  2817  		md.AddUnrefBlock(unref)
  2818  	}
  2819  	if err != nil {
  2820  		return nil, nil, syncState, nil, err
  2821  	}
  2822  
  2823  	// Ready all children blocks, if any.
  2824  	oldPtrs, err := fd.Ready(ctx, fbo.id(), fbo.config.BlockCache(),
  2825  		fbo.config.DirtyBlockCache(), fbo.config.BlockOps(), si.bps, fblock, df,
  2826  		fbo.cacheHashBehavior())
  2827  	if err != nil {
  2828  		return nil, nil, syncState, nil, err
  2829  	}
  2830  
  2831  	for newInfo, oldPtr := range oldPtrs {
  2832  		syncState.newIndirectFileBlockPtrs = append(
  2833  			syncState.newIndirectFileBlockPtrs, newInfo.BlockPointer)
  2834  		df.SetBlockOrphaned(oldPtr, true)
  2835  
  2836  		// Defer the DirtyBlockCache.Delete until after the new path
  2837  		// is ready, in case anyone tries to read the dirty file in
  2838  		// the meantime.
  2839  		syncState.oldFileBlockPtrs = append(syncState.oldFileBlockPtrs, oldPtr)
  2840  
  2841  		md.AddRefBlock(newInfo)
  2842  
  2843  		// If this block is replacing a block from a previous, failed
  2844  		// Sync, we need to take that block out of the refs list, and
  2845  		// avoid unrefing it as well.
  2846  		si.removeReplacedBlock(ctx, fbo.log, oldPtr)
  2847  
  2848  		err = df.SetBlockSyncing(ctx, oldPtr)
  2849  		if err != nil {
  2850  			return nil, nil, syncState, nil, err
  2851  		}
  2852  		syncState.redirtyOnRecoverableError[newInfo.BlockPointer] = oldPtr
  2853  	}
  2854  
  2855  	err = df.SetBlockSyncing(ctx, file.TailPointer())
  2856  	if err != nil {
  2857  		return nil, nil, syncState, nil, err
  2858  	}
  2859  	syncState.oldFileBlockPtrs = append(
  2860  		syncState.oldFileBlockPtrs, file.TailPointer())
  2861  
  2862  	// Capture the current de before we release the block lock, so
  2863  	// other deferred writes don't slip in.
  2864  	dd := fbo.newDirDataLocked(lState, *file.ParentPath(), chargedTo, md)
  2865  	de, err := dd.Lookup(ctx, file.TailName())
  2866  	if err != nil {
  2867  		return nil, nil, syncState, nil, err
  2868  	}
  2869  	dirtyDe = &de
  2870  
  2871  	// Leave a copy of the syncOp in `unrefCache`, since it may be
  2872  	// modified by future local writes while the syncOp in `md` should
  2873  	// only be modified by the rest of this sync process.
  2874  	var syncOpCopy *syncOp
  2875  	err = kbfscodec.Update(fbo.config.Codec(), &syncOpCopy, si.op)
  2876  	if err != nil {
  2877  		return nil, nil, syncState, nil, err
  2878  	}
  2879  	fbo.unrefCache[fileRef].op = syncOpCopy
  2880  
  2881  	// If there are any deferred bytes, it must be because this is
   2882  	// a retried sync and some blocks snuck in between syncs. Those
  2883  	// blocks will get transferred now, but they are also on the
  2884  	// deferred list and will be retried on the next sync as well.
  2885  	df.AssimilateDeferredNewBytes()
  2886  
  2887  	// TODO: Returning si.bps in this way is racy, since si is a
  2888  	// member of unrefCache.
  2889  	return fblock, si.bps, syncState, dirtyDe, nil
  2890  }
  2891  
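// prepDirtyEntryForSync folds the cached unref'd blocks into the MD's
// block changes, and resets the dirty entry's encoded size to the
// file's old (pre-sync) encoded size.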
  2892  func prepDirtyEntryForSync(md *RootMetadata, si *syncInfo, dirtyDe *data.DirEntry) {
  2893  	// Add in the cached unref'd blocks.
  2894  	si.mergeUnrefCache(md)
  2895  	// Update the file's directory entry to the cached copy.
  2896  	if dirtyDe != nil {
  2897  		dirtyDe.EncodedSize = si.oldInfo.EncodedSize
  2898  	}
  2899  }
  2900  
   2901  // mergeDirtyEntryWithDBM sets a file's entry in its parent directory,
  2902  // storing all the affected blocks into `dbm` rather than the dirty
  2903  // block cache.  It must only be called with an entry that's already
  2904  // been written to the dirty block cache, such that no new blocks are
  2905  // dirtied.
  2906  func (fbo *folderBlockOps) mergeDirtyEntryWithDBM(
  2907  	ctx context.Context, lState *kbfssync.LockState, file data.Path, md libkey.KeyMetadata,
  2908  	dbm dirBlockMap, dirtyDe data.DirEntry) error {
  2909  	// Lock and fetch for reading only, any dirty blocks will go into
  2910  	// the dbm.
  2911  	fbo.blockLock.RLock(lState)
  2912  	defer fbo.blockLock.RUnlock(lState)
  2913  
  2914  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, md)
  2915  	if err != nil {
  2916  		return err
  2917  	}
  2918  
  2919  	dd := fbo.newDirDataWithDBMLocked(
  2920  		lState, *file.ParentPath(), chargedTo, md, dbm)
  2921  	unrefs, err := dd.SetEntry(ctx, file.TailName(), dirtyDe)
  2922  	if err != nil {
  2923  		return err
  2924  	}
  2925  	if len(unrefs) != 0 {
  2926  		return errors.Errorf(
  2927  			"Merging dirty entry produced %d new unrefs", len(unrefs))
  2928  	}
  2929  	return nil
  2930  }
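
// A hypothetical caller-side sketch (filesToSync and fi are
// assumptions, not part of this package): during a multi-file sync,
// each file's dirty entry can be merged into a shared dirBlockMap
// instead of the dirty block cache:
//
//	for _, fi := range filesToSync {
//		err := fbo.mergeDirtyEntryWithDBM(
//			ctx, lState, fi.path, md, dbm, *fi.dirtyDe)
//		if err != nil {
//			return err
//		}
//	}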
  2931  
  2932  // StartSync starts a sync for the given file. It returns the new
   2933  // FileBlock which has the readied top-level block that includes all
  2934  // writes since the last sync. Must be used with CleanupSyncState()
  2935  // and UpdatePointers/FinishSyncLocked() like so:
  2936  //
  2937  //		fblock, bps, dirtyDe, syncState, err :=
   2938  //			...fbo.StartSync(ctx, lState, md, file)
  2939  //		defer func() {
  2940  //			...fbo.CleanupSyncState(
  2941  //				ctx, lState, md, file, ..., syncState, err)
  2942  //		}()
  2943  //		if err != nil {
  2944  //			...
  2945  //		}
   2946  //		...
   2947  //
   2948  //
   2949  //		... = fbo.UpdatePointers(..., func() error {
   2950  //			...fbo.FinishSyncLocked(ctx, lState, file, ..., syncState)
   2951  //		})
  2952  func (fbo *folderBlockOps) StartSync(ctx context.Context,
  2953  	lState *kbfssync.LockState, md *RootMetadata, file data.Path) (
  2954  	fblock *data.FileBlock, bps blockPutStateCopiable, dirtyDe *data.DirEntry,
  2955  	syncState fileSyncState, err error) {
  2956  	if jManager, err := GetJournalManager(fbo.config); err == nil {
  2957  		jManager.dirtyOpStart(fbo.id())
  2958  	}
  2959  
  2960  	fblock, bps, syncState, dirtyDe, err = fbo.startSyncWrite(
  2961  		ctx, lState, md, file)
  2962  	if err != nil {
  2963  		return nil, nil, nil, syncState, err
  2964  	}
  2965  
  2966  	prepDirtyEntryForSync(md, syncState.si, dirtyDe)
  2967  	return fblock, bps, dirtyDe, syncState, err
  2968  }
  2969  
   2970  // CleanupSyncState does any clean-up for a sync of the given file,
   2971  // given an error (which may be nil) that happens during or after
   2972  // StartSync() and before FinishSync(). blocksToRemove may be nil.
  2973  func (fbo *folderBlockOps) CleanupSyncState(
  2974  	ctx context.Context, lState *kbfssync.LockState, md ReadOnlyRootMetadata,
  2975  	file data.Path, blocksToRemove []data.BlockPointer,
  2976  	result fileSyncState, err error) {
  2977  	if jManager, err := GetJournalManager(fbo.config); err == nil {
  2978  		defer jManager.dirtyOpEnd(fbo.id())
  2979  	}
  2980  
  2981  	if err == nil {
  2982  		return
  2983  	}
  2984  
  2985  	fbo.blockLock.Lock(lState)
  2986  	defer fbo.blockLock.Unlock(lState)
  2987  
  2988  	// Notify error listeners before we reset the dirty blocks and
  2989  	// permissions to be granted.
  2990  	fbo.notifyErrListenersLocked(lState, file.TailPointer(), err)
  2991  
  2992  	// If there was an error, we need to back out any changes that
  2993  	// might have been filled into the sync op, because it could
  2994  	// get reused again in a later Sync call.
  2995  	if result.si != nil {
  2996  		result.si.op.resetUpdateState()
  2997  
  2998  		// Save this MD for later, so we can clean up its
  2999  		// newly-referenced block pointers if necessary.
  3000  		bpsCopy, err := result.si.bps.deepCopy(ctx)
  3001  		if err != nil {
  3002  			return
  3003  		}
  3004  		result.si.toCleanIfUnused = append(result.si.toCleanIfUnused,
  3005  			mdToCleanIfUnused{md, bpsCopy})
  3006  	}
  3007  	if isRecoverableBlockError(err) {
  3008  		if result.si != nil {
  3009  			fbo.revertSyncInfoAfterRecoverableError(ctx, blocksToRemove, result)
  3010  		}
  3011  		if result.fblock != nil {
  3012  			result.fblock.Set(result.savedFblock)
  3013  			fbo.fixChildBlocksAfterRecoverableErrorLocked(
  3014  				ctx, lState, file, md,
  3015  				result.redirtyOnRecoverableError)
  3016  		}
  3017  	} else {
  3018  		// Since the sync has errored out unrecoverably, the deferred
  3019  		// bytes are already accounted for.
  3020  		ds := fbo.deferred[file.TailRef()]
  3021  		if df := fbo.dirtyFiles[file.TailPointer()]; df != nil {
  3022  			df.UpdateNotYetSyncingBytes(-ds.waitBytes)
  3023  
  3024  			// Some blocks that were dirty are now clean under their
  3025  			// readied block ID, and now live in the bps rather than
  3026  			// the dirty bcache, so we can delete them from the dirty
  3027  			// bcache.
  3028  			dirtyBcache := fbo.config.DirtyBlockCache()
  3029  			for _, ptr := range result.oldFileBlockPtrs {
  3030  				if df.IsBlockOrphaned(ptr) {
  3031  					fbo.vlog.CLogf(
  3032  						ctx, libkb.VLog1, "Deleting dirty orphan: %v", ptr)
  3033  					if err := dirtyBcache.Delete(fbo.id(), ptr,
  3034  						fbo.branch()); err != nil {
  3035  						fbo.vlog.CLogf(
  3036  							ctx, libkb.VLog1, "Couldn't delete %v", ptr)
  3037  					}
  3038  				}
  3039  			}
  3040  		}
  3041  
  3042  		// On an unrecoverable error, the deferred writes aren't
  3043  		// needed anymore since they're already part of the
  3044  		// (still-)dirty blocks.
  3045  		delete(fbo.deferred, file.TailRef())
  3046  	}
  3047  
   3048  	// The sync is over, due to an error, so reset the map so that we
   3049  	// don't defer any subsequent writes.  Old syncing blocks are now
   3050  	// just dirty.
  3051  	if df := fbo.dirtyFiles[file.TailPointer()]; df != nil {
  3052  		df.ResetSyncingBlocksToDirty()
  3053  	}
  3054  }
  3055  
  3056  // cleanUpUnusedBlocks cleans up the blocks from any previous failed
  3057  // sync attempts.
  3058  func (fbo *folderBlockOps) cleanUpUnusedBlocks(ctx context.Context,
  3059  	md ReadOnlyRootMetadata, syncState fileSyncState, fbm *folderBlockManager) error {
  3060  	numToClean := len(syncState.si.toCleanIfUnused)
  3061  	if numToClean == 0 {
  3062  		return nil
  3063  	}
  3064  
  3065  	// What blocks are referenced in the successful MD?
  3066  	refs := make(map[data.BlockPointer]bool)
  3067  	for _, op := range md.data.Changes.Ops {
  3068  		for _, ptr := range op.Refs() {
  3069  			if ptr == data.ZeroPtr {
  3070  				panic("Unexpected zero ref ptr in a sync MD revision")
  3071  			}
  3072  			refs[ptr] = true
  3073  		}
  3074  		for _, update := range op.allUpdates() {
  3075  			if update.Ref == data.ZeroPtr {
  3076  				panic("Unexpected zero update ref ptr in a sync MD revision")
  3077  			}
  3078  
  3079  			refs[update.Ref] = true
  3080  		}
  3081  	}
  3082  
  3083  	// For each MD to clean, clean up the old failed blocks
   3084  // immediately if the merge status matches the successful put and
  3085  	// they didn't get referenced in the successful put.  If the merge
  3086  	// status is different (e.g., we ended up on a conflict branch),
  3087  	// clean it up only if the original revision failed.  If the same
  3088  	// block appears more than once, the one with a different merged
  3089  	// status takes precedence (which will always come earlier in the
  3090  	// list of MDs).
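         	//
         	// Worked example: suppose the successful put was merged, and
         	// block B was part of both an earlier failed unmerged attempt
         	// and a later failed merged attempt.  The unmerged entry comes
         	// first, so B is handled with blockDeleteOnMDFail (deleted only
         	// if that unmerged revision truly failed on the server), and
         	// the later merged entry is skipped via blocksSeen.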
  3091  	blocksSeen := make(map[data.BlockPointer]bool)
  3092  	for _, oldMD := range syncState.si.toCleanIfUnused {
  3093  		bdType := blockDeleteAlways
  3094  		if oldMD.md.MergedStatus() != md.MergedStatus() {
  3095  			bdType = blockDeleteOnMDFail
  3096  		}
  3097  
  3098  		failedBps := newBlockPutStateMemory(oldMD.bps.numBlocks())
  3099  		for _, ptr := range oldMD.bps.Ptrs() {
  3100  			if ptr == data.ZeroPtr {
  3101  				panic("Unexpected zero block ptr in an old sync MD revision")
  3102  			}
  3103  			if blocksSeen[ptr] {
  3104  				continue
  3105  			}
  3106  			blocksSeen[ptr] = true
  3107  			if refs[ptr] && bdType == blockDeleteAlways {
  3108  				continue
  3109  			}
  3110  			failedBps.blockStates[ptr] = blockState{}
  3111  			fbo.vlog.CLogf(
  3112  				ctx, libkb.VLog1, "Cleaning up block %v from a previous "+
  3113  					"failed revision %d (oldMD is %s, bdType=%d)", ptr,
  3114  				oldMD.md.Revision(), oldMD.md.MergedStatus(), bdType)
  3115  		}
  3116  
  3117  		if len(failedBps.blockStates) > 0 {
  3118  			fbm.cleanUpBlockState(oldMD.md, failedBps, bdType)
  3119  		}
  3120  	}
  3121  	return nil
  3122  }
  3123  
  3124  func (fbo *folderBlockOps) doDeferredWritesLocked(ctx context.Context,
  3125  	lState *kbfssync.LockState, kmd KeyMetadataWithRootDirEntry,
  3126  	oldPath, newPath data.Path) (stillDirty bool, err error) {
  3127  	fbo.blockLock.AssertLocked(lState)
  3128  
  3129  	// Redo any writes or truncates that happened to our file while
  3130  	// the sync was happening.
  3131  	ds := fbo.deferred[oldPath.TailRef()]
  3132  	stillDirty = len(ds.writes) != 0
  3133  	delete(fbo.deferred, oldPath.TailRef())
  3134  
  3135  	// Clear any dirty blocks that resulted from a write/truncate
  3136  	// happening during the sync, since we're redoing them below.
  3137  	dirtyBcache := fbo.config.DirtyBlockCache()
  3138  	for _, ptr := range ds.dirtyDeletes {
  3139  		fbo.vlog.CLogf(
  3140  			ctx, libkb.VLog1, "Deleting deferred dirty ptr %v", ptr)
  3141  		if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
  3142  			return true, err
  3143  		}
  3144  	}
  3145  
  3146  	for _, f := range ds.writes {
  3147  		err = f(ctx, lState, kmd, newPath)
  3148  		if err != nil {
  3149  			// It's a little weird to return an error from a deferred
  3150  			// write here. Hopefully that will never happen.
  3151  			return true, err
  3152  		}
  3153  	}
  3154  	return stillDirty, nil
  3155  }
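
// For reference, each deferred write replayed above is a closure of
// roughly this shape -- a sketch, with dataCopy and off standing for
// state captured at write time, not code from this file:
//
//	ds.writes = append(ds.writes, func(
//		ctx context.Context, lState *kbfssync.LockState,
//		kmd KeyMetadataWithRootDirEntry, f data.Path) error {
//		// Re-apply the buffered bytes at the same offset,
//		// now against the file's new path.
//		_, _, _, err := fbo.writeDataLocked(
//			ctx, lState, kmd, f, dataCopy, off)
//		return err
//	})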
  3156  
  3157  // FinishSyncLocked finishes the sync process for a file, given the
  3158  // state from StartSync. Specifically, it re-applies any writes that
  3159  // happened since the call to StartSync.
  3160  func (fbo *folderBlockOps) FinishSyncLocked(
  3161  	ctx context.Context, lState *kbfssync.LockState,
  3162  	oldPath, newPath data.Path, md ReadOnlyRootMetadata,
  3163  	syncState fileSyncState, fbm *folderBlockManager) (
  3164  	stillDirty bool, err error) {
  3165  	fbo.blockLock.AssertLocked(lState)
  3166  
  3167  	dirtyBcache := fbo.config.DirtyBlockCache()
  3168  	for _, ptr := range syncState.oldFileBlockPtrs {
  3169  		fbo.vlog.CLogf(ctx, libkb.VLog1, "Deleting dirty ptr %v", ptr)
  3170  		if err := dirtyBcache.Delete(fbo.id(), ptr, fbo.branch()); err != nil {
  3171  			return true, err
  3172  		}
  3173  	}
  3174  
  3175  	bcache := fbo.config.BlockCache()
  3176  	for _, ptr := range syncState.newIndirectFileBlockPtrs {
  3177  		err := bcache.DeletePermanent(ptr.ID)
  3178  		if err != nil {
  3179  			fbo.log.CWarningf(ctx, "Error when deleting %v from cache: %v",
  3180  				ptr.ID, err)
  3181  		}
  3182  	}
  3183  
  3184  	stillDirty, err = fbo.doDeferredWritesLocked(
  3185  		ctx, lState, md, oldPath, newPath)
  3186  	if err != nil {
  3187  		return true, err
  3188  	}
  3189  
  3190  	// Clear cached info for the old path.  We are guaranteed that any
  3191  	// concurrent write to this file was deferred, even if it was to a
  3192  	// block that wasn't currently being sync'd, since the top-most
  3193  	// block is always in dirtyFiles and is always dirtied during a
  3194  	// write/truncate.
  3195  	//
  3196  	// Also, we can get rid of all the sync state that might have
  3197  	// happened during the sync, since we will replay the writes
  3198  	// below anyway.
  3199  	if err := fbo.clearCacheInfoLocked(lState, oldPath); err != nil {
  3200  		return true, err
  3201  	}
  3202  
  3203  	if err := fbo.cleanUpUnusedBlocks(ctx, md, syncState, fbm); err != nil {
  3204  		return true, err
  3205  	}
  3206  
  3207  	return stillDirty, nil
  3208  }
  3209  
   3210  // notifyErrListenersLocked notifies any write operations blocked on
   3211  // a file so that they can learn about unrecoverable sync errors.
  3212  func (fbo *folderBlockOps) notifyErrListenersLocked(
  3213  	lState *kbfssync.LockState, ptr data.BlockPointer, err error) {
  3214  	fbo.blockLock.AssertLocked(lState)
  3215  	if isRecoverableBlockError(err) {
  3216  		// Don't bother any listeners with this error, since the sync
  3217  		// will be retried.  Unless the sync has reached its retry
  3218  		// limit, but in that case the listeners will just proceed as
  3219  		// normal once the dirty block cache bytes are freed, and
  3220  		// that's ok since this error isn't fatal.
  3221  		return
  3222  	}
  3223  	df := fbo.dirtyFiles[ptr]
  3224  	if df != nil {
  3225  		df.NotifyErrListeners(err)
  3226  	}
  3227  }
  3228  
  3229  type searchWithOutOfDateCacheError struct {
  3230  }
  3231  
  3232  func (e searchWithOutOfDateCacheError) Error() string {
   3233  	return "Search is using an out-of-date node cache; " +
   3234  		"try again with a clean cache."
  3235  }
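
// Callers detect this sentinel with a type assertion and retry with a
// clean cache, as searchForNodesLocked does below:
//
//	if _, ok := err.(searchWithOutOfDateCacheError); ok {
//		// retry with a throwaway node cache
//	}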
  3236  
  3237  // searchForNodesInDirLocked recursively tries to find a path, and
   3238  // ultimately a node, for each sought-after pointer, given the set
   3239  // of pointers that were updated in a particular operation.  The keys
   3240  // in nodeMap make up the set of BlockPointers being searched for,
   3241  // and nodeMap is updated in place to include the discovered nodes.
  3242  //
  3243  // Returns the number of nodes found by this invocation.  If the error
   3244  // it returns is searchWithOutOfDateCacheError, the search should be
  3245  // retried by the caller with a clean cache.
  3246  func (fbo *folderBlockOps) searchForNodesInDirLocked(ctx context.Context,
  3247  	lState *kbfssync.LockState, cache NodeCache, newPtrs map[data.BlockPointer]bool,
  3248  	kmd libkey.KeyMetadata, rootNode Node, currDir data.Path, nodeMap map[data.BlockPointer]Node,
  3249  	numNodesFoundSoFar int) (int, error) {
  3250  	fbo.blockLock.AssertAnyLocked(lState)
  3251  
  3252  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
  3253  	if err != nil {
  3254  		return 0, err
  3255  	}
  3256  	dd := fbo.newDirDataLocked(lState, currDir, chargedTo, kmd)
  3257  	entries, err := dd.GetEntries(ctx)
  3258  	if err != nil {
  3259  		return 0, err
  3260  	}
  3261  
  3262  	// getDirLocked may have unlocked blockLock, which means the cache
  3263  	// could have changed out from under us.  Verify that didn't
  3264  	// happen, so we can avoid messing it up with nodes from an old MD
  3265  	// version.  If it did happen, return a special error that lets
  3266  	// the caller know they should retry with a fresh cache.
  3267  	if currDir.Path[0].BlockPointer !=
  3268  		cache.PathFromNode(rootNode).TailPointer() {
  3269  		return 0, searchWithOutOfDateCacheError{}
  3270  	}
  3271  
  3272  	if numNodesFoundSoFar >= len(nodeMap) {
  3273  		return 0, nil
  3274  	}
  3275  
  3276  	numNodesFound := 0
  3277  	for name, de := range entries {
  3278  		childPath := currDir.ChildPath(name, de.BlockPointer, nil)
  3279  		if _, ok := nodeMap[de.BlockPointer]; ok {
  3280  			// make a node for every pathnode
  3281  			n := rootNode
  3282  			for i, pn := range childPath.Path[1:] {
  3283  				if !pn.BlockPointer.IsValid() {
  3284  					// Temporary debugging output for KBFS-1764 -- the
  3285  					// GetOrCreate call below will panic.
  3286  					fbo.log.CDebugf(ctx, "Invalid block pointer, path=%s, "+
  3287  						"path.path=%v (index %d), name=%s, de=%#v, "+
  3288  						"nodeMap=%v, newPtrs=%v, kmd=%#v",
  3289  						childPath, childPath.Path, i, name, de, nodeMap,
  3290  						newPtrs, kmd)
  3291  				}
  3292  				et := data.Dir
  3293  				if i == len(childPath.Path)-2 {
  3294  					et = de.Type
  3295  				}
  3296  				n, err = cache.GetOrCreate(pn.BlockPointer, pn.Name, n, et)
  3297  				if err != nil {
  3298  					return 0, err
  3299  				}
  3300  			}
  3301  			childPath.ChildObfuscator = n.Obfuscator()
  3302  			nodeMap[de.BlockPointer] = n
  3303  			numNodesFound++
  3304  			if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
  3305  				return numNodesFound, nil
  3306  			}
  3307  		}
  3308  
  3309  		// otherwise, recurse if this represents an updated block
  3310  		if _, ok := newPtrs[de.BlockPointer]; de.Type == data.Dir && ok {
  3311  			if childPath.Obfuscator() == nil {
  3312  				childPath.ChildObfuscator = fbo.nodeCache.ObfuscatorMaker()()
  3313  			}
  3314  			n, err := fbo.searchForNodesInDirLocked(ctx, lState, cache,
  3315  				newPtrs, kmd, rootNode, childPath, nodeMap,
  3316  				numNodesFoundSoFar+numNodesFound)
  3317  			if err != nil {
  3318  				return 0, err
  3319  			}
  3320  			numNodesFound += n
  3321  			if numNodesFoundSoFar+numNodesFound >= len(nodeMap) {
  3322  				return numNodesFound, nil
  3323  			}
  3324  		}
  3325  	}
  3326  
  3327  	return numNodesFound, nil
  3328  }
  3329  
  3330  func (fbo *folderBlockOps) trySearchWithCacheLocked(ctx context.Context,
  3331  	lState *kbfssync.LockState, cache NodeCache, ptrs []data.BlockPointer,
  3332  	newPtrs map[data.BlockPointer]bool, kmd libkey.KeyMetadata, rootPtr data.BlockPointer) (
  3333  	map[data.BlockPointer]Node, error) {
  3334  	fbo.blockLock.AssertAnyLocked(lState)
  3335  
  3336  	nodeMap := make(map[data.BlockPointer]Node)
  3337  	for _, ptr := range ptrs {
  3338  		nodeMap[ptr] = nil
  3339  	}
  3340  
  3341  	if len(ptrs) == 0 {
  3342  		return nodeMap, nil
  3343  	}
  3344  
  3345  	var node Node
  3346  	// The node cache used by the main part of KBFS is
  3347  	// fbo.nodeCache. This basically maps from BlockPointers to
  3348  	// Nodes. Nodes are used by the callers of the library, but
  3349  	// internally we need to know the series of BlockPointers and
  3350  	// file/dir names that make up the path of the corresponding
  3351  	// file/dir. fbo.nodeCache is long-lived and never invalidated.
  3352  	//
  3353  	// As folderBranchOps gets informed of new local or remote MD
  3354  	// updates, which change the BlockPointers of some subset of the
  3355  	// nodes in this TLF, it calls nodeCache.UpdatePointer for each
  3356  	// change. Then, when a caller passes some old Node they have
  3357  	// lying around into an FBO call, we can translate it to its
  3358  	// current path using fbo.nodeCache. Note that on every TLF
  3359  	// modification, we are guaranteed that the BlockPointer of the
  3360  	// root directory will change (because of the merkle-ish tree of
  3361  	// content hashes we use to assign BlockPointers).
  3362  	//
  3363  	// fbo.nodeCache needs to maintain the absolute latest mappings
  3364  	// for the TLF, or else FBO calls won't see up-to-date data. The
  3365  	// tension in search comes from the fact that we are trying to
  3366  	// discover the BlockPointers of certain files at a specific point
  3367  	// in the MD history, which is not necessarily the same as the
  3368  	// most-recently-seen MD update. Specifically, some callers
  3369  	// process a specific range of MDs, but folderBranchOps may have
  3370  	// heard about a newer one before, or during, when the caller
  3371  	// started processing. That means fbo.nodeCache may have been
  3372  	// updated to reflect the newest BlockPointers, and is no longer
  3373  	// correct as a cache for our search for the data at the old point
  3374  	// in time.
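         	//
         	// Example: a caller replaying the ops in MDs 10..12 may race
         	// with folderBranchOps applying MD 13.  fbo.nodeCache would
         	// then map the root to MD 13's pointer, so MD 12's rootPtr
         	// won't be found below, and we return
         	// searchWithOutOfDateCacheError to force a retry with a
         	// throwaway cache.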
  3375  	if cache == fbo.nodeCache {
  3376  		// Root node should already exist if we have an up-to-date md.
  3377  		node = cache.Get(rootPtr.Ref())
  3378  		if node == nil {
  3379  			return nil, searchWithOutOfDateCacheError{}
  3380  		}
  3381  	} else {
  3382  		// Root node may or may not exist.
  3383  		var err error
  3384  		node, err = cache.GetOrCreate(rootPtr,
  3385  			data.NewPathPartString(
  3386  				string(kmd.GetTlfHandle().GetCanonicalName()), nil),
  3387  			nil, data.Dir)
  3388  		if err != nil {
  3389  			return nil, err
  3390  		}
  3391  	}
  3392  	if node == nil {
  3393  		return nil, fmt.Errorf("Cannot find root node corresponding to %v",
  3394  			rootPtr)
  3395  	}
  3396  
  3397  	// are they looking for the root directory?
  3398  	numNodesFound := 0
  3399  	if _, ok := nodeMap[rootPtr]; ok {
  3400  		nodeMap[rootPtr] = node
  3401  		numNodesFound++
  3402  		if numNodesFound >= len(nodeMap) {
  3403  			return nodeMap, nil
  3404  		}
  3405  	}
  3406  
  3407  	rootPath := cache.PathFromNode(node)
  3408  	if len(rootPath.Path) != 1 {
  3409  		return nil, fmt.Errorf("Invalid root path for %v: %s",
  3410  			rootPtr, rootPath)
  3411  	}
  3412  
  3413  	_, err := fbo.searchForNodesInDirLocked(ctx, lState, cache, newPtrs,
  3414  		kmd, node, rootPath, nodeMap, numNodesFound)
  3415  	if err != nil {
  3416  		return nil, err
  3417  	}
  3418  
  3419  	if rootPtr != cache.PathFromNode(node).TailPointer() {
  3420  		return nil, searchWithOutOfDateCacheError{}
  3421  	}
  3422  
  3423  	return nodeMap, nil
  3424  }
  3425  
  3426  func (fbo *folderBlockOps) searchForNodesLocked(ctx context.Context,
  3427  	lState *kbfssync.LockState, cache NodeCache, ptrs []data.BlockPointer,
  3428  	newPtrs map[data.BlockPointer]bool, kmd libkey.KeyMetadata,
  3429  	rootPtr data.BlockPointer) (map[data.BlockPointer]Node, NodeCache, error) {
  3430  	fbo.blockLock.AssertAnyLocked(lState)
  3431  
  3432  	// First try the passed-in cache.  If it doesn't work because the
  3433  	// cache is out of date, try again with a clean cache.
  3434  	nodeMap, err := fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs,
  3435  		newPtrs, kmd, rootPtr)
  3436  	if _, ok := err.(searchWithOutOfDateCacheError); ok {
  3437  		// The md is out-of-date, so use a throwaway cache so we
  3438  		// don't pollute the real node cache with stale nodes.
  3439  		fbo.vlog.CLogf(
  3440  			ctx, libkb.VLog1, "Root node %v doesn't exist in the node "+
  3441  				"cache; using a throwaway node cache instead",
  3442  			rootPtr)
  3443  		cache = newNodeCacheStandard(fbo.folderBranch)
  3444  		cache.SetObfuscatorMaker(fbo.nodeCache.ObfuscatorMaker())
  3445  		nodeMap, err = fbo.trySearchWithCacheLocked(ctx, lState, cache, ptrs,
  3446  			newPtrs, kmd, rootPtr)
  3447  	}
  3448  
  3449  	if err != nil {
  3450  		return nil, nil, err
  3451  	}
  3452  
  3453  	// Return the whole map even if some nodes weren't found.
  3454  	return nodeMap, cache, nil
  3455  }
  3456  
  3457  // SearchForNodes tries to resolve all the given pointers to a Node
  3458  // object, using only the updated pointers specified in newPtrs.
   3459  // It does not return an error if some of the pointer paths do not
   3460  // exist; it is the caller's responsibility to decide whether to
   3461  // error on particular unresolved nodes.  It also returns the cache
   3462  // that ultimately contains the nodes -- this might differ from the
   3463  // passed-in cache if another goroutine updated that cache and it no
   3464  // longer contains the root pointer specified in md.
  3465  func (fbo *folderBlockOps) SearchForNodes(ctx context.Context,
  3466  	cache NodeCache, ptrs []data.BlockPointer, newPtrs map[data.BlockPointer]bool,
  3467  	kmd libkey.KeyMetadata, rootPtr data.BlockPointer) (
  3468  	map[data.BlockPointer]Node, NodeCache, error) {
  3469  	lState := makeFBOLockState()
  3470  	fbo.blockLock.RLock(lState)
  3471  	defer fbo.blockLock.RUnlock(lState)
  3472  	return fbo.searchForNodesLocked(
  3473  		ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
  3474  }
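
// A hypothetical caller-side sketch (not code from this package),
// resolving a batch of pointers and tolerating unresolved ones:
//
//	nodeMap, cacheUsed, err := fbo.SearchForNodes(
//		ctx, fbo.nodeCache, ptrs, newPtrs, kmd, rootPtr)
//	if err != nil {
//		return err
//	}
//	for ptr, node := range nodeMap {
//		if node == nil {
//			continue // unresolved; caller decides if that's fatal
//		}
//		fmt.Println(ptr, cacheUsed.PathFromNode(node))
//	}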
  3475  
  3476  // SearchForPaths is like SearchForNodes, except it returns a
  3477  // consistent view of all the paths of the searched-for pointers.
  3478  func (fbo *folderBlockOps) SearchForPaths(ctx context.Context,
  3479  	cache NodeCache, ptrs []data.BlockPointer, newPtrs map[data.BlockPointer]bool,
  3480  	kmd libkey.KeyMetadata, rootPtr data.BlockPointer) (map[data.BlockPointer]data.Path, error) {
  3481  	lState := makeFBOLockState()
  3482  	// Hold the lock while processing the paths so they can't be changed.
  3483  	fbo.blockLock.RLock(lState)
  3484  	defer fbo.blockLock.RUnlock(lState)
  3485  	nodeMap, cache, err :=
  3486  		fbo.searchForNodesLocked(
  3487  			ctx, lState, cache, ptrs, newPtrs, kmd, rootPtr)
  3488  	if err != nil {
  3489  		return nil, err
  3490  	}
  3491  
  3492  	paths := make(map[data.BlockPointer]data.Path)
  3493  	for ptr, n := range nodeMap {
  3494  		if n == nil {
  3495  			paths[ptr] = data.Path{}
  3496  			continue
  3497  		}
  3498  
  3499  		p := cache.PathFromNode(n)
  3500  		if p.TailPointer() != ptr {
  3501  			return nil, NodeNotFoundError{ptr}
  3502  		}
  3503  		paths[ptr] = p
  3504  	}
  3505  
  3506  	return paths, nil
  3507  }
  3508  
  3509  // UpdateCachedEntryAttributesOnRemovedFile updates any cached entry
  3510  // for the given path of an unlinked file, according to the given op,
  3511  // and it makes a new dirty cache entry if one doesn't exist yet.  We
  3512  // assume Sync will be called eventually on the corresponding open
  3513  // file handle, which will clear out the entry.
  3514  func (fbo *folderBlockOps) UpdateCachedEntryAttributesOnRemovedFile(
  3515  	ctx context.Context, lState *kbfssync.LockState,
  3516  	kmd KeyMetadataWithRootDirEntry, op *setAttrOp, p data.Path, de data.DirEntry) error {
  3517  	fbo.blockLock.Lock(lState)
  3518  	defer fbo.blockLock.Unlock(lState)
  3519  	_, err := fbo.setCachedAttrLocked(
  3520  		ctx, lState, kmd, *p.ParentPath(), p.TailName(), op.Attr, de)
  3521  	return err
  3522  }
  3523  
  3524  func (fbo *folderBlockOps) getDeferredWriteCountForTest(
  3525  	lState *kbfssync.LockState) int {
  3526  	fbo.blockLock.RLock(lState)
  3527  	defer fbo.blockLock.RUnlock(lState)
  3528  	writes := 0
  3529  	for _, ds := range fbo.deferred {
  3530  		writes += len(ds.writes)
  3531  	}
  3532  	return writes
  3533  }
  3534  
   3535  func (fbo *folderBlockOps) updatePointer(
         	kmd libkey.KeyMetadata, oldPtr, newPtr data.BlockPointer,
         	shouldPrefetch bool) NodeID {
  3536  	updatedNode := fbo.nodeCache.UpdatePointer(oldPtr.Ref(), newPtr)
  3537  	if updatedNode == nil || oldPtr.ID == newPtr.ID {
  3538  		return nil
  3539  	}
  3540  
  3541  	// Only prefetch if the updated pointer is a new block ID.
   3542  	// TODO: Remove this log when we're done debugging because it'll be everywhere.
  3543  	ctx := context.TODO()
  3544  	fbo.vlog.CLogf(
  3545  		ctx, libkb.VLog1, "Updated reference for pointer %s to %s.",
  3546  		oldPtr.ID, newPtr.ID)
  3547  	if shouldPrefetch {
  3548  		// Prefetch the new ref, but only if the old ref already exists in
  3549  		// the block cache. Ideally we'd always prefetch it, but we need
  3550  		// the type of the block so that we can call `NewEmpty`.
  3551  		block, lifetime, err := fbo.config.BlockCache().GetWithLifetime(oldPtr)
  3552  		if err != nil {
  3553  			return updatedNode
  3554  		}
  3555  
  3556  		// No need to cache because it's already cached.
  3557  		action := fbo.config.Mode().DefaultBlockRequestAction()
  3558  		if fbo.branch() != data.MasterBranch {
  3559  			action = action.AddNonMasterBranch()
  3560  		}
  3561  		_ = fbo.config.BlockOps().BlockRetriever().Request(
  3562  			ctx, updatePointerPrefetchPriority, kmd, newPtr, block.NewEmpty(),
  3563  			lifetime, action)
  3564  	}
  3565  	// Cancel any prefetches for the old pointer from the prefetcher.
  3566  	fbo.config.BlockOps().Prefetcher().CancelPrefetch(oldPtr)
  3567  	return updatedNode
  3568  }
  3569  
  3570  // UpdatePointers updates all the pointers in the node cache
  3571  // atomically.  If `afterUpdateFn` is non-nil, it's called under the
  3572  // same block lock under which the pointers were updated.
  3573  func (fbo *folderBlockOps) UpdatePointers(
  3574  	kmd libkey.KeyMetadata, lState *kbfssync.LockState, op op, shouldPrefetch bool,
  3575  	afterUpdateFn func() error) (affectedNodeIDs []NodeID, err error) {
  3576  	fbo.blockLock.Lock(lState)
  3577  	defer fbo.blockLock.Unlock(lState)
  3578  	for _, update := range op.allUpdates() {
  3579  		updatedNode := fbo.updatePointer(
  3580  			kmd, update.Unref, update.Ref, shouldPrefetch)
  3581  		if updatedNode != nil {
  3582  			affectedNodeIDs = append(affectedNodeIDs, updatedNode)
  3583  		}
  3584  	}
  3585  
  3586  	// Cancel any prefetches for all unreferenced block pointers.
  3587  	for _, unref := range op.Unrefs() {
  3588  		fbo.config.BlockOps().Prefetcher().CancelPrefetch(unref)
  3589  	}
  3590  
  3591  	if afterUpdateFn == nil {
  3592  		return affectedNodeIDs, nil
  3593  	}
  3594  
  3595  	return affectedNodeIDs, afterUpdateFn()
  3596  }
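
// A hypothetical usage sketch, pairing pointer updates with the end of
// a sync as described in StartSync's doc comment (variable names are
// assumptions):
//
//	affectedNodeIDs, err := fbo.UpdatePointers(
//		kmd, lState, op, true /* shouldPrefetch */, func() error {
//			var ferr error
//			stillDirty, ferr = fbo.FinishSyncLocked(
//				ctx, lState, oldPath, newPath, md, syncState, fbm)
//			return ferr
//		})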
  3597  
  3598  func (fbo *folderBlockOps) unlinkDuringFastForwardLocked(ctx context.Context,
  3599  	lState *kbfssync.LockState, kmd KeyMetadataWithRootDirEntry, ref data.BlockRef) (undoFn func()) {
  3600  	fbo.blockLock.AssertLocked(lState)
  3601  	oldNode := fbo.nodeCache.Get(ref)
  3602  	if oldNode == nil {
  3603  		return nil
  3604  	}
  3605  	oldPath := fbo.nodeCache.PathFromNode(oldNode)
  3606  	fbo.vlog.CLogf(
  3607  		ctx, libkb.VLog1, "Unlinking missing node %s/%v during "+
  3608  			"fast-forward", oldPath, ref)
  3609  	de, err := fbo.getEntryLocked(ctx, lState, kmd, oldPath, true)
  3610  	if err != nil {
  3611  		fbo.log.CDebugf(ctx, "Couldn't find old dir entry for %s/%v: %+v",
  3612  			oldPath, ref, err)
  3613  	}
  3614  	return fbo.nodeCache.Unlink(ref, oldPath, de)
  3615  }
  3616  
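// nodeChildrenMap maps a directory's path string to the set of its
// known child path nodes.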
  3617  type nodeChildrenMap map[string]map[data.PathNode]bool
  3618  
  3619  func (ncm nodeChildrenMap) addDirChange(
  3620  	node Node, p data.Path, changes []NodeChange, affectedNodeIDs []NodeID) (
  3621  	[]NodeChange, []NodeID) {
  3622  	change := NodeChange{Node: node}
  3623  	for subchild := range ncm[p.String()] {
  3624  		change.DirUpdated = append(change.DirUpdated, subchild.Name)
  3625  	}
  3626  	changes = append(changes, change)
  3627  	affectedNodeIDs = append(affectedNodeIDs, node.GetID())
  3628  	return changes, affectedNodeIDs
  3629  }
  3630  
  3631  func (nodeChildrenMap) addFileChange(
  3632  	node Node, changes []NodeChange, affectedNodeIDs []NodeID) (
  3633  	[]NodeChange, []NodeID) {
  3634  	// Invalidate the entire file contents.
  3635  	changes = append(changes, NodeChange{
  3636  		Node:        node,
  3637  		FileUpdated: []WriteRange{{Len: 0, Off: 0}},
  3638  	})
  3639  	affectedNodeIDs = append(affectedNodeIDs, node.GetID())
  3640  	return changes, affectedNodeIDs
  3641  }
  3642  
  3643  func (fbo *folderBlockOps) fastForwardDirAndChildrenLocked(ctx context.Context,
  3644  	lState *kbfssync.LockState, currDir data.Path, children nodeChildrenMap,
  3645  	kmd KeyMetadataWithRootDirEntry,
  3646  	updates map[data.BlockPointer]data.BlockPointer) (
  3647  	changes []NodeChange, affectedNodeIDs []NodeID, undoFns []func(),
  3648  	err error) {
  3649  	fbo.blockLock.AssertLocked(lState)
  3650  
  3651  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
  3652  	if err != nil {
  3653  		return nil, nil, undoFns, err
  3654  	}
  3655  	dd := fbo.newDirDataLocked(lState, currDir, chargedTo, kmd)
  3656  	entries, err := dd.GetEntries(ctx)
  3657  	if err != nil {
  3658  		return nil, nil, undoFns, err
  3659  	}
  3660  
  3661  	prefix := currDir.String()
  3662  
  3663  	// TODO: parallelize me?
  3664  	for child := range children[prefix] {
  3665  		entry, ok := entries[child.Name]
  3666  		if !ok {
  3667  			undoFn := fbo.unlinkDuringFastForwardLocked(
  3668  				ctx, lState, kmd, child.BlockPointer.Ref())
  3669  			if undoFn != nil {
  3670  				undoFns = append(undoFns, undoFn)
  3671  			}
  3672  			continue
  3673  		}
  3674  
  3675  		fbo.vlog.CLogf(
  3676  			ctx, libkb.VLog1, "Fast-forwarding %v -> %v",
  3677  			child.BlockPointer, entry.BlockPointer)
  3678  		fbo.updatePointer(kmd, child.BlockPointer,
  3679  			entry.BlockPointer, true)
  3680  		updates[child.BlockPointer] = entry.BlockPointer
  3681  		node := fbo.nodeCache.Get(entry.BlockPointer.Ref())
  3682  		if node == nil {
  3683  			fbo.vlog.CLogf(
  3684  				ctx, libkb.VLog1, "Skipping missing node for %s",
  3685  				entry.BlockPointer)
  3686  			continue
  3687  		}
  3688  		if entry.Type == data.Dir {
  3689  			newPath := fbo.nodeCache.PathFromNode(node)
  3690  			changes, affectedNodeIDs = children.addDirChange(
  3691  				node, newPath, changes, affectedNodeIDs)
  3692  
  3693  			childChanges, childAffectedNodeIDs, childUndoFns, err :=
  3694  				fbo.fastForwardDirAndChildrenLocked(
  3695  					ctx, lState, newPath, children, kmd, updates)
  3696  			undoFns = append(undoFns, childUndoFns...)
  3697  			if err != nil {
  3698  				return nil, nil, undoFns, err
  3699  			}
  3700  			changes = append(changes, childChanges...)
  3701  			affectedNodeIDs = append(affectedNodeIDs, childAffectedNodeIDs...)
  3702  		} else {
  3703  			// File -- invalidate the entire file contents.
  3704  			changes, affectedNodeIDs = children.addFileChange(
  3705  				node, changes, affectedNodeIDs)
  3706  		}
  3707  	}
  3708  	delete(children, prefix)
  3709  	return changes, affectedNodeIDs, undoFns, nil
  3710  }
  3711  
  3712  func (fbo *folderBlockOps) makeChildrenTreeFromNodesLocked(
  3713  	lState *kbfssync.LockState, nodes []Node) (
  3714  	rootPath data.Path, children nodeChildrenMap) {
  3715  	fbo.blockLock.AssertLocked(lState)
  3716  
  3717  	// Build a "tree" representation for each interesting path prefix.
  3718  	children = make(nodeChildrenMap)
  3719  	for _, n := range nodes {
  3720  		p := fbo.nodeCache.PathFromNode(n)
  3721  		if len(p.Path) == 1 {
  3722  			rootPath = p
  3723  		}
  3724  		prevPath := ""
  3725  		for _, pn := range p.Path {
  3726  			if prevPath != "" {
  3727  				childPNs := children[prevPath]
  3728  				if childPNs == nil {
  3729  					childPNs = make(map[data.PathNode]bool)
  3730  					children[prevPath] = childPNs
  3731  				}
  3732  				childPNs[pn] = true
  3733  			}
  3734  			prevPath = pathlib.Join(prevPath, pn.Name.Plaintext())
  3735  		}
  3736  	}
  3737  	return rootPath, children
  3738  }
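
// For example, cached nodes with paths tlf, tlf/a, and tlf/a/b
// produce rootPath = tlf and a children map of (roughly):
//
//	children["tlf"]   = {pathNode(a)}
//	children["tlf/a"] = {pathNode(b)}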
  3739  
  3740  // FastForwardAllNodes attempts to update the block pointers
  3741  // associated with nodes in the cache by searching for their paths in
  3742  // the current version of the TLF.  If it can't find a corresponding
  3743  // node, it assumes it's been deleted and unlinks it.  Returns the set
  3744  // of node changes that resulted.  If there are no nodes, it returns a
  3745  // nil error because there's nothing to be done.
  3746  func (fbo *folderBlockOps) FastForwardAllNodes(ctx context.Context,
  3747  	lState *kbfssync.LockState, md ReadOnlyRootMetadata) (
  3748  	changes []NodeChange, affectedNodeIDs []NodeID, err error) {
  3749  	if fbo.nodeCache == nil {
  3750  		// Nothing needs to be done!
  3751  		return nil, nil, nil
  3752  	}
  3753  
  3754  	// Take a hard lock through this whole process.  TODO: is there
  3755  	// any way to relax this?  It could lead to file system operation
  3756  	// timeouts, even on reads, if we hold it too long.
  3757  	fbo.blockLock.Lock(lState)
  3758  	defer fbo.blockLock.Unlock(lState)
  3759  
  3760  	nodes := fbo.nodeCache.AllNodes()
  3761  	if len(nodes) == 0 {
  3762  		// Nothing needs to be done!
  3763  		return nil, nil, nil
  3764  	}
  3765  	fbo.vlog.CLogf(ctx, libkb.VLog1, "Fast-forwarding %d nodes", len(nodes))
  3766  	defer func() {
  3767  		fbo.vlog.CLogf(ctx, libkb.VLog1, "Fast-forward complete: %v", err)
  3768  	}()
  3769  
  3770  	rootPath, children := fbo.makeChildrenTreeFromNodesLocked(lState, nodes)
  3771  	if !rootPath.IsValid() {
  3772  		return nil, nil, errors.New("Couldn't find the root path")
  3773  	}
  3774  
  3775  	fbo.vlog.CLogf(
  3776  		ctx, libkb.VLog1, "Fast-forwarding root %v -> %v",
  3777  		rootPath.Path[0].BlockPointer, md.data.Dir.BlockPointer)
  3778  	fbo.updatePointer(md, rootPath.Path[0].BlockPointer,
  3779  		md.data.Dir.BlockPointer, false)
  3780  
  3781  	// Keep track of all the pointer updates done, and unwind them if
  3782  	// there's any error.
  3783  	updates := make(map[data.BlockPointer]data.BlockPointer)
  3784  	updates[rootPath.Path[0].BlockPointer] = md.data.Dir.BlockPointer
  3785  	var undoFns []func()
  3786  	defer func() {
  3787  		if err == nil {
  3788  			return
  3789  		}
  3790  		for oldID, newID := range updates {
  3791  			fbo.updatePointer(md, newID, oldID, false)
  3792  		}
  3793  		for _, f := range undoFns {
  3794  			f()
  3795  		}
  3796  	}()
  3797  
  3798  	rootPath.Path[0].BlockPointer = md.data.Dir.BlockPointer
  3799  	rootNode := fbo.nodeCache.Get(md.data.Dir.BlockPointer.Ref())
  3800  	if rootNode != nil {
  3801  		change := NodeChange{Node: rootNode}
  3802  		for child := range children[rootPath.String()] {
  3803  			change.DirUpdated = append(change.DirUpdated, child.Name)
  3804  		}
  3805  		changes = append(changes, change)
  3806  		affectedNodeIDs = append(affectedNodeIDs, rootNode.GetID())
  3807  	}
  3808  
  3809  	childChanges, childAffectedNodeIDs, undoFns, err :=
  3810  		fbo.fastForwardDirAndChildrenLocked(
  3811  			ctx, lState, rootPath, children, md, updates)
  3812  	if err != nil {
  3813  		return nil, nil, err
  3814  	}
  3815  	changes = append(changes, childChanges...)
  3816  	affectedNodeIDs = append(affectedNodeIDs, childAffectedNodeIDs...)
  3817  
  3818  	// Unlink any children that remain.
  3819  	for _, childPNs := range children {
  3820  		for child := range childPNs {
  3821  			fbo.unlinkDuringFastForwardLocked(
  3822  				ctx, lState, md, child.BlockPointer.Ref())
  3823  		}
  3824  	}
  3825  	return changes, affectedNodeIDs, nil
  3826  }
  3827  
  3828  func (fbo *folderBlockOps) getInvalidationChangesForNodes(
  3829  	ctx context.Context, lState *kbfssync.LockState, nodes []Node) (
  3830  	changes []NodeChange, affectedNodeIDs []NodeID, err error) {
  3831  	fbo.blockLock.AssertLocked(lState)
  3832  	if len(nodes) == 0 {
  3833  		// Nothing needs to be done!
  3834  		return nil, nil, nil
  3835  	}
  3836  
  3837  	_, children := fbo.makeChildrenTreeFromNodesLocked(lState, nodes)
  3838  	for _, node := range nodes {
  3839  		p := fbo.nodeCache.PathFromNode(node)
  3840  		prefix := p.String()
  3841  		childNodes := children[prefix]
  3842  		if len(childNodes) > 0 {
  3843  			// This must be a directory.  Invalidate all children.
  3844  			changes, affectedNodeIDs = children.addDirChange(
  3845  				node, p, changes, affectedNodeIDs)
  3846  			fbo.vlog.CLogf(
  3847  				ctx, libkb.VLog1, "Invalidating dir node %p/%s", node, prefix)
  3848  		} else {
  3849  			// This might be a file.  In any case, it doesn't have any
  3850  			// children that need invalidation, so just send the file
  3851  			// change.
  3852  			changes, affectedNodeIDs = children.addFileChange(
  3853  				node, changes, affectedNodeIDs)
  3854  			fbo.vlog.CLogf(
  3855  				ctx, libkb.VLog1, "Invalidating possible file node %p/%s",
  3856  				node, prefix)
  3857  		}
  3858  	}
  3859  	return changes, affectedNodeIDs, nil
  3860  }
  3861  
  3862  // GetInvalidationChangesForNode returns the list of invalidation
  3863  // notifications for all the nodes rooted at the given node.
  3864  func (fbo *folderBlockOps) GetInvalidationChangesForNode(
  3865  	ctx context.Context, lState *kbfssync.LockState, node Node) (
  3866  	changes []NodeChange, affectedNodeIDs []NodeID, err error) {
  3867  	if fbo.nodeCache == nil {
  3868  		// Nothing needs to be done!
  3869  		return nil, nil, nil
  3870  	}
  3871  
  3872  	fbo.blockLock.Lock(lState)
  3873  	defer fbo.blockLock.Unlock(lState)
  3874  	fbo.vlog.CLogf(
  3875  		ctx, libkb.VLog1, "About to get all children for node %p", node)
  3876  	childNodes := fbo.nodeCache.AllNodeChildren(node)
  3877  	fbo.vlog.CLogf(
  3878  		ctx, libkb.VLog1, "Found %d children for node %p", len(childNodes),
  3879  		node)
  3880  	return fbo.getInvalidationChangesForNodes(
  3881  		ctx, lState, append(childNodes, node))
  3882  }
  3883  
  3884  // GetInvalidationChangesForAll returns the list of invalidation
  3885  // notifications for the entire TLF.
  3886  func (fbo *folderBlockOps) GetInvalidationChangesForAll(
  3887  	ctx context.Context, lState *kbfssync.LockState) (
  3888  	changes []NodeChange, affectedNodeIDs []NodeID, err error) {
  3889  	if fbo.nodeCache == nil {
  3890  		// Nothing needs to be done!
  3891  		return nil, nil, nil
  3892  	}
  3893  
  3894  	fbo.blockLock.Lock(lState)
  3895  	defer fbo.blockLock.Unlock(lState)
  3896  	childNodes := fbo.nodeCache.AllNodes()
  3897  	fbo.vlog.CLogf(ctx, libkb.VLog1, "Found %d nodes", len(childNodes))
  3898  	return fbo.getInvalidationChangesForNodes(ctx, lState, childNodes)
  3899  }
  3900  
  3901  // MarkNode marks all the blocks in the node's block tree with the
  3902  // given tag.
  3903  func (fbo *folderBlockOps) MarkNode(
  3904  	ctx context.Context, lState *kbfssync.LockState, node Node, kmd libkey.KeyMetadata,
  3905  	tag string, cacheType DiskBlockCacheType) error {
  3906  	dbc := fbo.config.DiskBlockCache()
  3907  	if dbc == nil {
  3908  		return nil
  3909  	}
  3910  
  3911  	fbo.blockLock.RLock(lState)
  3912  	defer fbo.blockLock.RUnlock(lState)
  3913  
  3914  	chargedTo, err := fbo.getChargedToLocked(ctx, lState, kmd)
  3915  	if err != nil {
  3916  		return err
  3917  	}
  3918  	p := fbo.nodeCache.PathFromNode(node)
  3919  	err = dbc.Mark(ctx, p.TailPointer().ID, tag, cacheType)
  3920  	if err != nil {
  3921  		return err
  3922  	}
  3923  	var infos []data.BlockInfo
  3924  	if node.EntryType() == data.Dir {
  3925  		dd := fbo.newDirDataLocked(lState, p, chargedTo, kmd)
  3926  		infos, err = dd.GetIndirectDirBlockInfos(ctx)
  3927  	} else {
  3928  		fd := fbo.newFileData(lState, p, chargedTo, kmd)
  3929  		infos, err = fd.GetIndirectFileBlockInfos(ctx)
  3930  	}
  3931  	if err != nil {
  3932  		return err
  3933  	}
  3934  
  3935  	for _, info := range infos {
  3936  		err = dbc.Mark(ctx, info.BlockPointer.ID, tag, cacheType)
  3937  		switch errors.Cause(err).(type) {
  3938  		case nil:
  3939  		case data.NoSuchBlockError:
  3940  		default:
  3941  			return err
  3942  		}
  3943  	}
  3944  	return nil
  3945  }
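
// A hypothetical mark-and-sweep sketch from the caller's side; the
// tag value and the DeleteUnmarked call are assumptions about the
// surrounding code, not definitions from this file:
//
//	tag := "sweep-" + time.Now().String() // any unique string
//	for _, node := range nodesToKeep {
//		if err := fbo.MarkNode(
//			ctx, lState, node, kmd, tag, cacheType); err != nil {
//			return err
//		}
//	}
//	// Evict every block of this TLF that didn't get marked.
//	err := fbo.config.DiskBlockCache().DeleteUnmarked(
//		ctx, fbo.id(), tag, cacheType)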
  3946  
  3947  type chainsPathPopulator interface {
  3948  	populateChainPaths(context.Context, logger.Logger, *crChains, bool) error
  3949  	obfuscatorMaker() func() data.Obfuscator
  3950  }
  3951  
  3952  // populateChainPaths updates all the paths in all the ops tracked by
  3953  // `chains`, using the main nodeCache.
  3954  func (fbo *folderBlockOps) populateChainPaths(ctx context.Context,
  3955  	log logger.Logger, chains *crChains, includeCreates bool) error {
  3956  	_, err := chains.getPaths(
  3957  		ctx, fbo, log, fbo.nodeCache, includeCreates,
  3958  		fbo.config.Mode().IsTestMode())
  3959  	return err
  3960  }
  3961  
  3962  func (fbo *folderBlockOps) obfuscatorMaker() func() data.Obfuscator {
  3963  	return fbo.nodeCache.ObfuscatorMaker()
  3964  }
  3965  
  3966  var _ chainsPathPopulator = (*folderBlockOps)(nil)