github.com/keybase/client/go@v0.0.0-20241007131713-f10651d043c8/kbfs/libkbfs/folder_update_prepper.go

// Copyright 2017 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package libkbfs

import (
	"fmt"
	"sync"

	"github.com/keybase/client/go/kbfs/data"
	"github.com/keybase/client/go/kbfs/idutil"
	"github.com/keybase/client/go/kbfs/kbfsblock"
	"github.com/keybase/client/go/kbfs/kbfssync"
	"github.com/keybase/client/go/kbfs/libkey"
	"github.com/keybase/client/go/kbfs/tlf"
	"github.com/keybase/client/go/libkb"
	"github.com/keybase/client/go/logger"
	"github.com/keybase/client/go/protocol/keybase1"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

// folderUpdatePrepper is a helper struct for preparing blocks and MD
// updates before they get synced to the backend servers.  It can be
// used for a single update or for a batch of updates (e.g. conflict
// resolution).
type folderUpdatePrepper struct {
	config       Config
	folderBranch data.FolderBranch
	blocks       *folderBlockOps
	log          logger.Logger
	vlog         *libkb.VDebugLog

	cacheLock   sync.Mutex
	cachedInfos map[data.BlockPointer]data.BlockInfo
}

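// An illustrative sketch of the intended call pattern (hypothetical
// surrounding variables; whatever code owns the prepper already has a
// Config, a folderBlockOps, and loggers in hand):
//
//	fup := &folderUpdatePrepper{
//		config:       config,
//		folderBranch: fb,
//		blocks:       blocks,
//		log:          log,
//		vlog:         vlog,
//	}
//	// Prepare a single update, readying the changed blocks into bps:
//	newPath, newDe, err := fup.prepUpdateForPath(
//		ctx, lState, chargedTo, md, newBlock, newBlockPtr, dir, name,
//		data.File, true, true, data.BlockPointer{}, dbm, bps)
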
func (fup *folderUpdatePrepper) id() tlf.ID {
	return fup.folderBranch.Tlf
}

func (fup *folderUpdatePrepper) branch() data.BranchName {
	return fup.folderBranch.Branch
}

func (fup *folderUpdatePrepper) nowUnixNano() int64 {
	return fup.config.Clock().Now().UnixNano()
}

func (fup *folderUpdatePrepper) cacheHashBehavior() data.BlockCacheHashBehavior {
	return fup.blocks.cacheHashBehavior()
}

// readyBlockMultiple readies the given block (encoding and encrypting
// it), adds it to the given blockPutState for later putting to the
// servers, and returns the new block info along with the size of the
// encoded plaintext.
func (fup *folderUpdatePrepper) readyBlockMultiple(ctx context.Context,
	kmd libkey.KeyMetadata, currBlock data.Block, chargedTo keybase1.UserOrTeamID,
	bps blockPutState, bType keybase1.BlockType) (
	info data.BlockInfo, plainSize int, err error) {
	info, plainSize, readyBlockData, err :=
		data.ReadyBlock(ctx, fup.config.BlockCache(), fup.config.BlockOps(),
			kmd, currBlock, chargedTo, bType, fup.cacheHashBehavior())
	if err != nil {
		return data.BlockInfo{}, 0, err
	}

	err = bps.AddNewBlock(
		ctx, info.BlockPointer, currBlock, readyBlockData, nil)
	if err != nil {
		return data.BlockInfo{}, 0, err
	}
	return info, plainSize, nil
}

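// A minimal usage sketch, mirroring the calls made later in this file
// (md, block, chargedTo, and bps are assumed to be in scope):
//
//	info, plainSize, err := fup.readyBlockMultiple(
//		ctx, md.ReadOnly(), block, chargedTo, bps,
//		fup.config.DefaultBlockType())
//	if err != nil {
//		return err
//	}
//	// info.BlockPointer now identifies the readied block queued in bps.
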
// unembedBlockChanges writes the given block changes into their own
// block (or blocks), rather than embedding them in the MD object
// itself, adds the resulting blocks to bps, and updates md and
// changes to reference the new top block.
func (fup *folderUpdatePrepper) unembedBlockChanges(
	ctx context.Context, bps blockPutState, md *RootMetadata,
	changes *BlockChanges, chargedTo keybase1.UserOrTeamID) error {
	buf, err := fup.config.Codec().Encode(changes)
	if err != nil {
		return err
	}

	// Treat the block change list as a file so we can reuse all the
	// indirection code in fileData.
	block := data.NewFileBlock().(*data.FileBlock)
	id, err := fup.config.cryptoPure().MakeTemporaryBlockID()
	if err != nil {
		return err
	}
	ptr := data.BlockPointer{
		ID:         id,
		KeyGen:     md.LatestKeyGeneration(),
		DataVer:    fup.config.DataVersion(),
		DirectType: data.DirectBlock,
		Context: kbfsblock.MakeFirstContext(
			chargedTo, keybase1.BlockType_MD),
	}
	file := data.Path{
		FolderBranch: fup.folderBranch,
		Path: []data.PathNode{{
			BlockPointer: ptr,
			Name: data.NewPathPartString(
				fmt.Sprintf("<MD rev %d>", md.Revision()), nil),
		},
		}}

	dirtyBcache := data.SimpleDirtyBlockCacheStandard()
	// Simple dirty bcaches don't need to be shut down.

	getter := func(ctx context.Context, _ libkey.KeyMetadata, ptr data.BlockPointer,
		_ data.Path, _ data.BlockReqType) (*data.FileBlock, bool, error) {
		block, err := dirtyBcache.Get(ctx, fup.id(), ptr, fup.branch())
		if err != nil {
			return nil, false, err
		}
		fblock, ok := block.(*data.FileBlock)
		if !ok {
			return nil, false, errors.Errorf(
				"Block for %s is not a file block, block type: %T", ptr, block)
		}
		return fblock, true, nil
	}
	cacher := func(ctx context.Context, ptr data.BlockPointer, block data.Block) error {
		return dirtyBcache.Put(ctx, fup.id(), ptr, fup.branch(), block)
	}
	// Start off the cache with the new block
	err = cacher(ctx, ptr, block)
	if err != nil {
		return err
	}

	df := data.NewDirtyFile(file, dirtyBcache)
	fd := data.NewFileData(
		file, chargedTo, fup.config.BlockSplitter(), md.ReadOnly(), getter,
		cacher, fup.log, fup.vlog)

	// Write all the data.
	_, _, _, _, _, err = fd.Write(ctx, buf, 0, block, data.DirEntry{}, df)
	if err != nil {
		return err
	}

	// There might be a new top block.
	topBlock, err := dirtyBcache.Get(ctx, fup.id(), ptr, fup.branch())
	if err != nil {
		return err
	}
	block, ok := topBlock.(*data.FileBlock)
	if !ok {
		return errors.New("Top block change block no longer a file block")
	}

	// Ready all the child blocks.
	infos, err := fd.Ready(
		ctx, fup.id(), fup.config.BlockCache(), dirtyBcache,
		fup.config.BlockOps(), bps, block, df, fup.cacheHashBehavior())
	if err != nil {
		return err
	}
	for info := range infos {
		md.AddMDRefBytes(uint64(info.EncodedSize))
		md.AddMDDiskUsage(uint64(info.EncodedSize))
	}
	fup.vlog.CLogf(ctx, libkb.VLog1, "%d unembedded child blocks", len(infos))

	// Ready the top block.
	info, _, err := fup.readyBlockMultiple(
		ctx, md.ReadOnly(), block, chargedTo, bps, keybase1.BlockType_MD)
	if err != nil {
		return err
	}

	md.AddMDRefBytes(uint64(info.EncodedSize))
	md.AddMDDiskUsage(uint64(info.EncodedSize))
	md.data.cachedChanges = *changes
	changes.Info = info
	changes.Ops = nil
	return nil
}

// isDirtyWithDBM reports a block as dirty if it's present in the
// given dirBlockMap, falling back to the given dirty block cache
// otherwise.
type isDirtyWithDBM struct {
	dbm         dirBlockMap
	dirtyBcache data.DirtyBlockCache
}

func (idwl isDirtyWithDBM) IsDirty(
	tlfID tlf.ID, ptr data.BlockPointer, branch data.BranchName) bool {
	hasBlock, err := idwl.dbm.hasBlock(context.Background(), ptr)
	if err != nil {
		// TODO: do something with this error.
		return false
	}
	if hasBlock {
		return true
	}

	return idwl.dirtyBcache.IsDirty(tlfID, ptr, branch)
}

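// A sketch of how this composite checker is used (as in
// prepUpdateForPath below, with a dirBlockMap dbm and a DirData dd in
// scope):
//
//	newInfos, err := dd.Ready(
//		ctx, fup.id(), fup.config.BlockCache(),
//		isDirtyWithDBM{dbm, fup.config.DirtyBlockCache()},
//		fup.config.BlockOps(), bps, dblock, fup.cacheHashBehavior())
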
// prepUpdateForPath updates, and readies, the blocks along the path
// for the given write, up to the root of the tree or stopAt (if
// specified).  When it updates the root of the tree, it also modifies
// the given head object with a new revision number and root block ID.
// It first checks the provided dbm for blocks that may have been
// modified by previous prepUpdateForPath calls or the FS calls
// themselves.  It returns the updated path to the changed directory,
// the new or updated directory entry created as part of the call, and
// a summary of all the blocks that now must be put to the block
// server.
//
// This function is safe to use unlocked, but may modify MD to have
// the same revision number as another one. Callers that require
// serialized revision numbers must implement their own locking around
// their instance.
//
// entryType must not be Sym.
//
// TODO: deal with multiple nodes for indirect blocks
func (fup *folderUpdatePrepper) prepUpdateForPath(
	ctx context.Context, lState *kbfssync.LockState,
	chargedTo keybase1.UserOrTeamID, md *RootMetadata, newBlock data.Block,
	newBlockPtr data.BlockPointer, dir data.Path, name data.PathPartString,
	entryType data.EntryType, mtime bool, ctime bool, stopAt data.BlockPointer,
	dbm dirBlockMap, bps blockPutState) (data.Path, data.DirEntry, error) {
	// now ready each dblock and write the DirEntry for the next one
	// in the path
	currBlock := newBlock
	var currDD *data.DirData
	var currDDPtr data.BlockPointer
	var cleanupFn func()
	defer func() {
		if cleanupFn != nil {
			cleanupFn()
		}
	}()
	if _, isDir := newBlock.(*data.DirBlock); isDir {
		newPath := dir.ChildPath(
			name, newBlockPtr, fup.blocks.nodeCache.ObfuscatorMaker()())
		currDD, cleanupFn = fup.blocks.newDirDataWithDBM(
			lState, newPath, chargedTo, md, dbm)
		currDDPtr = newPath.TailPointer()
	}
	currName := name
	newPath := data.Path{
		FolderBranch: dir.FolderBranch,
		Path:         make([]data.PathNode, 0, len(dir.Path)),
	}
	var newDe data.DirEntry
	doSetTime := true
	now := fup.nowUnixNano()
	var uid keybase1.UID
	for len(newPath.Path) < len(dir.Path)+1 {
		if currDD != nil {
			// Ready any non-top blocks in the directory.
			newInfos, err := currDD.Ready(
				ctx, fup.id(), fup.config.BlockCache(),
				isDirtyWithDBM{dbm, fup.config.DirtyBlockCache()},
				fup.config.BlockOps(), bps, currBlock.(*data.DirBlock),
				fup.cacheHashBehavior())
			if err != nil {
				return data.Path{}, data.DirEntry{}, err
			}
			for newInfo := range newInfos {
				md.AddRefBlock(newInfo)
			}

			dirUnrefs := fup.blocks.getDirtyDirUnrefsLocked(
				lState, currDDPtr)
			for _, unref := range dirUnrefs {
				md.AddUnrefBlock(unref)
			}
			cleanupFn()
			cleanupFn = nil
		}

		info, plainSize, err := fup.readyBlockMultiple(
			ctx, md.ReadOnly(), currBlock, chargedTo, bps,
			fup.config.DefaultBlockType())
		if err != nil {
			return data.Path{}, data.DirEntry{}, err
		}
		if dblock, ok := currBlock.(*data.DirBlock); ok {
			plainSize = dblock.TotalPlainSizeEstimate(
				plainSize, fup.config.BlockSplitter())
		}

		// prepend to path and setup next one
		newPath.Path = append([]data.PathNode{
			{BlockPointer: info.BlockPointer, Name: currName}},
			newPath.Path...)

		// get the parent block
		prevIdx := len(dir.Path) - len(newPath.Path)
		var de data.DirEntry
		var nextName data.PathPartString
		nextDoSetTime := false
		if prevIdx < 0 {
			// root dir, update the MD instead
			de = md.data.Dir
		} else {
			prevDir := data.Path{
				FolderBranch: dir.FolderBranch,
				Path:         dir.Path[:prevIdx+1],
			}

			var dd *data.DirData
			dd, cleanupFn = fup.blocks.newDirDataWithDBM(
				lState, prevDir, chargedTo, md, dbm)
			de, err = dd.Lookup(ctx, currName)
			if _, noExists := errors.Cause(err).(idutil.NoSuchNameError); noExists {
				// If this isn't the first time
				// around, we have an error.
				if len(newPath.Path) > 1 {
					return data.Path{}, data.DirEntry{},
						idutil.NoSuchNameError{Name: currName.String()}
				}

				// If this is a file, the size should be 0. (TODO:
				// Ensure this.) If this is a directory, the size will
				// be filled in below.  The times will be filled in
				// below as well, since we should only be creating a
				// new directory entry when doSetTime is true.
				de = data.DirEntry{
					EntryInfo: data.EntryInfo{
						Type: entryType,
						Size: 0,
					},
				}
				// If we're creating a new directory entry, the
				// parent's times must be set as well.
				nextDoSetTime = true
			} else if err != nil {
				return data.Path{}, data.DirEntry{}, err
			}

			prevDblock, err := dd.GetTopBlock(ctx, data.BlockWrite)
			if err != nil {
				return data.Path{}, data.DirEntry{}, err
			}
			currBlock = prevDblock
			currDD = dd
			currDDPtr = prevDir.TailPointer()
			nextName = prevDir.TailName()
		}

		if de.Type == data.Dir {
			de.Size = uint64(plainSize)
		}

		if prevIdx < 0 {
			md.AddUpdate(md.data.Dir.BlockInfo, info)
			err = bps.SaveOldPtr(ctx, md.data.Dir.BlockPointer)
			if err != nil {
				return data.Path{}, data.DirEntry{}, err
			}
		} else if prevDe, err := currDD.Lookup(ctx, currName); err == nil {
			md.AddUpdate(prevDe.BlockInfo, info)
			err = bps.SaveOldPtr(ctx, prevDe.BlockPointer)
			if err != nil {
				return data.Path{}, data.DirEntry{}, err
			}
		} else {
			// this is a new block
			md.AddRefBlock(info)
		}

		de.BlockInfo = info
		de.PrevRevisions = de.PrevRevisions.AddRevision(
			md.Revision(), md.data.LastGCRevision)

		if doSetTime {
			if mtime {
				de.Mtime = now
			}
			if ctime {
				de.Ctime = now
			}
		}

		if fup.id().Type() == tlf.SingleTeam {
			if uid.IsNil() {
				session, err := fup.config.KBPKI().GetCurrentSession(ctx)
				if err != nil {
					return data.Path{}, data.DirEntry{}, err
				}
				uid = session.UID
			}
			de.TeamWriter = uid
		}

		if !newDe.IsInitialized() {
			newDe = de
		}

		if prevIdx < 0 {
			md.data.Dir = de
		} else {
			unrefs, err := currDD.SetEntry(ctx, currName, de)
			if err != nil {
				return data.Path{}, data.DirEntry{}, err
			}
			for _, unref := range unrefs {
				md.AddUnrefBlock(unref)
			}
			// Fetch the current block again, since `setEntry` might
			// not modify the original `currBlock`, but some
			// re-assembled version if the disk cache is in use.
			currBlock, err = dbm.getBlock(ctx, currDDPtr)
			if err != nil {
				return data.Path{}, data.DirEntry{}, err
			}
		}
		currName = nextName

		// Stop before we get to the common ancestor; it will be taken care of
		// on the next sync call
		if prevIdx >= 0 && dir.Path[prevIdx].BlockPointer == stopAt {
			break
		}
		doSetTime = nextDoSetTime
	}

	return newPath, newDe, nil
}

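// A hedged sketch of the stopAt semantics, with hypothetical names
// (aPath/aPtr for a parent directory a, and two dirty children b and
// c); prepTree below is the real driver of this pattern.  The first
// sibling stops at a, so a itself is readied only once, by the final
// call that syncs all the way to the root:
//
//	// Ready b's new block, stopping before the common ancestor a:
//	_, _, err := fup.prepUpdateForPath(
//		ctx, lState, chargedTo, md, bBlock, bPtr, aPath, bName,
//		data.Dir, false, false, aPtr, dbm, bps)
//	// Ready c's new block and continue through a to the root:
//	_, _, err = fup.prepUpdateForPath(
//		ctx, lState, chargedTo, md, cBlock, cPtr, aPath, cName,
//		data.Dir, false, false, data.BlockPointer{}, dbm, bps)
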
// pathTreeNode represents a particular node in the part of the FS
// tree affected by a set of updates which needs to be sync'd.
type pathTreeNode struct {
	ptr        data.BlockPointer
	parent     *pathTreeNode
	children   map[string]*pathTreeNode
	mergedPath data.Path
}

type prepFolderCopyBehavior int

const (
	prepFolderCopyIndirectFileBlocks     prepFolderCopyBehavior = 1
	prepFolderDontCopyIndirectFileBlocks prepFolderCopyBehavior = 2
)

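// An illustrative reading of the two behaviors (see their use in
// prepTree below): prepFolderCopyIndirectFileBlocks makes prepTree
// create a new reference (or, with journaling on, re-ready the block)
// for every child of an indirect file block, as conflict resolution
// requires; prepFolderDontCopyIndirectFileBlocks skips that step.  A
// hypothetical invocation:
//
//	err := fup.prepTree(
//		ctx, lState, unmergedChains, newMD, chargedTo, root,
//		data.BlockPointer{}, dbm, newFileBlocks, dirtyBcache, bps,
//		prepFolderCopyIndirectFileBlocks)
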
// prepTree, given a node in part of the FS tree that needs to be
// sync'd, either calls prepUpdateForPath on it if the node has no
// children of its own, or it calls prepTree recursively for all
// children.  When calling itself recursively on its children, it
// instructs each child to sync only up to this node, except for the
// last child which may sync back to the given stopAt pointer.  This
// ensures that the sync process will ready blocks that are complete
// (with all child changes applied) before readying any parent blocks.
// The readied blocks for the node and all of its children are
// accumulated in the given blockPutState.
func (fup *folderUpdatePrepper) prepTree(
	ctx context.Context, lState *kbfssync.LockState, unmergedChains *crChains,
	newMD *RootMetadata, chargedTo keybase1.UserOrTeamID, node *pathTreeNode,
	stopAt data.BlockPointer, dbm dirBlockMap, newFileBlocks fileBlockMap,
	dirtyBcache data.DirtyBlockCacheSimple, bps blockPutState,
	copyBehavior prepFolderCopyBehavior) error {
	// If this has no children, then sync it, as far back as stopAt.
	if len(node.children) == 0 {
		// Look for the directory block or the new file block.
		entryType := data.Dir
		hasDirBlock, err := dbm.hasBlock(ctx, node.ptr)
		if err != nil {
			return err
		}
		// fblock is non-nil exactly when entryType != Dir.
		var block data.Block
		var fblock *data.FileBlock
		if hasDirBlock {
			dirBlock, err := dbm.getBlock(ctx, node.ptr)
			if err != nil {
				return err
			}
			block = dirBlock
		} else {
			// This must be a file, so look it up in the parent
			if node.parent == nil {
				return fmt.Errorf("No parent found for node %v while "+
					"syncing path %v", node.ptr, node.mergedPath.Path)
			}

			var err error
			fblock, err = newFileBlocks.GetTopBlock(
				ctx, node.parent.ptr, node.mergedPath.TailName())
			if err != nil {
				return err
			}
			block = fblock
			entryType = data.File // TODO: FIXME for Ex and Sym
		}

		// For an indirect file block, make sure a new
		// reference is made for every child block.
		if copyBehavior == prepFolderCopyIndirectFileBlocks &&
			entryType != data.Dir && fblock.IsInd {
			var infos []data.BlockInfo
			var err error

			// If journaling is enabled, new references aren't
			// supported.  We have to fetch each block and ready
			// it.  TODO: remove this when KBFS-1149 is fixed.
			if TLFJournalEnabled(fup.config, fup.id()) {
				infos, err = fup.blocks.UndupChildrenInCopy(
					ctx, lState, newMD.ReadOnly(), node.mergedPath, bps,
					dirtyBcache, fblock)
				if err != nil {
					return err
				}
			} else {
				// Ready any mid-level internal children.
				_, err = fup.blocks.ReadyNonLeafBlocksInCopy(
					ctx, lState, newMD.ReadOnly(), node.mergedPath, bps,
					dirtyBcache, fblock)
				if err != nil {
					return err
				}

				infos, err = fup.blocks.
					GetIndirectFileBlockInfosWithTopBlock(
						ctx, lState, newMD.ReadOnly(), node.mergedPath, fblock)
				if err != nil {
					return err
				}

				for _, info := range infos {
					// The indirect blocks were already added to
					// childBps, so only add the dedup'd leaf blocks.
					if info.RefNonce != kbfsblock.ZeroRefNonce {
						err = bps.AddNewBlock(
							ctx, info.BlockPointer, nil, data.ReadyBlockData{}, nil)
						if err != nil {
							return err
						}
					}
				}
			}
			for _, info := range infos {
				newMD.AddRefBlock(info)
			}
		}

		// Assume the mtime/ctime are already fixed up in the blocks
		// in the dbm.
		_, _, err = fup.prepUpdateForPath(
			ctx, lState, chargedTo, newMD, block, node.ptr,
			*node.mergedPath.ParentPath(), node.mergedPath.TailName(),
			entryType, false, false, stopAt, dbm, bps)
		if err != nil {
			return err
		}

		return nil
	}

	// If there is more than one child, use this node as the stopAt
	// since it is the branch point, except for the last child.
	count := 0
	for _, child := range node.children {
		localStopAt := node.ptr
		count++
		if count == len(node.children) {
			localStopAt = stopAt
		}
		err := fup.prepTree(
			ctx, lState, unmergedChains, newMD, chargedTo, child, localStopAt,
			dbm, newFileBlocks, dirtyBcache, bps, copyBehavior)
		if err != nil {
			return err
		}
	}
	return nil
}

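// To make the traversal order concrete, a small hypothetical tree
// (root r with children x and y, where y has a dirty child z) is
// prepped roughly as follows:
//
//	prepTree(r, stopAt=zero)
//	  prepTree(x, stopAt=r.ptr)   // leaf: syncs x, stopping at r
//	  prepTree(y, stopAt=zero)    // last child: may sync back to the root
//	    prepTree(z, stopAt=zero)  // leaf: syncs z, then y, then r
//
// so each directory is readied only after all of its dirty children
// have been applied.  (Which child counts as "last" depends on map
// iteration order.)
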
// updateResolutionUsageLockedCache figures out how many bytes are
// referenced and unreferenced in the merged branch by this
// resolution.  Only needs to be called for non-squash resolutions.
// `fup.cacheLock` must be taken before calling.
func (fup *folderUpdatePrepper) updateResolutionUsageLockedCache(
	ctx context.Context, lState *kbfssync.LockState, md *RootMetadata,
	bps blockPutState, unmergedChains, mergedChains *crChains,
	mostRecentMergedMD ImmutableRootMetadata,
	refs, unrefs map[data.BlockPointer]bool) error {
	md.SetRefBytes(0)
	md.SetUnrefBytes(0)
	md.SetMDRefBytes(0)
	md.SetDiskUsage(mostRecentMergedMD.DiskUsage())
	md.SetMDDiskUsage(mostRecentMergedMD.MDDiskUsage())

	localBlocks := make(map[data.BlockPointer]data.Block)
	for _, ptr := range bps.Ptrs() {
		if block, err := bps.GetBlock(ctx, ptr); err == nil && block != nil {
			localBlocks[ptr] = block
		}
	}

	// Add bytes for every ref'd block.
	refPtrsToFetch := make([]data.BlockPointer, 0, len(refs))
	var refSum uint64
	for ptr := range refs {
		if block, ok := localBlocks[ptr]; ok {
			refSum += uint64(block.GetEncodedSize())
		} else {
			refPtrsToFetch = append(refPtrsToFetch, ptr)
		}
		fup.vlog.CLogf(ctx, libkb.VLog1, "Ref'ing block %v", ptr)
	}

	// Look up the total sum of the ref blocks in parallel to get
	// their sizes.
	//
	// TODO: If the blocks weren't already in the cache, this call
	// won't cache them, so it's kind of wasting work.  Furthermore,
	// we might be able to get the encoded size from other sources as
	// well (such as its directory entry or its indirect file block)
	// if we happened to have come across it before.
	refSumFetched, err := fup.blocks.GetCleanEncodedBlocksSizeSum(
		ctx, lState, md.ReadOnly(), refPtrsToFetch, nil, fup.branch(), false)
	if err != nil {
		return err
	}
	refSum += refSumFetched

	fup.vlog.CLogf(ctx, libkb.VLog1, "Ref'ing a total of %d bytes", refSum)
	md.AddRefBytes(refSum)
	md.AddDiskUsage(refSum)

	unrefPtrsToFetch := make([]data.BlockPointer, 0, len(unrefs))
	var unrefSum uint64
	for ptr := range unrefs {
		original, ok := unmergedChains.originals[ptr]
		if !ok {
			original = ptr
		}
		if original != ptr || unmergedChains.isCreated(original) {
			// Only unref pointers that weren't created as part of the
			// unmerged branch.  Either they existed already or they
			// were created as part of the merged branch.
			continue
		}
		// Also make sure this wasn't already removed or overwritten
		// on the merged branch.
		original, ok = mergedChains.originals[ptr]
		if !ok {
			original = ptr
		}
		mergedChain, ok := mergedChains.byOriginal[original]
		if (ok && original != mergedChain.mostRecent && original == ptr) ||
			mergedChains.isDeleted(original) {
			continue
		}

		if info, ok := fup.cachedInfos[ptr]; ok {
			unrefSum += uint64(info.EncodedSize)
		} else {
			unrefPtrsToFetch = append(unrefPtrsToFetch, ptr)
		}
	}

	// Look up the unref blocks in parallel to get their sizes.  Since
	// we don't know whether these are files or directories, just look
	// them up generically.  Ignore any recoverable errors for unrefs.
	// Note that we can't combine these with the above ref fetches
	// since they require a different MD.  If the merged changes
	// didn't change any blocks (in particular, the root block), we
	// can assume all the blocks we are unreferencing were live;
	// otherwise, we need to check with the server to make sure.
	onlyCountIfLive := len(mergedChains.byOriginal) != 0
	unrefSumFetched, err := fup.blocks.GetCleanEncodedBlocksSizeSum(
		ctx, lState, mostRecentMergedMD, unrefPtrsToFetch, unrefs,
		fup.branch(), onlyCountIfLive)
	if err != nil {
		return err
	}
	unrefSum += unrefSumFetched

	// Subtract bytes for every unref'd block that wasn't created in
	// the unmerged branch.
	fup.vlog.CLogf(ctx, libkb.VLog1, "Unref'ing a total of %d bytes", unrefSum)
	md.AddUnrefBytes(unrefSum)
	md.SetDiskUsage(md.DiskUsage() - unrefSum)
	return nil
}

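// The net effect of the accounting above can be summarized as a small
// worked example (not code from this file): starting from the merged
// branch's disk usage D, a resolution that references R bytes of new
// blocks and unreferences U bytes of old ones ends with
//
//	RefBytes   = R
//	UnrefBytes = U
//	DiskUsage  = D + R - U
//
// e.g. D = 10 MB, R = 3 MB, and U = 1 MB leaves DiskUsage at 12 MB.
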
// addUnrefToFinalResOp makes a resolutionOp at the end of opsList if
// one doesn't exist yet, and then adds the given pointer as an unref
// block to it.
func addUnrefToFinalResOp(ops opsList, ptr data.BlockPointer,
	doNotUnref map[data.BlockPointer]bool) opsList {
	// Make sure the block ID we want to unref isn't in the "do not
	// unref" list -- it could mean that block has already been GC'd
	// by the merged branch.  We can't compare pointers directly
	// because GC'd pointers contain no block context.
	for noUnref := range doNotUnref {
		if ptr.ID == noUnref.ID {
			return ops
		}
	}

	resOp, ok := ops[len(ops)-1].(*resolutionOp)
	if !ok {
		resOp = newResolutionOp()
		ops = append(ops, resOp)
	}
	resOp.AddUncommittedUnrefBlock(ptr)
	return ops
}

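// A usage sketch, mirroring the calls made elsewhere in this file;
// note the result must be assigned back, since the list may have been
// extended with a new resolutionOp:
//
//	md.data.Changes.Ops = addUnrefToFinalResOp(
//		md.data.Changes.Ops, ptr, unmergedChains.doNotUnrefPointers)
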
// updateResolutionUsageAndPointersLockedCache figures out how many
// bytes are referenced and unreferenced in the merged branch by this
// resolution (if needed), and adds referenced and unreferenced
// pointers to a final `resolutionOp` as necessary. It should be
// called before the block changes are unembedded in md.  It returns
// the list of blocks that can be removed from the flushing queue, if
// any.  `fup.cacheLock` must be taken before calling.
func (fup *folderUpdatePrepper) updateResolutionUsageAndPointersLockedCache(
	ctx context.Context, lState *kbfssync.LockState, md *RootMetadata,
	bps blockPutState, unmergedChains, mergedChains *crChains,
	mostRecentUnmergedMD, mostRecentMergedMD ImmutableRootMetadata,
	isLocalSquash bool) (
	blocksToDelete []kbfsblock.ID, err error) {

	// Track the refs and unrefs in a set, to ensure no duplicates
	refs := make(map[data.BlockPointer]bool)
	unrefs := make(map[data.BlockPointer]bool)
	for _, op := range md.data.Changes.Ops {
		// Iterate in reverse since we may be deleting references as we go.
		for i := len(op.Refs()) - 1; i >= 0; i-- {
			ptr := op.Refs()[i]
			// Don't add usage if it's an unembedded block change
			// pointer.  Also, we shouldn't be referencing this
			// anymore!
			if unmergedChains.blockChangePointers[ptr] {
				fup.vlog.CLogf(
					ctx, libkb.VLog1, "Ignoring block change ptr %v", ptr)
				op.DelRefBlock(ptr)
			} else {
				refs[ptr] = true
			}
		}
		// Iterate in reverse since we may be deleting unrefs as we go.
		for i := len(op.Unrefs()) - 1; i >= 0; i-- {
			ptr := op.Unrefs()[i]
			unrefs[ptr] = true
			delete(refs, ptr)
			if _, isCreateOp := op.(*createOp); isCreateOp {
				// The only way a create op should have unref blocks
				// is if it was created during conflict resolution.
				// In that case, we should move the unref to a final
				// resolution op, so it doesn't confuse future
				// resolutions.
				op.DelUnrefBlock(ptr)
				md.data.Changes.Ops =
					addUnrefToFinalResOp(
						md.data.Changes.Ops, ptr, unmergedChains.doNotUnrefPointers)
			}
		}
		for _, update := range op.allUpdates() {
			if update.Unref != update.Ref {
				unrefs[update.Unref] = true
				delete(refs, update.Unref)
				refs[update.Ref] = true
			}
		}
	}

	for _, resOp := range unmergedChains.resOps {
		for _, ptr := range resOp.CommittedUnrefs() {
			original, err := unmergedChains.originalFromMostRecentOrSame(ptr)
			if err != nil {
				return nil, err
			}
			if !unmergedChains.isCreated(original) {
				fup.vlog.CLogf(
					ctx, libkb.VLog1, "Unref'ing %v from old resOp", ptr)
				unrefs[ptr] = true
			}
		}
	}

	// Unreference (and decrement the size) of any to-unref blocks
	// that weren't created in the unmerged branch.  (Example: non-top
	// dir blocks that were changed during the CR process.)
	for ptr := range unmergedChains.toUnrefPointers {
		original, err := unmergedChains.originalFromMostRecentOrSame(ptr)
		if err != nil {
			return nil, err
		}
		if !unmergedChains.isCreated(original) {
			unrefs[ptr] = true
		}
	}

	if isLocalSquash {
		// Collect any references made in previous resolution ops that
		// are being squashed together. These must be re-referenced in
		// the MD object to survive the squash.
		resToRef := make(map[data.BlockPointer]bool)
		for _, resOp := range unmergedChains.resOps {
			for _, ptr := range resOp.Refs() {
				if !unrefs[ptr] {
					resToRef[ptr] = true
				}
			}
			for _, ptr := range resOp.Unrefs() {
				delete(resToRef, ptr)
			}
			for _, update := range resOp.allUpdates() {
				delete(resToRef, update.Unref)
			}
		}
		for ptr := range resToRef {
			fup.vlog.CLogf(ctx, libkb.VLog1, "Ref'ing %v from old resOp", ptr)
			refs[ptr] = true
			md.data.Changes.Ops[0].AddRefBlock(ptr)
		}

		unmergedUsage := mostRecentUnmergedMD.DiskUsage()
		mergedUsage := mostRecentMergedMD.DiskUsage()

		// Local squashes can just use the bytes and usage from the
		// latest unmerged MD, and we can avoid all the block fetching
		// done by `updateResolutionUsage()`.
		md.SetDiskUsage(unmergedUsage)
		// TODO: it might be better to add up all the ref bytes, and
		// all the unref bytes, from all unmerged MDs, instead of just
		// calculating the difference between the usages.  But that's
		// not quite right either since it counts blocks that are
		// ref'd and unref'd within the squash.
		if md.DiskUsage() > mergedUsage {
			md.SetRefBytes(md.DiskUsage() - mergedUsage)
			md.SetUnrefBytes(0)
		} else {
			md.SetRefBytes(0)
			md.SetUnrefBytes(mergedUsage - md.DiskUsage())
		}

		mergedMDUsage := mostRecentMergedMD.MDDiskUsage()
		if md.MDDiskUsage() < mergedMDUsage {
			return nil, fmt.Errorf("MD disk usage went down on unmerged "+
				"branch: %d vs %d", md.MDDiskUsage(), mergedMDUsage)
		}

		// Additional MD disk usage will be determined entirely by the
		// later `unembedBlockChanges()` call.
		md.SetMDDiskUsage(mergedMDUsage)
		md.SetMDRefBytes(0)
	} else {
		err = fup.updateResolutionUsageLockedCache(
			ctx, lState, md, bps, unmergedChains, mergedChains,
			mostRecentMergedMD, refs, unrefs)
		if err != nil {
			return nil, err
		}
	}

	// Any blocks that were created on the unmerged branch and have
	// been flushed, but didn't survive the resolution, should be
	// marked as unreferenced in the resolution.
	toUnref := make(map[data.BlockPointer]bool)
	for ptr := range unmergedChains.originals {
		if !refs[ptr] && !unrefs[ptr] {
			toUnref[ptr] = true
		}
	}
	for ptr := range unmergedChains.createdOriginals {
		if !refs[ptr] && !unrefs[ptr] && unmergedChains.byOriginal[ptr] != nil {
			toUnref[ptr] = true
		} else if unmergedChains.blockChangePointers[ptr] {
			toUnref[ptr] = true
		}
	}
	for ptr := range unmergedChains.toUnrefPointers {
		toUnref[ptr] = true
	}
	for _, resOp := range unmergedChains.resOps {
		for _, ptr := range resOp.Refs() {
			if !isLocalSquash && !refs[ptr] && !unrefs[ptr] {
				toUnref[ptr] = true
			}
		}
		for _, ptr := range resOp.Unrefs() {
			if !refs[ptr] && !unrefs[ptr] {
				toUnref[ptr] = true
			}
		}
	}
	deletedRefs := make(map[data.BlockPointer]bool)
	deletedUnrefs := make(map[data.BlockPointer]bool)
	for ptr := range toUnref {
		if ptr == data.ZeroPtr || unmergedChains.doNotUnrefPointers[ptr] {
			// A zero pointer can sneak in from the unrefs field of a
			// syncOp following a failed syncOp, via
			// `unmergedChains.toUnrefPointers` after a chain collapse.
			continue
		}
		isUnflushed, err := fup.config.BlockServer().IsUnflushed(
			ctx, fup.id(), ptr.ID)
		if err != nil {
			return nil, err
		}
		if isUnflushed {
			blocksToDelete = append(blocksToDelete, ptr.ID)
			deletedUnrefs[ptr] = true
			// No need to unreference this since we haven't flushed it yet.
			continue
		}

		deletedRefs[ptr] = true
		// Put the unrefs in a new resOp after the final operation, to
		// cancel out any stray refs in earlier ops.
		fup.vlog.CLogf(ctx, libkb.VLog1, "Unreferencing dropped block %v", ptr)
		md.data.Changes.Ops = addUnrefToFinalResOp(
			md.data.Changes.Ops, ptr, unmergedChains.doNotUnrefPointers)
	}

	// Scrub all refs and unrefs of blocks that never made it to the
	// server, for smaller updates and to make things easier on the
	// StateChecker.  We scrub the refs too because in some cases
	// (e.g., on a copied conflict file), we add an unref without
	// removing the original ref, and if we remove the unref, the ref
	// must go too.
	if len(deletedRefs) > 0 || len(deletedUnrefs) > 0 {
		for _, op := range md.data.Changes.Ops {
			var toDelRef []data.BlockPointer
			for _, ref := range op.Refs() {
				if deletedRefs[ref] || deletedUnrefs[ref] {
					toDelRef = append(toDelRef, ref)
				}
			}
			for _, ref := range toDelRef {
				fup.vlog.CLogf(ctx, libkb.VLog1, "Scrubbing ref %v", ref)
				op.DelRefBlock(ref)
			}
			var toDelUnref []data.BlockPointer
			for _, unref := range op.Unrefs() {
				if deletedUnrefs[unref] {
					toDelUnref = append(toDelUnref, unref)
				}
			}
			for _, unref := range toDelUnref {
				fup.vlog.CLogf(ctx, libkb.VLog1, "Scrubbing unref %v", unref)
				op.DelUnrefBlock(unref)
			}
		}
		for _, resOp := range unmergedChains.resOps {
			for _, unref := range resOp.Unrefs() {
				if deletedUnrefs[unref] {
					fup.vlog.CLogf(
						ctx, libkb.VLog1, "Scrubbing resOp unref %v", unref)
					resOp.DelUnrefBlock(unref)
				}
			}
		}
	}

	fup.log.CDebugf(ctx, "New md byte usage: %d ref, %d unref, %d total usage "+
		"(previously %d)", md.RefBytes(), md.UnrefBytes(), md.DiskUsage(),
		mostRecentMergedMD.DiskUsage())
	return blocksToDelete, nil
}

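// The expected lock discipline, as followed by prepUpdateForPaths
// below:
//
//	fup.cacheLock.Lock()
//	defer fup.cacheLock.Unlock()
//	blocksToDelete, err := fup.updateResolutionUsageAndPointersLockedCache(
//		ctx, lState, md, bps, unmergedChains, mergedChains,
//		mostRecentUnmergedMD, mostRecentMergedMD, isSquash)
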
// setChildrenNodes adds a child node to nextNode for each of the
// given names that doesn't already have one, looking up each name's
// block pointer in the corresponding directory block when possible.
func (fup *folderUpdatePrepper) setChildrenNodes(
	ctx context.Context, lState *kbfssync.LockState, kmd libkey.KeyMetadata,
	p data.Path, indexInPath int, dbm dirBlockMap, nextNode *pathTreeNode,
	currPath data.Path, names []data.PathPartString) {
	dd, cleanupFn := fup.blocks.newDirDataWithDBM(
		lState, currPath, keybase1.UserOrTeamID(""), kmd, dbm)
	defer cleanupFn()

	pnode := p.Path[indexInPath]
	for _, name := range names {
		namePlain := name.Plaintext()
		if _, ok := nextNode.children[namePlain]; ok {
			continue
		}
		// Try to look up the block pointer, but this might be
		// for a new file.
		var filePtr data.BlockPointer
		name := data.NewPathPartString(namePlain, currPath.Obfuscator())
		de, err := dd.Lookup(ctx, name)
		switch errors.Cause(err).(type) {
		case nil:
			filePtr = de.BlockPointer
		case idutil.NoSuchNameError:
		default:
			fup.log.CWarningf(ctx, "Couldn't look up child: %+v", err)
			continue
		}

		fup.vlog.CLogf(
			ctx, libkb.VLog1, "Creating child node for name %s for parent %v",
			name, pnode.BlockPointer)
		childPath := data.Path{
			FolderBranch:    p.FolderBranch,
			Path:            make([]data.PathNode, indexInPath+2),
			ChildObfuscator: p.Path[indexInPath].Name.Obfuscator(),
		}
		copy(childPath.Path[0:indexInPath+1], p.Path[0:indexInPath+1])
		childPath.Path[indexInPath+1] = data.PathNode{Name: name}
		childNode := &pathTreeNode{
			ptr:        filePtr,
			parent:     nextNode,
			children:   make(map[string]*pathTreeNode),
			mergedPath: childPath,
		}
		nextNode.children[namePlain] = childNode
	}
}

// makeSyncTree organizes the given set of resolved paths into a
// single tree of pathTreeNodes, adding extra leaf nodes for any new
// files that don't have their own resolved paths, and returns its
// root.
func (fup *folderUpdatePrepper) makeSyncTree(
	ctx context.Context, lState *kbfssync.LockState,
	resolvedPaths map[data.BlockPointer]data.Path, kmd libkey.KeyMetadata, dbm dirBlockMap,
	newFileBlocks fileBlockMap) *pathTreeNode {
	var root *pathTreeNode
	var cleanupFn func()
	defer func() {
		if cleanupFn != nil {
			cleanupFn()
		}
	}()
	for _, p := range resolvedPaths {
		fup.vlog.CLogf(
			ctx, libkb.VLog1, "Creating tree from merged path: %v", p.Path)
		var parent *pathTreeNode
		for i, pnode := range p.Path {
			var nextNode *pathTreeNode
			if parent != nil {
				nextNode = parent.children[pnode.Name.Plaintext()]
			} else if root != nil {
				nextNode = root
			}
			if nextNode == nil {
				fup.vlog.CLogf(
					ctx, libkb.VLog1, "Creating node with pointer %v",
					pnode.BlockPointer)
				nextNode = &pathTreeNode{
					ptr:      pnode.BlockPointer,
					parent:   parent,
					children: make(map[string]*pathTreeNode),
					// save the full path, since we'll only use this
					// at the leaves anyway.
					mergedPath: p,
				}
				if parent != nil {
					parent.children[pnode.Name.Plaintext()] = nextNode
				}
			}
			if parent == nil && root == nil {
				root = nextNode
			}
			parent = nextNode

			// If this node is a directory that has files to sync,
			// make nodes for them as well.  (Because of
			// collapseActions, these files won't have their own
			// mergedPath.)
			names, err := newFileBlocks.getFilenames(ctx, pnode.BlockPointer)
			if err != nil {
				fup.log.CDebugf(ctx, "Error getting file names: %+v", err)
				continue
			}
			if len(names) == 0 {
				continue
			}

			hasBlock, err := dbm.hasBlock(ctx, pnode.BlockPointer)
			if err != nil {
				fup.log.CDebugf(ctx, "Error getting dir block: %+v", err)
				continue
			}
			if !hasBlock {
				// If the top block of the dir hasn't been dirtied, we
				// can skip it completely.
				continue
			}
			var ob data.Obfuscator
			if i+1 < len(p.Path) {
				ob = p.Path[i+1].Name.Obfuscator()
			} else {
				ob = p.Obfuscator()
			}
			currPath := data.Path{
				FolderBranch:    p.FolderBranch,
				Path:            p.Path[:i+1],
				ChildObfuscator: ob,
			}
			fup.setChildrenNodes(
				ctx, lState, kmd, p, i, dbm, nextNode, currPath, names)
		}
	}
	return root
}

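// For intuition, a hypothetical example: resolved paths /a/b and /a/c,
// plus a new file f recorded in newFileBlocks under b's pointer, yield
// a tree rooted at a with children b and c, and a leaf f under b:
//
//	a ── b ── f
//	└─── c
//
// prepTree then syncs the leaves f and c, readying a exactly once.
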
// fixOpPointersForUpdate takes in a slice of "reverted" ops (all referring
// to the original BlockPointers) and a map of BlockPointer updates
// (from original to the new most recent pointer), and corrects all
// the ops to use the new most recent pointers instead.  It returns a
// new slice of these operations with room in the first slot for a
// dummy operation containing all the updates.
func fixOpPointersForUpdate(oldOps []op, updates map[data.BlockPointer]data.BlockPointer,
	chains *crChains) (
	[]op, error) {
	newOps := make([]op, 0, len(oldOps)+1)
	newOps = append(newOps, nil) // placeholder for dummy op
	for _, op := range oldOps {
		var updatesToFix []*blockUpdate
		var ptrsToFix []*data.BlockPointer
		switch realOp := op.(type) {
		case *createOp:
			updatesToFix = append(updatesToFix, &realOp.Dir)
			// Since the created node was made exclusively during this
			// branch, we can use the most recent pointer for that
			// node as its ref.
			refs := realOp.Refs()
			realOp.RefBlocks = make([]data.BlockPointer, len(refs))
			for i, ptr := range refs {
				mostRecent, err := chains.mostRecentFromOriginalOrSame(ptr)
				if err != nil {
					return nil, err
				}
				realOp.RefBlocks[i] = mostRecent
				ptrsToFix = append(ptrsToFix, &realOp.RefBlocks[i])
			}
			// The leading resolutionOp will take care of the updates.
			realOp.Updates = nil
		case *rmOp:
			updatesToFix = append(updatesToFix, &realOp.Dir)
			// Since the rm'd node was made exclusively during this
			// branch, we can use the original pointer for that
			// node as its unref.
			unrefs := realOp.Unrefs()
			realOp.UnrefBlocks = make([]data.BlockPointer, len(unrefs))
			for i, ptr := range unrefs {
				original, err := chains.originalFromMostRecentOrSame(ptr)
				if err != nil {
					return nil, err
				}
				realOp.UnrefBlocks[i] = original
			}
			// The leading resolutionOp will take care of the updates.
			realOp.Updates = nil
		case *renameOp:
			updatesToFix = append(updatesToFix, &realOp.OldDir, &realOp.NewDir)
			ptrsToFix = append(ptrsToFix, &realOp.Renamed)
			// Hack: we need to fix up local conflict renames so that the
			// block update changes to the new block pointer.
			for i := range realOp.Updates {
				ptrsToFix = append(ptrsToFix, &realOp.Updates[i].Ref)
			}
			// Note: Unrefs from the original renameOp are now in a
			// separate rm operation.
		case *syncOp:
			updatesToFix = append(updatesToFix, &realOp.File)
			realOp.Updates = nil
		case *setAttrOp:
			updatesToFix = append(updatesToFix, &realOp.Dir)
			ptrsToFix = append(ptrsToFix, &realOp.File)
			// The leading resolutionOp will take care of the updates.
			realOp.Updates = nil
		}

		for _, update := range updatesToFix {
			newPtr, ok := updates[update.Unref]
			if !ok {
				continue
			}
			// Since the first op does all the heavy lifting of
			// updating pointers, we can set these to both just be the
			// new pointer
			var err error
			*update, err = makeBlockUpdate(newPtr, newPtr)
			if err != nil {
				return nil, err
			}
		}
		for _, ptr := range ptrsToFix {
			newPtr, ok := updates[*ptr]
			if !ok {
				continue
			}
			*ptr = newPtr
		}

		newOps = append(newOps, op)
	}
	return newOps, nil
}

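// A small worked example with hypothetical pointers: given updates =
// {dirOrig -> dirNew}, a createOp whose Dir update is currently
// (Unref: dirOrig, Ref: dirOrig) comes back as (dirNew, dirNew),
// since the leading dummy op will carry the real dirOrig -> dirNew
// update:
//
//	newOps, err := fixOpPointersForUpdate(oldOps, updates, unmergedChains)
//	// newOps[0] is nil here; the caller installs its resolutionOp there.
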
// prepUpdateForPaths takes in the complete set of paths affected by a
// set of changes, and organizes them into a tree, which it then syncs
// using prepTree.  It returns a map describing how blocks were
// updated in the final update, as well as the complete set of blocks
// that need to be put to the server (and cached) to complete this
// update and a list of blocks that can be removed from the flushing
// queue.
func (fup *folderUpdatePrepper) prepUpdateForPaths(ctx context.Context,
	lState *kbfssync.LockState, md *RootMetadata,
	unmergedChains, mergedChains *crChains,
	mostRecentUnmergedMD, mostRecentMergedMD ImmutableRootMetadata,
	resolvedPaths map[data.BlockPointer]data.Path, dbm dirBlockMap,
	newFileBlocks fileBlockMap, dirtyBcache data.DirtyBlockCacheSimple,
	bps blockPutState, copyBehavior prepFolderCopyBehavior) (
	updates map[data.BlockPointer]data.BlockPointer,
	blocksToDelete []kbfsblock.ID, err error) {
	updates = make(map[data.BlockPointer]data.BlockPointer)

	chargedTo, err := chargedToForTLF(
		ctx, fup.config.KBPKI(), fup.config.KBPKI(), fup.config,
		md.GetTlfHandle())
	if err != nil {
		return nil, nil, err
	}

	oldOps := md.data.Changes.Ops
	resOp, ok := oldOps[len(oldOps)-1].(*resolutionOp)
	if !ok {
		return nil, nil, fmt.Errorf("dummy op is not gc: %s",
			oldOps[len(oldOps)-1])
	}
	var mergedRoot data.BlockPointer
	// mostRecentChainMDInfo can be nil when we are squashing and there
	// weren't any merged MD updates at all, so guard the dereference.
	if mergedChains.mostRecentChainMDInfo != nil {
		mergedRoot =
			mergedChains.mostRecentChainMDInfo.GetRootDirEntry().BlockPointer
	}
	isSquash := mostRecentMergedMD.data.Dir.BlockPointer != mergedRoot

	if isSquash {
		// Squashes don't need to sync anything new.  Just set the
		// root pointer to the most recent root pointer, and fill up
		// the resolution op with all the known chain updates for this
		// branch.
		md.data.Dir.BlockInfo =
			unmergedChains.mostRecentChainMDInfo.GetRootDirEntry().BlockInfo
		for original, chain := range unmergedChains.byOriginal {
			if unmergedChains.isCreated(original) ||
				unmergedChains.isDeleted(original) ||
				chain.original == chain.mostRecent {
				continue
			}
			resOp.AddUpdate(original, chain.mostRecent)
		}
	} else {
		// Construct a tree out of the merged paths, and do a sync at each leaf.
		root := fup.makeSyncTree(
			ctx, lState, resolvedPaths, md, dbm, newFileBlocks)

		if root != nil {
			err = fup.prepTree(ctx, lState, unmergedChains,
				md, chargedTo, root, data.BlockPointer{}, dbm, newFileBlocks,
				dirtyBcache, bps, copyBehavior)
			if err != nil {
				return nil, nil, err
			}
		}
	}

	// Create an update map, and fix up the gc ops.
	for i, update := range resOp.Updates {
		fup.vlog.CLogf(
			ctx, libkb.VLog1, "resOp update: %v -> %v", update.Unref,
			update.Ref)
		// The unref should represent the most recent merged pointer
		// for the block.  However, the other ops will be using the
		// original pointer as the unref, so use that as the key.
		updates[update.Unref] = update.Ref
		if chain, ok := mergedChains.byMostRecent[update.Unref]; ok {
			updates[chain.original] = update.Ref
		}

		// Fix the gc updates to make sure they all unref the most
		// recent block pointer.  In cases where the two users create
		// the same directory independently, the update might
		// currently unref the unmerged most recent pointer.
		if chain, ok := unmergedChains.byMostRecent[update.Unref]; ok {
			// In case there was no merged chain above, map the
			// original to the ref again.
			updates[chain.original] = update.Ref

			mergedMostRecent, err :=
				mergedChains.mostRecentFromOriginalOrSame(chain.original)
			if err != nil {
				return nil, nil, err
			}
			fup.vlog.CLogf(
				ctx, libkb.VLog1, "Fixing resOp update from unmerged most "+
					"recent %v to merged most recent %v",
				update.Unref, mergedMostRecent)
			err = update.setUnref(mergedMostRecent)
			if err != nil {
				return nil, nil, err
			}
			resOp.Updates[i] = update
			updates[update.Unref] = update.Ref
		}
	}

	// Also add in file updates from sync operations, since the
	// resolutionOp may not include file-specific updates.  Start from
	// the end of the list, so we use the final sync op for each file.
	for i := len(oldOps) - 1; i >= 0; i-- {
		op := oldOps[i]
		so, ok := op.(*syncOp)
		if !ok {
			continue
		}
		if _, ok := updates[so.File.Unref]; !ok {
			fup.vlog.CLogf(
				ctx, libkb.VLog1, "Adding sync op update %v -> %v",
				so.File.Unref, so.File.Ref)
			updates[so.File.Unref] = so.File.Ref
			resOp.AddUpdate(so.File.Unref, so.File.Ref)
		}
	}

	// For all chains that were created only in the unmerged branch,
	// make sure we update all the pointers to their most recent
	// version.
	for original, chain := range unmergedChains.byOriginal {
		if !unmergedChains.isCreated(original) ||
			mergedChains.isCreated(original) {
			continue
		}
		if _, ok := updates[chain.original]; !ok {
			updates[chain.original] = chain.mostRecent
		}
	}

	// For all chains that were updated in both branches, make sure
	// the most recent unmerged pointer updates to the most recent
	// merged pointer.  Normally this would get fixed up in the resOp
	// loop above, but that will miss directories that were not
	// updated as part of the resolution.  (For example, if a file was
	// moved out of a directory in the merged branch, but an attr was
	// set on that file in the unmerged branch.)
	for unmergedOriginal := range unmergedChains.byOriginal {
		mergedChain, ok := mergedChains.byOriginal[unmergedOriginal]
		if !ok {
			continue
		}
		if _, ok := updates[unmergedOriginal]; !ok {
			updates[unmergedOriginal] = mergedChain.mostRecent
		}
	}

	// For all chains that were renamed only in the unmerged branch,
	// make sure we update all the pointers to their most recent
	// version.
	for original := range unmergedChains.renamedOriginals {
		mergedChain, ok := mergedChains.byOriginal[original]
		if !ok {
			continue
		}
		updates[original] = mergedChain.mostRecent
	}

	// Consolidate any chains of updates
	for k, v := range updates {
		if v2, ok := updates[v]; ok {
			updates[k] = v2
			delete(updates, v)
		}
	}

	newOps, err := fixOpPointersForUpdate(oldOps[:len(oldOps)-1], updates,
		unmergedChains)
	if err != nil {
		return nil, nil, err
	}

	// Clean up any gc updates that don't refer to blocks that exist
	// in the merged branch.
	var newUpdates []blockUpdate
	for _, update := range resOp.Updates {
		// Ignore it if it doesn't descend from an original block
		// pointer or one created in the merged branch.
		if _, ok := unmergedChains.originals[update.Unref]; !ok &&
			(unmergedChains.byOriginal[update.Unref] == nil ||
				unmergedChains.isCreated(update.Unref)) &&
			mergedChains.byMostRecent[update.Unref] == nil {
			fup.vlog.CLogf(
				ctx, libkb.VLog1,
				"Turning update from %v into just a ref for %v",
				update.Unref, update.Ref)
			resOp.AddRefBlock(update.Ref)
			continue
		}
		newUpdates = append(newUpdates, update)
	}
	resOp.Updates = newUpdates

	// Also include rmop unrefs for chains that were deleted in the
	// unmerged branch but not yet included in `newOps`, and not
	// re-created by some action in the merged branch.  These need to
	// be in the resolution for proper block accounting and
	// invalidation.
	rmOpUnrefs := make(map[data.BlockPointer]bool)
	for _, op := range newOps {
		if _, ok := op.(*rmOp); !ok {
			continue
		}
		for _, unref := range op.Unrefs() {
			rmOpUnrefs[unref] = true
		}
	}
	for original, chain := range unmergedChains.byOriginal {
		mergedChain := mergedChains.byOriginal[original]
		if chain.isFile() || !unmergedChains.isDeleted(original) ||
			mergedChains.isDeleted(original) ||
			(mergedChain != nil && len(mergedChain.ops) > 0) {
			continue
		}
		for _, op := range chain.ops {
			if _, ok := op.(*rmOp); !ok {
				continue
			}

			// TODO: We might need to include these rmOps in the
			// actual resolved MD, to send the proper invalidations
			// into the kernel before we rm the parent.
			for _, ptr := range op.Unrefs() {
				if unrefOrig, ok := unmergedChains.originals[ptr]; ok {
					ptr = unrefOrig
				}
				if rmOpUnrefs[ptr] {
					continue
				}

				newOps = addUnrefToFinalResOp(
					newOps, ptr, unmergedChains.doNotUnrefPointers)
			}
		}
	}

	if len(unmergedChains.resOps) > 0 {
		newBlocks := make(map[data.BlockPointer]bool)
		for _, ptr := range bps.Ptrs() {
			newBlocks[ptr] = true
		}

		// Look into the previous unmerged resolution ops and decide
		// which updates we want to keep.  We should only keep those
		// that correspond to uploaded blocks, or ones that are the
		// most recent block on a chain and haven't yet been involved
		// in an update during this resolution.  Unreference any
		// blocks that aren't the most recent blocks on their chains.
		currMDPtr := md.data.Dir.BlockPointer
		unmergedMDPtr :=
			unmergedChains.mostRecentChainMDInfo.GetRootDirEntry().BlockPointer
		for _, unmergedResOp := range unmergedChains.resOps {
			// Updates go in the first one.
			for _, update := range unmergedResOp.allUpdates() {
				chain, isMostRecent := unmergedChains.byMostRecent[update.Ref]
				isDeleted := false
				alreadyUpdated := false
				if isMostRecent {
					isDeleted = unmergedChains.isDeleted(chain.original) ||
						unmergedChains.toUnrefPointers[update.Ref]
					_, alreadyUpdated = updates[chain.original]
				}
				if newBlocks[update.Ref] ||
					(isMostRecent && !isDeleted && !alreadyUpdated) {
					fup.vlog.CLogf(
						ctx, libkb.VLog1, "Including update from old resOp: "+
							"%v -> %v", update.Unref, update.Ref)
					resOp.AddUpdate(update.Unref, update.Ref)

					if update.Unref == currMDPtr && update.Ref == unmergedMDPtr {
						// If the root block pointer didn't get
						// updated above, we may need to update it if
						// we're pulling in an updated root pointer
						// from a previous unmerged resolutionOp.
						fup.vlog.CLogf(
							ctx, libkb.VLog1, "Setting root blockpointer from "+
								"%v to %v based on unmerged update",
							currMDPtr, unmergedMDPtr)
						md.data.Dir.BlockInfo =
							unmergedChains.mostRecentChainMDInfo.
								GetRootDirEntry().BlockInfo
					}
				} else if !isMostRecent {
					fup.vlog.CLogf(
						ctx, libkb.VLog1, "Unrefing an update from old resOp: "+
							"%v (original=%v)", update.Ref, update.Unref)
					newOps = addUnrefToFinalResOp(
						newOps, update.Ref, unmergedChains.doNotUnrefPointers)
				}
			}
		}
	}

	newOps[0] = resOp // move the dummy ops to the front
	md.data.Changes.Ops = newOps

	for _, op := range newOps {
		fup.vlog.CLogf(
			ctx, libkb.VLog1, "remote op %s: refs: %v", op, op.Refs())
		fup.vlog.CLogf(
			ctx, libkb.VLog1, "remote op %s: unrefs: %v", op, op.Unrefs())
		for _, update := range op.allUpdates() {
			fup.vlog.CLogf(
				ctx, libkb.VLog1, "remote op %s: update: %v -> %v", op,
				update.Unref, update.Ref)
		}
	}

	fup.cacheLock.Lock()
	defer fup.cacheLock.Unlock()
	blocksToDelete, err = fup.updateResolutionUsageAndPointersLockedCache(
		ctx, lState, md, bps, unmergedChains, mergedChains,
		mostRecentUnmergedMD, mostRecentMergedMD, isSquash)
	if err != nil {
		return nil, nil, err
	}

	// Any refs (child block change pointers) and unrefs (dropped
	// unmerged block pointers) from previous resolutions go in a new
	// resolutionOp at the end, so we don't attempt to count any of
	// the bytes in the unref bytes count -- all of these pointers are
	// guaranteed to have been created purely within the unmerged
	// branch.
	if len(unmergedChains.resOps) > 0 {
		toDeleteMap := make(map[kbfsblock.ID]bool)
		for _, id := range blocksToDelete {
			toDeleteMap[id] = true
		}
		for _, unmergedResOp := range unmergedChains.resOps {
			for i := len(unmergedResOp.Refs()) - 1; i >= 0; i-- {
				ptr := unmergedResOp.Refs()[i]
				if unmergedChains.blockChangePointers[ptr] &&
					!toDeleteMap[ptr.ID] {
					fup.vlog.CLogf(
						ctx, libkb.VLog1, "Ignoring block change ptr %v", ptr)
					unmergedResOp.DelRefBlock(ptr)
					md.data.Changes.Ops =
						addUnrefToFinalResOp(md.data.Changes.Ops, ptr,
							unmergedChains.doNotUnrefPointers)
				}
			}
			for _, ptr := range unmergedResOp.Unrefs() {
				fup.vlog.CLogf(
					ctx, libkb.VLog1, "Unref pointer from old resOp: %v", ptr)
				original, err := unmergedChains.originalFromMostRecentOrSame(
					ptr)
				if err != nil {
					return nil, nil, err
				}
				if !unmergedChains.isCreated(original) {
					md.data.Changes.Ops = addUnrefToFinalResOp(
						md.data.Changes.Ops, ptr,
						unmergedChains.doNotUnrefPointers)
				}
			}
		}
	}

	// do the block changes need their own blocks?
	bsplit := fup.config.BlockSplitter()
	if !bsplit.ShouldEmbedData(md.data.Changes.SizeEstimate()) {
		// The child blocks should be referenced in the resolution op.
		_, ok := md.data.Changes.Ops[len(md.data.Changes.Ops)-1].(*resolutionOp)
		if !ok {
			// Append directly to the ops list, rather than use AddOp,
			// because the size estimate was already calculated.
			md.data.Changes.Ops = append(md.data.Changes.Ops, newResolutionOp())
		}

		err = fup.unembedBlockChanges(
			ctx, bps, md, &md.data.Changes, chargedTo)
		if err != nil {
			return nil, nil, err
		}
	}
	fup.cachedInfos = nil
	return updates, blocksToDelete, nil
}

// cacheBlockInfos stores the given block infos temporarily, until the
// next prepUpdateForPaths completes, as an optimization.
func (fup *folderUpdatePrepper) cacheBlockInfos(infos []data.BlockInfo) {
	fup.cacheLock.Lock()
	defer fup.cacheLock.Unlock()
	if fup.cachedInfos == nil {
		fup.cachedInfos = make(map[data.BlockPointer]data.BlockInfo)
	}
	for _, info := range infos {
		fup.cachedInfos[info.BlockPointer] = info
	}
}
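
// A usage sketch for the info cache (hypothetical caller, e.g.
// resolution code that has just computed the sizes of blocks it is
// about to unreference): caching the infos lets the next usage
// calculation skip re-fetching those blocks, and prepUpdateForPaths
// clears the cache when it finishes.
//
//	fup.cacheBlockInfos(unrefInfos)
//	// ... later, inside the next resolution:
//	updates, blocksToDelete, err := fup.prepUpdateForPaths(...)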