// Copyright 2018 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package data

import (
	"context"
	"fmt"
	"sort"
	"sync"

	"github.com/gammazero/workerpool"
	"github.com/keybase/client/go/kbfs/kbfsblock"
	"github.com/keybase/client/go/kbfs/libkey"
	"github.com/keybase/client/go/kbfs/tlf"
	"github.com/keybase/client/go/libkb"
	"github.com/keybase/client/go/logger"
	"github.com/keybase/client/go/protocol/keybase1"
	"golang.org/x/sync/errgroup"
)

const (
	// maxBlockFetchWorkers specifies the number of parallel
	// goroutines allowed when fetching blocks recursively in
	// parallel.
	maxBlockFetchWorkers = 100

	// maxParallelReadies specifies the number of block ready calls to
	// be made simultaneously for a given block tree.
	maxParallelReadies = 10
)

// blockGetterFn is a function that gets a block suitable for reading
// or writing, and also returns whether the block was already dirty.
// It may be called from new goroutines, and must handle any required
// locks accordingly.
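//
// A minimal illustrative getter (hypothetical, not part of this
// package's real wiring) could serve blocks out of an in-memory map:
//
//	blocks := map[BlockPointer]BlockWithPtrs{}
//	getter := func(ctx context.Context, kmd libkey.KeyMetadata,
//		ptr BlockPointer, file Path, rtype BlockReqType) (
//		BlockWithPtrs, bool, error) {
//		block, ok := blocks[ptr]
//		if !ok {
//			return nil, false, fmt.Errorf("no such block %v", ptr.ID)
//		}
//		return block, false, nil
//	}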
type blockGetterFn func(context.Context, libkey.KeyMetadata, BlockPointer,
	Path, BlockReqType) (block BlockWithPtrs, wasDirty bool, err error)

// dirtyBlockCacher writes dirty blocks to a cache.
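//
// A minimal illustrative cacher (hypothetical) could simply record
// dirty blocks in a map:
//
//	dirty := map[BlockPointer]Block{}
//	cacher := func(ctx context.Context, ptr BlockPointer, block Block) error {
//		dirty[ptr] = block
//		return nil
//	}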
type dirtyBlockCacher func(
	ctx context.Context, ptr BlockPointer, block Block) error

type blockTree struct {
	file      Path
	chargedTo keybase1.UserOrTeamID
	kmd       libkey.KeyMetadata
	bsplit    BlockSplitter
	getter    blockGetterFn
	cacher    dirtyBlockCacher
	log       logger.Logger
	vlog      *libkb.VDebugLog
}
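
// A blockTree is typically embedded in a higher-level helper (e.g.,
// the file-data code in this package) rather than used directly; a
// hypothetical minimal wiring, reusing getter/cacher closures like
// the sketches above, would look like:
//
//	bt := &blockTree{
//		file:      file,
//		chargedTo: chargedTo,
//		kmd:       kmd,
//		bsplit:    bsplit,
//		getter:    getter,
//		cacher:    cacher,
//		log:       log,
//		vlog:      vlog,
//	}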

// ParentBlockAndChildIndex is a node on a path down the tree to a
// particular leaf node.  `pblock` is an indirect block corresponding
// to one of that leaf node's parents, and `childIndex` is an index
// into `pblock.IPtrs` to the next node along the path.
type ParentBlockAndChildIndex struct {
	pblock     BlockWithPtrs
	childIndex int
}

func (pbci ParentBlockAndChildIndex) childIPtr() (BlockInfo, Offset) {
	return pbci.pblock.IndirectPtr(pbci.childIndex)
}

func (pbci ParentBlockAndChildIndex) childBlockPtr() BlockPointer {
	info, _ := pbci.pblock.IndirectPtr(pbci.childIndex)
	return info.BlockPointer
}

func (pbci ParentBlockAndChildIndex) clearEncodedSize() {
	pbci.pblock.ClearIndirectPtrSize(pbci.childIndex)
}

func (pbci ParentBlockAndChildIndex) setChildBlockInfo(info BlockInfo) {
	pbci.pblock.SetIndirectPtrInfo(pbci.childIndex, info)
}

func (bt *blockTree) rootBlockPointer() BlockPointer {
	return bt.file.TailPointer()
}

// getBlockAtOffset returns the leaf block containing the given
// `off`, along with the set of indirect blocks leading to that leaf
// (if any).
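//
// Illustrative use (hypothetical), fetching whichever leaf covers
// `off` on a read path:
//
//	_, parents, leaf, nextOff, startOff, wasDirty, err :=
//		bt.getBlockAtOffset(ctx, topBlock, off, BlockRead)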
func (bt *blockTree) getBlockAtOffset(ctx context.Context,
	topBlock BlockWithPtrs, off Offset, rtype BlockReqType) (
	ptr BlockPointer, parentBlocks []ParentBlockAndChildIndex,
	block BlockWithPtrs, nextBlockStartOff, startOff Offset,
	wasDirty bool, err error) {
	// Find the block matching the offset, if it exists.
	ptr = bt.rootBlockPointer()
	block = topBlock
	nextBlockStartOff = nil
	startOff = topBlock.FirstOffset()

	if !topBlock.IsIndirect() {
		// If it's not an indirect block, we just need to figure out
		// if it's dirty.
		_, wasDirty, err = bt.getter(ctx, bt.kmd, ptr, bt.file, rtype)
		if err != nil {
			return ZeroPtr, nil, nil, nil, nil, false, err
		}
		return ptr, nil, block, nextBlockStartOff, startOff, wasDirty, nil
	}

	// Search until it's not an indirect block.
	for block.IsIndirect() {
		nextIndex := block.NumIndirectPtrs() - 1
		for i := 0; i < block.NumIndirectPtrs(); i++ {
			_, iptrOff := block.IndirectPtr(i)
			if iptrOff.Equals(off) {
				// Small optimization to avoid iterating past the correct ptr.
				nextIndex = i
				break
			} else if off.Less(iptrOff) {
				// Use the previous block.  i can never be 0, because
				// the first ptr always has an offset at the beginning
				// of the range.
				nextIndex = i - 1
				break
			}
		}
		var info BlockInfo
		info, startOff = block.IndirectPtr(nextIndex)
		parentBlocks = append(parentBlocks,
			ParentBlockAndChildIndex{block, nextIndex})
		// There is more to read if we ever took a path through a
		// ptr that wasn't the final ptr in its respective list.
		if nextIndex != block.NumIndirectPtrs()-1 {
			_, nextBlockStartOff = block.IndirectPtr(nextIndex + 1)
		}
		ptr = info.BlockPointer
		block, wasDirty, err = bt.getter(
			ctx, bt.kmd, info.BlockPointer, bt.file, rtype)
		if err != nil {
			return ZeroPtr, nil, nil, nil, nil, false, err
		}
	}

	return ptr, parentBlocks, block, nextBlockStartOff, startOff, wasDirty, nil
}

// getNextDirtyBlockAtOffsetAtLevel does the same thing as
// `getNextDirtyBlockAtOffset` (see the comments on that function)
// on a subsection of the block tree (not necessarily starting from
// the top block).
func (bt *blockTree) getNextDirtyBlockAtOffsetAtLevel(ctx context.Context,
	pblock BlockWithPtrs, off Offset, rtype BlockReqType,
	dirtyBcache IsDirtyProvider, parentBlocks []ParentBlockAndChildIndex) (
	ptr BlockPointer, newParentBlocks []ParentBlockAndChildIndex,
	block BlockWithPtrs, nextBlockStartOff, startOff Offset, err error) {
	// Search along paths of dirty blocks until we find a dirty leaf
	// block with an offset equal or greater than `off`.
	checkedPrevBlock := false
	for i := 0; i < pblock.NumIndirectPtrs(); i++ {
		info, iptrOff := pblock.IndirectPtr(i)
		iptrLess := iptrOff.Less(off)
		if iptrLess && i != pblock.NumIndirectPtrs()-1 {
			continue
		}

		// No need to check the previous block if we align exactly
		// with `off`, or this is the right-most leaf block.
		if iptrLess || iptrOff.Equals(off) {
			checkedPrevBlock = true
		}

		// If we haven't checked the previous block yet, do so now
		// since it contains `off`.
		index := -1
		nextBlockStartOff = nil
		var prevPtr BlockPointer
		if !checkedPrevBlock && i > 0 {
			prevInfo, _ := pblock.IndirectPtr(i - 1)
			prevPtr = prevInfo.BlockPointer
		}
		if prevPtr.IsValid() && dirtyBcache.IsDirty(
			bt.file.Tlf, prevPtr, bt.file.Branch) {
			// Since we checked the previous block, stay on this
			// index for the next iteration.
			i--
			index = i
		} else if dirtyBcache.IsDirty(
			bt.file.Tlf, info.BlockPointer, bt.file.Branch) {
			// Now check the current block.
			index = i
		}
		checkedPrevBlock = true

		// Try the next child.
		if index == -1 {
			continue
		}

		indexInfo, indexOff := pblock.IndirectPtr(index)
		ptr = indexInfo.BlockPointer
		block, _, err = bt.getter(ctx, bt.kmd, ptr, bt.file, rtype)
		if err != nil {
			return ZeroPtr, nil, nil, nil, nil, err
		}

		newParentBlocks = make(
			[]ParentBlockAndChildIndex, len(parentBlocks), len(parentBlocks)+1)
		copy(newParentBlocks, parentBlocks)
		newParentBlocks = append(newParentBlocks,
			ParentBlockAndChildIndex{pblock, index})
		// If this is a leaf block, we're done.
		if !block.IsIndirect() {
			// There is more to read if we ever took a path through a
			// ptr that wasn't the final ptr in its respective list.
			if index != pblock.NumIndirectPtrs()-1 {
				_, nextBlockStartOff = pblock.IndirectPtr(index + 1)
			}
			return ptr, newParentBlocks, block, nextBlockStartOff, indexOff, nil
		}

		// Recurse to the next lower level.
		ptr, newParentBlocks, block, nextBlockStartOff, startOff, err =
			bt.getNextDirtyBlockAtOffsetAtLevel(
				ctx, block, off, rtype, dirtyBcache, newParentBlocks)
		if err != nil {
			return ZeroPtr, nil, nil, nil, nil, err
		}
		// If we found a block, we're done.
		if block != nil {
			// If the block didn't have an immediate sibling to the
			// right, set the next offset to the parent block's
			// sibling's offset.
			if nextBlockStartOff == nil && index != pblock.NumIndirectPtrs()-1 {
				_, nextBlockStartOff = pblock.IndirectPtr(index + 1)
			}
			return ptr, newParentBlocks, block, nextBlockStartOff, startOff, nil
		}
	}

	// There's no dirty block at or after `off`.
	return ZeroPtr, nil, nil, pblock.FirstOffset(), pblock.FirstOffset(), nil
}

// getNextDirtyBlockAtOffset returns the next dirty leaf block with a
// starting offset that is equal or greater than the given `off`.
// This assumes that any code that dirties a leaf block also dirties
// all of its parents, even if those parents haven't yet changed.  It
// can be used iteratively (by feeding `nextBlockStartOff` back in as
// `off`) to find all the dirty blocks.  Note that there is no need to
// parallelize that process, since all the dirty blocks are guaranteed
// to be local.  `nextBlockStartOff` is `nil` if there's no next block.
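//
// For example, the `ready` method below iterates over all the dirty
// leaf blocks roughly like this:
//
//	off := topBlock.FirstOffset()
//	for off != nil {
//		_, parentBlocks, block, nextBlockOff, _, err :=
//			bt.getNextDirtyBlockAtOffset(
//				ctx, topBlock, off, BlockWrite, dirtyBcache)
//		if err != nil || block == nil {
//			break
//		}
//		// ... process the dirty leaf ...
//		off = nextBlockOff
//	}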
func (bt *blockTree) getNextDirtyBlockAtOffset(ctx context.Context,
	topBlock BlockWithPtrs, off Offset, rtype BlockReqType,
	dirtyBcache IsDirtyProvider) (
	ptr BlockPointer, parentBlocks []ParentBlockAndChildIndex,
	block BlockWithPtrs, nextBlockStartOff, startOff Offset, err error) {
	// Find the block matching the offset, if it exists.
	ptr = bt.rootBlockPointer()
	if !dirtyBcache.IsDirty(bt.file.Tlf, ptr, bt.file.Branch) {
		// The top block isn't dirty, so we know none of the leaves
		// are dirty.
		return ZeroPtr, nil, nil, topBlock.FirstOffset(),
			topBlock.FirstOffset(), nil
	} else if !topBlock.IsIndirect() {
		// A dirty, direct block.
		return bt.rootBlockPointer(), nil, topBlock, nil,
			topBlock.FirstOffset(), nil
	}

	ptr, parentBlocks, block, nextBlockStartOff, startOff, err =
		bt.getNextDirtyBlockAtOffsetAtLevel(
			ctx, topBlock, off, rtype, dirtyBcache, nil)
	if err != nil {
		return ZeroPtr, nil, nil, nil, nil, err
	}
	if block == nil {
		return ZeroPtr, nil, nil, topBlock.FirstOffset(),
			topBlock.FirstOffset(), nil
	}

	// The leaf block doesn't cover this index.  (If the contents
	// length is 0, then this is the start or end of a hole, and it
	// should still count as dirty.)
	if block.OffsetExceedsData(startOff, off) {
		return ZeroPtr, nil, nil, nil, topBlock.FirstOffset(), nil
	}

	return ptr, parentBlocks, block, nextBlockStartOff, startOff, nil
}

// getBlocksForOffsetRangeTask is used for passing data to
// getBlocksForOffsetRange tasks.
type getBlocksForOffsetRangeTask struct {
	ptr        BlockPointer
	pblock     BlockWithPtrs
	pathPrefix []ParentBlockAndChildIndex
	startOff   Offset
	endOff     Offset
	prefixOk   bool
	getDirect  bool
	// firstBlock is true if this is the first block in the range being fetched.
	firstBlock bool
}

func (task *getBlocksForOffsetRangeTask) subTask(
	childPtr BlockPointer, childPath []ParentBlockAndChildIndex,
	firstBlock bool) getBlocksForOffsetRangeTask {
	subTask := *task
	subTask.ptr = childPtr
	subTask.pblock = nil
	subTask.pathPrefix = childPath
	subTask.firstBlock = firstBlock
	return subTask
}

// getBlocksForOffsetRangeResult is used for passing data back from
// getBlocksForOffsetRange tasks.
type getBlocksForOffsetRangeResult struct {
	pathFromRoot    []ParentBlockAndChildIndex
	ptr             BlockPointer
	block           Block
	nextBlockOffset Offset
	firstBlock      bool
	err             error
}

// processGetBlocksTask examines the block it is passed, enqueueing any children
// in range into wp, and passing data back through results.
func (bt *blockTree) processGetBlocksTask(ctx context.Context,
	wg *sync.WaitGroup, wp *workerpool.WorkerPool,
	job getBlocksForOffsetRangeTask,
	results chan<- getBlocksForOffsetRangeResult) {
	defer wg.Done()

	select {
	case <-ctx.Done():
		results <- getBlocksForOffsetRangeResult{err: ctx.Err()}
		return
	default:
	}

	// We may have been passed just a pointer and need to fetch the block here.
	var pblock BlockWithPtrs
	if job.pblock == nil {
		var err error
		pblock, _, err = bt.getter(ctx, bt.kmd, job.ptr, bt.file, BlockReadParallel)
		if err != nil {
			results <- getBlocksForOffsetRangeResult{
				firstBlock: job.firstBlock,
				err:        err,
			}
			return
		}
	} else {
		pblock = job.pblock
	}

	if !pblock.IsIndirect() {
		// Return this block, under the assumption that the
		// caller already checked the range for this block.
		if job.getDirect {
			results <- getBlocksForOffsetRangeResult{
				pathFromRoot:    job.pathPrefix,
				ptr:             job.ptr,
				block:           pblock,
				nextBlockOffset: nil,
				firstBlock:      job.firstBlock,
				err:             nil,
			}
		}
		return
	}

	// Search all of the in-range child blocks, and their child
	// blocks, etc, in parallel.
	childIsFirstBlock := job.firstBlock
	for i := 0; i < pblock.NumIndirectPtrs(); i++ {
		info, iptrOff := pblock.IndirectPtr(i)
		// Some byte of this block is included in the left side of the
		// range if `job.startOff` is less than the largest byte offset in
		// the block.
		inRangeLeft := true
		if i < pblock.NumIndirectPtrs()-1 {
			_, off := pblock.IndirectPtr(i + 1)
			inRangeLeft = job.startOff.Less(off)
		}
		if !inRangeLeft {
			continue
		}
		// Some byte of this block is included in the right side of
		// the range if `job.endOff` is bigger than the smallest byte
		// offset in the block (or if we're explicitly reading all the
		// data to the end).
		inRangeRight := job.endOff == nil || iptrOff.Less(job.endOff)
		if !inRangeRight {
			// This block is the first one past the offset range
			// among the children.
			results <- getBlocksForOffsetRangeResult{nextBlockOffset: iptrOff}
			return
		}

		childPtr := info.BlockPointer
		childIndex := i

		childPath := make([]ParentBlockAndChildIndex, len(job.pathPrefix)+1)
		copy(childPath, job.pathPrefix)
		childPath[len(childPath)-1] = ParentBlockAndChildIndex{
			pblock:     pblock,
			childIndex: childIndex,
		}

		// We only need to fetch direct blocks if we've been asked
		// to do so.  If the direct type of the pointer is
		// unknown, we can assume all the children are direct
		// blocks, since there weren't multiple levels of
		// indirection before the introduction of the flag.
		if job.getDirect || childPtr.DirectType == IndirectBlock {
			subTask := job.subTask(childPtr, childPath, childIsFirstBlock)

			// Enqueue the subTask with the WorkerPool.
			wg.Add(1)
			wp.Submit(func() {
				bt.processGetBlocksTask(ctx, wg, wp, subTask, results)
			})
		} else {
			results <- getBlocksForOffsetRangeResult{
				pathFromRoot: childPath,
				firstBlock:   childIsFirstBlock,
			}
		}
		childIsFirstBlock = false
	}
}

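// checkForHolesAndTruncate returns the longest hole-free prefix of
// `pathsFromRoot`: each path must have the same depth as, and be the
// immediate right-hand successor of, the path before it.  Everything
// from the first discontinuity onward is truncated away.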
func checkForHolesAndTruncate(
	pathsFromRoot [][]ParentBlockAndChildIndex) [][]ParentBlockAndChildIndex {
	var prevPath []ParentBlockAndChildIndex
	for pathIdx, path := range pathsFromRoot {
		// Each path after the first must immediately follow the preceding path.
		if pathIdx == 0 {
			prevPath = path
			continue
		}
		// Find the first place the two paths differ.
		// Verify that path is immediately after prevPath.
		if len(path) != len(prevPath) {
			return pathsFromRoot[:pathIdx]
		}

		foundIncrement := false
		for idx := range path {
			prevChild := prevPath[idx].childIndex
			thisChild := path[idx].childIndex
			if foundIncrement {
				// Below the level of the increment, the new path must
				// start at child 0 and the previous path must end at
				// the last child of its parent block.
				if thisChild != 0 ||
					prevChild != prevPath[idx].pblock.NumIndirectPtrs()-1 {
					return pathsFromRoot[:pathIdx]
				}
			} else {
				if prevChild+1 == thisChild {
					foundIncrement = true
				} else if prevChild != thisChild {
					return pathsFromRoot[:pathIdx]
				}
			}
		}
		// If we never found where the two paths differ,
		// then something has gone wrong.
		if !foundIncrement {
			return pathsFromRoot[:pathIdx]
		}
		prevPath = path
	}
	return pathsFromRoot
}

// getBlocksForOffsetRange fetches all the blocks making up paths down
// the block tree to leaf ("direct") blocks that encompass the given
// offset range (half-inclusive) in the data.  If `endOff` is nil, it
// returns blocks until reaching the end of the data.  If `prefixOk`
// is true, the function will ignore context deadline errors and
// return whatever prefix of the data it could fetch within the
// deadline.  Return params:
//
//   - pathsFromRoot is a slice, ordered by offset, of paths from
//     the root to each block that makes up the range.  If the path is
//     empty, it indicates that pblock is a direct block and has no
//     children.
//   - blocks: a map from block pointer to a data-containing leaf node
//     in the given range of offsets, if `getDirect` is true.
//   - nextBlockOff is the offset of the block that follows the last
//     block given in `pathsFromRoot`.  If `pathsFromRoot` contains
//     the last block among the children, nextBlockOff is nil.
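//
// For example, `getIndirectBlocksForOffsetRange` below uses it to
// fetch every indirect-block path in the tree, skipping the leaves:
//
//	pfr, _, _, err := bt.getBlocksForOffsetRange(
//		ctx, bt.rootBlockPointer(), topBlock, topBlock.FirstOffset(),
//		nil, false, false)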
func (bt *blockTree) getBlocksForOffsetRange(ctx context.Context,
	ptr BlockPointer, pblock BlockWithPtrs, startOff, endOff Offset,
	prefixOk bool, getDirect bool) (pathsFromRoot [][]ParentBlockAndChildIndex,
	blocks map[BlockPointer]Block, nextBlockOffset Offset,
	err error) {
	// Make a WaitGroup to keep track of whether there's still work to be done.
	var wg sync.WaitGroup

	// Make a workerpool to limit the number of concurrent goroutines.
	wp := workerpool.New(maxBlockFetchWorkers)

	// Make a context to cancel all the jobs if something goes wrong.
	groupCtx, cancel := context.WithCancel(ctx)
	defer cancel()

	// Make a queue for results coming back from workers.
	results := make(chan getBlocksForOffsetRangeResult)

	// Enqueue the top-level task. Increment the task counter by one.
	rootTask := getBlocksForOffsetRangeTask{
		ptr:        ptr,
		pblock:     pblock,
		pathPrefix: nil,
		startOff:   startOff,
		endOff:     endOff,
		prefixOk:   prefixOk,
		getDirect:  getDirect,
		firstBlock: true,
	}
	wg.Add(1)
	wp.Submit(func() {
		bt.processGetBlocksTask(groupCtx, &wg, wp, rootTask, results)
	})

	// Once all the work is done, stop the WorkerPool and close `results` so
	// that the loop below exits.
	go func() {
		wg.Wait()
		wp.Stop()
		close(results)
	}()

	// Reduce all the results coming in over the results channel.
	var minNextBlockOffset Offset
	blocks = make(map[BlockPointer]Block)
	pathsFromRoot = [][]ParentBlockAndChildIndex{}
	mustCheckForHoles := false
	gotFirstBlock := false
	var errors []error
	for res := range results {
		if res.err != nil {
			// If we are ok with just getting the prefix, don't treat a
			// deadline exceeded error as fatal.
			if prefixOk && res.err == context.DeadlineExceeded &&
				!res.firstBlock && len(errors) == 0 {
				mustCheckForHoles = true
			} else {
				errors = append(errors, res.err)
			}
			cancel()
		}
		if res.pathFromRoot != nil {
			pathsFromRoot = append(pathsFromRoot, res.pathFromRoot)
		}
		if res.block != nil {
			blocks[res.ptr] = res.block
		}
		if res.nextBlockOffset != nil &&
			(minNextBlockOffset == nil ||
				res.nextBlockOffset.Less(minNextBlockOffset)) {
			minNextBlockOffset = res.nextBlockOffset
		}
		if res.firstBlock {
			gotFirstBlock = true
		}
	}
	nextBlockOffset = minNextBlockOffset

	if len(errors) == 1 {
		return nil, nil, nil, errors[0]
	} else if len(errors) > 1 {
		return nil, nil, nil, fmt.Errorf("multiple errors: %v", errors)
	}

	// Out-of-order traversal means the paths come back from workers unsorted.
	// Sort them before returning them to the caller.
	sort.Slice(pathsFromRoot, func(i, j int) bool {
		pathI := pathsFromRoot[i]
		pathJ := pathsFromRoot[j]
		lastChildI := pathI[len(pathI)-1]
		lastChildJ := pathJ[len(pathJ)-1]

		_, offsetI := lastChildI.pblock.IndirectPtr(lastChildI.childIndex)
		_, offsetJ := lastChildJ.pblock.IndirectPtr(lastChildJ.childIndex)

		return offsetI.Less(offsetJ)
	})

	// If we are returning data even though not all the goroutines completed,
	// we may need to return only some of the data we gathered in order to
	// return a correct prefix of the data. Thus, we find the longest prefix of
	// the data without any holes.
	if !gotFirstBlock {
		pathsFromRoot = [][]ParentBlockAndChildIndex{}
	} else if mustCheckForHoles {
		pathsFromRoot = checkForHolesAndTruncate(pathsFromRoot)
	}

	return pathsFromRoot, blocks, nextBlockOffset, nil
}

type createTopBlockFn func(context.Context, Ver) (BlockWithPtrs, error)
type makeNewBlockWithPtrs func(isIndirect bool) BlockWithPtrs

// newRightBlock creates space for a new rightmost block, creating
// parent blocks and a new level of indirection in the tree as needed.
// If there's no new level of indirection, it modifies the blocks in
// `parentBlocks` to include the new right-most pointers
// (`parentBlocks` must consist of blocks copied for writing).  It
// also returns the set of parents pointing to the new block (whether
// or not there is a new level of indirection), and also returns any
// newly-dirtied block pointers.
//
// The new block is pointed to using offset `off`, and doesn't have to
// represent the right-most block in a tree.  In particular, if `off`
// is less than the offset of its leftmost neighbor, it's the caller's
// responsibility to move the new right block into the correct place
// in the tree (e.g., using `shiftBlocksToFillHole()`).
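//
// Illustrative sketch (not from the original code), assuming
// MaxPtrsPerBlock() == 2 and a tree whose right edge is already full:
// a new top block T is created with its first pointer aimed at the
// old top block, and a fresh chain of right-most blocks is appended
// beneath it, ending at the new leaf with offset `off`:
//
//	  top[o1 o2]                T[o1 off]
//	  /        \        =>      /       \
//	a[..]     b[..]       top[o1 o2]   r1[off]
//	                       /      \        \
//	                     a[..]   b[..]   leaf(off)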
func (bt *blockTree) newRightBlock(
	ctx context.Context, parentBlocks []ParentBlockAndChildIndex, off Offset,
	dver Ver, newBlock makeNewBlockWithPtrs, topBlocker createTopBlockFn) (
	[]ParentBlockAndChildIndex, []BlockPointer, error) {
	// Find the lowest block that can accommodate a new right block.
	lowestAncestorWithRoom := -1
	for i := len(parentBlocks) - 1; i >= 0; i-- {
		pb := parentBlocks[i]
		if pb.pblock.NumIndirectPtrs() < bt.bsplit.MaxPtrsPerBlock() {
			lowestAncestorWithRoom = i
			break
		}
	}

	var newTopBlock BlockWithPtrs
	var newDirtyPtrs []BlockPointer
	if lowestAncestorWithRoom < 0 {
		// Create a new level of indirection at the top.
		var err error
		newTopBlock, err = topBlocker(ctx, dver)
		if err != nil {
			return nil, nil, err
		}

		// The old top block needs to be cached under its new ID if it
		// was indirect.
		if len(parentBlocks) > 0 {
			dType := DirectBlock
			if parentBlocks[0].pblock.IsIndirect() {
				dType = IndirectBlock
			}
			newTopBlock.SetIndirectPtrType(0, dType)
			info, _ := newTopBlock.IndirectPtr(0)
			ptr := info.BlockPointer
			err = bt.cacher(ctx, ptr, parentBlocks[0].pblock)
			if err != nil {
				return nil, nil, err
			}
			newDirtyPtrs = append(newDirtyPtrs, ptr)
		}

		parentBlocks = append([]ParentBlockAndChildIndex{{newTopBlock, 0}},
			parentBlocks...)
		lowestAncestorWithRoom = 0
	}
	rightParentBlocks := make([]ParentBlockAndChildIndex, len(parentBlocks))

	bt.vlog.CLogf(
		ctx, libkb.VLog1, "Making new right block at off %s for entry %v, "+
			"lowestAncestor at level %d", off, bt.rootBlockPointer(),
		lowestAncestorWithRoom)

	// Make a new right block for every parent, starting with the
	// lowest ancestor with room.  Note that we're not iterating over
	// the actual parent blocks here; we're only using its length to
	// figure out how many levels need new blocks.
	pblock := parentBlocks[lowestAncestorWithRoom].pblock
	parentPtr := bt.rootBlockPointer()
	if lowestAncestorWithRoom > 0 {
		parentPtr = parentBlocks[lowestAncestorWithRoom-1].childBlockPtr()
	}
	for i := lowestAncestorWithRoom; i < len(parentBlocks); i++ {
		newRID, err := kbfsblock.MakeTemporaryID()
		if err != nil {
			return nil, nil, err
		}

		newPtr := BlockPointer{
			ID:      newRID,
			KeyGen:  bt.kmd.LatestKeyGeneration(),
			DataVer: dver,
			Context: kbfsblock.MakeFirstContext(
				bt.chargedTo, bt.rootBlockPointer().GetBlockType()),
			DirectType: IndirectBlock,
		}

		if i == len(parentBlocks)-1 {
			newPtr.DirectType = DirectBlock
		}

		bt.vlog.CLogf(
			ctx, libkb.VLog1, "New right block for entry %v, level %d, ptr %v",
			bt.rootBlockPointer(), i, newPtr)

		pblock.AppendNewIndirectPtr(newPtr, off)
		rightParentBlocks[i].pblock = pblock
		rightParentBlocks[i].childIndex = pblock.NumIndirectPtrs() - 1
		err = bt.cacher(ctx, parentPtr, pblock)
		if err != nil {
			return nil, nil, err
		}

		isInd := i != len(parentBlocks)-1
		rblock := newBlock(isInd)
		if isInd {
			pblock = rblock
			parentPtr = newPtr
		}

		err = bt.cacher(ctx, newPtr, rblock)
		if err != nil {
			return nil, nil, err
		}

		newDirtyPtrs = append(newDirtyPtrs, newPtr)
	}

	// All parents up to and including the lowest ancestor with room
	// will have to change, so mark them as dirty.
	ptr := bt.rootBlockPointer()
	for i := 0; i <= lowestAncestorWithRoom; i++ {
		pb := parentBlocks[i]
		if err := bt.cacher(ctx, ptr, pb.pblock); err != nil {
			return nil, nil, err
		}
		newDirtyPtrs = append(newDirtyPtrs, ptr)
		ptr = pb.childBlockPtr()
		rightParentBlocks[i].pblock = pb.pblock
		rightParentBlocks[i].childIndex = pb.pblock.NumIndirectPtrs() - 1
	}

	return rightParentBlocks, newDirtyPtrs, nil
}

// setParentOffsets updates the parent offsets for a newly-moved
// block, all the way up to its common ancestor (which is the one that
// doesn't have a childIndex of 0).
func (bt *blockTree) setParentOffsets(
	ctx context.Context, newOff Offset,
	parents []ParentBlockAndChildIndex, currIndex int) (
	newDirtyPtrs []BlockPointer, newUnrefs []BlockInfo, err error) {
	for level := len(parents) - 2; level >= 0; level-- {
		// Cache the block below this level, which was just
		// modified.
		childInfo, _ := parents[level].childIPtr()
		if err := bt.cacher(
			ctx, childInfo.BlockPointer, parents[level+1].pblock); err != nil {
			return nil, nil, err
		}
		newDirtyPtrs = append(newDirtyPtrs, childInfo.BlockPointer)
		// Remember the size of the dirtied child.
		if childInfo.EncodedSize != 0 {
			newUnrefs = append(newUnrefs, childInfo)
			parents[level].clearEncodedSize()
		}

		// If we've reached a level where the child indirect
		// offset wasn't affected, we're done.  If not, update the
		// offset at this level and move up the tree.
		if currIndex > 0 {
			break
		}
		currIndex = parents[level].childIndex
		parents[level].pblock.SetIndirectPtrOff(currIndex, newOff)
	}
	return newDirtyPtrs, newUnrefs, nil
}

func (bt *blockTree) String() string {
	block, _, err := bt.getter(
		nil, bt.kmd, bt.rootBlockPointer(), bt.file, BlockRead)
	if err != nil {
		return "ERROR: " + err.Error()
	}

	level := []BlockWithPtrs{block}
	// TODO: use a `bytes.Buffer` instead of a regular string here if
	// we ever use this function from real code.
	res := "\n---------------\n"
	for len(level) > 0 {
		var nextLevel []BlockWithPtrs
		for i, block := range level {
			if !block.IsIndirect() {
				continue
			}
			for j := 0; j < block.NumIndirectPtrs(); j++ {
				info, off := block.IndirectPtr(j)
				res += fmt.Sprintf("\"%s\" ", off)
				if info.DirectType == DirectBlock {
					continue
				}
				child, _, err := bt.getter(
					nil, bt.kmd, info.BlockPointer, bt.file, BlockRead)
				if err != nil {
					return "ERROR: " + err.Error()
				}
				nextLevel = append(nextLevel, child)
			}
			if i+1 < len(level) {
				res += "| "
			}
		}
		res += "\n"
		level = nextLevel
	}
	res += "---------------\n"
	return res
}

// shiftBlocksToFillHole should be called after newRightBlock when the
// offset for the new block is smaller than the final offset of the
// tree.  This happens when there is a hole in the file, or when
// expanding an internal leaf for a directory, and the user is now
// writing data into that expanded area.  This function moves the new
// block into the correct place, and rearranges all the indirect
// pointers in the file as needed.  It returns any block pointers that
// were dirtied in the process.
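//
// Worked example (illustrative, with integer offsets): if the leaf
// offsets were [0 5 20 30] and newRightBlock just appended a new
// block at offset 10, yielding [0 5 20 30 10], this function swaps
// the new block leftward until order is restored: [0 5 10 20 30].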
func (bt *blockTree) shiftBlocksToFillHole(
	ctx context.Context, parents []ParentBlockAndChildIndex) (
	newDirtyPtrs []BlockPointer, newUnrefs []BlockInfo,
	newlyDirtiedChildBytes int64, err error) {
	// `parents` should represent the right side of the tree down to
	// the new rightmost indirect pointer, the offset of which should
	// match `newHoleStartOff`.  Keep swapping it with its sibling on
	// the left until its offset would be lower than that child's
	// offset.  If there are no children to the left, continue on with
	// the children in the cousin block to the left.  If we swap a
	// child between cousin blocks, we must update the offset in the
	// right cousin's parent block.  If *that* updated pointer is the
	// leftmost pointer in its parent block, update that one as well,
	// up to the root.
	//
	// We are guaranteed at least one level of indirection because
	// `newRightBlock` should have been called before
	// `shiftBlocksToFillHole`.
	immedParent := parents[len(parents)-1]
	currIndex := immedParent.childIndex
	_, newBlockStartOff := immedParent.childIPtr()

	bt.vlog.CLogf(
		ctx, libkb.VLog1, "Shifting block with offset %s for entry %v into "+
			"position", newBlockStartOff, bt.rootBlockPointer())

	// Swap left as needed.
	for loopedOnce := false; ; loopedOnce = true {
		var leftOff Offset
		var newParents []ParentBlockAndChildIndex
		immedPblock := immedParent.pblock
		if currIndex > 0 {
			_, leftOff = immedPblock.IndirectPtr(currIndex - 1)
		} else {
			if loopedOnce {
				// Now update the left side if needed, before looking into
				// swapping across blocks.
				bt.vlog.CLogf(ctx, libkb.VLog1, "Updating on left side")
				_, newOff := immedPblock.IndirectPtr(currIndex)
				ndp, nu, err := bt.setParentOffsets(
					ctx, newOff, parents, currIndex)
				if err != nil {
					return nil, nil, 0, err
				}
				newDirtyPtrs = append(newDirtyPtrs, ndp...)
				newUnrefs = append(newUnrefs, nu...)
			}

			// Construct the new set of parents for the shifted block,
			// by looking for the next left cousin.
			newParents = make([]ParentBlockAndChildIndex, len(parents))
			copy(newParents, parents)
			var level int
			for level = len(newParents) - 2; level >= 0; level-- {
				// The parent at the level being evaluated has a left
				// sibling, so we use that sibling.
				if newParents[level].childIndex > 0 {
					break
				}
				// Keep going up until we find a way back down a left branch.
			}

			if level < 0 {
				// We are already all the way on the left, we're done!
				return newDirtyPtrs, newUnrefs, newlyDirtiedChildBytes, nil
			}
			newParents[level].childIndex--

			// Walk back down, shifting the new parents into position.
			for ; level < len(newParents)-1; level++ {
				nextPtr := newParents[level].childBlockPtr()
				childBlock, _, err := bt.getter(
					ctx, bt.kmd, nextPtr, bt.file, BlockWrite)
				if err != nil {
					return nil, nil, 0, err
				}

				newParents[level+1].pblock = childBlock
				newParents[level+1].childIndex =
					childBlock.NumIndirectPtrs() - 1
				_, leftOff = childBlock.IndirectPtr(
					childBlock.NumIndirectPtrs() - 1)
			}
		}

		// We're done!
		if leftOff.Less(newBlockStartOff) {
			return newDirtyPtrs, newUnrefs, newlyDirtiedChildBytes, nil
		}

		// Otherwise, we need to swap the indirect file pointers.
		if currIndex > 0 {
			immedPblock.SwapIndirectPtrs(currIndex-1, immedPblock, currIndex)
			currIndex--
			continue
		}

		// Swap block pointers across cousins at the lowest level of
		// indirection.
		newImmedParent := newParents[len(newParents)-1]
		newImmedPblock := newImmedParent.pblock
		newCurrIndex := newImmedPblock.NumIndirectPtrs() - 1
		newImmedPblock.SwapIndirectPtrs(newCurrIndex, immedPblock, currIndex)

		// Cache the new immediate parent as dirty.  Also cache the
		// old immediate parent's right-most leaf child as dirty, to
		// make sure this path is captured in
		// getNextDirtyBlockAtOffset calls.  TODO: this is inefficient
		// since it might end up re-encoding and re-uploading a leaf
		// block that wasn't actually dirty; we should find a better
		// way to make sure ready() sees these parent blocks.
		if len(newParents) > 1 {
			i := len(newParents) - 2
			childPtr := newParents[i].childBlockPtr()
			if err := bt.cacher(
				ctx, childPtr, newImmedPblock); err != nil {
				return nil, nil, 0, err
			}
			newDirtyPtrs = append(newDirtyPtrs, childPtr)

			// Fetch the old parent's right leaf for writing, and mark
			// it as dirty.
			rightLeafInfo, _ := immedPblock.IndirectPtr(
				immedPblock.NumIndirectPtrs() - 1)
			leafBlock, _, err := bt.getter(
				ctx, bt.kmd, rightLeafInfo.BlockPointer, bt.file, BlockWrite)
			if err != nil {
				return nil, nil, 0, err
			}
			if err := bt.cacher(
				ctx, rightLeafInfo.BlockPointer, leafBlock); err != nil {
				return nil, nil, 0, err
			}
			newDirtyPtrs = append(newDirtyPtrs, rightLeafInfo.BlockPointer)
			// Remember the size of the dirtied leaf.
			if rightLeafInfo.EncodedSize != 0 {
				newlyDirtiedChildBytes += leafBlock.BytesCanBeDirtied()
				newUnrefs = append(newUnrefs, rightLeafInfo)
				immedPblock.ClearIndirectPtrSize(
					immedPblock.NumIndirectPtrs() - 1)
			}
		}

		// Now we need to update the parent offsets on the right side,
		// all the way up to the common ancestor (which is the one
		// that doesn't have a childIndex of 0).
		_, newRightOff := immedPblock.IndirectPtr(currIndex)
		ndp, nu, err := bt.setParentOffsets(
			ctx, newRightOff, parents, currIndex)
		if err != nil {
			return nil, nil, 0, err
		}
		newDirtyPtrs = append(newDirtyPtrs, ndp...)
		newUnrefs = append(newUnrefs, nu...)

		immedParent = newImmedParent
		currIndex = newCurrIndex
		parents = newParents
	}
	// The loop above must exit via one of the returns.
}

// markParentsDirty caches all the blocks in `parentBlocks` as dirty,
// and returns the dirtied block pointers as well as any block infos
// with non-zero encoded sizes that will now need to be unreferenced.
func (bt *blockTree) markParentsDirty(
	ctx context.Context, parentBlocks []ParentBlockAndChildIndex) (
	dirtyPtrs []BlockPointer, unrefs []BlockInfo, err error) {
	parentPtr := bt.rootBlockPointer()
	for _, pb := range parentBlocks {
		dirtyPtrs = append(dirtyPtrs, parentPtr)
		childInfo, _ := pb.childIPtr()

		// Remember the size of each newly-dirtied child.
		if childInfo.EncodedSize != 0 {
			unrefs = append(unrefs, childInfo)
			pb.clearEncodedSize()
		}
		if err := bt.cacher(ctx, parentPtr, pb.pblock); err != nil {
			return nil, unrefs, err
		}
		parentPtr = childInfo.BlockPointer
	}
	return dirtyPtrs, unrefs, nil
}

type makeSyncFunc func(ptr BlockPointer) func() error

func (bt *blockTree) readyWorker(
	ctx context.Context, id tlf.ID, bcache BlockCache, rp ReadyProvider,
	bps BlockPutState, pathsFromRoot [][]ParentBlockAndChildIndex,
	makeSync makeSyncFunc, i int, level int, lock *sync.Mutex,
	oldPtrs map[BlockInfo]BlockPointer, donePtrs map[BlockPointer]bool,
	hashBehavior BlockCacheHashBehavior) error {
	// Ready the dirty block.
	pb := pathsFromRoot[i][level]

	lock.Lock()
	parentPB := pathsFromRoot[i][level-1]
	ptr := parentPB.childBlockPtr()
	// If this is already a new pointer, skip it.
	if donePtrs[ptr] {
		lock.Unlock()
		return nil
	}
	donePtrs[ptr] = true
	lock.Unlock()

	newInfo, _, readyBlockData, err := ReadyBlock(
		ctx, bcache, rp, bt.kmd, pb.pblock,
		bt.chargedTo, bt.rootBlockPointer().GetBlockType(), hashBehavior)
	if err != nil {
		return err
	}

	lock.Lock()
	defer lock.Unlock()

	err = bcache.Put(
		newInfo.BlockPointer, id, pb.pblock, PermanentEntry, SkipCacheHash)
	if err != nil {
		return err
	}

	// Only the leaf level needs to be tracked by the dirty file.
	var syncFunc func() error
	if makeSync != nil && level == len(pathsFromRoot[0])-1 {
		syncFunc = makeSync(ptr)
	}

	err = bps.AddNewBlock(
		ctx, newInfo.BlockPointer, pb.pblock, readyBlockData,
		syncFunc)
	if err != nil {
		return err
	}
	err = bps.SaveOldPtr(ctx, ptr)
	if err != nil {
		return err
	}

	parentPB.setChildBlockInfo(newInfo)
	oldPtrs[newInfo] = ptr
	donePtrs[newInfo.BlockPointer] = true
	return nil
}

// readyHelper takes a set of paths from a root down to a child block,
// and readies all the blocks represented in those paths.  If the
// caller wants leaf blocks readied, then the last element of each
// slice in `pathsFromRoot` should contain a leaf block, with a child
// index of -1.  It's assumed that all slices in `pathsFromRoot` have
// the same size. This function returns a map pointing from the new
// block info from any readied block to its corresponding old block
// pointer.
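//
// Levels are readied from the leaves upward, because a child's new
// BlockInfo must be written into its parent before that parent can
// itself be encoded and readied; within each level, up to
// maxParallelReadies blocks are readied concurrently.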
func (bt *blockTree) readyHelper(
	ctx context.Context, id tlf.ID, bcache BlockCache,
	rp ReadyProvider, bps BlockPutState,
	pathsFromRoot [][]ParentBlockAndChildIndex, makeSync makeSyncFunc,
	hashBehavior BlockCacheHashBehavior) (
	map[BlockInfo]BlockPointer, error) {
	oldPtrs := make(map[BlockInfo]BlockPointer)
	donePtrs := make(map[BlockPointer]bool)

	// lock protects `bps`, `oldPtrs`, and `donePtrs` while
	// parallelizing block readies below.
	var lock sync.Mutex

	// Starting from the leaf level, ready each block at each level,
	// and put the new BlockInfo into the parent block at the level
	// above.  At each level, only ready each block once. Don't ready
	// the root block though; the folderUpdatePrepper code will do
	// that.
	for level := len(pathsFromRoot[0]) - 1; level > 0; level-- {
		eg, groupCtx := errgroup.WithContext(ctx)
		indices := make(chan int, len(pathsFromRoot))
		numWorkers := len(pathsFromRoot)
		if numWorkers > maxParallelReadies {
			numWorkers = maxParallelReadies
		}

		worker := func() error {
			for i := range indices {
				err := bt.readyWorker(
					groupCtx, id, bcache, rp, bps, pathsFromRoot, makeSync,
					i, level, &lock, oldPtrs, donePtrs, hashBehavior)
				if err != nil {
					return err
				}
			}
			return nil
		}
		for i := 0; i < numWorkers; i++ {
			eg.Go(worker)
		}

		for i := 0; i < len(pathsFromRoot); i++ {
			indices <- i
		}
		close(indices)
		err := eg.Wait()
		if err != nil {
			return nil, err
		}
	}
	return oldPtrs, nil
}

// ready, if given an indirect top-block, readies all the dirty child
// blocks, and updates their block IDs in their parent block's list of
// indirect pointers.  It returns a map pointing from the new block
// info from any readied block to its corresponding old block pointer.
func (bt *blockTree) ready(
	ctx context.Context, id tlf.ID, bcache BlockCache,
	dirtyBcache IsDirtyProvider, rp ReadyProvider, bps BlockPutState,
	topBlock BlockWithPtrs, makeSync makeSyncFunc,
	hashBehavior BlockCacheHashBehavior) (
	map[BlockInfo]BlockPointer, error) {
	if !topBlock.IsIndirect() {
		return nil, nil
	}

	// This will contain the paths to all the dirty leaf blocks.  The
	// final entry in each path will be the leaf block itself (with a
	// -1 child index).
	var dirtyLeafPaths [][]ParentBlockAndChildIndex

	// Gather all the paths to all dirty leaf blocks first.
	off := topBlock.FirstOffset()
	for off != nil {
		_, parentBlocks, block, nextBlockOff, _, err :=
			bt.getNextDirtyBlockAtOffset(
				ctx, topBlock, off, BlockWrite, dirtyBcache)
		if err != nil {
			return nil, err
		}

		if block == nil {
			// No more dirty blocks.
			break
		}
		off = nextBlockOff // Will be `nil` if there are no more blocks.

		// Make sure there's only one copy of each pblock among all
		// the paths, so `readyHelper` can update the blocks in place
		// along any path, and they will all be updated.
		for _, p := range dirtyLeafPaths {
			for i := range parentBlocks {
				if i == 0 || p[i-1].childBlockPtr() ==
					parentBlocks[i-1].childBlockPtr() {
					parentBlocks[i].pblock = p[i].pblock
				}
			}
		}

		dirtyLeafPaths = append(dirtyLeafPaths,
			append(parentBlocks, ParentBlockAndChildIndex{block, -1}))
	}

	// No dirty blocks means nothing to do.
	if len(dirtyLeafPaths) == 0 {
		return nil, nil
	}

	return bt.readyHelper(
		ctx, id, bcache, rp, bps, dirtyLeafPaths, makeSync, hashBehavior)
}

func (bt *blockTree) getIndirectBlocksForOffsetRange(
	ctx context.Context, pblock BlockWithPtrs, startOff, endOff Offset) (
	pathsFromRoot [][]ParentBlockAndChildIndex, err error) {
	// Fetch the paths of indirect blocks, without getting the direct
	// blocks.
	pfr, _, _, err := bt.getBlocksForOffsetRange(
		ctx, bt.rootBlockPointer(), pblock, startOff, endOff, false,
		false /* no direct blocks */)
	if err != nil {
		return nil, err
	}

	return pfr, nil
}

func (bt *blockTree) getIndirectBlockInfosWithTopBlock(
	ctx context.Context, topBlock BlockWithPtrs) ([]BlockInfo, error) {
	if !topBlock.IsIndirect() {
		return nil, nil
	}

	pfr, err := bt.getIndirectBlocksForOffsetRange(
		ctx, topBlock, topBlock.FirstOffset(), nil)
	if err != nil {
		return nil, err
	}

	var blockInfos []BlockInfo
	infoSeen := make(map[BlockPointer]bool)
	for _, p := range pfr {
	pathLoop:
		for _, pb := range p {
			for i := 0; i < pb.pblock.NumIndirectPtrs(); i++ {
				info, _ := pb.pblock.IndirectPtr(i)
				if infoSeen[info.BlockPointer] {
					// No need to iterate through this whole block
					// again if we've already seen one of its children
					// before.
					continue pathLoop
				}

				infoSeen[info.BlockPointer] = true
				blockInfos = append(blockInfos, info)
			}
		}
	}
	return blockInfos, nil
}

func (bt *blockTree) getIndirectBlockInfos(ctx context.Context) (
	[]BlockInfo, error) {
	if bt.rootBlockPointer().DirectType == DirectBlock {
		return nil, nil
	}

	topBlock, _, err := bt.getter(
		ctx, bt.kmd, bt.rootBlockPointer(), bt.file, BlockRead)
	if err != nil {
		return nil, err
	}
	return bt.getIndirectBlockInfosWithTopBlock(ctx, topBlock)
}