github.com/keybase/client/go@v0.0.0-20241007131713-f10651d043c8/kbfs/libkbfs/prefetcher.go

// Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.

package libkbfs

import (
	"fmt"
	"sort"
	"sync"
	"time"

	"github.com/eapache/channels"
	"github.com/keybase/backoff"
	"github.com/keybase/client/go/kbfs/data"
	"github.com/keybase/client/go/kbfs/env"
	"github.com/keybase/client/go/kbfs/kbfsblock"
	"github.com/keybase/client/go/kbfs/libkey"
	"github.com/keybase/client/go/kbfs/tlf"
	"github.com/keybase/client/go/libkb"
	"github.com/keybase/client/go/logger"
	"github.com/keybase/client/go/protocol/keybase1"
	"github.com/pkg/errors"
	"golang.org/x/net/context"
)

const (
	updatePointerPrefetchPriority int           = 1
	prefetchTimeout               time.Duration = 24 * time.Hour
	overallSyncStatusInterval     time.Duration = 1 * time.Second
)

type prefetcherConfig interface {
	syncedTlfGetterSetter
	data.Versioner
	logMaker
	blockCacher
	diskBlockCacheGetter
	clockGetter
	reporterGetter
	settingsDBGetter
	subscriptionManagerGetter
	subscriptionManagerPublisherGetter
}

type prefetchRequest struct {
	ptr            data.BlockPointer
	encodedSize    uint32
	newBlock       func() data.Block
	kmd            libkey.KeyMetadata
	priority       int
	lifetime       data.BlockCacheLifetime
	prefetchStatus PrefetchStatus
	action         BlockRequestAction
	sendCh         chan<- <-chan struct{}

	// obseleted is a channel that can be used to cancel this request while
	// it is waiting in the queue if the prefetch is no longer necessary.
	obseleted <-chan struct{}

	// countedInOverall is true if the bytes of this block are currently
	// counted in the overall sync status byte total.
	countedInOverall bool
}
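
// For illustration, a hedged sketch of how the prefetcher itself fills one
// of these in for a child block (mirroring the literal used in request()
// below; the values here are hypothetical):
//
//	obseleted := make(chan struct{})
//	req := &prefetchRequest{
//		ptr, info.EncodedSize, block.NewEmptier(), kmd, priority,
//		lifetime, NoPrefetch, action, nil, obseleted, false}
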
    65  
    66  type ctxPrefetcherTagKey int
    67  
    68  const (
    69  	ctxPrefetcherIDKey ctxPrefetcherTagKey = iota
    70  	ctxPrefetchIDKey
    71  
    72  	ctxPrefetcherID = "PREID"
    73  	ctxPrefetchID   = "PFID"
    74  )
    75  
    76  type prefetch struct {
    77  	subtreeBlockCount int
    78  	subtreeTriggered  bool
    79  	subtreeRetrigger  bool
    80  	req               *prefetchRequest
    81  	// Each refnonce for this block ID can have a different set of
    82  	// parents.  Track the channel for the specific instance of the
    83  	// prefetch that counted us in its progress (since a parent may be
    84  	// canceled and rescheduled later).
    85  	parents map[kbfsblock.RefNonce]map[data.BlockPointer]<-chan struct{}
    86  	ctx     context.Context
    87  	cancel  context.CancelFunc
    88  	waitCh  chan struct{}
    89  
    90  	PrefetchProgress
    91  }
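
// As a purely hypothetical illustration of the shape of `parents` above: a
// block referenced under two refnonces, each counted by a different parent
// prefetch, would be tracked roughly as
//
//	parents = map[kbfsblock.RefNonce]map[data.BlockPointer]<-chan struct{}{
//		refNonceA: {parentPtrA: parentA.waitCh},
//		refNonceB: {parentPtrB: parentB.waitCh},
//	}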

func (p *prefetch) Close() {
	select {
	case <-p.waitCh:
	default:
		close(p.waitCh)
	}
	p.cancel()
}

type rescheduledPrefetch struct {
	off   backoff.BackOff
	timer *time.Timer
}

type queuedPrefetch struct {
	waitingPrefetches int
	channel           chan struct{}
	tlfID             tlf.ID
}

type cancelTlfPrefetch struct {
	tlfID   tlf.ID
	channel chan<- struct{}
}

type blockPrefetcher struct {
	ctx             context.Context
	config          prefetcherConfig
	log             logger.Logger
	vlog            *libkb.VDebugLog
	appStateUpdater env.AppStateUpdater

	makeNewBackOff func() backoff.BackOff

	// blockRetriever to retrieve blocks from the server
	retriever BlockRetriever
	// channel to request prefetches
	prefetchRequestCh channels.Channel
	// channel to cancel prefetches
	prefetchCancelCh channels.Channel
	// channel to cancel all prefetches for a TLF
	prefetchCancelTlfCh channels.Channel
	// channel to reschedule prefetches
	prefetchRescheduleCh channels.Channel
	// channel to get prefetch status
	prefetchStatusCh channels.Channel
	// channel to allow synchronization on completion
	inFlightFetches channels.Channel
	// protects shutdownCh
	shutdownOnce sync.Once
	// channel that is idempotently closed when a shutdown occurs
	shutdownCh chan struct{}
	// channel that is closed when all current fetches are done and prefetches
	// have been triggered
	almostDoneCh chan struct{}
	// channel that is closed when a shutdown completes and all pending
	// prefetch requests are complete
	doneCh chan struct{}
	// map to store prefetch metadata
	prefetches map[kbfsblock.ID]*prefetch
	// map to store backoffs for rescheduling top blocks
	rescheduled map[kbfsblock.ID]*rescheduledPrefetch
	// channel that's always closed, to avoid overhead on certain requests
	closedCh <-chan struct{}

	pauseLock sync.RWMutex
	paused    bool
	pausedCh  chan struct{}

	// map to channels for cancelling queued prefetches
	queuedPrefetchHandlesLock sync.Mutex
	queuedPrefetchHandles     map[data.BlockPointer]queuedPrefetch

	// Tracks the overall bytes currently being prefetched to the sync
	// cache.  The total outstanding bytes resets on the first new
	// prefetch after a completion happens.
	overallSyncStatusLock     sync.RWMutex
	overallSyncStatus         PrefetchProgress
	lastOverallSyncStatusSent time.Time
}
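
// All external requests funnel through the channels above and are serviced
// by the single `run` goroutine. A minimal sketch of the send side, following
// the same shutdown-aware pattern reschedulePrefetch uses below (the request
// value here is hypothetical; the real senders live elsewhere in this file):
//
//	select {
//	case p.prefetchRequestCh.In() <- req:
//	case <-p.shutdownCh:
//		// drop the request; the prefetcher is shutting down
//	}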

var _ Prefetcher = (*blockPrefetcher)(nil)

func defaultBackOffForPrefetcher() backoff.BackOff {
	return backoff.NewExponentialBackOff()
}

func newBlockPrefetcher(retriever BlockRetriever,
	config prefetcherConfig, testSyncCh <-chan struct{},
	testDoneCh chan<- struct{},
	appStateUpdater env.AppStateUpdater) *blockPrefetcher {
	closedCh := make(chan struct{})
	close(closedCh)
	p := &blockPrefetcher{
		config:                config,
		appStateUpdater:       appStateUpdater,
		makeNewBackOff:        defaultBackOffForPrefetcher,
		retriever:             retriever,
		prefetchRequestCh:     NewInfiniteChannelWrapper(),
		prefetchCancelCh:      NewInfiniteChannelWrapper(),
		prefetchCancelTlfCh:   NewInfiniteChannelWrapper(),
		prefetchRescheduleCh:  NewInfiniteChannelWrapper(),
		prefetchStatusCh:      NewInfiniteChannelWrapper(),
		inFlightFetches:       NewInfiniteChannelWrapper(),
		shutdownCh:            make(chan struct{}),
		almostDoneCh:          make(chan struct{}),
		doneCh:                make(chan struct{}),
		prefetches:            make(map[kbfsblock.ID]*prefetch),
		queuedPrefetchHandles: make(map[data.BlockPointer]queuedPrefetch),
		rescheduled:           make(map[kbfsblock.ID]*rescheduledPrefetch),
		closedCh:              closedCh,
		pausedCh:              make(chan struct{}),
	}
	if config != nil {
		p.log = config.MakeLogger("PRE")
		p.vlog = config.MakeVLogger(p.log)
	} else {
		p.log = logger.NewNull()
		p.vlog = libkb.NewVDebugLog(p.log)
	}
	p.ctx = CtxWithRandomIDReplayable(context.Background(), ctxPrefetcherIDKey,
		ctxPrefetcherID, p.log)
	if retriever == nil {
		// If we pass in a nil retriever, this prefetcher shouldn't do
		// anything. Treat it as already shut down.
		p.Shutdown()
		close(p.doneCh)
	} else {
		go p.run(testSyncCh, testDoneCh)
		go p.shutdownLoop()
	}
	return p
}
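
// A minimal construction sketch, assuming a working BlockRetriever and
// prefetcherConfig are on hand; the two test channels may be nil outside of
// tests, and Shutdown (defined outside this excerpt) is assumed to return a
// channel that closes once pending work is drained:
//
//	p := newBlockPrefetcher(retriever, config, nil, nil, appStateUpdater)
//	defer func() { <-p.Shutdown() }()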

func (p *blockPrefetcher) sendOverallSyncStatusHelperLocked() {
	var status keybase1.FolderSyncStatus
	status.PrefetchProgress = p.overallSyncStatus.ToProtocolProgress(
		p.config.Clock())

	FillInDiskSpaceStatus(
		context.Background(), &status, p.overallSyncStatus.ToProtocolStatus(),
		p.config.DiskBlockCache())

	p.config.Reporter().NotifyOverallSyncStatus(context.Background(), status)
	p.config.SubscriptionManagerPublisher().PublishChange(
		keybase1.SubscriptionTopic_OVERALL_SYNC_STATUS)
	p.lastOverallSyncStatusSent = p.config.Clock().Now()
}

func (p *blockPrefetcher) sendOverallSyncStatusLocked() {
	// Don't send a new status notification if we aren't complete, and
	// if we have sent one within the last interval.
	if p.overallSyncStatus.SubtreeBytesFetched !=
		p.overallSyncStatus.SubtreeBytesTotal &&
		p.config.Clock().Now().Before(
			p.lastOverallSyncStatusSent.Add(overallSyncStatusInterval)) {
		return
	}

	p.sendOverallSyncStatusHelperLocked()
}

func (p *blockPrefetcher) incOverallSyncTotalBytes(req *prefetchRequest) {
	if !req.action.Sync() || req.countedInOverall {
		return
	}

	p.overallSyncStatusLock.Lock()
	defer p.overallSyncStatusLock.Unlock()
	if p.overallSyncStatus.SubtreeBytesFetched ==
		p.overallSyncStatus.SubtreeBytesTotal {
		// Reset since we had already finished syncing.
		p.overallSyncStatus = PrefetchProgress{}
		p.overallSyncStatus.Start = p.config.Clock().Now()
	}

	p.overallSyncStatus.SubtreeBytesTotal += uint64(req.encodedSize)
	req.countedInOverall = true
	p.sendOverallSyncStatusLocked()
}

func (p *blockPrefetcher) decOverallSyncTotalBytes(req *prefetchRequest) {
	if !req.action.Sync() || !req.countedInOverall {
		return
	}

	p.overallSyncStatusLock.Lock()
	defer p.overallSyncStatusLock.Unlock()
	if p.overallSyncStatus.SubtreeBytesTotal < uint64(req.encodedSize) {
		// Both log and panic so that we get the PFID in the log.
		p.log.CErrorf(
			context.TODO(), "panic: decOverallSyncTotalBytes overstepped "+
				"its bounds (bytes=%d, fetched=%d, total=%d)", req.encodedSize,
			p.overallSyncStatus.SubtreeBytesFetched,
			p.overallSyncStatus.SubtreeBytesTotal)
		panic("decOverallSyncTotalBytes overstepped its bounds")
	}

	p.overallSyncStatus.SubtreeBytesTotal -= uint64(req.encodedSize)
	req.countedInOverall = false
	p.sendOverallSyncStatusLocked()
}

func (p *blockPrefetcher) incOverallSyncFetchedBytes(req *prefetchRequest) {
	if !req.action.Sync() || !req.countedInOverall {
		return
	}

	p.overallSyncStatusLock.Lock()
	defer p.overallSyncStatusLock.Unlock()
	p.overallSyncStatus.SubtreeBytesFetched += uint64(req.encodedSize)
	req.countedInOverall = false
	p.sendOverallSyncStatusLocked()
	if p.overallSyncStatus.SubtreeBytesFetched >
		p.overallSyncStatus.SubtreeBytesTotal {
		// Both log and panic so that we get the PFID in the log.
		p.log.CErrorf(
			context.TODO(), "panic: incOverallSyncFetchedBytes overstepped "+
				"its bounds (fetched=%d, total=%d)",
			p.overallSyncStatus.SubtreeBytesFetched,
			p.overallSyncStatus.SubtreeBytesTotal)
		panic("incOverallSyncFetchedBytes overstepped its bounds")
	}
}
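
// A worked example of the accounting above, for a hypothetical sync request
// with encodedSize 1024: the total-bytes call bumps the outstanding total and
// marks the request as counted; the matching fetched-bytes call credits the
// same bytes and clears the flag, so a later re-request can be counted anew.
//
//	p.incOverallSyncTotalBytes(req)   // SubtreeBytesTotal += 1024; countedInOverall = true
//	p.incOverallSyncFetchedBytes(req) // SubtreeBytesFetched += 1024; countedInOverall = false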

func (p *blockPrefetcher) newPrefetch(
	count int, bytes uint64, triggered bool,
	req *prefetchRequest) *prefetch {
	ctx, cancel := context.WithTimeout(p.ctx, prefetchTimeout)
	ctx = CtxWithRandomIDReplayable(
		ctx, ctxPrefetchIDKey, ctxPrefetchID, p.log)
	p.incOverallSyncTotalBytes(req)
	return &prefetch{
		subtreeBlockCount: count,
		subtreeTriggered:  triggered,
		req:               req,
		parents:           make(map[kbfsblock.RefNonce]map[data.BlockPointer]<-chan struct{}),
		ctx:               ctx,
		cancel:            cancel,
		waitCh:            make(chan struct{}),
		PrefetchProgress: PrefetchProgress{
			SubtreeBytesTotal: bytes,
			Start:             p.config.Clock().Now(),
		},
	}
}

func (p *blockPrefetcher) getParentForApply(
	pptr data.BlockPointer, refMap map[data.BlockPointer]<-chan struct{},
	ch <-chan struct{}) *prefetch {
	// Check if the particular prefetch for our parent that we're
	// tracking has already completed or been canceled, and if so,
	// don't apply to that parent.  This can happen in the following
	// scenario:
	//
	// * A path `a/b/c` gets prefetched.
	// * The path gets updated via another write to `a'/b'/c`.
	// * `a` and `b` get canceled.
	// * `a` gets re-fetched, and `b` gets added to the prefetch list.
	// * `c` completes and tries to complete its old parent `b`, which
	//   prematurely closes the new prefetches for `b` and `c` (which
	//   are now only expecting one block, the new `b` prefetch).
	parentDone := false
	select {
	case <-ch:
		parentDone = true
	default:
	}

	parent, ok := p.prefetches[pptr.ID]
	if parentDone || !ok {
		// Note that the parent (or some other ancestor) might be
		// rescheduled for later and have been removed from
		// `prefetches`.  In that case still delete it from the
		// `parents` list as normal; the reschedule will add it
		// back in later as needed.
		delete(refMap, pptr)
		return nil
	}
	return parent
}

// applyToPtrParentsRecursive applies a function just to the parents
// of the specific pointer (with refnonce).
func (p *blockPrefetcher) applyToPtrParentsRecursive(
	f func(data.BlockPointer, *prefetch), ptr data.BlockPointer, pre *prefetch) {
	defer func() {
		if r := recover(); r != nil {
			id := kbfsblock.ZeroID
			if pre.req != nil {
				id = pre.req.ptr.ID
			}
			p.log.CErrorf(pre.ctx, "Next prefetch in panic unroll: id=%s, "+
				"subtreeBlockCount=%d, subtreeTriggered=%t, parents=%+v",
				id, pre.subtreeBlockCount, pre.subtreeTriggered, pre.parents)
			panic(r)
		}
	}()
	refMap := pre.parents[ptr.RefNonce]
	for pptr, ch := range refMap {
		parent := p.getParentForApply(pptr, refMap, ch)
		if parent != nil {
			p.applyToPtrParentsRecursive(f, pptr, parent)
		}
	}
	if len(pre.parents[ptr.RefNonce]) == 0 {
		delete(pre.parents, ptr.RefNonce)
	}
	f(ptr, pre)
}

// applyToParentsRecursive applies a function to all the parents of
// the pointer (with any refnonces).
func (p *blockPrefetcher) applyToParentsRecursive(
	f func(kbfsblock.ID, *prefetch), blockID kbfsblock.ID, pre *prefetch) {
	defer func() {
		if r := recover(); r != nil {
			id := kbfsblock.ZeroID
			if pre.req != nil {
				id = pre.req.ptr.ID
			}
			p.log.CErrorf(pre.ctx, "Next prefetch in panic unroll: id=%s, "+
				"subtreeBlockCount=%d, subtreeTriggered=%t, parents=%+v",
				id, pre.subtreeBlockCount, pre.subtreeTriggered, pre.parents)
			panic(r)
		}
	}()
	for refNonce, refMap := range pre.parents {
		for pptr, ch := range refMap {
			parent := p.getParentForApply(pptr, refMap, ch)
			if parent != nil {
				p.applyToParentsRecursive(f, pptr.ID, parent)
			}
		}
		if len(refMap) == 0 {
			delete(pre.parents, refNonce)
		}
	}
	f(blockID, pre)
}
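
// For example, this is how the request handler below credits a fetched block
// against every ancestor in its tree:
//
//	p.applyToParentsRecursive(p.decrementPrefetch, req.ptr.ID, pre)
//	p.applyToParentsRecursive(p.addFetchedBytes(bytes), req.ptr.ID, pre)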

func (p *blockPrefetcher) getBlockSynchronously(
	ctx context.Context, req *prefetchRequest, action BlockRequestAction) (
	data.Block, error) {
	// Avoid the overhead of the block retriever copy if possible.
	cachedBlock, err := p.config.BlockCache().Get(req.ptr)
	if err == nil {
		return cachedBlock, nil
	}

	b := req.newBlock()
	err = <-p.retriever.Request(
		ctx, defaultOnDemandRequestPriority, req.kmd, req.ptr,
		b, req.lifetime, action)
	if err != nil {
		return nil, err
	}
	return b, nil
}

// Walk up the block tree decrementing each node by `numBlocks`. Any
// zeroes we hit get marked complete and deleted.  Also, count
// `numBytes` bytes as being fetched.  If the block count reaches 0,
// the fetched byte count must now equal the total.
// TODO: If we ever hit a lower number than the child, panic.
func (p *blockPrefetcher) completePrefetch(
	numBlocks int, numBytes uint64) func(kbfsblock.ID, *prefetch) {
	return func(blockID kbfsblock.ID, pp *prefetch) {
		pp.subtreeBlockCount -= numBlocks
		pp.SubtreeBytesFetched += numBytes
		if pp.subtreeBlockCount < 0 {
			// Both log and panic so that we get the PFID in the log.
			p.log.CErrorf(pp.ctx, "panic: completePrefetch overstepped its "+
				"bounds")
			panic("completePrefetch overstepped its bounds")
		}
		if pp.req == nil {
			p.log.CErrorf(pp.ctx, "panic: completePrefetch got a nil req "+
				"for block %s", blockID)
			panic("completePrefetch got a nil req")
		}
		if pp.subtreeBlockCount == 0 {
			if pp.SubtreeBytesFetched != pp.SubtreeBytesTotal {
				panic(fmt.Sprintf("Bytes fetch mismatch: fetched=%d, total=%d",
					pp.SubtreeBytesFetched, pp.SubtreeBytesTotal))
			}
			delete(p.prefetches, blockID)
			p.clearRescheduleState(blockID)
			delete(p.rescheduled, blockID)
			defer pp.Close()
			b, err := p.getBlockSynchronously(pp.ctx, pp.req, BlockRequestSolo)
			if err != nil {
				p.log.CWarningf(pp.ctx, "failed to retrieve block to "+
					"complete its prefetch, canceled it instead: %+v", err)
				return
			}
			err = p.retriever.PutInCaches(pp.ctx, pp.req.ptr,
				pp.req.kmd.TlfID(), b, pp.req.lifetime,
				FinishedPrefetch, pp.req.action.CacheType())
			if err != nil {
				p.log.CWarningf(pp.ctx, "failed to complete prefetch due to "+
					"cache error, canceled it instead: %+v", err)
			}
		}
	}
}

func (p *blockPrefetcher) decrementPrefetch(blockID kbfsblock.ID, pp *prefetch) {
	pp.subtreeBlockCount--
	if pp.subtreeBlockCount < 0 {
		// Both log and panic so that we get the PFID in the log.
		p.log.CErrorf(pp.ctx, "panic: decrementPrefetch overstepped its bounds")
		panic("decrementPrefetch overstepped its bounds")
	}
}

func (p *blockPrefetcher) addFetchedBytes(bytes uint64) func(
	kbfsblock.ID, *prefetch) {
	return func(blockID kbfsblock.ID, pp *prefetch) {
		pp.SubtreeBytesFetched += bytes
		if pp.SubtreeBytesFetched > pp.SubtreeBytesTotal {
			// Both log and panic so that we get the PFID in the log.
			p.log.CErrorf(pp.ctx, "panic: addFetchedBytes overstepped "+
				"its bounds (fetched=%d, total=%d)", pp.SubtreeBytesFetched,
				pp.SubtreeBytesTotal)
			panic("addFetchedBytes overstepped its bounds")
		}
	}
}

func (p *blockPrefetcher) clearRescheduleState(blockID kbfsblock.ID) {
	rp, ok := p.rescheduled[blockID]
	if !ok {
		return
	}
	if rp.timer != nil {
		rp.timer.Stop()
		rp.timer = nil
	}
}

func (p *blockPrefetcher) cancelQueuedPrefetch(ptr data.BlockPointer) {
	p.queuedPrefetchHandlesLock.Lock()
	defer p.queuedPrefetchHandlesLock.Unlock()
	qp, ok := p.queuedPrefetchHandles[ptr]
	if ok {
		close(qp.channel)
		delete(p.queuedPrefetchHandles, ptr)
		p.log.Debug("cancelled queued prefetch for block %s", ptr)
	} else {
		p.vlog.Log(libkb.VLog2, "nothing to cancel for block %s", ptr)
	}
}

func (p *blockPrefetcher) cancelQueuedPrefetchesForTlf(tlfID tlf.ID) {
	p.queuedPrefetchHandlesLock.Lock()
	defer p.queuedPrefetchHandlesLock.Unlock()
	for ptr, qp := range p.queuedPrefetchHandles {
		if qp.tlfID != tlfID {
			continue
		}

		p.vlog.Log(
			libkb.VLog2, "Canceling queued prefetch for %s, tlf=%s", ptr, tlfID)
		close(qp.channel)
		delete(p.queuedPrefetchHandles, ptr)
	}
}

func (p *blockPrefetcher) markQueuedPrefetchDone(ptr data.BlockPointer) {
	p.queuedPrefetchHandlesLock.Lock()
	defer p.queuedPrefetchHandlesLock.Unlock()
	qp, present := p.queuedPrefetchHandles[ptr]
	if !present {
		p.vlog.CLogf(
			context.Background(), libkb.VLog2, "queuedPrefetch not present in"+
				" queuedPrefetchHandles: %s", ptr)
		return
	}
	if qp.waitingPrefetches == 1 {
		delete(p.queuedPrefetchHandles, ptr)
	} else {
		p.queuedPrefetchHandles[ptr] = queuedPrefetch{
			qp.waitingPrefetches - 1, qp.channel, qp.tlfID}
	}
}

func (p *blockPrefetcher) doCancel(id kbfsblock.ID, pp *prefetch) {
	p.decOverallSyncTotalBytes(pp.req)
	delete(p.prefetches, id)
	pp.Close()
	p.clearRescheduleState(id)
	delete(p.rescheduled, id)
}

func (p *blockPrefetcher) cancelPrefetch(ptr data.BlockPointer, pp *prefetch) {
	delete(pp.parents, ptr.RefNonce)
	if len(pp.parents) > 0 {
		return
	}
	p.doCancel(ptr.ID, pp)
}

// shutdownLoop waits on in-flight fetches, and once a shutdown begins,
// drains the remaining ones before closing almostDoneCh.
func (p *blockPrefetcher) shutdownLoop() {
top:
	for {
		select {
		case chInterface := <-p.inFlightFetches.Out():
			ch := chInterface.(<-chan error)
			<-ch
		case <-p.shutdownCh:
			break top
		}
	}
	for p.inFlightFetches.Len() > 0 {
		chInterface := <-p.inFlightFetches.Out()
		ch := chInterface.(<-chan error)
		<-ch
	}
	close(p.almostDoneCh)
}

// calculatePriority returns the priority for a child-block request: one
// less than the base priority, except that any non-deeply-synced request
// above the throttle threshold is first capped at throttleRequestPriority.
func (p *blockPrefetcher) calculatePriority(
	basePriority int, action BlockRequestAction) int {
	// A prefetched, non-deep-synced child always gets throttled for
	// now, until we fix the database performance issues.
	if basePriority > throttleRequestPriority && !action.DeepSync() {
		basePriority = throttleRequestPriority
	}
	return basePriority - 1
}
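
// A hedged numeric sketch: assuming defaultOnDemandRequestPriority sits above
// throttleRequestPriority (as for on-demand fetches elsewhere in this file),
// a non-deep-sync child lands in the throttled band while a deep-sync child
// keeps its base priority:
//
//	p.calculatePriority(defaultOnDemandRequestPriority, action)
//	// => throttleRequestPriority - 1        when !action.DeepSync()
//	// => defaultOnDemandRequestPriority - 1 when action.DeepSync()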

// removeFinishedParent removes a parent from the given refmap if it's
// finished or is otherwise no longer a prefetch in progress.
func (p *blockPrefetcher) removeFinishedParent(
	pptr data.BlockPointer, refMap map[data.BlockPointer]<-chan struct{},
	ch <-chan struct{}) {
	_ = p.getParentForApply(pptr, refMap, ch)
}

// request maps the parent->child block relationship in the prefetcher, and it
// triggers child prefetches that aren't already in progress.
func (p *blockPrefetcher) request(ctx context.Context, priority int,
	kmd libkey.KeyMetadata, info data.BlockInfo, block data.Block,
	lifetime data.BlockCacheLifetime, parentPtr data.BlockPointer,
	isParentNew bool, action BlockRequestAction,
	idsSeen map[kbfsblock.ID]bool) (
	numBlocks int, numBytesFetched, numBytesTotal uint64) {
	ptr := info.BlockPointer
	if idsSeen[ptr.ID] {
		return 0, 0, 0
	}
	idsSeen[ptr.ID] = true

	// If the prefetch is already waiting, don't make it wait again.
	// Add the parent, however.
	pre, isPrefetchWaiting := p.prefetches[ptr.ID]
	if !isPrefetchWaiting {
		// If the block isn't in the tree, we add it with a block count of 1 (a
		// later TriggerPrefetch will come in and decrement it).
		obseleted := make(chan struct{})
		req := &prefetchRequest{
			ptr, info.EncodedSize, block.NewEmptier(), kmd, priority,
			lifetime, NoPrefetch, action, nil, obseleted, false}

		pre = p.newPrefetch(1, uint64(info.EncodedSize), false, req)
		p.prefetches[ptr.ID] = pre
	}
	// If this is a new prefetch, or if we need to update the action,
	// send a new request.
	newAction := action.Combine(pre.req.action)
	if !isPrefetchWaiting || pre.req.action != newAction || pre.req.ptr != ptr {
		// Update the action to prevent any early cancellation of a
		// previous, non-deeply-synced request, and trigger a new
		// request in case the previous request has already been
		// handled.
		oldAction := pre.req.action
		pre.req.action = newAction
		if !oldAction.Sync() && newAction.Sync() {
			p.incOverallSyncTotalBytes(pre.req)
			// Delete the old parent waitCh if it's been canceled already.
			if ch, ok := pre.parents[ptr.RefNonce][parentPtr]; ok {
				p.removeFinishedParent(parentPtr, pre.parents[ptr.RefNonce], ch)
			}
			if pre.subtreeTriggered {
				// Since this fetch is being converted into a sync, we
				// need to re-trigger all the child fetches to be
				// syncs as well.
				pre.subtreeRetrigger = true
			}
		}

		ch := p.retriever.Request(
			pre.ctx, priority, kmd, ptr, block.NewEmpty(), lifetime,
			action.DelayedCacheCheckAction())
		p.inFlightFetches.In() <- ch
	}
	parentPre, isParentWaiting := p.prefetches[parentPtr.ID]
	if !isParentWaiting {
		p.vlog.CLogf(pre.ctx, libkb.VLog2,
			"prefetcher doesn't know about parent block "+
				"%s for child block %s", parentPtr, ptr.ID)
		panic("prefetcher doesn't know about parent block when trying to " +
			"record parent-child relationship")
	}
	if pre.parents[ptr.RefNonce][parentPtr] == nil || isParentNew {
		// The new parent needs its subtree block count increased. This can
		// happen either when:
		// 1. The child doesn't know about the parent when the child is first
		// created above, or the child was previously in the tree but the
		// parent was not (e.g. when there's an updated parent due to a change
		// in a sibling of this child).
		// 2. The parent is newly created but the child _did_ know about it,
		// like when the parent previously had a prefetch but was canceled.
		if len(pre.parents[ptr.RefNonce]) == 0 {
			pre.parents[ptr.RefNonce] = make(map[data.BlockPointer]<-chan struct{})
		}
		pre.parents[ptr.RefNonce][parentPtr] = parentPre.waitCh
		if pre.subtreeBlockCount > 0 {
			p.vlog.CLogf(ctx, libkb.VLog2,
				"Prefetching %v, action=%s, numBlocks=%d, isParentNew=%t",
				ptr, action, pre.subtreeBlockCount, isParentNew)
		}
		return pre.subtreeBlockCount, pre.SubtreeBytesFetched,
			pre.SubtreeBytesTotal
	}
	return 0, 0, 0
}

func (p *blockPrefetcher) handleStatusRequest(req *prefetchStatusRequest) {
	pre, isPrefetchWaiting := p.prefetches[req.ptr.ID]
	if !isPrefetchWaiting {
		req.ch <- PrefetchProgress{}
	} else {
		req.ch <- pre.PrefetchProgress
	}
}

// handleCriticalRequests should be called periodically during any
// long prefetch requests, to make sure we handle critical requests
// quickly.  These are requests that are required to be run in the
// main processing goroutine, but won't interfere with whatever
// request we're in the middle of.
func (p *blockPrefetcher) handleCriticalRequests() {
	for {
		// Fulfill any status requests since the user could be waiting
		// for them.
		select {
		case req := <-p.prefetchStatusCh.Out():
			p.handleStatusRequest(req.(*prefetchStatusRequest))
		default:
			return
		}
	}
}

func (p *blockPrefetcher) prefetchIndirectFileBlock(
	ctx context.Context, parentPtr data.BlockPointer, b *data.FileBlock,
	kmd libkey.KeyMetadata, lifetime data.BlockCacheLifetime, isPrefetchNew bool,
	action BlockRequestAction, basePriority int) (
	numBlocks int, numBytesFetched, numBytesTotal uint64, isTail bool) {
	// Prefetch indirect block pointers.
	newPriority := p.calculatePriority(basePriority, action)
	idsSeen := make(map[kbfsblock.ID]bool, len(b.IPtrs))
	for _, ptr := range b.IPtrs {
		b, f, t := p.request(
			ctx, newPriority, kmd, ptr.BlockInfo, b.NewEmpty(), lifetime,
			parentPtr, isPrefetchNew, action, idsSeen)
		numBlocks += b
		numBytesFetched += f
		numBytesTotal += t

		p.handleCriticalRequests()
	}
	return numBlocks, numBytesFetched, numBytesTotal, len(b.IPtrs) == 0
}

func (p *blockPrefetcher) prefetchIndirectDirBlock(
	ctx context.Context, parentPtr data.BlockPointer, b *data.DirBlock,
	kmd libkey.KeyMetadata, lifetime data.BlockCacheLifetime, isPrefetchNew bool,
	action BlockRequestAction, basePriority int) (
	numBlocks int, numBytesFetched, numBytesTotal uint64, isTail bool) {
	// Prefetch indirect block pointers.
	newPriority := p.calculatePriority(basePriority, action)
	idsSeen := make(map[kbfsblock.ID]bool, len(b.IPtrs))
	for _, ptr := range b.IPtrs {
		b, f, t := p.request(
			ctx, newPriority, kmd, ptr.BlockInfo, b.NewEmpty(), lifetime,
			parentPtr, isPrefetchNew, action, idsSeen)
		numBlocks += b
		numBytesFetched += f
		numBytesTotal += t

		p.handleCriticalRequests()
	}
	return numBlocks, numBytesFetched, numBytesTotal, len(b.IPtrs) == 0
}

func (p *blockPrefetcher) prefetchDirectDirBlock(
	ctx context.Context, parentPtr data.BlockPointer, b *data.DirBlock,
	kmd libkey.KeyMetadata, lifetime data.BlockCacheLifetime, isPrefetchNew bool,
	action BlockRequestAction, basePriority int) (
	numBlocks int, numBytesFetched, numBytesTotal uint64, isTail bool) {
	// Prefetch all DirEntry root blocks.
	dirEntries := data.DirEntriesBySizeAsc{
		DirEntries: data.DirEntryMapToDirEntries(b.Children),
	}
	sort.Sort(dirEntries)
	newPriority := p.calculatePriority(basePriority, action)
	totalChildEntries := 0
	idsSeen := make(map[kbfsblock.ID]bool, len(dirEntries.DirEntries))
	for _, entry := range dirEntries.DirEntries {
		var block data.Block
		switch entry.Type {
		case data.Dir:
			block = &data.DirBlock{}
		case data.File:
			block = &data.FileBlock{}
		case data.Exec:
			block = &data.FileBlock{}
		case data.Sym:
			// Skip symbolic links because there's nothing to prefetch.
			continue
		default:
			p.log.CDebugf(ctx, "Skipping prefetch for entry of "+
				"unknown type %d", entry.Type)
			continue
		}
		totalChildEntries++
		b, f, t := p.request(
			ctx, newPriority, kmd, entry.BlockInfo, block, lifetime,
			parentPtr, isPrefetchNew, action, idsSeen)
		numBlocks += b
		numBytesFetched += f
		numBytesTotal += t

		p.handleCriticalRequests()
	}
	if totalChildEntries == 0 {
		isTail = true
	}
	return numBlocks, numBytesFetched, numBytesTotal, isTail
}

// handlePrefetch allows the prefetcher to trigger prefetches. `run` calls this
// when a prefetch request is received and the criteria are satisfied to
// initiate a prefetch for this block's children.
// Returns `numBlocks` which indicates how many additional blocks (blocks not
// currently in the prefetch tree) with a parent of `pre.req.ptr.ID` must be
// added to the tree.
func (p *blockPrefetcher) handlePrefetch(
	pre *prefetch, isPrefetchNew bool, action BlockRequestAction, b data.Block) (
	numBlocks int, numBytesFetched, numBytesTotal uint64, isTail bool,
	err error) {
	req := pre.req
	childAction := action.ChildAction(b)
	switch b := b.(type) {
	case *data.FileBlock:
		if b.IsInd {
			numBlocks, numBytesFetched, numBytesTotal, isTail =
				p.prefetchIndirectFileBlock(
					pre.ctx, req.ptr, b, req.kmd, req.lifetime,
					isPrefetchNew, childAction, req.priority)
		} else {
			isTail = true
		}
	case *data.DirBlock:
		if b.IsInd {
			numBlocks, numBytesFetched, numBytesTotal, isTail =
				p.prefetchIndirectDirBlock(
					pre.ctx, req.ptr, b, req.kmd, req.lifetime,
					isPrefetchNew, childAction, req.priority)
		} else {
			numBlocks, numBytesFetched, numBytesTotal, isTail =
				p.prefetchDirectDirBlock(
					pre.ctx, req.ptr, b, req.kmd, req.lifetime,
					isPrefetchNew, childAction, req.priority)
		}
	default:
		// Skipping prefetch for block of unknown type (likely CommonBlock)
		return 0, 0, 0, false, errors.New("unknown block type")
	}
	return numBlocks, numBytesFetched, numBytesTotal, isTail, nil
}

func (p *blockPrefetcher) rescheduleTopBlock(
	blockID kbfsblock.ID, pp *prefetch) {
	// If this block has parents and thus is not a top-block, cancel
	// all of the references for it.
	if len(pp.parents) > 0 {
		for refNonce := range pp.parents {
			p.cancelPrefetch(data.BlockPointer{
				ID:      blockID,
				Context: kbfsblock.Context{RefNonce: refNonce},
			}, pp)
		}
		return
	}

	// Effectively below we are transferring the request for the top
	// block from `p.prefetches` to `p.rescheduled`.
	delete(p.prefetches, blockID)
	pp.Close()

	// Only reschedule the top-most block, which has no parents.
	rp, ok := p.rescheduled[blockID]
	if !ok {
		rp = &rescheduledPrefetch{
			off: p.makeNewBackOff(),
		}
		p.rescheduled[blockID] = rp
	}

	if rp.timer != nil {
		// Prefetch already scheduled.
		return
	}
	// Copy the req, re-using the same Block as before.
	req := *pp.req
	d := rp.off.NextBackOff()
	if d == backoff.Stop {
		p.log.Debug("Stopping rescheduling of %s due to stopped backoff timer",
			blockID)
		return
	}
	p.log.Debug("Rescheduling prefetch of %s in %s", blockID, d)
	rp.timer = time.AfterFunc(d, func() {
		p.triggerPrefetch(&req)
	})
}
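
// A sketch of the resulting retry cadence, assuming defaultBackOffForPrefetcher
// keeps the library's exponential defaults (an initial delay on the order of
// half a second, growing with jitter until the backoff returns backoff.Stop):
//
//	off := defaultBackOffForPrefetcher()
//	for d := off.NextBackOff(); d != backoff.Stop; d = off.NextBackOff() {
//		// the top block would be retried after roughly d
//	}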

func (p *blockPrefetcher) reschedulePrefetch(req *prefetchRequest) {
	select {
	case p.prefetchRescheduleCh.In() <- req:
	case <-p.shutdownCh:
		p.log.Warning("Skipping prefetch reschedule for block %v since "+
			"the prefetcher is shutdown", req.ptr.ID)
	}
}

func (p *blockPrefetcher) sendOverallSyncStatusNotification() {
	p.overallSyncStatusLock.Lock()
	defer p.overallSyncStatusLock.Unlock()
	p.sendOverallSyncStatusHelperLocked()
}

func (p *blockPrefetcher) stopIfNeeded(
	ctx context.Context, req *prefetchRequest) (doStop, doCancel bool) {
	dbc := p.config.DiskBlockCache()
	if dbc == nil {
		return false, false
	}
	hasRoom, howMuchRoom, err := dbc.DoesCacheHaveSpace(ctx, req.action.CacheType())
	if err != nil {
		p.log.CDebugf(ctx, "Error checking space: %+v", err)
		return false, false
	}
	if hasRoom {
		db := p.config.GetSettingsDB()
		if db != nil {
			if settings, err := db.Settings(ctx); err == nil &&
				req.action.CacheType() == DiskBlockSyncCache &&
				howMuchRoom < settings.SpaceAvailableNotificationThreshold {
				// If a notification threshold is configured, we send a
				// notification here.
				p.sendOverallSyncStatusNotification()
			}
		}
		return false, false
	}

	defer func() {
		if doStop {
			p.vlog.CLogf(ctx, libkb.VLog2,
				"stopping prefetch for block %s due to full cache (sync=%t)",
				req.ptr.ID, req.action.Sync())
		}
	}()

	if req.action.Sync() {
		// If the sync cache is close to full, reschedule the prefetch.
		p.reschedulePrefetch(req)
		p.sendOverallSyncStatusNotification()
		return true, false
	}

	// Otherwise, only stop if we're supposed to stop when full.
	doStop = req.action.StopIfFull()
	if doStop {
		doCancel = true
	}
	return doStop, doCancel
}
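
// Decision sketch for stopIfNeeded, summarizing the branches above:
//
//	cache full + sync action         => (doStop=true,  doCancel=false) // rescheduled
//	cache full + action.StopIfFull() => (doStop=true,  doCancel=true)
//	cache full otherwise             => (doStop=false, doCancel=false)
//	cache has room                   => (false, false), maybe notifying on low space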

type prefetchStatusRequest struct {
	ptr data.BlockPointer
	ch  chan<- PrefetchProgress
}

func (p *blockPrefetcher) handlePrefetchRequest(req *prefetchRequest) {
	pre, isPrefetchWaiting := p.prefetches[req.ptr.ID]
	if isPrefetchWaiting && pre.req == nil {
		// If this prefetch already appeared in the tree, ensure it
		// has a req associated with it.
		pre.req = req
	}

	p.clearRescheduleState(req.ptr.ID)

	// If this request is just asking for the wait channel,
	// send it now.  (This is processed in the same queue as
	// the prefetch requests, to guarantee an initial prefetch
	// request has always been processed before the wait
	// channel request is processed.)
	if req.sendCh != nil {
		if !isPrefetchWaiting {
			req.sendCh <- p.closedCh
		} else {
			req.sendCh <- pre.waitCh
		}
		return
	}

	select {
	case <-req.obseleted:
		// This request was cancelled while it was waiting.
		p.vlog.CLogf(context.Background(), libkb.VLog2,
			"Request not processing because it was canceled already"+
				": ptr=%s action=%v", req.ptr, req.action)
		return
	default:
		p.markQueuedPrefetchDone(req.ptr)
	}

	if isPrefetchWaiting {
		select {
		case <-pre.ctx.Done():
			p.vlog.CLogf(context.Background(), libkb.VLog2,
				"Request not processing because it was canceled "+
					"already: id=%v action=%v", req.ptr.ID, req.action)
			return
		default:
		}
	}

	ctx := context.TODO()
	if isPrefetchWaiting {
		ctx = pre.ctx
	}
	p.vlog.CLogf(ctx, libkb.VLog2, "Handling request for %v, action=%s",
		req.ptr, req.action)

	// Ensure the block is in the right cache.
	b, err := p.getBlockSynchronously(ctx, req, req.action.SoloAction())
	if err != nil {
		p.log.CWarningf(ctx, "error requesting block %s: "+
			"%+v", req.ptr.ID, err)
		// There's nothing for us to do when there's an error.
		return
	}

	// Update the priority and action of any existing
	// prefetch, and count it in the overall sync status if
	// needed.
	newAction := req.action
	oldAction := newAction
	if isPrefetchWaiting {
		if req.priority > pre.req.priority {
			pre.req.priority = req.priority
		}

		oldAction = pre.req.action
		newAction = oldAction.Combine(newAction)
		if newAction != pre.req.action {
			// This can happen for example if the prefetcher
			// doesn't know about a deep sync but now one has
			// been created.
			pre.req.action = newAction
		}

		if !oldAction.Sync() && newAction.Sync() {
			// This request turned into a syncing request, so
			// update the overall sync status.
			p.incOverallSyncTotalBytes(pre.req)
		}
	}

	defer func() {
		if pre != nil {
			// We definitely have the block, so update the total
			// fetched bytes as needed.
			p.incOverallSyncFetchedBytes(pre.req)
		}
	}()

	// If the request is finished (i.e., if it's marked as
	// finished or if it has no child blocks to fetch), then
	// complete the prefetch.
	if req.prefetchStatus == FinishedPrefetch || b.IsTail() {
		// First we handle finished prefetches.
		if isPrefetchWaiting {
			if pre.subtreeBlockCount < 0 {
				// Both log and panic so that we get the PFID in the
				// log.
				p.log.CErrorf(ctx, "the subtreeBlockCount for a "+
					"block should never be < 0")
				panic("the subtreeBlockCount for a block should " +
					"never be < 0")
			}
			// Since we decrement by `pre.subtreeBlockCount`, we're
			// guaranteed that `pre` will be removed from the
			// prefetcher.
			numBytes := pre.SubtreeBytesTotal - pre.SubtreeBytesFetched
			p.applyToParentsRecursive(
				p.completePrefetch(pre.subtreeBlockCount, numBytes),
				req.ptr.ID, pre)
		} else {
			p.vlog.CLogf(ctx, libkb.VLog2,
				"skipping prefetch for finished block %s", req.ptr.ID)
			if req.prefetchStatus != FinishedPrefetch {
				// Mark this block as finished in the cache.
				err = p.retriever.PutInCaches(
					ctx, req.ptr, req.kmd.TlfID(), b, req.lifetime,
					FinishedPrefetch, req.action.CacheType())
				if err != nil {
					p.vlog.CLogf(ctx, libkb.VLog2,
						"Couldn't put finished block %s in cache: %+v",
						req.ptr, err)
				}
			}
		}
		// Always short circuit a finished prefetch.
		return
	}
	if !req.action.Prefetch(b) {
		p.vlog.CLogf(ctx, libkb.VLog2,
			"skipping prefetch for block %s, action %s",
			req.ptr.ID, req.action)
		if isPrefetchWaiting && !oldAction.Prefetch(b) {
			// Cancel this prefetch if we're skipping it and
			// there's not already another prefetch in
			// progress.  It's not a tail block since that
			// case is caught above, so we are definitely
			// giving up here without fetching its children.
			p.applyToPtrParentsRecursive(p.cancelPrefetch, req.ptr, pre)
		}
		return
	}
	if req.prefetchStatus == TriggeredPrefetch &&
		!newAction.DeepSync() &&
		(isPrefetchWaiting &&
			newAction.Sync() == oldAction.Sync() &&
			newAction.StopIfFull() == oldAction.StopIfFull()) {
		p.vlog.CLogf(ctx, libkb.VLog2,
			"prefetch already triggered for block ID %s", req.ptr.ID)
		return
	}

	// Bail out early if we know the cache is already full, to
	// avoid enqueuing the child blocks when they aren't able
	// to be cached.
	if doStop, doCancel := p.stopIfNeeded(ctx, req); doStop {
		if doCancel && isPrefetchWaiting {
			p.applyToPtrParentsRecursive(p.cancelPrefetch, req.ptr, pre)
		}
		return
	}

	if isPrefetchWaiting {
		switch {
		case pre.subtreeRetrigger:
			p.vlog.CLogf(
				ctx, libkb.VLog2,
				"retriggering prefetch subtree for block ID %s", req.ptr.ID)
			pre.subtreeRetrigger = false
		case pre.subtreeTriggered:
			p.vlog.CLogf(
				ctx, libkb.VLog2, "prefetch subtree already triggered "+
					"for block ID %s", req.ptr.ID)
			// Redundant prefetch request.
			// We've already seen _this_ block, and already triggered
			// prefetches for its children. No use doing it again!
			if pre.subtreeBlockCount == 0 {
				// Only this block is left, and we didn't prefetch on a
				// previous prefetch through to the tail. So we cancel
				// up the tree. This still allows upgrades from an
				// unsynced block to a synced block, since p.prefetches
				// should be ephemeral.
				p.applyToPtrParentsRecursive(
					p.cancelPrefetch, req.ptr, pre)
			}
			if newAction == oldAction {
				// Short circuit prefetches if the subtree was
				// already triggered, unless we've changed the
				// prefetch action.
				return
			}
		default:
			// This block was in the tree and thus was counted, but now
			// it has been successfully fetched. We need to percolate
			// that information up the tree.
			if pre.subtreeBlockCount == 0 {
				// Both log and panic so that we get the PFID in the
				// log.
				p.log.CErrorf(ctx, "prefetch was in the tree, "+
					"wasn't triggered, but had a block count of 0")
				panic("prefetch was in the tree, wasn't triggered, " +
					"but had a block count of 0")
			}
			p.applyToParentsRecursive(
				p.decrementPrefetch, req.ptr.ID, pre)
			bytes := uint64(b.GetEncodedSize())
			p.applyToParentsRecursive(
				p.addFetchedBytes(bytes), req.ptr.ID, pre)
			pre.subtreeTriggered = true
		}
	} else {
		// Ensure we have a prefetch to work with.
		// If the prefetch is to be tracked, then the 0
		// `subtreeBlockCount` will be incremented by `numBlocks`
		// below, once we've ensured that `numBlocks` is not 0.
		pre = p.newPrefetch(0, 0, true, req)
		p.prefetches[req.ptr.ID] = pre
		ctx = pre.ctx
		p.vlog.CLogf(ctx, libkb.VLog2,
			"created new prefetch for block %s", req.ptr.ID)
	}

	// TODO: There is a potential optimization here that we can
	// consider: Currently every time a prefetch is triggered, we
	// iterate through all the block's child pointers. This is short
	// circuited in `TriggerPrefetch` and here in various conditions.
	// However, for synced trees we ignore that and prefetch anyway. So
	// here we would need to figure out a heuristic to avoid that
	// iteration.
	//
	// `numBlocks` now represents only the number of blocks to add
	// to the tree from `pre` to its roots, inclusive.
	numBlocks, numBytesFetched, numBytesTotal, isTail, err :=
		p.handlePrefetch(pre, !isPrefetchWaiting, req.action, b)
	if err != nil {
		p.log.CWarningf(ctx, "error handling prefetch for block %s: "+
			"%+v", req.ptr.ID, err)
		// There's nothing for us to do when there's an error.
		return
	}
	if isTail {
		p.vlog.CLogf(ctx, libkb.VLog2,
			"completed prefetch for tail block %s ", req.ptr.ID)
		// This is a tail block with no children.  Parent blocks are
		// potentially waiting for this prefetch, so we percolate the
		// information up the tree that this prefetch is done.
		//
		// Note that only a tail block or cached block with
		// `FinishedPrefetch` can trigger a completed prefetch.
		//
		// We use 0 as our completion number because we've already
		// decremented above as appropriate. This just walks up the
		// tree removing blocks with a 0 subtree. We couldn't do that
		// above because `handlePrefetch` potentially adds blocks.
		// TODO: think about whether a refactor can be cleanly done to
		// only walk up the tree once. We'd track a `numBlocks` and
		// complete or decrement as appropriate.
		p.applyToParentsRecursive(
			p.completePrefetch(0, 0), req.ptr.ID, pre)
		return
	}
	// This is not a tail block.
	if numBlocks == 0 {
		p.vlog.CLogf(ctx, libkb.VLog2,
			"no blocks to prefetch for block %s", req.ptr.ID)
		// All the blocks to be triggered have already done so. Do
		// nothing.  This is simply an optimization to avoid crawling
		// the tree.
		return
	}
	if !isPrefetchWaiting {
		p.vlog.CLogf(ctx, libkb.VLog2,
			"adding block %s to the prefetch tree", req.ptr)
		// This block doesn't appear in the prefetch tree, so it's the
		// root of a new prefetch tree. Add it to the tree.
		p.prefetches[req.ptr.ID] = pre
		// One might think that since this block wasn't in the tree, we
		// need to `numBlocks++`. But since we're in this flow, the
		// block has already been fetched and is thus done.  So it
		// shouldn't block anything above it in the tree from
		// completing.
	}
	p.vlog.CLogf(ctx, libkb.VLog2,
		"prefetching %d block(s) with parent block %s "+
			"[bytesFetched=%d, bytesTotal=%d]",
		numBlocks, req.ptr.ID, numBytesFetched, numBytesTotal)
	// Walk up the block tree and add numBlocks to every parent,
	// starting with this block.
	p.applyToParentsRecursive(func(blockID kbfsblock.ID, pp *prefetch) {
		pp.subtreeBlockCount += numBlocks
		pp.SubtreeBytesFetched += numBytesFetched
		pp.SubtreeBytesTotal += numBytesTotal
	}, req.ptr.ID, pre)
	// Ensure this block's status is marked as triggered.  If
	// it was rescheduled due to a previously-full cache, it
	// might not yet be set.
	dbc := p.config.DiskBlockCache()
	if dbc != nil {
		err := dbc.UpdateMetadata(
			pre.ctx, req.kmd.TlfID(), req.ptr.ID, TriggeredPrefetch,
			req.action.CacheType())
		if err != nil {
			p.log.CDebugf(pre.ctx,
				"Couldn't update metadata for block %s, action=%s",
				req.ptr.ID, pre.req.action)
		}
	}
}

func (p *blockPrefetcher) setPaused(paused bool) {
	p.pauseLock.Lock()
	defer p.pauseLock.Unlock()
	oldPaused := p.paused
	p.paused = paused
	if oldPaused != paused {
		close(p.pausedCh)
		p.pausedCh = make(chan struct{})
	}
}

func (p *blockPrefetcher) getPaused() (paused bool, ch <-chan struct{}) {
	p.pauseLock.RLock()
	defer p.pauseLock.RUnlock()
	return p.paused, p.pausedCh
}
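
// A hypothetical consumer sketch: pausedCh is closed (and replaced) whenever
// the paused state flips, so a waiter can block until the next transition.
//
//	paused, ch := p.getPaused()
//	if paused {
//		<-ch // unblocks on the next setPaused state change
//	}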

func (p *blockPrefetcher) handleAppStateChange(
	appState *keybase1.MobileAppState) {
	defer func() {
		p.setPaused(false)
	}()

	// Pause the prefetcher when backgrounded.
	for *appState != keybase1.MobileAppState_FOREGROUND {
		p.setPaused(true)
		p.log.CDebugf(
			context.TODO(), "Pausing prefetcher while backgrounded")
		select {
		case *appState = <-p.appStateUpdater.NextAppStateUpdate(
			appState):
		case req := <-p.prefetchStatusCh.Out():
			p.handleStatusRequest(req.(*prefetchStatusRequest))
			continue
		case <-p.almostDoneCh:
			return
		}
	}
}

type prefetcherSubscriber struct {
	ch       chan<- struct{}
	clientID SubscriptionManagerClientID
}

func makePrefetcherSubscriptionManagerClientID() SubscriptionManagerClientID {
	return SubscriptionManagerClientID(
		fmt.Sprintf("prefetcher-%d", time.Now().UnixNano()))
}

func (ps prefetcherSubscriber) OnPathChange(
	_ SubscriptionManagerClientID,
	_ []SubscriptionID, _ string, _ []keybase1.PathSubscriptionTopic) {
}

func (ps prefetcherSubscriber) OnNonPathChange(
	clientID SubscriptionManagerClientID,
	_ []SubscriptionID, _ keybase1.SubscriptionTopic) {
	if clientID != ps.clientID {
		return
	}

	select {
	case ps.ch <- struct{}{}:
	default:
	}
}

func (p *blockPrefetcher) handleNetStateChange(
	netState *keybase1.MobileNetworkState, subCh <-chan struct{}) {
	if *netState != keybase1.MobileNetworkState_CELLULAR {
		return
	}

	defer func() {
		p.setPaused(false)
	}()

	for *netState == keybase1.MobileNetworkState_CELLULAR {
		// Default to not syncing while on a cell network.
		syncOnCellular := false
		db := p.config.GetSettingsDB()
		if db != nil {
			s, err := db.Settings(context.TODO())
			if err == nil {
				syncOnCellular = s.SyncOnCellular
			}
		}

		if syncOnCellular {
			// Can ignore this network change.
			break
		}

		p.setPaused(true)
		p.log.CDebugf(
			context.TODO(), "Pausing prefetcher on cell network")
		select {
		case *netState = <-p.appStateUpdater.NextNetworkStateUpdate(
			netState):
		case <-subCh:
			p.log.CDebugf(context.TODO(), "Settings changed")
		case req := <-p.prefetchStatusCh.Out():
			p.handleStatusRequest(req.(*prefetchStatusRequest))
			continue
		case <-p.almostDoneCh:
			return
		}
	}
}
  1422  
  1423  // run prefetches blocks.
  1424  // E.g. a synced prefetch:
  1425  // a -> {b -> {c, d}, e -> {f, g}}:
  1426  // * state of prefetch tree in `p.prefetches`.
  1427  // 1) a is fetched, triggers b and e.
  1428  //   - a:2 -> {b:1, e:1}
  1429  //  2. b is fetched, decrements b and a by 1, and triggers c and d to increment
  1430  //     b and a by 2.
  1431  //     * a:3 -> {b:2 -> {c:1, d:1}, e:1}
  1432  //  3. c is fetched, and isTail==true so it completes up the tree.
  1433  //     * a:2 -> {b:1 -> {d:1}, e:1}
  1434  //  4. d is fetched, and isTail==true so it completes up the tree.
  1435  //     * a:1 -> {e:1}
  1436  //  5. e is fetched, decrements e and a by 1, and triggers f and g to increment
  1437  //     e and a by 2.
  1438  //     * a:2 -> {e:2 -> {f:1, g:1}}
  1439  //  6. f is fetched, and isTail==true so it completes up the tree.
  1440  //     * a:1 -> {e:1 -> {g:1}}
  1441  //  7. g is fetched, completing g, e, and a.
  1442  //     * <empty>
  1443  //
  1444  // Blocks may have multiple parents over time, since this block's current
  1445  // parent might not have finished prefetching by the time it's changed by a
  1446  // write to its subtree. That is, if we have a tree of `a` -> `b`, and a write
  1447  // causes `a` to get an additional child of `c`, then the new tree is `a` ->
  1448  // `b`, `a'` -> {`b`, `c`}. `b` now has 2 parents: `a` and `a'`, both of which
  1449  // need to be notified of the prefetch completing.
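        //
        // In terms of the `parents` field on the `prefetch` struct, the
        // multi-parent case above is tracked roughly as follows (an
        // illustrative sketch, not code from this file):
        //
        //	pre := p.prefetches[bPtr.ID]
        //	nonce := bPtr.GetRefNonce()
        //	if pre.parents[nonce] == nil {
        //		pre.parents[nonce] = make(map[data.BlockPointer]<-chan struct{})
        //	}
        //	pre.parents[nonce][aPtr] = aWaitCh       // original parent a
        //	pre.parents[nonce][aPrimePtr] = aPrimeCh // new parent a' after the write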
  1450  //
  1451  // A *critical* assumption here is that a block tree will never have a diamond
  1452  // topology. That is, while a block may have multiple parents, at no point can
  1453  // there exist more than one path from a block to another block in the tree.
  1454  // That assumption should hold because blocks are content addressed, so
  1455  // changing anything about one block creates brand new parents all the way up
  1456  // the tree. If this did ever happen, a completed fetch downstream of the
  1457  // diamond would be double counted in all nodes above the diamond, and the
  1458  // prefetcher would eventually panic.
  1459  func (p *blockPrefetcher) run(
  1460  	testSyncCh <-chan struct{}, testDoneCh chan<- struct{}) {
  1461  	defer func() {
  1462  		close(p.doneCh)
  1463  		p.prefetchRequestCh.Close()
  1464  		p.prefetchCancelCh.Close()
  1465  		p.prefetchCancelTlfCh.Close()
  1466  		p.prefetchRescheduleCh.Close()
  1467  		p.prefetchStatusCh.Close()
  1468  		p.inFlightFetches.Close()
  1469  	}()
  1470  	isShuttingDown := false
  1471  	var shuttingDownCh <-chan interface{}
  1472  	first := true
  1473  	appState := keybase1.MobileAppState_FOREGROUND
  1474  	netState := keybase1.MobileNetworkState_NONE
  1475  
  1476  	// Subscribe to settings updates while waiting for the network to
  1477  	// change.
  1478  	subCh := make(chan struct{}, 1)
  1479  	clientID := makePrefetcherSubscriptionManagerClientID()
  1480  	subMan := p.config.SubscriptionManager(
  1481  		clientID, false,
  1482  		prefetcherSubscriber{
  1483  			ch:       subCh,
  1484  			clientID: clientID,
  1485  		})
  1486  	if subMan != nil {
  1487  		const prefetcherSubKey = "prefetcherSettings"
  1488  		err := subMan.SubscribeNonPath(
  1489  			context.TODO(), prefetcherSubKey,
  1490  			keybase1.SubscriptionTopic_SETTINGS, nil)
  1491  		if err != nil {
  1492  			p.log.CDebugf(
  1493  				context.TODO(), "Error subscribing to settings: %+v", err)
  1494  		} else {
  1495  			defer subMan.Unsubscribe(context.TODO(), prefetcherSubKey)
  1496  		}
  1497  		defer subMan.Shutdown(context.TODO())
  1498  	} else {
  1499  		close(subCh)
  1500  		subCh = nil
  1501  	}
  1502  
  1503  	for {
  1504  		if !first && testDoneCh != nil && !isShuttingDown {
  1505  			testDoneCh <- struct{}{}
  1506  		}
  1507  		first = false
  1508  		if isShuttingDown {
  1509  			if p.inFlightFetches.Len() == 0 &&
  1510  				p.prefetchRequestCh.Len() == 0 &&
  1511  				p.prefetchCancelCh.Len() == 0 &&
  1512  				p.prefetchCancelTlfCh.Len() == 0 &&
  1513  				p.prefetchRescheduleCh.Len() == 0 &&
  1514  				p.prefetchStatusCh.Len() == 0 {
  1515  				return
  1516  			}
  1517  		} else if testSyncCh != nil {
  1518  			// Only sync if we aren't shutting down.
  1519  			<-testSyncCh
  1520  		}
  1521  
  1522  		p.handleCriticalRequests()
  1523  
  1524  		select {
  1525  		case req := <-p.prefetchStatusCh.Out():
  1526  			p.handleStatusRequest(req.(*prefetchStatusRequest))
  1527  		case chInterface := <-shuttingDownCh:
  1528  			p.log.Debug("shutting down, clearing in flight fetches")
  1529  			ch := chInterface.(<-chan error)
  1530  			<-ch
  1531  		case appState = <-p.appStateUpdater.NextAppStateUpdate(&appState):
  1532  			p.handleAppStateChange(&appState)
  1533  		case netState = <-p.appStateUpdater.NextNetworkStateUpdate(&netState):
  1534  			p.handleNetStateChange(&netState, subCh)
  1535  		case <-subCh:
  1536  			// Settings have changed, so recheck the network state.
  1537  			netState = keybase1.MobileNetworkState_NONE
  1538  		case ptrInt := <-p.prefetchCancelCh.Out():
  1539  			ptr := ptrInt.(data.BlockPointer)
  1540  			pre, ok := p.prefetches[ptr.ID]
  1541  			if !ok {
  1542  				p.vlog.Log(libkb.VLog2, "nothing to cancel for block %s", ptr)
  1543  				continue
  1544  			}
  1545  			p.vlog.Log(libkb.VLog2, "canceling prefetch for block %s", ptr)
  1546  			// Walk up the block tree and cancel every ancestor of
  1547  			// this pointer, but only those associated with this
  1548  			// refnonce.  Other references to the same block ID
  1549  			// might still be live.
  1550  			p.applyToPtrParentsRecursive(p.cancelPrefetch, ptr, pre)
  1551  		case reqInt := <-p.prefetchCancelTlfCh.Out():
  1552  			req := reqInt.(cancelTlfPrefetch)
  1553  			p.log.CDebugf(
  1554  				context.TODO(), "Canceling all prefetches for TLF %s",
  1555  				req.tlfID)
  1556  			// Cancel all prefetches for this TLF.
  1557  			for id, pre := range p.prefetches {
  1558  				if pre.req.kmd.TlfID() != req.tlfID {
  1559  					continue
  1560  				}
  1561  
  1562  				p.vlog.CLogf(
  1563  					pre.ctx, libkb.VLog2, "TLF-canceling prefetch for %s",
  1564  					pre.req.ptr)
  1565  				p.doCancel(id, pre)
  1566  			}
  1567  			close(req.channel)
  1568  		case reqInt := <-p.prefetchRescheduleCh.Out():
  1569  			req := reqInt.(*prefetchRequest)
  1570  			blockID := req.ptr.ID
  1571  			pre, isPrefetchWaiting := p.prefetches[blockID]
  1572  			if !isPrefetchWaiting {
  1573  				// Create new prefetch here while rescheduling, to
  1574  				// prevent other subsequent requests from creating
  1575  				// one.
  1576  				pre = p.newPrefetch(1, uint64(req.encodedSize), false, req)
  1577  				p.prefetches[blockID] = pre
  1578  			} else {
  1579  				pre.req = req
  1580  			}
  1581  			p.vlog.CLogf(pre.ctx, libkb.VLog2,
  1582  				"rescheduling top-block prefetch for block %s", blockID)
  1583  			p.applyToParentsRecursive(p.rescheduleTopBlock, blockID, pre)
  1584  		case reqInt := <-p.prefetchRequestCh.Out():
  1585  			req := reqInt.(*prefetchRequest)
  1586  			p.handlePrefetchRequest(req)
  1587  		case <-p.almostDoneCh:
  1588  			p.log.CDebugf(p.ctx, "starting shutdown")
  1589  			isShuttingDown = true
  1590  			shuttingDownCh = p.inFlightFetches.Out()
  1591  			for id := range p.rescheduled {
  1592  				p.clearRescheduleState(id)
  1593  			}
  1594  		}
  1595  	}
  1596  }
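
        // Note on the shutdown sequence in run() above: once almostDoneCh
        // fires, isShuttingDown is set and the loop keeps servicing its
        // channels, returning only when every queue's Len() reaches zero, so
        // queued cancels and status requests are not silently dropped.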
  1597  
  1598  func (p *blockPrefetcher) setObseletedOnQueuedPrefetch(req *prefetchRequest) {
  1599  	p.queuedPrefetchHandlesLock.Lock()
  1600  	defer p.queuedPrefetchHandlesLock.Unlock()
  1601  	qp, present := p.queuedPrefetchHandles[req.ptr]
  1602  	if present {
  1603  		req.obseleted = qp.channel
  1604  		qp.waitingPrefetches++
        		// qp is a copy of the map value; write the incremented
        		// count back so it actually takes effect.
        		p.queuedPrefetchHandles[req.ptr] = qp
  1605  	} else {
  1606  		obseleted := make(chan struct{})
  1607  		req.obseleted = obseleted
  1608  		p.queuedPrefetchHandles[req.ptr] = queuedPrefetch{
  1609  			1, obseleted, req.kmd.TlfID()}
  1610  	}
  1611  }
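
        // All requests queued for the same pointer share one "obseleted"
        // channel; closing it lets every waiting request bail out.  A hedged
        // sketch of the consumer side of that handle (illustrative only):
        //
        //	select {
        //	case <-req.obseleted:
        //		return // the prefetch became unnecessary while queued
        //	default:
        //		// still wanted; carry on
        //	}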
  1612  
  1613  func (p *blockPrefetcher) triggerPrefetch(req *prefetchRequest) {
  1614  	if req.obseleted == nil {
  1615  		p.setObseletedOnQueuedPrefetch(req)
  1616  	}
  1617  	select {
  1618  	case p.prefetchRequestCh.In() <- req:
  1619  	case <-p.shutdownCh:
  1620  		p.log.Warning("Skipping prefetch for block %v since "+
  1621  			"the prefetcher is shut down", req.ptr.ID)
  1622  	}
  1623  }
  1624  
  1625  func (p *blockPrefetcher) cacheOrCancelPrefetch(ctx context.Context,
  1626  	ptr data.BlockPointer, tlfID tlf.ID, block data.Block, lifetime data.BlockCacheLifetime,
  1627  	prefetchStatus PrefetchStatus, action BlockRequestAction,
  1628  	req *prefetchRequest) error {
  1629  	err := p.retriever.PutInCaches(
  1630  		ctx, ptr, tlfID, block, lifetime, prefetchStatus, action.CacheType())
  1631  	if err != nil {
  1632  		// PutInCaches can fail (e.g., when the cache is full), so
  1633  		// check stopIfNeeded before canceling the prefetch outright.
  1634  		if doStop, doCancel := p.stopIfNeeded(ctx, req); doStop {
  1635  			if doCancel {
  1636  				p.CancelPrefetch(ptr)
  1637  			}
  1638  			return err
  1639  		}
  1640  
  1641  		p.vlog.CLogf(
  1642  			ctx, libkb.VLog2, "error prefetching block %s: %+v, canceling",
  1643  			ptr.ID, err)
  1644  		p.CancelPrefetch(ptr)
  1645  	}
  1646  	return err
  1647  }
  1648  
  1649  // ProcessBlockForPrefetch triggers a prefetch if appropriate.
  1650  func (p *blockPrefetcher) ProcessBlockForPrefetch(ctx context.Context,
  1651  	ptr data.BlockPointer, block data.Block, kmd libkey.KeyMetadata, priority int,
  1652  	lifetime data.BlockCacheLifetime, prefetchStatus PrefetchStatus,
  1653  	action BlockRequestAction) {
  1654  	req := &prefetchRequest{
  1655  		ptr, block.GetEncodedSize(), block.NewEmptier(), kmd, priority,
  1656  		lifetime, prefetchStatus, action, nil, nil, false}
  1657  	switch {
  1658  	case prefetchStatus == FinishedPrefetch:
  1659  		// Finished prefetches can always be short-circuited.
  1660  		// If we're here, then FinishedPrefetch is already cached.
  1661  	case !action.Prefetch(block):
  1662  		// Only high-priority requests can trigger prefetches. Leave the
  1663  		// prefetchStatus unchanged, but cache anyway.
  1664  		err := p.retriever.PutInCaches(
  1665  			ctx, ptr, kmd.TlfID(), block, lifetime, prefetchStatus,
  1666  			action.CacheType())
  1667  		if err != nil {
  1668  			p.log.CDebugf(ctx, "Couldn't put block %s in caches: %+v", ptr, err)
  1669  		}
  1670  	default:
  1671  		// Note that here we are caching `TriggeredPrefetch`, but the request
  1672  		// will still reflect the passed-in `prefetchStatus`, since that's the
  1673  		// one the prefetching goroutine needs to decide what to do with.
  1674  		err := p.cacheOrCancelPrefetch(
  1675  			ctx, ptr, kmd.TlfID(), block, lifetime, TriggeredPrefetch, action,
  1676  			req)
  1677  		if err != nil {
  1678  			return
  1679  		}
  1680  	}
  1681  	p.triggerPrefetch(req)
  1682  }
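
        // A hedged example of how a block-retrieval path might hand a freshly
        // fetched block to ProcessBlockForPrefetch (the priority constant and
        // action value shown are assumptions, chosen for illustration):
        //
        //	p.ProcessBlockForPrefetch(
        //		ctx, ptr, block, kmd, defaultOnDemandRequestPriority,
        //		data.TransientEntry, NoPrefetch, BlockRequestWithPrefetch)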
  1683  
  1684  var errPrefetcherAlreadyShutDown = errors.New("Already shut down")
  1685  
  1686  func (p *blockPrefetcher) proxyWaitCh(
  1687  	ctx context.Context, ptr data.BlockPointer,
  1688  	c <-chan <-chan struct{}) <-chan struct{} {
  1689  	p.log.CDebugf(
  1690  		ctx, "Proxying the wait channel for %s while prefetching is paused",
  1691  		ptr)
  1692  	proxyCh := make(chan struct{})
  1693  	go func() {
  1694  		var waitCh <-chan struct{}
  1695  		select {
  1696  		case waitCh = <-c:
  1697  		case <-p.shutdownCh:
  1698  			return
  1699  		}
  1700  		select {
  1701  		case <-waitCh:
  1702  			close(proxyCh)
  1703  		case <-p.shutdownCh:
  1704  		}
  1705  	}()
  1706  	return proxyCh
  1707  }
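
        // Note that proxyWaitCh never closes proxyCh on shutdown, so callers
        // of the returned channel should also select on their own context or
        // shutdown signal rather than blocking on it unconditionally.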
  1708  
  1709  // WaitChannelForBlockPrefetch implements the Prefetcher interface for
  1710  // blockPrefetcher.
  1711  func (p *blockPrefetcher) WaitChannelForBlockPrefetch(
  1712  	ctx context.Context, ptr data.BlockPointer) (
  1713  	waitCh <-chan struct{}, err error) {
  1714  	c := make(chan (<-chan struct{}), 1)
  1715  	req := &prefetchRequest{
  1716  		ptr, 0, nil, nil, 0, data.TransientEntry, 0, BlockRequestSolo, c, nil,
  1717  		false}
  1718  
  1719  	select {
  1720  	case p.prefetchRequestCh.In() <- req:
  1721  	case <-p.shutdownCh:
  1722  		return nil, errPrefetcherAlreadyShutDown
  1723  	case <-ctx.Done():
  1724  		return nil, ctx.Err()
  1725  	}
  1726  
  1727  	// If we're paused for some reason, we still want to return a
  1728  	// channel quickly to the caller, so proxy the real wait channel
  1729  	// and return right away.  The caller can still wait on the proxy
  1730  	// channel while the real request is waiting on the prefetcher
  1731  	// request queue.
  1732  	paused, pausedCh := p.getPaused()
  1733  	if paused {
  1734  		return p.proxyWaitCh(ctx, ptr, c), nil
  1735  	}
  1736  
  1737  	// Wait for response.
  1738  	for {
  1739  		select {
  1740  		case waitCh := <-c:
  1741  			return waitCh, nil
  1742  		case <-pausedCh:
  1743  			paused, pausedCh = p.getPaused()
  1744  			if paused {
  1745  				return p.proxyWaitCh(ctx, ptr, c), nil
  1746  			}
  1747  		case <-p.shutdownCh:
  1748  			return nil, errPrefetcherAlreadyShutDown
  1749  		case <-ctx.Done():
  1750  			return nil, ctx.Err()
  1751  		}
  1752  	}
  1753  }
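
        // A hedged sketch of the caller's side of WaitChannelForBlockPrefetch
        // (illustrative only):
        //
        //	waitCh, err := p.WaitChannelForBlockPrefetch(ctx, ptr)
        //	if err != nil {
        //		return err
        //	}
        //	select {
        //	case <-waitCh: // prefetch (or its proxy) completed
        //	case <-ctx.Done():
        //		return ctx.Err()
        //	}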
  1754  
  1755  // Status implements the Prefetcher interface for
  1756  // blockPrefetcher.
  1757  func (p *blockPrefetcher) Status(ctx context.Context, ptr data.BlockPointer) (
  1758  	PrefetchProgress, error) {
  1759  	c := make(chan PrefetchProgress, 1)
  1760  	req := &prefetchStatusRequest{ptr, c}
  1761  
  1762  	select {
  1763  	case p.prefetchStatusCh.In() <- req:
  1764  	case <-p.shutdownCh:
  1765  		return PrefetchProgress{}, errPrefetcherAlreadyShutDown
  1766  	case <-ctx.Done():
  1767  		return PrefetchProgress{}, ctx.Err()
  1768  	}
  1769  	// Wait for response.
  1770  	select {
  1771  	case status := <-c:
  1772  		return status, nil
  1773  	case <-p.shutdownCh:
  1774  		return PrefetchProgress{}, errPrefetcherAlreadyShutDown
  1775  	case <-ctx.Done():
  1776  		return PrefetchProgress{}, ctx.Err()
  1777  	}
  1778  }
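
        // A hedged usage sketch for Status, assuming PrefetchProgress exposes
        // the subtree byte counters used elsewhere for sync status:
        //
        //	progress, err := p.Status(ctx, ptr)
        //	if err == nil {
        //		fmt.Printf("fetched %d of %d bytes\n",
        //			progress.SubtreeBytesFetched, progress.SubtreeBytesTotal)
        //	}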
  1779  
  1780  // OverallSyncStatus implements the Prefetcher interface for
  1781  // blockPrefetcher.
  1782  func (p *blockPrefetcher) OverallSyncStatus() PrefetchProgress {
  1783  	p.overallSyncStatusLock.RLock()
  1784  	defer p.overallSyncStatusLock.RUnlock()
  1785  	return p.overallSyncStatus
  1786  }
  1787  
  1788  func (p *blockPrefetcher) CancelPrefetch(ptr data.BlockPointer) {
  1789  	p.cancelQueuedPrefetch(ptr)
  1790  	select {
  1791  	case p.prefetchCancelCh.In() <- ptr:
  1792  	case <-p.shutdownCh:
  1793  		p.log.Warning("Skipping prefetch cancel for block %v since "+
  1794  			"the prefetcher is shut down", ptr)
  1795  	}
  1796  }
  1797  
  1798  func (p *blockPrefetcher) CancelTlfPrefetches(
  1799  	ctx context.Context, tlfID tlf.ID) error {
  1800  	c := make(chan struct{})
  1801  
  1802  	p.cancelQueuedPrefetchesForTlf(tlfID)
  1803  	select {
  1804  	case p.prefetchCancelTlfCh.In() <- cancelTlfPrefetch{tlfID, c}:
  1805  	case <-ctx.Done():
  1806  		return ctx.Err()
  1807  	case <-p.shutdownCh:
  1808  		p.log.Warning("Skipping prefetch cancel for TLF %s since "+
  1809  			"the prefetcher is shut down", tlfID)
  1810  	}
  1811  
  1812  	select {
  1813  	case <-c:
  1814  		return nil
  1815  	case <-ctx.Done():
  1816  		return ctx.Err()
  1817  	case <-p.shutdownCh:
  1818  		return errPrefetcherAlreadyShutDown
  1819  	}
  1820  }
  1821  
  1822  // Shutdown implements the Prefetcher interface for blockPrefetcher.
  1823  func (p *blockPrefetcher) Shutdown() <-chan struct{} {
  1824  	p.shutdownOnce.Do(func() {
  1825  		close(p.shutdownCh)
  1826  	})
  1827  	return p.doneCh
  1828  }
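
        // A hedged shutdown sketch: Shutdown is idempotent (via shutdownOnce)
        // and returns the run loop's done channel, so callers can bound the
        // wait with their own context:
        //
        //	select {
        //	case <-p.Shutdown(): // run() has fully drained and returned
        //	case <-ctx.Done():
        //	}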