github.com/zhiqiangxu/go-ethereum@v1.9.16-0.20210824055606-be91cfdebc48/eth/downloader/downloader.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package downloader contains the manual full chain synchronisation.
    18  package downloader
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"math/big"
    24  	"sync"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	"github.com/zhiqiangxu/go-ethereum"
    29  	"github.com/zhiqiangxu/go-ethereum/common"
    30  	"github.com/zhiqiangxu/go-ethereum/core/rawdb"
    31  	"github.com/zhiqiangxu/go-ethereum/core/types"
    32  	"github.com/zhiqiangxu/go-ethereum/ethdb"
    33  	"github.com/zhiqiangxu/go-ethereum/event"
    34  	"github.com/zhiqiangxu/go-ethereum/log"
    35  	"github.com/zhiqiangxu/go-ethereum/metrics"
    36  	"github.com/zhiqiangxu/go-ethereum/params"
    37  	"github.com/zhiqiangxu/go-ethereum/trie"
    38  )
    39  
    40  var (
    41  	MaxHashFetch    = 512 // Amount of hashes to be fetched per retrieval request
    42  	MaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request
    43  	MaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request
     44  	MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
    45  	MaxBodyFetch    = 128 // Amount of block bodies to be fetched per retrieval request
    46  	MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
    47  	MaxStateFetch   = 384 // Amount of node state values to allow fetching per request
    48  
    49  	rttMinEstimate   = 2 * time.Second  // Minimum round-trip time to target for download requests
    50  	rttMaxEstimate   = 20 * time.Second // Maximum round-trip time to target for download requests
     51  	rttMinConfidence = 0.1              // Worst confidence factor in our estimated RTT value
    52  	ttlScaling       = 3                // Constant scaling factor for RTT -> TTL conversion
    53  	ttlLimit         = time.Minute      // Maximum TTL allowance to prevent reaching crazy timeouts
    54  
    55  	qosTuningPeers   = 5    // Number of peers to tune based on (best peers)
    56  	qosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence
    57  	qosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value
    58  
    59  	maxQueuedHeaders         = 32 * 1024                    // [eth/62] Maximum number of headers to queue for import (DOS protection)
    60  	maxHeadersProcess        = 2048                         // Number of header download results to import at once into the chain
    61  	maxResultsProcess        = 2048                         // Number of content download results to import at once into the chain
    62  	maxForkAncestry   uint64 = params.ImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
    63  
    64  	reorgProtThreshold   = 48 // Threshold number of recent blocks to disable mini reorg protection
    65  	reorgProtHeaderDelay = 2  // Number of headers to delay delivering to cover mini reorgs
    66  
    67  	fsHeaderCheckFrequency = 100             // Verification frequency of the downloaded headers during fast sync
    68  	fsHeaderSafetyNet      = 2048            // Number of headers to discard in case a chain violation is detected
    69  	fsHeaderForceVerify    = 24              // Number of headers to verify before and after the pivot to accept it
    70  	fsHeaderContCheck      = 3 * time.Second // Time interval to check for header continuations during state download
    71  	fsMinFullBlocks        = 64              // Number of blocks to retrieve fully even in fast sync
    72  )
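         // Editor's illustrative sketch (not part of the original source): how the
         // QoS knobs above are typically combined into a request deadline, assuming
         // the conversion used elsewhere in this file (requestTTL) is roughly
         // TTL = ttlScaling * RTT / confidence, capped at ttlLimit:
         //
         //	rtt := 600 * time.Millisecond // current estimate
         //	conf := 0.5                   // 50% confidence in that estimate
         //	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
         //	if ttl > ttlLimit {
         //		ttl = ttlLimit
         //	}
         //	// ttl == 3.6s here; a fully trusted 600ms RTT would yield 1.8s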
    73  
    74  var (
    75  	errBusy                    = errors.New("busy")
    76  	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
    77  	errBadPeer                 = errors.New("action from bad peer ignored")
    78  	errStallingPeer            = errors.New("peer is stalling")
    79  	errUnsyncedPeer            = errors.New("unsynced peer")
    80  	errNoPeers                 = errors.New("no peers to keep download active")
    81  	errTimeout                 = errors.New("timeout")
    82  	errEmptyHeaderSet          = errors.New("empty header set by peer")
    83  	errPeersUnavailable        = errors.New("no peers available or all tried for download")
    84  	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
    85  	errInvalidChain            = errors.New("retrieved hash chain is invalid")
    86  	errInvalidBody             = errors.New("retrieved block body is invalid")
    87  	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
    88  	errCancelStateFetch        = errors.New("state data download canceled (requested)")
    89  	errCancelContentProcessing = errors.New("content processing canceled (requested)")
    90  	errCanceled                = errors.New("syncing canceled (requested)")
    91  	errNoSyncActive            = errors.New("no sync active")
    92  	errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
    93  )
    94  
    95  type Downloader struct {
    96  	// WARNING: The `rttEstimate` and `rttConfidence` fields are accessed atomically.
    97  	// On 32 bit platforms, only 64-bit aligned fields can be atomic. The struct is
    98  	// guaranteed to be so aligned, so take advantage of that. For more information,
    99  	// see https://golang.org/pkg/sync/atomic/#pkg-note-BUG.
   100  	rttEstimate   uint64 // Round trip time to target for download requests
   101  	rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)
   102  
   103  	mode SyncMode       // Synchronisation mode defining the strategy used (per sync cycle)
   104  	mux  *event.TypeMux // Event multiplexer to announce sync operation events
   105  
   106  	checkpoint uint64   // Checkpoint block number to enforce head against (e.g. fast sync)
   107  	genesis    uint64   // Genesis block number to limit sync to (e.g. light client CHT)
   108  	queue      *queue   // Scheduler for selecting the hashes to download
   109  	peers      *peerSet // Set of active peers from which download can proceed
   110  
   111  	stateDB    ethdb.Database  // Database to state sync into (and deduplicate via)
   112  	stateBloom *trie.SyncBloom // Bloom filter for fast trie node existence checks
   113  
   114  	// Statistics
   115  	syncStatsChainOrigin uint64 // Origin block number where syncing started at
   116  	syncStatsChainHeight uint64 // Highest block number known when syncing started
   117  	syncStatsState       stateSyncStats
   118  	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields
   119  
   120  	lightchain LightChain
   121  	blockchain BlockChain
   122  
   123  	// Callbacks
   124  	dropPeer peerDropFn // Drops a peer for misbehaving
   125  
   126  	// Status
   127  	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
   128  	synchronising   int32
   129  	notified        int32
   130  	committed       int32
   131  	ancientLimit    uint64 // The maximum block number which can be regarded as ancient data.
   132  
   133  	// Channels
   134  	headerCh      chan dataPack        // [eth/62] Channel receiving inbound block headers
   135  	bodyCh        chan dataPack        // [eth/62] Channel receiving inbound block bodies
   136  	receiptCh     chan dataPack        // [eth/63] Channel receiving inbound receipts
   137  	bodyWakeCh    chan bool            // [eth/62] Channel to signal the block body fetcher of new tasks
   138  	receiptWakeCh chan bool            // [eth/63] Channel to signal the receipt fetcher of new tasks
   139  	headerProcCh  chan []*types.Header // [eth/62] Channel to feed the header processor new tasks
   140  
   141  	// for stateFetcher
   142  	stateSyncStart chan *stateSync
   143  	trackStateReq  chan *stateReq
   144  	stateCh        chan dataPack // [eth/63] Channel receiving inbound node state data
   145  
   146  	// Cancellation and termination
   147  	cancelPeer string         // Identifier of the peer currently being used as the master (cancel on drop)
   148  	cancelCh   chan struct{}  // Channel to cancel mid-flight syncs
   149  	cancelLock sync.RWMutex   // Lock to protect the cancel channel and peer in delivers
   150  	cancelWg   sync.WaitGroup // Make sure all fetcher goroutines have exited.
   151  
   152  	quitCh   chan struct{} // Quit channel to signal termination
   153  	quitLock sync.RWMutex  // Lock to prevent double closes
   154  
   155  	// Testing hooks
   156  	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
   157  	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
   158  	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
   159  	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
   160  }
   161  
   162  // LightChain encapsulates functions required to synchronise a light chain.
   163  type LightChain interface {
   164  	// HasHeader verifies a header's presence in the local chain.
   165  	HasHeader(common.Hash, uint64) bool
   166  
   167  	// GetHeaderByHash retrieves a header from the local chain.
   168  	GetHeaderByHash(common.Hash) *types.Header
   169  
   170  	// CurrentHeader retrieves the head header from the local chain.
   171  	CurrentHeader() *types.Header
   172  
   173  	// GetTd returns the total difficulty of a local block.
   174  	GetTd(common.Hash, uint64) *big.Int
   175  
   176  	// InsertHeaderChain inserts a batch of headers into the local chain.
   177  	InsertHeaderChain([]*types.Header, int) (int, error)
   178  
   179  	// Rollback removes a few recently added elements from the local chain.
   180  	Rollback([]common.Hash)
   181  }
   182  
   183  // BlockChain encapsulates functions required to sync a (full or fast) blockchain.
   184  type BlockChain interface {
   185  	LightChain
   186  
   187  	// HasBlock verifies a block's presence in the local chain.
   188  	HasBlock(common.Hash, uint64) bool
   189  
   190  	// HasFastBlock verifies a fast block's presence in the local chain.
   191  	HasFastBlock(common.Hash, uint64) bool
   192  
   193  	// GetBlockByHash retrieves a block from the local chain.
   194  	GetBlockByHash(common.Hash) *types.Block
   195  
   196  	// CurrentBlock retrieves the head block from the local chain.
   197  	CurrentBlock() *types.Block
   198  
   199  	// CurrentFastBlock retrieves the head fast block from the local chain.
   200  	CurrentFastBlock() *types.Block
   201  
   202  	// FastSyncCommitHead directly commits the head block to a certain entity.
   203  	FastSyncCommitHead(common.Hash) error
   204  
   205  	// InsertChain inserts a batch of blocks into the local chain.
   206  	InsertChain(types.Blocks) (int, error)
   207  
   208  	// InsertReceiptChain inserts a batch of receipts into the local chain.
   209  	InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error)
   210  }
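         // Editor's illustrative sketch (not part of the original source): callers
         // supply their own chain implementations for these interfaces, and a
         // compile-time assertion in the embedding package makes that contract
         // explicit. The core.BlockChain and light.LightChain types below are
         // assumptions about the caller (what upstream go-ethereum wires in), not
         // requirements of this file.
         //
         //	var _ downloader.BlockChain = (*core.BlockChain)(nil)
         //	var _ downloader.LightChain = (*light.LightChain)(nil)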
   211  
   212  // New creates a new downloader to fetch hashes and blocks from remote peers.
   213  func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
   214  	if lightchain == nil {
   215  		lightchain = chain
   216  	}
   217  	dl := &Downloader{
   218  		stateDB:        stateDb,
   219  		stateBloom:     stateBloom,
   220  		mux:            mux,
   221  		checkpoint:     checkpoint,
   222  		queue:          newQueue(),
   223  		peers:          newPeerSet(),
   224  		rttEstimate:    uint64(rttMaxEstimate),
   225  		rttConfidence:  uint64(1000000),
   226  		blockchain:     chain,
   227  		lightchain:     lightchain,
   228  		dropPeer:       dropPeer,
   229  		headerCh:       make(chan dataPack, 1),
   230  		bodyCh:         make(chan dataPack, 1),
   231  		receiptCh:      make(chan dataPack, 1),
   232  		bodyWakeCh:     make(chan bool, 1),
   233  		receiptWakeCh:  make(chan bool, 1),
   234  		headerProcCh:   make(chan []*types.Header, 1),
   235  		quitCh:         make(chan struct{}),
   236  		stateCh:        make(chan dataPack),
   237  		stateSyncStart: make(chan *stateSync),
   238  		syncStatsState: stateSyncStats{
   239  			processed: rawdb.ReadFastTrieProgress(stateDb),
   240  		},
   241  		trackStateReq: make(chan *stateReq),
   242  	}
   243  	go dl.qosTuner()
   244  	go dl.stateFetcher()
   245  	return dl
   246  }
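         // Editor's illustrative sketch (not part of the original source): a hedged
         // example of constructing the downloader. The chainDb, blockchain, eventMux
         // and removePeer values are hypothetical stand-ins for the embedding code
         // (in upstream go-ethereum this wiring lives in the eth protocol manager).
         //
         //	dl := downloader.New(checkpointNumber, chainDb, stateBloom, eventMux,
         //		blockchain, nil, removePeer)
         //	defer dl.Terminate()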
   247  
   248  // Progress retrieves the synchronisation boundaries, specifically the origin
    249  // block where synchronisation started (may have failed/suspended); the block
    250  // or header the sync is currently at; and the latest known block which the sync targets.
   251  //
   252  // In addition, during the state download phase of fast synchronisation the number
   253  // of processed and the total number of known states are also returned. Otherwise
   254  // these are zero.
   255  func (d *Downloader) Progress() ethereum.SyncProgress {
   256  	// Lock the current stats and return the progress
   257  	d.syncStatsLock.RLock()
   258  	defer d.syncStatsLock.RUnlock()
   259  
   260  	current := uint64(0)
   261  	switch {
   262  	case d.blockchain != nil && d.mode == FullSync:
   263  		current = d.blockchain.CurrentBlock().NumberU64()
   264  	case d.blockchain != nil && d.mode == FastSync:
   265  		current = d.blockchain.CurrentFastBlock().NumberU64()
   266  	case d.lightchain != nil:
   267  		current = d.lightchain.CurrentHeader().Number.Uint64()
   268  	default:
   269  		log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", d.mode)
   270  	}
   271  	return ethereum.SyncProgress{
   272  		StartingBlock: d.syncStatsChainOrigin,
   273  		CurrentBlock:  current,
   274  		HighestBlock:  d.syncStatsChainHeight,
   275  		PulledStates:  d.syncStatsState.processed,
   276  		KnownStates:   d.syncStatsState.processed + d.syncStatsState.pending,
   277  	}
   278  }
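         // Editor's illustrative sketch (not part of the original source): consuming
         // the progress report, e.g. to print a rough completion percentage. The field
         // names match the ethereum.SyncProgress value returned above; guarding the
         // zero-span case is the caller's responsibility.
         //
         //	p := dl.Progress()
         //	if p.HighestBlock > p.StartingBlock {
         //		done := float64(p.CurrentBlock-p.StartingBlock) /
         //			float64(p.HighestBlock-p.StartingBlock) * 100
         //		fmt.Printf("sync %.1f%% complete\n", done)
         //	}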
   279  
   280  // Synchronising returns whether the downloader is currently retrieving blocks.
   281  func (d *Downloader) Synchronising() bool {
   282  	return atomic.LoadInt32(&d.synchronising) > 0
   283  }
   284  
    285  // RegisterPeer injects a new download peer into the set of block sources to be
   286  // used for fetching hashes and blocks from.
   287  func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
   288  	logger := log.New("peer", id)
   289  	logger.Trace("Registering sync peer")
   290  	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
   291  		logger.Error("Failed to register sync peer", "err", err)
   292  		return err
   293  	}
   294  	d.qosReduceConfidence()
   295  
   296  	return nil
   297  }
   298  
   299  // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
   300  func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error {
   301  	return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
   302  }
   303  
    304  // UnregisterPeer removes a peer from the known list, preventing any action from
   305  // the specified peer. An effort is also made to return any pending fetches into
   306  // the queue.
   307  func (d *Downloader) UnregisterPeer(id string) error {
   308  	// Unregister the peer from the active peer set and revoke any fetch tasks
   309  	logger := log.New("peer", id)
   310  	logger.Trace("Unregistering sync peer")
   311  	if err := d.peers.Unregister(id); err != nil {
   312  		logger.Error("Failed to unregister sync peer", "err", err)
   313  		return err
   314  	}
   315  	d.queue.Revoke(id)
   316  
   317  	return nil
   318  }
   319  
   320  // Synchronise tries to sync up our local block chain with a remote peer, both
   321  // adding various sanity checks as well as wrapping it with various log entries.
   322  func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
   323  	err := d.synchronise(id, head, td, mode)
   324  
   325  	switch err {
   326  	case nil, errBusy, errCanceled:
   327  		return err
   328  	}
   329  
   330  	if errors.Is(err, errInvalidChain) {
   331  		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
   332  		if d.dropPeer == nil {
   333  			// The dropPeer method is nil when `--copydb` is used for a local copy.
   334  			// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
   335  			log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
   336  		} else {
   337  			d.dropPeer(id)
   338  		}
   339  		return err
   340  	}
   341  
   342  	switch err {
   343  	case errTimeout, errBadPeer, errStallingPeer, errUnsyncedPeer,
   344  		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
   345  		errInvalidAncestor:
   346  		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
   347  		if d.dropPeer == nil {
   348  			// The dropPeer method is nil when `--copydb` is used for a local copy.
   349  			// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
   350  			log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
   351  		} else {
   352  			d.dropPeer(id)
   353  		}
   354  	default:
   355  		log.Warn("Synchronisation failed, retrying", "err", err)
   356  	}
   357  	return err
   358  }
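         // Editor's illustrative sketch (not part of the original source): a minimal
         // caller-side loop that drives Synchronise against the currently best peer.
         // bestPeer, its Head method and the mode choice are hypothetical; upstream
         // go-ethereum triggers this from the eth protocol manager's syncer.
         //
         //	if p := bestPeer(); p != nil {
         //		head, td := p.Head()
         //		if err := dl.Synchronise(p.ID(), head, td, downloader.FastSync); err != nil {
         //			log.Debug("Synchronisation failed", "err", err)
         //		}
         //	}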
   359  
   360  // synchronise will select the peer and use it for synchronising. If an empty string is given
    361  // it will use the best peer possible and synchronise if its TD is higher than our own. If any of the
    362  // checks fail, an error will be returned. This method is synchronous.
   363  func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
   364  	// Mock out the synchronisation if testing
   365  	if d.synchroniseMock != nil {
   366  		return d.synchroniseMock(id, hash)
   367  	}
   368  	// Make sure only one goroutine is ever allowed past this point at once
   369  	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
   370  		return errBusy
   371  	}
   372  	defer atomic.StoreInt32(&d.synchronising, 0)
   373  
   374  	// Post a user notification of the sync (only once per session)
   375  	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
   376  		log.Info("Block synchronisation started")
   377  	}
    378  	// If we are already full syncing, but have a fast-sync bloom filter lying
   379  	// around, make sure it doesn't use memory any more. This is a special case
   380  	// when the user attempts to fast sync a new empty network.
   381  	if mode == FullSync && d.stateBloom != nil {
   382  		d.stateBloom.Close()
   383  	}
   384  	// Reset the queue, peer set and wake channels to clean any internal leftover state
   385  	d.queue.Reset()
   386  	d.peers.Reset()
   387  
   388  	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
   389  		select {
   390  		case <-ch:
   391  		default:
   392  		}
   393  	}
   394  	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
   395  		for empty := false; !empty; {
   396  			select {
   397  			case <-ch:
   398  			default:
   399  				empty = true
   400  			}
   401  		}
   402  	}
   403  	for empty := false; !empty; {
   404  		select {
   405  		case <-d.headerProcCh:
   406  		default:
   407  			empty = true
   408  		}
   409  	}
   410  	// Create cancel channel for aborting mid-flight and mark the master peer
   411  	d.cancelLock.Lock()
   412  	d.cancelCh = make(chan struct{})
   413  	d.cancelPeer = id
   414  	d.cancelLock.Unlock()
   415  
   416  	defer d.Cancel() // No matter what, we can't leave the cancel channel open
   417  
   418  	// Set the requested sync mode, unless it's forbidden
   419  	d.mode = mode
   420  
   421  	// Retrieve the origin peer and initiate the downloading process
   422  	p := d.peers.Peer(id)
   423  	if p == nil {
   424  		return errUnknownPeer
   425  	}
   426  	return d.syncWithPeer(p, hash, td)
   427  }
   428  
   429  // syncWithPeer starts a block synchronization based on the hash chain from the
   430  // specified peer and head hash.
   431  func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
   432  	d.mux.Post(StartEvent{})
   433  	defer func() {
   434  		// reset on error
   435  		if err != nil {
   436  			d.mux.Post(FailedEvent{err})
   437  		} else {
   438  			latest := d.lightchain.CurrentHeader()
   439  			d.mux.Post(DoneEvent{latest})
   440  		}
   441  	}()
   442  	if p.version < 62 {
   443  		return errTooOld
   444  	}
   445  
   446  	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
   447  	defer func(start time.Time) {
   448  		log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
   449  	}(time.Now())
   450  
   451  	// Look up the sync boundaries: the common ancestor and the target block
   452  	latest, err := d.fetchHeight(p)
   453  	if err != nil {
   454  		return err
   455  	}
   456  	height := latest.Number.Uint64()
   457  
   458  	origin, err := d.findAncestor(p, latest)
   459  	if err != nil {
   460  		return err
   461  	}
   462  	d.syncStatsLock.Lock()
   463  	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
   464  		d.syncStatsChainOrigin = origin
   465  	}
   466  	d.syncStatsChainHeight = height
   467  	d.syncStatsLock.Unlock()
   468  
   469  	// Ensure our origin point is below any fast sync pivot point
   470  	pivot := uint64(0)
   471  	if d.mode == FastSync {
   472  		if height <= uint64(fsMinFullBlocks) {
   473  			origin = 0
   474  		} else {
   475  			pivot = height - uint64(fsMinFullBlocks)
   476  			if pivot <= origin {
   477  				origin = pivot - 1
   478  			}
   479  		}
   480  	}
   481  	d.committed = 1
   482  	if d.mode == FastSync && pivot != 0 {
   483  		d.committed = 0
   484  	}
   485  	if d.mode == FastSync {
   486  		// Set the ancient data limitation.
   487  		// If we are running fast sync, all block data older than ancientLimit will be
   488  		// written to the ancient store. More recent data will be written to the active
   489  		// database and will wait for the freezer to migrate.
   490  		//
   491  		// If there is a checkpoint available, then calculate the ancientLimit through
   492  		// that. Otherwise calculate the ancient limit through the advertised height
   493  		// of the remote peer.
   494  		//
   495  		// The reason for picking checkpoint first is that a malicious peer can give us
   496  		// a fake (very high) height, forcing the ancient limit to also be very high.
    497  		// The peer would start to feed us valid blocks until head, resulting in all of
    498  		// the blocks being written into the ancient store. A following mini-reorg could
    499  		// then cause issues.
   500  		if d.checkpoint != 0 && d.checkpoint > maxForkAncestry+1 {
   501  			d.ancientLimit = d.checkpoint
   502  		} else if height > maxForkAncestry+1 {
   503  			d.ancientLimit = height - maxForkAncestry - 1
   504  		}
   505  		frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.
   506  		// If a part of blockchain data has already been written into active store,
   507  		// disable the ancient style insertion explicitly.
   508  		if origin >= frozen && frozen != 0 {
   509  			d.ancientLimit = 0
   510  			log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1)
   511  		} else if d.ancientLimit > 0 {
   512  			log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit)
   513  		}
   514  		// Rewind the ancient store and blockchain if reorg happens.
   515  		if origin+1 < frozen {
   516  			var hashes []common.Hash
   517  			for i := origin + 1; i < d.lightchain.CurrentHeader().Number.Uint64(); i++ {
   518  				hashes = append(hashes, rawdb.ReadCanonicalHash(d.stateDB, i))
   519  			}
   520  			d.lightchain.Rollback(hashes)
   521  		}
   522  	}
   523  	// Initiate the sync using a concurrent header and content retrieval algorithm
   524  	d.queue.Prepare(origin+1, d.mode)
   525  	if d.syncInitHook != nil {
   526  		d.syncInitHook(origin, height)
   527  	}
   528  	fetchers := []func() error{
   529  		func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved
   530  		func() error { return d.fetchBodies(origin + 1) },          // Bodies are retrieved during normal and fast sync
   531  		func() error { return d.fetchReceipts(origin + 1) },        // Receipts are retrieved during fast sync
   532  		func() error { return d.processHeaders(origin+1, pivot, td) },
   533  	}
   534  	if d.mode == FastSync {
   535  		fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) })
   536  	} else if d.mode == FullSync {
   537  		fetchers = append(fetchers, d.processFullSyncContent)
   538  	}
   539  	return d.spawnSync(fetchers)
   540  }
   541  
   542  // spawnSync runs d.process and all given fetcher functions to completion in
   543  // separate goroutines, returning the first error that appears.
   544  func (d *Downloader) spawnSync(fetchers []func() error) error {
   545  	errc := make(chan error, len(fetchers))
   546  	d.cancelWg.Add(len(fetchers))
   547  	for _, fn := range fetchers {
   548  		fn := fn
   549  		go func() { defer d.cancelWg.Done(); errc <- fn() }()
   550  	}
   551  	// Wait for the first error, then terminate the others.
   552  	var err error
   553  	for i := 0; i < len(fetchers); i++ {
   554  		if i == len(fetchers)-1 {
   555  			// Close the queue when all fetchers have exited.
   556  			// This will cause the block processor to end when
   557  			// it has processed the queue.
   558  			d.queue.Close()
   559  		}
   560  		if err = <-errc; err != nil && err != errCanceled {
   561  			break
   562  		}
   563  	}
   564  	d.queue.Close()
   565  	d.Cancel()
   566  	return err
   567  }
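         // Editor's illustrative sketch (not part of the original source): the generic
         // shape of the fan-out used above - run every task in its own goroutine, keep
         // the first failure, cancel, and still drain every result so nothing leaks -
         // shown standalone and independent of the downloader's types.
         //
         //	func runAll(tasks []func() error, cancel func()) error {
         //		errc := make(chan error, len(tasks))
         //		for _, t := range tasks {
         //			t := t
         //			go func() { errc <- t() }()
         //		}
         //		var first error
         //		for range tasks {
         //			if err := <-errc; err != nil && first == nil {
         //				first = err
         //				cancel() // ask the remaining tasks to stop early
         //			}
         //		}
         //		return first
         //	}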
   568  
   569  // cancel aborts all of the operations and resets the queue. However, cancel does
   570  // not wait for the running download goroutines to finish. This method should be
   571  // used when cancelling the downloads from inside the downloader.
   572  func (d *Downloader) cancel() {
   573  	// Close the current cancel channel
   574  	d.cancelLock.Lock()
   575  	defer d.cancelLock.Unlock()
   576  
   577  	if d.cancelCh != nil {
   578  		select {
   579  		case <-d.cancelCh:
   580  			// Channel was already closed
   581  		default:
   582  			close(d.cancelCh)
   583  		}
   584  	}
   585  }
   586  
   587  // Cancel aborts all of the operations and waits for all download goroutines to
   588  // finish before returning.
   589  func (d *Downloader) Cancel() {
   590  	d.cancel()
   591  	d.cancelWg.Wait()
   592  
   593  	d.ancientLimit = 0
   594  	log.Debug("Reset ancient limit to zero")
   595  }
   596  
   597  // Terminate interrupts the downloader, canceling all pending operations.
   598  // The downloader cannot be reused after calling Terminate.
   599  func (d *Downloader) Terminate() {
   600  	// Close the termination channel (make sure double close is allowed)
   601  	d.quitLock.Lock()
   602  	select {
   603  	case <-d.quitCh:
   604  	default:
   605  		close(d.quitCh)
   606  	}
   607  	d.quitLock.Unlock()
   608  
   609  	// Cancel any pending download requests
   610  	d.Cancel()
   611  }
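         // Editor's illustrative sketch (not part of the original source): the
         // select-before-close idiom used by cancel and Terminate above, shown in
         // isolation. It makes closing a signalling channel idempotent as long as the
         // callers are serialised by the surrounding mutex.
         //
         //	mu.Lock()
         //	select {
         //	case <-ch:
         //		// already closed, nothing to do
         //	default:
         //		close(ch)
         //	}
         //	mu.Unlock()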
   612  
   613  // fetchHeight retrieves the head header of the remote peer to aid in estimating
   614  // the total time a pending synchronisation would take.
   615  func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
   616  	p.log.Debug("Retrieving remote chain height")
   617  
   618  	// Request the advertised remote head block and wait for the response
   619  	head, _ := p.peer.Head()
   620  	go p.peer.RequestHeadersByHash(head, 1, 0, false)
   621  
   622  	ttl := d.requestTTL()
   623  	timeout := time.After(ttl)
   624  	for {
   625  		select {
   626  		case <-d.cancelCh:
   627  			return nil, errCanceled
   628  
   629  		case packet := <-d.headerCh:
   630  			// Discard anything not from the origin peer
   631  			if packet.PeerId() != p.id {
   632  				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
   633  				break
   634  			}
   635  			// Make sure the peer actually gave something valid
   636  			headers := packet.(*headerPack).headers
   637  			if len(headers) != 1 {
   638  				p.log.Debug("Multiple headers for single request", "headers", len(headers))
   639  				return nil, errBadPeer
   640  			}
   641  			head := headers[0]
   642  			if (d.mode == FastSync || d.mode == LightSync) && head.Number.Uint64() < d.checkpoint {
   643  				p.log.Warn("Remote head below checkpoint", "number", head.Number, "hash", head.Hash())
   644  				return nil, errUnsyncedPeer
   645  			}
   646  			p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash())
   647  			return head, nil
   648  
   649  		case <-timeout:
   650  			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
   651  			return nil, errTimeout
   652  
   653  		case <-d.bodyCh:
   654  		case <-d.receiptCh:
   655  			// Out of bounds delivery, ignore
   656  		}
   657  	}
   658  }
   659  
   660  // calculateRequestSpan calculates what headers to request from a peer when trying to determine the
   661  // common ancestor.
   662  // It returns parameters to be used for peer.RequestHeadersByNumber:
   663  //  from - starting block number
   664  //  count - number of headers to request
   665  //  skip - number of headers to skip
   666  // and also returns 'max', the last block which is expected to be returned by the remote peers,
   667  // given the (from,count,skip)
   668  func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
   669  	var (
   670  		from     int
   671  		count    int
   672  		MaxCount = MaxHeaderFetch / 16
   673  	)
   674  	// requestHead is the highest block that we will ask for. If requestHead is not offset,
   675  	// the highest block that we will get is 16 blocks back from head, which means we
    676  	// will fetch 14 or 15 blocks unnecessarily in the case where the height difference
    677  	// between us and the peer is 1-2 blocks, which is the most common case.
   678  	requestHead := int(remoteHeight) - 1
   679  	if requestHead < 0 {
   680  		requestHead = 0
   681  	}
   682  	// requestBottom is the lowest block we want included in the query
   683  	// Ideally, we want to include the one just below our own head
   684  	requestBottom := int(localHeight - 1)
   685  	if requestBottom < 0 {
   686  		requestBottom = 0
   687  	}
   688  	totalSpan := requestHead - requestBottom
   689  	span := 1 + totalSpan/MaxCount
   690  	if span < 2 {
   691  		span = 2
   692  	}
   693  	if span > 16 {
   694  		span = 16
   695  	}
   696  
   697  	count = 1 + totalSpan/span
   698  	if count > MaxCount {
   699  		count = MaxCount
   700  	}
   701  	if count < 2 {
   702  		count = 2
   703  	}
   704  	from = requestHead - (count-1)*span
   705  	if from < 0 {
   706  		from = 0
   707  	}
   708  	max := from + (count-1)*span
   709  	return int64(from), count, span - 1, uint64(max)
   710  }
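         // Worked example (editor's addition, values chosen for illustration): with
         // MaxHeaderFetch = 192 (so MaxCount = 12), remoteHeight = 1500 and
         // localHeight = 1000:
         //
         //	requestHead = 1499, requestBottom = 999, totalSpan = 500
         //	span  = 1 + 500/12 = 42 -> capped at 16
         //	count = 1 + 500/16 = 32 -> capped at 12
         //	from  = 1499 - 11*16 = 1323
         //	max   = 1323 + 11*16 = 1499
         //
         // so the peer is asked for 12 headers at numbers 1323, 1339, ..., 1499
         // (skip = 15) and the function returns (1323, 12, 15, 1499).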
   711  
   712  // findAncestor tries to locate the common ancestor link of the local chain and
    713  // a remote peer's blockchain. In the general case when our node was in sync and
   714  // on the correct chain, checking the top N links should already get us a match.
   715  // In the rare scenario when we ended up on a long reorganisation (i.e. none of
   716  // the head links match), we do a binary search to find the common ancestor.
   717  func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
   718  	// Figure out the valid ancestor range to prevent rewrite attacks
   719  	var (
   720  		floor        = int64(-1)
   721  		localHeight  uint64
   722  		remoteHeight = remoteHeader.Number.Uint64()
   723  	)
   724  	switch d.mode {
   725  	case FullSync:
   726  		localHeight = d.blockchain.CurrentBlock().NumberU64()
   727  	case FastSync:
   728  		localHeight = d.blockchain.CurrentFastBlock().NumberU64()
   729  	default:
   730  		localHeight = d.lightchain.CurrentHeader().Number.Uint64()
   731  	}
   732  	p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
   733  
   734  	// Recap floor value for binary search
   735  	if localHeight >= maxForkAncestry {
   736  		// We're above the max reorg threshold, find the earliest fork point
   737  		floor = int64(localHeight - maxForkAncestry)
   738  	}
   739  	// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
   740  	// all headers before that point will be missing.
   741  	if d.mode == LightSync {
   742  		// If we don't know the current CHT position, find it
   743  		if d.genesis == 0 {
   744  			header := d.lightchain.CurrentHeader()
   745  			for header != nil {
   746  				d.genesis = header.Number.Uint64()
   747  				if floor >= int64(d.genesis)-1 {
   748  					break
   749  				}
   750  				header = d.lightchain.GetHeaderByHash(header.ParentHash)
   751  			}
   752  		}
   753  		// We already know the "genesis" block number, cap floor to that
   754  		if floor < int64(d.genesis)-1 {
   755  			floor = int64(d.genesis) - 1
   756  		}
   757  	}
   758  
   759  	from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
   760  
   761  	p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
   762  	go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)
   763  
   764  	// Wait for the remote response to the head fetch
   765  	number, hash := uint64(0), common.Hash{}
   766  
   767  	ttl := d.requestTTL()
   768  	timeout := time.After(ttl)
   769  
   770  	for finished := false; !finished; {
   771  		select {
   772  		case <-d.cancelCh:
   773  			return 0, errCanceled
   774  
   775  		case packet := <-d.headerCh:
   776  			// Discard anything not from the origin peer
   777  			if packet.PeerId() != p.id {
   778  				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
   779  				break
   780  			}
   781  			// Make sure the peer actually gave something valid
   782  			headers := packet.(*headerPack).headers
   783  			if len(headers) == 0 {
   784  				p.log.Warn("Empty head header set")
   785  				return 0, errEmptyHeaderSet
   786  			}
   787  			// Make sure the peer's reply conforms to the request
   788  			for i, header := range headers {
   789  				expectNumber := from + int64(i)*int64(skip+1)
   790  				if number := header.Number.Int64(); number != expectNumber {
   791  					p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
   792  					return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering"))
   793  				}
   794  			}
   795  			// Check if a common ancestor was found
   796  			finished = true
   797  			for i := len(headers) - 1; i >= 0; i-- {
   798  				// Skip any headers that underflow/overflow our requested set
   799  				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
   800  					continue
   801  				}
   802  				// Otherwise check if we already know the header or not
   803  				h := headers[i].Hash()
   804  				n := headers[i].Number.Uint64()
   805  
   806  				var known bool
   807  				switch d.mode {
   808  				case FullSync:
   809  					known = d.blockchain.HasBlock(h, n)
   810  				case FastSync:
   811  					known = d.blockchain.HasFastBlock(h, n)
   812  				default:
   813  					known = d.lightchain.HasHeader(h, n)
   814  				}
   815  				if known {
   816  					number, hash = n, h
   817  					break
   818  				}
   819  			}
   820  
   821  		case <-timeout:
   822  			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
   823  			return 0, errTimeout
   824  
   825  		case <-d.bodyCh:
   826  		case <-d.receiptCh:
   827  			// Out of bounds delivery, ignore
   828  		}
   829  	}
   830  	// If the head fetch already found an ancestor, return
   831  	if hash != (common.Hash{}) {
   832  		if int64(number) <= floor {
   833  			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
   834  			return 0, errInvalidAncestor
   835  		}
   836  		p.log.Debug("Found common ancestor", "number", number, "hash", hash)
   837  		return number, nil
   838  	}
   839  	// Ancestor not found, we need to binary search over our chain
   840  	start, end := uint64(0), remoteHeight
   841  	if floor > 0 {
   842  		start = uint64(floor)
   843  	}
   844  	p.log.Trace("Binary searching for common ancestor", "start", start, "end", end)
   845  
   846  	for start+1 < end {
   847  		// Split our chain interval in two, and request the hash to cross check
   848  		check := (start + end) / 2
   849  
   850  		ttl := d.requestTTL()
   851  		timeout := time.After(ttl)
   852  
   853  		go p.peer.RequestHeadersByNumber(check, 1, 0, false)
   854  
   855  		// Wait until a reply arrives to this request
   856  		for arrived := false; !arrived; {
   857  			select {
   858  			case <-d.cancelCh:
   859  				return 0, errCanceled
   860  
   861  			case packer := <-d.headerCh:
   862  				// Discard anything not from the origin peer
   863  				if packer.PeerId() != p.id {
   864  					log.Debug("Received headers from incorrect peer", "peer", packer.PeerId())
   865  					break
   866  				}
   867  				// Make sure the peer actually gave something valid
   868  				headers := packer.(*headerPack).headers
   869  				if len(headers) != 1 {
   870  					p.log.Debug("Multiple headers for single request", "headers", len(headers))
   871  					return 0, errBadPeer
   872  				}
   873  				arrived = true
   874  
   875  				// Modify the search interval based on the response
   876  				h := headers[0].Hash()
   877  				n := headers[0].Number.Uint64()
   878  
   879  				var known bool
   880  				switch d.mode {
   881  				case FullSync:
   882  					known = d.blockchain.HasBlock(h, n)
   883  				case FastSync:
   884  					known = d.blockchain.HasFastBlock(h, n)
   885  				default:
   886  					known = d.lightchain.HasHeader(h, n)
   887  				}
   888  				if !known {
   889  					end = check
   890  					break
   891  				}
   892  				header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
   893  				if header.Number.Uint64() != check {
   894  					p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
   895  					return 0, errBadPeer
   896  				}
   897  				start = check
   898  				hash = h
   899  
   900  			case <-timeout:
   901  				p.log.Debug("Waiting for search header timed out", "elapsed", ttl)
   902  				return 0, errTimeout
   903  
   904  			case <-d.bodyCh:
   905  			case <-d.receiptCh:
   906  				// Out of bounds delivery, ignore
   907  			}
   908  		}
   909  	}
   910  	// Ensure valid ancestry and return
   911  	if int64(start) <= floor {
   912  		p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
   913  		return 0, errInvalidAncestor
   914  	}
   915  	p.log.Debug("Found common ancestor", "number", start, "hash", hash)
   916  	return start, nil
   917  }
   918  
   919  // fetchHeaders keeps retrieving headers concurrently from the number
   920  // requested, until no more are returned, potentially throttling on the way. To
   921  // facilitate concurrency but still protect against malicious nodes sending bad
   922  // headers, we construct a header chain skeleton using the "origin" peer we are
   923  // syncing with, and fill in the missing headers using anyone else. Headers from
   924  // other peers are only accepted if they map cleanly to the skeleton. If no one
   925  // can fill in the skeleton - not even the origin peer - it's assumed invalid and
   926  // the origin is dropped.
   927  func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64) error {
   928  	p.log.Debug("Directing header downloads", "origin", from)
   929  	defer p.log.Debug("Header download terminated")
   930  
   931  	// Create a timeout timer, and the associated header fetcher
   932  	skeleton := true            // Skeleton assembly phase or finishing up
   933  	request := time.Now()       // time of the last skeleton fetch request
   934  	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
   935  	<-timeout.C                 // timeout channel should be initially empty
   936  	defer timeout.Stop()
   937  
   938  	var ttl time.Duration
   939  	getHeaders := func(from uint64) {
   940  		request = time.Now()
   941  
   942  		ttl = d.requestTTL()
   943  		timeout.Reset(ttl)
   944  
   945  		if skeleton {
   946  			p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
   947  			go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
   948  		} else {
   949  			p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
   950  			go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
   951  		}
   952  	}
   953  	// Start pulling the header chain skeleton until all is done
   954  	ancestor := from
   955  	getHeaders(from)
   956  
   957  	for {
   958  		select {
   959  		case <-d.cancelCh:
   960  			return errCanceled
   961  
   962  		case packet := <-d.headerCh:
   963  			// Make sure the active peer is giving us the skeleton headers
   964  			if packet.PeerId() != p.id {
   965  				log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
   966  				break
   967  			}
   968  			headerReqTimer.UpdateSince(request)
   969  			timeout.Stop()
   970  
   971  			// If the skeleton's finished, pull any remaining head headers directly from the origin
   972  			if packet.Items() == 0 && skeleton {
   973  				skeleton = false
   974  				getHeaders(from)
   975  				continue
   976  			}
   977  			// If no more headers are inbound, notify the content fetchers and return
   978  			if packet.Items() == 0 {
   979  				// Don't abort header fetches while the pivot is downloading
   980  				if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
   981  					p.log.Debug("No headers, waiting for pivot commit")
   982  					select {
   983  					case <-time.After(fsHeaderContCheck):
   984  						getHeaders(from)
   985  						continue
   986  					case <-d.cancelCh:
   987  						return errCanceled
   988  					}
   989  				}
   990  				// Pivot done (or not in fast sync) and no more headers, terminate the process
   991  				p.log.Debug("No more headers available")
   992  				select {
   993  				case d.headerProcCh <- nil:
   994  					return nil
   995  				case <-d.cancelCh:
   996  					return errCanceled
   997  				}
   998  			}
   999  			headers := packet.(*headerPack).headers
  1000  
  1001  			// If we received a skeleton batch, resolve internals concurrently
  1002  			if skeleton {
  1003  				filled, proced, err := d.fillHeaderSkeleton(from, headers)
  1004  				if err != nil {
  1005  					p.log.Debug("Skeleton chain invalid", "err", err)
  1006  					return fmt.Errorf("%w: %v", errInvalidChain, err)
  1007  				}
  1008  				headers = filled[proced:]
  1009  				from += uint64(proced)
  1010  			} else {
  1011  				// If we're closing in on the chain head, but haven't yet reached it, delay
  1012  				// the last few headers so mini reorgs on the head don't cause invalid hash
  1013  				// chain errors.
  1014  				if n := len(headers); n > 0 {
  1015  					// Retrieve the current head we're at
  1016  					var head uint64
  1017  					if d.mode == LightSync {
  1018  						head = d.lightchain.CurrentHeader().Number.Uint64()
  1019  					} else {
  1020  						head = d.blockchain.CurrentFastBlock().NumberU64()
  1021  						if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
  1022  							head = full
  1023  						}
  1024  					}
  1025  					// If the head is below the common ancestor, we're actually deduplicating
  1026  					// already existing chain segments, so use the ancestor as the fake head.
  1027  					// Otherwise we might end up delaying header deliveries pointlessly.
  1028  					if head < ancestor {
  1029  						head = ancestor
  1030  					}
  1031  					// If the head is way older than this batch, delay the last few headers
  1032  					if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
  1033  						delay := reorgProtHeaderDelay
  1034  						if delay > n {
  1035  							delay = n
  1036  						}
  1037  						headers = headers[:n-delay]
  1038  					}
  1039  				}
  1040  			}
  1041  			// Insert all the new headers and fetch the next batch
  1042  			if len(headers) > 0 {
  1043  				p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
  1044  				select {
  1045  				case d.headerProcCh <- headers:
  1046  				case <-d.cancelCh:
  1047  					return errCanceled
  1048  				}
  1049  				from += uint64(len(headers))
  1050  				getHeaders(from)
  1051  			} else {
  1052  				// No headers delivered, or all of them being delayed, sleep a bit and retry
  1053  				p.log.Trace("All headers delayed, waiting")
  1054  				select {
  1055  				case <-time.After(fsHeaderContCheck):
  1056  					getHeaders(from)
  1057  					continue
  1058  				case <-d.cancelCh:
  1059  					return errCanceled
  1060  				}
  1061  			}
  1062  
  1063  		case <-timeout.C:
  1064  			if d.dropPeer == nil {
  1065  				// The dropPeer method is nil when `--copydb` is used for a local copy.
  1066  				// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
  1067  				p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id)
  1068  				break
  1069  			}
  1070  			// Header retrieval timed out, consider the peer bad and drop
  1071  			p.log.Debug("Header request timed out", "elapsed", ttl)
  1072  			headerTimeoutMeter.Mark(1)
  1073  			d.dropPeer(p.id)
  1074  
  1075  			// Finish the sync gracefully instead of dumping the gathered data though
  1076  			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
  1077  				select {
  1078  				case ch <- false:
  1079  				case <-d.cancelCh:
  1080  				}
  1081  			}
  1082  			select {
  1083  			case d.headerProcCh <- nil:
  1084  			case <-d.cancelCh:
  1085  			}
  1086  			return errBadPeer
  1087  		}
  1088  	}
  1089  }
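         // Worked example (editor's addition): with the defaults MaxHeaderFetch = 192
         // and MaxSkeletonSize = 128, one skeleton request starting at `from` asks for
         // the headers at from+191, from+383, ... - every 192nd header, 128 of them -
         // so a single skeleton round spans 128 * 192 = 24576 headers whose gaps are
         // then filled concurrently by fillHeaderSkeleton below.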
  1090  
  1091  // fillHeaderSkeleton concurrently retrieves headers from all our available peers
  1092  // and maps them to the provided skeleton header chain.
  1093  //
   1094  // Any partial results from the beginning of the skeleton are (if possible) forwarded
  1095  // immediately to the header processor to keep the rest of the pipeline full even
  1096  // in the case of header stalls.
  1097  //
  1098  // The method returns the entire filled skeleton and also the number of headers
  1099  // already forwarded for processing.
  1100  func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
  1101  	log.Debug("Filling up skeleton", "from", from)
  1102  	d.queue.ScheduleSkeleton(from, skeleton)
  1103  
  1104  	var (
  1105  		deliver = func(packet dataPack) (int, error) {
  1106  			pack := packet.(*headerPack)
  1107  			return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)
  1108  		}
  1109  		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
  1110  		throttle = func() bool { return false }
  1111  		reserve  = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
  1112  			return d.queue.ReserveHeaders(p, count), false, nil
  1113  		}
  1114  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
  1115  		capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
  1116  		setIdle  = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
  1117  	)
  1118  	err := d.fetchParts(d.headerCh, deliver, d.queue.headerContCh, expire,
  1119  		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
  1120  		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")
  1121  
  1122  	log.Debug("Skeleton fill terminated", "err", err)
  1123  
  1124  	filled, proced := d.queue.RetrieveHeaders()
  1125  	return filled, proced, err
  1126  }
  1127  
  1128  // fetchBodies iteratively downloads the scheduled block bodies, taking any
  1129  // available peers, reserving a chunk of blocks for each, waiting for delivery
  1130  // and also periodically checking for timeouts.
  1131  func (d *Downloader) fetchBodies(from uint64) error {
  1132  	log.Debug("Downloading block bodies", "origin", from)
  1133  
  1134  	var (
  1135  		deliver = func(packet dataPack) (int, error) {
  1136  			pack := packet.(*bodyPack)
  1137  			return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles)
  1138  		}
  1139  		expire   = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
  1140  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
  1141  		capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
  1142  		setIdle  = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
  1143  	)
  1144  	err := d.fetchParts(d.bodyCh, deliver, d.bodyWakeCh, expire,
  1145  		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
  1146  		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")
  1147  
  1148  	log.Debug("Block body download terminated", "err", err)
  1149  	return err
  1150  }
  1151  
  1152  // fetchReceipts iteratively downloads the scheduled block receipts, taking any
  1153  // available peers, reserving a chunk of receipts for each, waiting for delivery
  1154  // and also periodically checking for timeouts.
  1155  func (d *Downloader) fetchReceipts(from uint64) error {
  1156  	log.Debug("Downloading transaction receipts", "origin", from)
  1157  
  1158  	var (
  1159  		deliver = func(packet dataPack) (int, error) {
  1160  			pack := packet.(*receiptPack)
  1161  			return d.queue.DeliverReceipts(pack.peerID, pack.receipts)
  1162  		}
  1163  		expire   = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
  1164  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
  1165  		capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
  1166  		setIdle  = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
  1167  	)
  1168  	err := d.fetchParts(d.receiptCh, deliver, d.receiptWakeCh, expire,
  1169  		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
  1170  		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")
  1171  
  1172  	log.Debug("Transaction receipt download terminated", "err", err)
  1173  	return err
  1174  }
  1175  
  1176  // fetchParts iteratively downloads scheduled block parts, taking any available
  1177  // peers, reserving a chunk of fetch requests for each, waiting for delivery and
  1178  // also periodically checking for timeouts.
  1179  //
  1180  // As the scheduling/timeout logic mostly is the same for all downloaded data
  1181  // types, this method is used by each for data gathering and is instrumented with
  1182  // various callbacks to handle the slight differences between processing them.
  1183  //
  1184  // The instrumentation parameters:
  1186  //  - deliveryCh:  channel from which to retrieve downloaded data packets (merged from all concurrent peers)
  1187  //  - deliver:     processing callback to deliver data packets into type specific download queues (usually within `queue`)
  1188  //  - wakeCh:      notification channel for waking the fetcher when new tasks are available (or sync completed)
  1189  //  - expire:      task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
  1190  //  - pending:     task callback for the number of requests still needing download (detect completion/non-completability)
  1191  //  - inFlight:    task callback for the number of in-progress requests (wait for all active downloads to finish)
  1192  //  - throttle:    task callback to check if the processing queue is full and activate throttling (bound memory use)
  1193  //  - reserve:     task callback to reserve new download tasks to a particular peer (also signals partial completions)
  1194  //  - fetchHook:   tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
  1195  //  - fetch:       network callback to actually send a particular download request to a physical remote peer
  1196  //  - cancel:      task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
  1197  //  - capacity:    network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
  1198  //  - idle:        network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
  1199  //  - setIdle:     network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
  1200  //  - kind:        textual label of the type being downloaded to display in log messages
  1201  func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
  1202  	expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error),
  1203  	fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
  1204  	idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error {
  1205  
  1206  	// Create a ticker to detect expired retrieval tasks
  1207  	ticker := time.NewTicker(100 * time.Millisecond)
  1208  	defer ticker.Stop()
  1209  
  1210  	update := make(chan struct{}, 1)
  1211  
  1212  	// Prepare the queue and fetch block parts until the block header fetcher's done
  1213  	finished := false
  1214  	for {
  1215  		select {
  1216  		case <-d.cancelCh:
  1217  			return errCanceled
  1218  
  1219  		case packet := <-deliveryCh:
  1220  			// If the peer was previously banned and failed to deliver its pack
  1221  			// in a reasonable time frame, ignore its message.
  1222  			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
  1223  				// Deliver the received chunk of data and check chain validity
  1224  				accepted, err := deliver(packet)
  1225  				if errors.Is(err, errInvalidChain) {
  1226  					return err
  1227  				}
  1228  				// Unless a peer delivered something completely different from what was requested
  1229  				// (usually caused by a timed-out request that arrived in the end), set it to
  1230  				// idle. If the delivery's stale, the peer should have already been idled.
  1231  				if !errors.Is(err, errStaleDelivery) {
  1232  					setIdle(peer, accepted)
  1233  				}
  1234  				// Issue a log to the user to see what's going on
  1235  				switch {
  1236  				case err == nil && packet.Items() == 0:
  1237  					peer.log.Trace("Requested data not delivered", "type", kind)
  1238  				case err == nil:
  1239  					peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
  1240  				default:
  1241  					peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err)
  1242  				}
  1243  			}
  1244  			// Blocks assembled, try to update the progress
  1245  			select {
  1246  			case update <- struct{}{}:
  1247  			default:
  1248  			}
  1249  
  1250  		case cont := <-wakeCh:
  1251  			// The header fetcher sent a continuation flag, check if it's done
  1252  			if !cont {
  1253  				finished = true
  1254  			}
  1255  			// Headers arrive, try to update the progress
  1256  			select {
  1257  			case update <- struct{}{}:
  1258  			default:
  1259  			}
  1260  
  1261  		case <-ticker.C:
  1262  			// Sanity check: periodically force a progress update
  1263  			select {
  1264  			case update <- struct{}{}:
  1265  			default:
  1266  			}
  1267  
  1268  		case <-update:
  1269  			// Short circuit if we lost all our peers
  1270  			if d.peers.Len() == 0 {
  1271  				return errNoPeers
  1272  			}
  1273  			// Check for fetch request timeouts and demote the responsible peers
  1274  			for pid, fails := range expire() {
  1275  				if peer := d.peers.Peer(pid); peer != nil {
  1276  					// If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
  1277  					// ourselves. Only reset to minimal throughput, but don't drop just yet. If even the minimal request
  1278  					// times out, then sync-wise we need to get rid of the peer.
  1279  					//
  1280  					// The reason the minimum threshold is 2 is that the downloader tries to estimate the bandwidth
  1281  					// and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
  1282  					// how the response times react to it, so it always requests one more than the minimum (i.e. min 2).
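        					// For example (illustrative numbers): a peer timing out with 8 items still in
        					// flight is only reset to minimal throughput via setIdle(peer, 0), whereas a peer
        					// timing out with just 1-2 items was already at the minimum and is dropped below.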
  1283  					if fails > 2 {
  1284  						peer.log.Trace("Data delivery timed out", "type", kind)
  1285  						setIdle(peer, 0)
  1286  					} else {
  1287  						peer.log.Debug("Stalling delivery, dropping", "type", kind)
  1288  
  1289  						if d.dropPeer == nil {
  1290  							// The dropPeer method is nil when `--copydb` is used for a local copy.
  1291  							// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
  1292  							peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid)
  1293  						} else {
  1294  							d.dropPeer(pid)
  1295  
  1296  							// If this peer was the master peer, abort sync immediately
  1297  							d.cancelLock.RLock()
  1298  							master := pid == d.cancelPeer
  1299  							d.cancelLock.RUnlock()
  1300  
  1301  							if master {
  1302  								d.cancel()
  1303  								return errTimeout
  1304  							}
  1305  						}
  1306  					}
  1307  				}
  1308  			}
  1309  			// If there's nothing more to fetch, wait or terminate
  1310  			if pending() == 0 {
  1311  				if !inFlight() && finished {
  1312  					log.Debug("Data fetching completed", "type", kind)
  1313  					return nil
  1314  				}
  1315  				break
  1316  			}
  1317  			// Send a download request to all idle peers, until throttled
  1318  			progressed, throttled, running := false, false, inFlight()
  1319  			idles, total := idle()
  1320  
  1321  			for _, peer := range idles {
  1322  				// Short circuit if throttling activated
  1323  				if throttle() {
  1324  					throttled = true
  1325  					break
  1326  				}
  1327  				// Short circuit if there are no more available tasks.
  1328  				if pending() == 0 {
  1329  					break
  1330  				}
  1331  				// Reserve a chunk of fetches for a peer. A nil request can mean either that
  1332  				// no more headers are available, or that the peer is known not to
  1333  				// have them.
  1334  				request, progress, err := reserve(peer, capacity(peer))
  1335  				if err != nil {
  1336  					return err
  1337  				}
  1338  				if progress {
  1339  					progressed = true
  1340  				}
  1341  				if request == nil {
  1342  					continue
  1343  				}
  1344  				if request.From > 0 {
  1345  					peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From)
  1346  				} else {
  1347  					peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
  1348  				}
  1349  				// Fetch the chunk and make sure any errors return the hashes to the queue
  1350  				if fetchHook != nil {
  1351  					fetchHook(request.Headers)
  1352  				}
  1353  				if err := fetch(peer, request); err != nil {
  1354  					// Although we could try and make an attempt to fix this, this error really
  1355  					// means that we've double allocated a fetch task to a peer. If that is the
  1356  					// case, the internal state of the downloader and the queue is very wrong so
  1357  					// better hard crash and note the error instead of silently accumulating into
  1358  					// a much bigger issue.
  1359  					panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
  1360  				}
  1361  				running = true
  1362  			}
  1363  			// Make sure that we have peers available for fetching. If all peers have been tried
  1364  			// and all failed, throw an error.
  1365  			if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
  1366  				return errPeersUnavailable
  1367  			}
  1368  		}
  1369  	}
  1370  }
  1371  
  1372  // processHeaders takes batches of retrieved headers from an input channel and
  1373  // keeps processing and scheduling them into the header chain and downloader's
  1374  // queue until the stream ends or a failure occurs.
  1375  func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
  1376  	// Keep a count of uncertain headers to roll back
  1377  	var rollback []*types.Header
  1378  	defer func() {
  1379  		if len(rollback) > 0 {
  1380  			// Flatten the headers and roll them back
  1381  			hashes := make([]common.Hash, len(rollback))
  1382  			for i, header := range rollback {
  1383  				hashes[i] = header.Hash()
  1384  			}
  1385  			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
  1386  			if d.mode != LightSync {
  1387  				lastFastBlock = d.blockchain.CurrentFastBlock().Number()
  1388  				lastBlock = d.blockchain.CurrentBlock().Number()
  1389  			}
  1390  			d.lightchain.Rollback(hashes)
  1391  			curFastBlock, curBlock := common.Big0, common.Big0
  1392  			if d.mode != LightSync {
  1393  				curFastBlock = d.blockchain.CurrentFastBlock().Number()
  1394  				curBlock = d.blockchain.CurrentBlock().Number()
  1395  			}
  1396  			log.Warn("Rolled back headers", "count", len(hashes),
  1397  				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
  1398  				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
  1399  				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
  1400  		}
  1401  	}()
  1402  
  1403  	// Wait for batches of headers to process
  1404  	gotHeaders := false
  1405  
  1406  	for {
  1407  		select {
  1408  		case <-d.cancelCh:
  1409  			return errCanceled
  1410  
  1411  		case headers := <-d.headerProcCh:
  1412  			// Terminate header processing if we synced up
  1413  			if len(headers) == 0 {
  1414  				// Notify everyone that headers are fully processed
  1415  				for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
  1416  					select {
  1417  					case ch <- false:
  1418  					case <-d.cancelCh:
  1419  					}
  1420  				}
  1421  				// If no headers were retrieved at all, the peer violated its TD promise that it had a
  1422  				// better chain compared to ours. The only exception is if its promised blocks were
  1423  				// already imported by other means (e.g. fetcher):
  1424  				//
  1425  				// R <remote peer>, L <local node>: Both at block 10
  1426  				// R: Mine block 11, and propagate it to L
  1427  				// L: Queue block 11 for import
  1428  				// L: Notice that R's head and TD increased compared to ours, start sync
  1429  				// L: Import of block 11 finishes
  1430  				// L: Sync begins, and finds common ancestor at 11
  1431  				// L: Request new headers up from 11 (R's TD was higher, it must have something)
  1432  				// R: Nothing to give
  1433  				if d.mode != LightSync {
  1434  					head := d.blockchain.CurrentBlock()
  1435  					if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
  1436  						return errStallingPeer
  1437  					}
  1438  				}
  1439  				// If fast or light syncing, ensure promised headers are indeed delivered. This is
  1440  				// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
  1441  				// of delivering the post-pivot blocks that would flag the invalid content.
  1442  				//
  1443  				// This check cannot be executed "as is" for full imports, since blocks may still be
  1444  				// queued for processing when the header download completes. However, as long as the
  1445  				// peer gave us something useful, we're already happy/progressed (above check).
  1446  				if d.mode == FastSync || d.mode == LightSync {
  1447  					head := d.lightchain.CurrentHeader()
  1448  					if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
  1449  						return errStallingPeer
  1450  					}
  1451  				}
  1452  				// Disable any rollback and return
  1453  				rollback = nil
  1454  				return nil
  1455  			}
  1456  			// Otherwise split the chunk of headers into batches and process them
  1457  			gotHeaders = true
  1458  			for len(headers) > 0 {
  1459  				// Terminate if something failed in between processing chunks
  1460  				select {
  1461  				case <-d.cancelCh:
  1462  					return errCanceled
  1463  				default:
  1464  				}
  1465  				// Select the next chunk of headers to import
  1466  				limit := maxHeadersProcess
  1467  				if limit > len(headers) {
  1468  					limit = len(headers)
  1469  				}
  1470  				chunk := headers[:limit]
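        				// E.g. a batch of 5000 headers with maxHeadersProcess = 2048 is consumed in
        				// chunks of 2048, 2048 and 904 headers, re-checking cancelCh in between.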
  1471  				// In case of header only syncing, validate the chunk immediately
  1472  				if d.mode == FastSync || d.mode == LightSync {
  1473  					// Collect the yet unknown headers to mark them as uncertain
  1474  					unknown := make([]*types.Header, 0, len(chunk))
  1475  					for _, header := range chunk {
  1476  						if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
  1477  							unknown = append(unknown, header)
  1478  						}
  1479  					}
  1480  					// If we're importing pure headers, verify based on their recentness
  1481  					frequency := fsHeaderCheckFrequency
  1482  					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
  1483  						frequency = 1
  1484  					}
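        					// Illustration (using the upstream go-ethereum values fsHeaderCheckFrequency = 100
        					// and fsHeaderForceVerify = 24): far from the pivot only every 100th header is fully
        					// verified, but once the chunk's last header is within 24 blocks of (or beyond) the
        					// pivot, every single header is verified.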
  1485  					if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
  1486  						// If some headers were inserted, add them too to the rollback list
  1487  						if n > 0 {
  1488  							rollback = append(rollback, chunk[:n]...)
  1489  						}
  1490  						log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
  1491  						return fmt.Errorf("%w: %v", errInvalidChain, err)
  1492  					}
  1493  					// All verifications passed, store newly found uncertain headers
  1494  					rollback = append(rollback, unknown...)
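        					// Cap the rollback set: only the most recent fsHeaderSafetyNet headers (2048 in
        					// upstream go-ethereum) are kept as rollback candidates, bounding how much is
        					// unwound if a violation is detected later.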
  1495  					if len(rollback) > fsHeaderSafetyNet {
  1496  						rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
  1497  					}
  1498  				}
  1499  				// Unless we're doing light chains, schedule the headers for associated content retrieval
  1500  				if d.mode == FullSync || d.mode == FastSync {
  1501  					// If we've reached the allowed number of pending headers, stall a bit
  1502  					for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
  1503  						select {
  1504  						case <-d.cancelCh:
  1505  							return errCanceled
  1506  						case <-time.After(time.Second):
  1507  						}
  1508  					}
  1509  					// Otherwise insert the headers for content retrieval
  1510  					inserts := d.queue.Schedule(chunk, origin)
  1511  					if len(inserts) != len(chunk) {
  1512  						log.Debug("Stale headers")
  1513  						return errBadPeer
  1514  					}
  1515  				}
  1516  				headers = headers[limit:]
  1517  				origin += uint64(limit)
  1518  			}
  1519  			// Update the highest block number we know if a higher one is found.
  1520  			d.syncStatsLock.Lock()
  1521  			if d.syncStatsChainHeight < origin {
  1522  				d.syncStatsChainHeight = origin - 1
  1523  			}
  1524  			d.syncStatsLock.Unlock()
  1525  
  1526  			// Signal the content downloaders of the availability of new tasks
  1527  			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
  1528  				select {
  1529  				case ch <- true:
  1530  				default:
  1531  				}
  1532  			}
  1533  		}
  1534  	}
  1535  }
  1536  
  1537  // processFullSyncContent takes fetch results from the queue and imports them into the chain.
  1538  func (d *Downloader) processFullSyncContent() error {
  1539  	for {
  1540  		results := d.queue.Results(true)
  1541  		if len(results) == 0 {
  1542  			return nil
  1543  		}
  1544  		if d.chainInsertHook != nil {
  1545  			d.chainInsertHook(results)
  1546  		}
  1547  		if err := d.importBlockResults(results); err != nil {
  1548  			return err
  1549  		}
  1550  	}
  1551  }
  1552  
  1553  func (d *Downloader) importBlockResults(results []*fetchResult) error {
  1554  	// Check for any early termination requests
  1555  	if len(results) == 0 {
  1556  		return nil
  1557  	}
  1558  	select {
  1559  	case <-d.quitCh:
  1560  		return errCancelContentProcessing
  1561  	default:
  1562  	}
  1563  	// Retrieve the batch of results to import
  1564  	first, last := results[0].Header, results[len(results)-1].Header
  1565  	log.Debug("Inserting downloaded chain", "items", len(results),
  1566  		"firstnum", first.Number, "firsthash", first.Hash(),
  1567  		"lastnum", last.Number, "lasthash", last.Hash(),
  1568  	)
  1569  	blocks := make([]*types.Block, len(results))
  1570  	for i, result := range results {
  1571  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1572  	}
  1573  	if index, err := d.blockchain.InsertChain(blocks); err != nil {
  1574  		if index < len(results) {
  1575  			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1576  		} else {
  1577  			// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
  1578  			// when it needs to preprocess blocks to import a sidechain.
  1579  			// The importer will put together a new list of blocks to import, which is a superset
  1580  			// of the blocks delivered from the downloader, and the indexing will be off.
  1581  			log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
  1582  		}
  1583  		return fmt.Errorf("%w: %v", errInvalidChain, err)
  1584  	}
  1585  	return nil
  1586  }
  1587  
  1588  // processFastSyncContent takes fetch results from the queue and writes them to the
  1589  // database. It also controls the synchronisation of state nodes of the pivot block.
  1590  func (d *Downloader) processFastSyncContent(latest *types.Header) error {
  1591  	// Start syncing state of the reported head block. This should get us most of
  1592  	// the state of the pivot block.
  1593  	sync := d.syncState(latest.Root)
  1594  	defer sync.Cancel()
  1595  	closeOnErr := func(s *stateSync) {
  1596  		if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled {
  1597  			d.queue.Close() // wake up Results
  1598  		}
  1599  	}
  1600  	go closeOnErr(sync)
  1601  	// Figure out the ideal pivot block. Note that this goalpost may move if the
  1602  	// sync takes long enough for the chain head to move significantly.
  1603  	pivot := uint64(0)
  1604  	if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) {
  1605  		pivot = height - uint64(fsMinFullBlocks)
  1606  	}
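        	// E.g. with the upstream value fsMinFullBlocks = 64, a reported head at block 1,000,000
        	// yields a pivot of 999,936: state is synced at the pivot and only the final 64 blocks
        	// are imported with full block processing.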
  1607  	// To cater for moving pivot points, track the pivot block and subsequently
  1608  	// accumulated download results separately.
  1609  	var (
  1610  		oldPivot *fetchResult   // Locked in pivot block, might change eventually
  1611  		oldTail  []*fetchResult // Downloaded content after the pivot
  1612  	)
  1613  	for {
  1614  		// Wait for the next batch of downloaded data to be available, and if the pivot
  1615  		// block became stale, move the goalpost
  1616  		results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
  1617  		if len(results) == 0 {
  1618  			// If pivot sync is done, stop
  1619  			if oldPivot == nil {
  1620  				return sync.Cancel()
  1621  			}
  1622  			// If sync failed, stop
  1623  			select {
  1624  			case <-d.cancelCh:
  1625  				sync.Cancel()
  1626  				return errCanceled
  1627  			default:
  1628  			}
  1629  		}
  1630  		if d.chainInsertHook != nil {
  1631  			d.chainInsertHook(results)
  1632  		}
  1633  		if oldPivot != nil {
  1634  			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
  1635  		}
  1636  		// Split around the pivot block and process the two sides via fast/full sync
  1637  		if atomic.LoadInt32(&d.committed) == 0 {
  1638  			latest = results[len(results)-1].Header
  1639  			if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
  1640  				log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
  1641  				pivot = height - uint64(fsMinFullBlocks)
  1642  			}
  1643  		}
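        		// The pivot only moves once the head drifts more than 2*fsMinFullBlocks past it: e.g.
        		// with the upstream value of 64, a pivot at 999,936 goes stale when the downloaded head
        		// exceeds 1,000,064 and is then re-anchored 64 blocks below the new head.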
  1644  		P, beforeP, afterP := splitAroundPivot(pivot, results)
  1645  		if err := d.commitFastSyncData(beforeP, sync); err != nil {
  1646  			return err
  1647  		}
  1648  		if P != nil {
  1649  			// If new pivot block found, cancel old state retrieval and restart
  1650  			if oldPivot != P {
  1651  				sync.Cancel()
  1652  
  1653  				sync = d.syncState(P.Header.Root)
  1654  				defer sync.Cancel()
  1655  				go closeOnErr(sync)
  1656  				oldPivot = P
  1657  			}
  1658  			// Wait for completion, occasionally checking for pivot staleness
  1659  			select {
  1660  			case <-sync.done:
  1661  				if sync.err != nil {
  1662  					return sync.err
  1663  				}
  1664  				if err := d.commitPivotBlock(P); err != nil {
  1665  					return err
  1666  				}
  1667  				oldPivot = nil
  1668  
  1669  			case <-time.After(time.Second):
  1670  				oldTail = afterP
  1671  				continue
  1672  			}
  1673  		}
  1674  		// Fast sync done, pivot commit done, full import
  1675  		if err := d.importBlockResults(afterP); err != nil {
  1676  			return err
  1677  		}
  1678  	}
  1679  }
  1680  
  1681  func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
  1682  	for _, result := range results {
  1683  		num := result.Header.Number.Uint64()
  1684  		switch {
  1685  		case num < pivot:
  1686  			before = append(before, result)
  1687  		case num == pivot:
  1688  			p = result
  1689  		default:
  1690  			after = append(after, result)
  1691  		}
  1692  	}
  1693  	return p, before, after
  1694  }
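        // As an illustration, with pivot = 100 and results spanning blocks 97..103, the split
        // yields before = [97, 98, 99], p = block 100 and after = [101, 102, 103]; before and p
        // are committed via fast sync, while after is imported with full block processing.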
  1695  
  1696  func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
  1697  	// Check for any early termination requests
  1698  	if len(results) == 0 {
  1699  		return nil
  1700  	}
  1701  	select {
  1702  	case <-d.quitCh:
  1703  		return errCancelContentProcessing
  1704  	case <-stateSync.done:
  1705  		if err := stateSync.Wait(); err != nil {
  1706  			return err
  1707  		}
  1708  	default:
  1709  	}
  1710  	// Retrieve the batch of results to import
  1711  	first, last := results[0].Header, results[len(results)-1].Header
  1712  	log.Debug("Inserting fast-sync blocks", "items", len(results),
  1713  		"firstnum", first.Number, "firsthash", first.Hash(),
  1714  		"lastnumn", last.Number, "lasthash", last.Hash(),
  1715  	)
  1716  	blocks := make([]*types.Block, len(results))
  1717  	receipts := make([]types.Receipts, len(results))
  1718  	for i, result := range results {
  1719  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1720  		receipts[i] = result.Receipts
  1721  	}
  1722  	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
  1723  		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1724  		return fmt.Errorf("%w: %v", errInvalidChain, err)
  1725  	}
  1726  	return nil
  1727  }
  1728  
  1729  func (d *Downloader) commitPivotBlock(result *fetchResult) error {
  1730  	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1731  	log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
  1732  
  1733  	// Commit the pivot block as the new head, will require full sync from here on
  1734  	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {
  1735  		return err
  1736  	}
  1737  	if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
  1738  		return err
  1739  	}
  1740  	atomic.StoreInt32(&d.committed, 1)
  1741  
  1742  	// If we had a bloom filter for the state sync, deallocate it now. Note, we only
  1743  	// deallocate internally, but keep the empty wrapper. This ensures that if we do
  1744  	// a rollback after committing the pivot and restarting fast sync, we don't end
  1745  	// up using a nil bloom. Empty bloom is fine, it just returns that it does not
  1746  	// have the info we need, so reach down to the database instead.
  1747  	if d.stateBloom != nil {
  1748  		d.stateBloom.Close()
  1749  	}
  1750  	return nil
  1751  }
  1752  
  1753  // DeliverHeaders injects a new batch of block headers received from a remote
  1754  // node into the download schedule.
  1755  func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
  1756  	return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
  1757  }
  1758  
  1759  // DeliverBodies injects a new batch of block bodies received from a remote node.
  1760  func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
  1761  	return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
  1762  }
  1763  
  1764  // DeliverReceipts injects a new batch of receipts received from a remote node.
  1765  func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
  1766  	return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
  1767  }
  1768  
  1769  // DeliverNodeData injects a new batch of node state data received from a remote node.
  1770  func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
  1771  	return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
  1772  }
  1773  
  1774  // deliver injects a new batch of data received from a remote node.
  1775  func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
  1776  	// Update the delivery metrics for both good and failed deliveries
  1777  	inMeter.Mark(int64(packet.Items()))
  1778  	defer func() {
  1779  		if err != nil {
  1780  			dropMeter.Mark(int64(packet.Items()))
  1781  		}
  1782  	}()
  1783  	// Deliver or abort if the sync is canceled while queuing
  1784  	d.cancelLock.RLock()
  1785  	cancel := d.cancelCh
  1786  	d.cancelLock.RUnlock()
  1787  	if cancel == nil {
  1788  		return errNoSyncActive
  1789  	}
  1790  	select {
  1791  	case destCh <- packet:
  1792  		return nil
  1793  	case <-cancel:
  1794  		return errNoSyncActive
  1795  	}
  1796  }
  1797  
  1798  // qosTuner is the quality of service tuning loop that occasionally gathers the
  1799  // peer latency statistics and updates the estimated request round trip time.
  1800  func (d *Downloader) qosTuner() {
  1801  	for {
  1802  		// Retrieve the current median RTT and integrate it into the previous target RTT
  1803  		rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
  1804  		atomic.StoreUint64(&d.rttEstimate, uint64(rtt))
  1805  
  1806  		// A new RTT cycle passed, increase our confidence in the estimated RTT
  1807  		conf := atomic.LoadUint64(&d.rttConfidence)
  1808  		conf = conf + (1000000-conf)/2
  1809  		atomic.StoreUint64(&d.rttConfidence, conf)
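        		// With qosTuningImpact = 0.25 this is an exponential moving average: e.g. an old
        		// estimate of 8s and a measured median of 4s yield 0.75*8s + 0.25*4s = 7s. The
        		// confidence update halves the remaining distance to 1.0 (stored scaled by 1000000)
        		// every cycle, so it converges quickly once the peer set stabilises.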
  1810  
  1811  		// Log the new QoS values and sleep until the next RTT
  1812  		log.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
  1813  		select {
  1814  		case <-d.quitCh:
  1815  			return
  1816  		case <-time.After(rtt):
  1817  		}
  1818  	}
  1819  }
  1820  
  1821  // qosReduceConfidence is meant to be called when a new peer joins the downloader's
  1822  // peer set, needing to reduce the confidence we have in our QoS estimates.
  1823  func (d *Downloader) qosReduceConfidence() {
  1824  	// If we have a single peer, confidence is always 1
  1825  	peers := uint64(d.peers.Len())
  1826  	if peers == 0 {
  1827  		// Ensure peer connectivity races don't catch us off guard
  1828  		return
  1829  	}
  1830  	if peers == 1 {
  1831  		atomic.StoreUint64(&d.rttConfidence, 1000000)
  1832  		return
  1833  	}
  1834  	// If we have a ton of peers, don't drop the confidence
  1835  	if peers >= uint64(qosConfidenceCap) {
  1836  		return
  1837  	}
  1838  	// Otherwise drop the confidence factor
  1839  	conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
  1840  	if float64(conf)/1000000 < rttMinConfidence {
  1841  		conf = uint64(rttMinConfidence * 1000000)
  1842  	}
  1843  	atomic.StoreUint64(&d.rttConfidence, conf)
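        	// E.g. with 4 peers and a current confidence of 0.8, the new value is 0.8 * 3/4 = 0.6;
        	// it is floored at rttMinConfidence (0.1), and the qosConfidenceCap check above leaves
        	// it untouched once 10 or more peers are connected.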
  1844  
  1845  	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
  1846  	log.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
  1847  }
  1848  
  1849  // requestRTT returns the current target round trip time for a download request
  1850  // to complete in.
  1851  //
  1852  // Note that the returned RTT is 0.9 of the actual estimated RTT. The reason is that
  1853  // the downloader tries to adapt queries to the RTT, so multiple RTT values can
  1854  // be adapted to, but smaller ones are preferred (for a more stable download stream).
  1855  func (d *Downloader) requestRTT() time.Duration {
  1856  	return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
  1857  }
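        // For instance, an estimated RTT of 10s produces a 9s target, so requests are sized for a
        // slightly tighter turnaround than measured, biasing towards smaller, steadier downloads.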
  1858  
  1859  // requestTTL returns the current timeout allowance for a single download request
  1860  // to finish under.
  1861  func (d *Downloader) requestTTL() time.Duration {
  1862  	var (
  1863  		rtt  = time.Duration(atomic.LoadUint64(&d.rttEstimate))
  1864  		conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
  1865  	)
  1866  	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
  1867  	if ttl > ttlLimit {
  1868  		ttl = ttlLimit
  1869  	}
  1870  	return ttl
  1871  }
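        // As a worked example, an estimated RTT of 4s at 0.5 confidence gives
        // ttl = 3 * 4s / 0.5 = 24s, while the ttlLimit cap keeps any low-confidence estimate
        // from ever exceeding one minute.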