github.com/shrimpyuk/bor@v0.2.15-0.20220224151350-fb4ec6020bae/les/downloader/downloader.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // This is a temporary package whilst working on the eth/66 blocking refactors.
    18  // After that work is done, les needs to be refactored to use the new package,
    19  // or alternatively use a stripped down version of it. Either way, we need to
    20  // keep the changes scoped so duplicating temporarily seems the sanest.
    21  package downloader
    22  
    23  import (
    24  	"errors"
    25  	"fmt"
    26  	"math/big"
    27  	"sync"
    28  	"sync/atomic"
    29  	"time"
    30  
    31  	"github.com/ethereum/go-ethereum"
    32  	"github.com/ethereum/go-ethereum/common"
    33  	"github.com/ethereum/go-ethereum/core/rawdb"
    34  	"github.com/ethereum/go-ethereum/core/state/snapshot"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/eth/protocols/eth"
    37  	"github.com/ethereum/go-ethereum/eth/protocols/snap"
    38  	"github.com/ethereum/go-ethereum/ethdb"
    39  	"github.com/ethereum/go-ethereum/event"
    40  	"github.com/ethereum/go-ethereum/log"
    41  	"github.com/ethereum/go-ethereum/metrics"
    42  	"github.com/ethereum/go-ethereum/params"
    43  	"github.com/ethereum/go-ethereum/trie"
    44  )
    45  
    46  var (
     47  	MaxBlockFetch   = 128 // Number of blocks to be fetched per retrieval request
     48  	MaxHeaderFetch  = 192 // Number of block headers to be fetched per retrieval request
     49  	MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
     50  	MaxReceiptFetch = 256 // Number of transaction receipts to allow fetching per request
     51  	MaxStateFetch   = 384 // Number of node state values to allow fetching per request
    52  
    53  	maxQueuedHeaders            = 32 * 1024                         // [eth/62] Maximum number of headers to queue for import (DOS protection)
    54  	maxHeadersProcess           = 2048                              // Number of header download results to import at once into the chain
    55  	maxResultsProcess           = 2048                              // Number of content download results to import at once into the chain
    56  	fullMaxForkAncestry  uint64 = params.FullImmutabilityThreshold  // Maximum chain reorganisation (locally redeclared so tests can reduce it)
    57  	lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
    58  
    59  	reorgProtThreshold   = 48 // Threshold number of recent blocks to disable mini reorg protection
    60  	reorgProtHeaderDelay = 2  // Number of headers to delay delivering to cover mini reorgs
    61  
    62  	fsHeaderCheckFrequency = 100             // Verification frequency of the downloaded headers during fast sync
    63  	fsHeaderSafetyNet      = 2048            // Number of headers to discard in case a chain violation is detected
    64  	fsHeaderForceVerify    = 24              // Number of headers to verify before and after the pivot to accept it
    65  	fsHeaderContCheck      = 3 * time.Second // Time interval to check for header continuations during state download
    66  	fsMinFullBlocks        = 64              // Number of blocks to retrieve fully even in fast sync
    67  )
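
// Illustration: with the defaults above, a single skeleton round requests
// MaxSkeletonSize joint headers spaced MaxHeaderFetch apart and fills in the
// gaps from other peers, so one round can schedule up to
//
//	MaxSkeletonSize * MaxHeaderFetch = 128 * 192 = 24576
//
// headers before the next skeleton request is needed (see fetchHeaders below).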
    68  
    69  var (
    70  	errBusy                    = errors.New("busy")
    71  	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
    72  	errBadPeer                 = errors.New("action from bad peer ignored")
    73  	errStallingPeer            = errors.New("peer is stalling")
    74  	errUnsyncedPeer            = errors.New("unsynced peer")
    75  	errNoPeers                 = errors.New("no peers to keep download active")
    76  	errTimeout                 = errors.New("timeout")
    77  	errEmptyHeaderSet          = errors.New("empty header set by peer")
    78  	errPeersUnavailable        = errors.New("no peers available or all tried for download")
    79  	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
    80  	errInvalidChain            = errors.New("retrieved hash chain is invalid")
    81  	errInvalidBody             = errors.New("retrieved block body is invalid")
    82  	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
    83  	errCancelStateFetch        = errors.New("state data download canceled (requested)")
    84  	errCancelContentProcessing = errors.New("content processing canceled (requested)")
    85  	errCanceled                = errors.New("syncing canceled (requested)")
    86  	errNoSyncActive            = errors.New("no sync active")
    87  	errTooOld                  = errors.New("peer's protocol version too old")
    88  	errNoAncestorFound         = errors.New("no common ancestor found")
    89  )
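
// Most of these sentinel errors are wrapped with fmt.Errorf("%w: ...") further
// down, so they should be matched with errors.Is rather than compared directly.
// A rough sketch of the pattern used in Synchronise below:
//
//	if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) {
//		d.dropPeer(id) // misbehaving peer, disconnect it
//	}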
    90  
    91  type Downloader struct {
    92  	mode uint32         // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
    93  	mux  *event.TypeMux // Event multiplexer to announce sync operation events
    94  
    95  	checkpoint uint64   // Checkpoint block number to enforce head against (e.g. fast sync)
    96  	genesis    uint64   // Genesis block number to limit sync to (e.g. light client CHT)
    97  	queue      *queue   // Scheduler for selecting the hashes to download
    98  	peers      *peerSet // Set of active peers from which download can proceed
    99  
   100  	stateDB    ethdb.Database  // Database to state sync into (and deduplicate via)
   101  	stateBloom *trie.SyncBloom // Bloom filter for fast trie node and contract code existence checks
   102  
   103  	// Statistics
   104  	syncStatsChainOrigin uint64 // Origin block number where syncing started at
   105  	syncStatsChainHeight uint64 // Highest block number known when syncing started
   106  	syncStatsState       stateSyncStats
   107  	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields
   108  
   109  	lightchain LightChain
   110  	blockchain BlockChain
   111  
   112  	// Callbacks
   113  	dropPeer peerDropFn // Drops a peer for misbehaving
   114  
   115  	// Status
   116  	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
   117  	synchronising   int32
   118  	notified        int32
   119  	committed       int32
   120  	ancientLimit    uint64 // The maximum block number which can be regarded as ancient data.
   121  
   122  	// Channels
   123  	headerCh      chan dataPack        // Channel receiving inbound block headers
   124  	bodyCh        chan dataPack        // Channel receiving inbound block bodies
   125  	receiptCh     chan dataPack        // Channel receiving inbound receipts
   126  	bodyWakeCh    chan bool            // Channel to signal the block body fetcher of new tasks
   127  	receiptWakeCh chan bool            // Channel to signal the receipt fetcher of new tasks
   128  	headerProcCh  chan []*types.Header // Channel to feed the header processor new tasks
   129  
   130  	// State sync
   131  	pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
   132  	pivotLock   sync.RWMutex  // Lock protecting pivot header reads from updates
   133  
   134  	snapSync       bool         // Whether to run state sync over the snap protocol
   135  	SnapSyncer     *snap.Syncer // TODO(karalabe): make private! hack for now
   136  	stateSyncStart chan *stateSync
   137  	trackStateReq  chan *stateReq
   138  	stateCh        chan dataPack // Channel receiving inbound node state data
   139  
   140  	// Cancellation and termination
   141  	cancelPeer string         // Identifier of the peer currently being used as the master (cancel on drop)
   142  	cancelCh   chan struct{}  // Channel to cancel mid-flight syncs
   143  	cancelLock sync.RWMutex   // Lock to protect the cancel channel and peer in delivers
   144  	cancelWg   sync.WaitGroup // Make sure all fetcher goroutines have exited.
   145  
   146  	quitCh   chan struct{} // Quit channel to signal termination
   147  	quitLock sync.Mutex    // Lock to prevent double closes
   148  
   149  	// Testing hooks
   150  	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
   151  	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
   152  	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
   153  	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
   154  }
   155  
   156  // LightChain encapsulates functions required to synchronise a light chain.
   157  type LightChain interface {
   158  	// HasHeader verifies a header's presence in the local chain.
   159  	HasHeader(common.Hash, uint64) bool
   160  
   161  	// GetHeaderByHash retrieves a header from the local chain.
   162  	GetHeaderByHash(common.Hash) *types.Header
   163  
   164  	// CurrentHeader retrieves the head header from the local chain.
   165  	CurrentHeader() *types.Header
   166  
   167  	// GetTd returns the total difficulty of a local block.
   168  	GetTd(common.Hash, uint64) *big.Int
   169  
   170  	// InsertHeaderChain inserts a batch of headers into the local chain.
   171  	InsertHeaderChain([]*types.Header, int) (int, error)
   172  
   173  	// SetHead rewinds the local chain to a new head.
   174  	SetHead(uint64) error
   175  }
   176  
   177  // BlockChain encapsulates functions required to sync a (full or fast) blockchain.
   178  type BlockChain interface {
   179  	LightChain
   180  
   181  	// HasBlock verifies a block's presence in the local chain.
   182  	HasBlock(common.Hash, uint64) bool
   183  
   184  	// HasFastBlock verifies a fast block's presence in the local chain.
   185  	HasFastBlock(common.Hash, uint64) bool
   186  
   187  	// GetBlockByHash retrieves a block from the local chain.
   188  	GetBlockByHash(common.Hash) *types.Block
   189  
   190  	// CurrentBlock retrieves the head block from the local chain.
   191  	CurrentBlock() *types.Block
   192  
   193  	// CurrentFastBlock retrieves the head fast block from the local chain.
   194  	CurrentFastBlock() *types.Block
   195  
    196  	// FastSyncCommitHead directly commits the block identified by the given hash as the new chain head.
   197  	FastSyncCommitHead(common.Hash) error
   198  
   199  	// InsertChain inserts a batch of blocks into the local chain.
   200  	InsertChain(types.Blocks) (int, error)
   201  
   202  	// InsertReceiptChain inserts a batch of receipts into the local chain.
   203  	InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error)
   204  
    205  	// Snapshots returns the blockchain snapshot tree to pause it during sync.
   206  	Snapshots() *snapshot.Tree
   207  }
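
// A consumer wiring the downloader into a full node hands in a chain that
// satisfies BlockChain (light clients only need LightChain above). As a rough
// sketch, assuming the core package were imported, the contract could be
// checked at compile time with:
//
//	var _ BlockChain = (*core.BlockChain)(nil)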
   208  
   209  // New creates a new downloader to fetch hashes and blocks from remote peers.
   210  func New(checkpoint uint64, stateDb ethdb.Database, stateBloom *trie.SyncBloom, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
   211  	if lightchain == nil {
   212  		lightchain = chain
   213  	}
   214  	dl := &Downloader{
   215  		stateDB:        stateDb,
   216  		stateBloom:     stateBloom,
   217  		mux:            mux,
   218  		checkpoint:     checkpoint,
   219  		queue:          newQueue(blockCacheMaxItems, blockCacheInitialItems),
   220  		peers:          newPeerSet(),
   221  		blockchain:     chain,
   222  		lightchain:     lightchain,
   223  		dropPeer:       dropPeer,
   224  		headerCh:       make(chan dataPack, 1),
   225  		bodyCh:         make(chan dataPack, 1),
   226  		receiptCh:      make(chan dataPack, 1),
   227  		bodyWakeCh:     make(chan bool, 1),
   228  		receiptWakeCh:  make(chan bool, 1),
   229  		headerProcCh:   make(chan []*types.Header, 1),
   230  		quitCh:         make(chan struct{}),
   231  		stateCh:        make(chan dataPack),
   232  		SnapSyncer:     snap.NewSyncer(stateDb),
   233  		stateSyncStart: make(chan *stateSync),
   234  		syncStatsState: stateSyncStats{
   235  			processed: rawdb.ReadFastTrieProgress(stateDb),
   236  		},
   237  		trackStateReq: make(chan *stateReq),
   238  	}
   239  	go dl.stateFetcher()
   240  	return dl
   241  }
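
// Rough usage sketch (db, chain and dropPeer are placeholders supplied by the
// caller): a full node would construct and tear down the downloader roughly as
//
//	mux := new(event.TypeMux)
//	dl := New(0, db, nil, mux, chain, nil, dropPeer)
//	defer dl.Terminate()
//
// Passing a nil LightChain makes New fall back to the BlockChain, and a nil
// state bloom simply skips the fast-sync bloom filter.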
   242  
   243  // Progress retrieves the synchronisation boundaries, specifically the origin
    244  // block where synchronisation started (it may have failed or been suspended); the block
    245  // or header the sync is currently at; and the latest known block the sync targets.
   246  //
   247  // In addition, during the state download phase of fast synchronisation the number
   248  // of processed and the total number of known states are also returned. Otherwise
   249  // these are zero.
   250  func (d *Downloader) Progress() ethereum.SyncProgress {
   251  	// Lock the current stats and return the progress
   252  	d.syncStatsLock.RLock()
   253  	defer d.syncStatsLock.RUnlock()
   254  
   255  	current := uint64(0)
   256  	mode := d.getMode()
   257  	switch {
   258  	case d.blockchain != nil && mode == FullSync:
   259  		current = d.blockchain.CurrentBlock().NumberU64()
   260  	case d.blockchain != nil && mode == FastSync:
   261  		current = d.blockchain.CurrentFastBlock().NumberU64()
   262  	case d.lightchain != nil:
   263  		current = d.lightchain.CurrentHeader().Number.Uint64()
   264  	default:
   265  		log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode)
   266  	}
   267  	return ethereum.SyncProgress{
   268  		StartingBlock: d.syncStatsChainOrigin,
   269  		CurrentBlock:  current,
   270  		HighestBlock:  d.syncStatsChainHeight,
   271  		PulledStates:  d.syncStatsState.processed,
   272  		KnownStates:   d.syncStatsState.processed + d.syncStatsState.pending,
   273  	}
   274  }
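
// Progress returns a point-in-time snapshot, so callers typically poll it. A
// minimal sketch (dl is a placeholder for a constructed *Downloader):
//
//	p := dl.Progress()
//	log.Info("Sync progress", "current", p.CurrentBlock, "highest", p.HighestBlock,
//		"pulled", p.PulledStates, "known", p.KnownStates)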
   275  
   276  // Synchronising returns whether the downloader is currently retrieving blocks.
   277  func (d *Downloader) Synchronising() bool {
   278  	return atomic.LoadInt32(&d.synchronising) > 0
   279  }
   280  
    281  // RegisterPeer injects a new download peer into the set of block sources to be
   282  // used for fetching hashes and blocks from.
   283  func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
   284  	var logger log.Logger
   285  	if len(id) < 16 {
   286  		// Tests use short IDs, don't choke on them
   287  		logger = log.New("peer", id)
   288  	} else {
   289  		logger = log.New("peer", id[:8])
   290  	}
   291  	logger.Trace("Registering sync peer")
   292  	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
   293  		logger.Error("Failed to register sync peer", "err", err)
   294  		return err
   295  	}
   296  	return nil
   297  }
   298  
   299  // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
   300  func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error {
   301  	return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
   302  }
   303  
    304  // UnregisterPeer removes a peer from the known list, preventing any action from
   305  // the specified peer. An effort is also made to return any pending fetches into
   306  // the queue.
   307  func (d *Downloader) UnregisterPeer(id string) error {
   308  	// Unregister the peer from the active peer set and revoke any fetch tasks
   309  	var logger log.Logger
   310  	if len(id) < 16 {
   311  		// Tests use short IDs, don't choke on them
   312  		logger = log.New("peer", id)
   313  	} else {
   314  		logger = log.New("peer", id[:8])
   315  	}
   316  	logger.Trace("Unregistering sync peer")
   317  	if err := d.peers.Unregister(id); err != nil {
   318  		logger.Error("Failed to unregister sync peer", "err", err)
   319  		return err
   320  	}
   321  	d.queue.Revoke(id)
   322  
   323  	return nil
   324  }
   325  
    326  // Synchronise tries to sync up our local blockchain with a remote peer, adding
    327  // various sanity checks and wrapping it with various log entries.
   328  func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
   329  	err := d.synchronise(id, head, td, mode)
   330  
   331  	switch err {
   332  	case nil, errBusy, errCanceled:
   333  		return err
   334  	}
   335  	if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) ||
   336  		errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) ||
   337  		errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) {
   338  		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
   339  		if d.dropPeer == nil {
   340  			// The dropPeer method is nil when `--copydb` is used for a local copy.
   341  			// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
   342  			log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
   343  		} else {
   344  			d.dropPeer(id)
   345  		}
   346  		return err
   347  	}
   348  	log.Warn("Synchronisation failed, retrying", "err", err)
   349  	return err
   350  }
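
// A minimal calling sketch (peerID, headHash and td are placeholders that would
// come from the peer's handshake/announcement):
//
//	if err := d.Synchronise(peerID, headHash, td, FullSync); err != nil {
//		log.Debug("Synchronisation attempt failed", "err", err)
//	}
//
// Only nil, errBusy and errCanceled are benign here; peers that caused hard
// failures (bad chain, stalling, timeouts, ...) are dropped inside Synchronise
// via the dropPeer callback.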
   351  
   352  // synchronise will select the peer and use it for synchronising. If an empty string is given
    353  // it will use the best peer possible and synchronise if its TD is higher than our own. If any of the
    354  // checks fail, an error will be returned. This method is synchronous.
   355  func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
   356  	// Mock out the synchronisation if testing
   357  	if d.synchroniseMock != nil {
   358  		return d.synchroniseMock(id, hash)
   359  	}
   360  	// Make sure only one goroutine is ever allowed past this point at once
   361  	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
   362  		return errBusy
   363  	}
   364  	defer atomic.StoreInt32(&d.synchronising, 0)
   365  
   366  	// Post a user notification of the sync (only once per session)
   367  	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
   368  		log.Info("Block synchronisation started")
   369  	}
    370  	// If we are already full syncing, but have a fast-sync bloom filter lying
    371  	// around, make sure it doesn't use memory anymore. This is a special case
   372  	// when the user attempts to fast sync a new empty network.
   373  	if mode == FullSync && d.stateBloom != nil {
   374  		d.stateBloom.Close()
   375  	}
   376  	// If snap sync was requested, create the snap scheduler and switch to fast
   377  	// sync mode. Long term we could drop fast sync or merge the two together,
   378  	// but until snap becomes prevalent, we should support both. TODO(karalabe).
   379  	if mode == SnapSync {
   380  		if !d.snapSync {
    381  			// Snap sync uses the snapshot namespace to store potentially flaky data until
    382  			// sync completely heals and finishes. Pause snapshot maintenance in the
    383  			// meantime to prevent access.
   384  			if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
   385  				snapshots.Disable()
   386  			}
   387  			log.Warn("Enabling snapshot sync prototype")
   388  			d.snapSync = true
   389  		}
   390  		mode = FastSync
   391  	}
   392  	// Reset the queue, peer set and wake channels to clean any internal leftover state
   393  	d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)
   394  	d.peers.Reset()
   395  
   396  	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
   397  		select {
   398  		case <-ch:
   399  		default:
   400  		}
   401  	}
   402  	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
   403  		for empty := false; !empty; {
   404  			select {
   405  			case <-ch:
   406  			default:
   407  				empty = true
   408  			}
   409  		}
   410  	}
   411  	for empty := false; !empty; {
   412  		select {
   413  		case <-d.headerProcCh:
   414  		default:
   415  			empty = true
   416  		}
   417  	}
   418  	// Create cancel channel for aborting mid-flight and mark the master peer
   419  	d.cancelLock.Lock()
   420  	d.cancelCh = make(chan struct{})
   421  	d.cancelPeer = id
   422  	d.cancelLock.Unlock()
   423  
   424  	defer d.Cancel() // No matter what, we can't leave the cancel channel open
   425  
   426  	// Atomically set the requested sync mode
   427  	atomic.StoreUint32(&d.mode, uint32(mode))
   428  
   429  	// Retrieve the origin peer and initiate the downloading process
   430  	p := d.peers.Peer(id)
   431  	if p == nil {
   432  		return errUnknownPeer
   433  	}
   434  	return d.syncWithPeer(p, hash, td)
   435  }
   436  
   437  func (d *Downloader) getMode() SyncMode {
   438  	return SyncMode(atomic.LoadUint32(&d.mode))
   439  }
   440  
   441  // syncWithPeer starts a block synchronization based on the hash chain from the
   442  // specified peer and head hash.
   443  func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
   444  	d.mux.Post(StartEvent{})
   445  	defer func() {
   446  		// reset on error
   447  		if err != nil {
   448  			d.mux.Post(FailedEvent{err})
   449  		} else {
   450  			latest := d.lightchain.CurrentHeader()
   451  			d.mux.Post(DoneEvent{latest})
   452  		}
   453  	}()
   454  	if p.version < eth.ETH66 {
   455  		return fmt.Errorf("%w: advertized %d < required %d", errTooOld, p.version, eth.ETH66)
   456  	}
   457  	mode := d.getMode()
   458  
   459  	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode)
   460  	defer func(start time.Time) {
   461  		log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
   462  	}(time.Now())
   463  
   464  	// Look up the sync boundaries: the common ancestor and the target block
   465  	latest, pivot, err := d.fetchHead(p)
   466  	if err != nil {
   467  		return err
   468  	}
   469  	if mode == FastSync && pivot == nil {
   470  		// If no pivot block was returned, the head is below the min full block
   471  		// threshold (i.e. new chain). In that case we won't really fast sync
   472  		// anyway, but still need a valid pivot block to avoid some code hitting
   473  		// nil panics on an access.
   474  		pivot = d.blockchain.CurrentBlock().Header()
   475  	}
   476  	height := latest.Number.Uint64()
   477  
   478  	origin, err := d.findAncestor(p, latest)
   479  	if err != nil {
   480  		return err
   481  	}
   482  	d.syncStatsLock.Lock()
   483  	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
   484  		d.syncStatsChainOrigin = origin
   485  	}
   486  	d.syncStatsChainHeight = height
   487  	d.syncStatsLock.Unlock()
   488  
   489  	// Ensure our origin point is below any fast sync pivot point
   490  	if mode == FastSync {
   491  		if height <= uint64(fsMinFullBlocks) {
   492  			origin = 0
   493  		} else {
   494  			pivotNumber := pivot.Number.Uint64()
   495  			if pivotNumber <= origin {
   496  				origin = pivotNumber - 1
   497  			}
   498  			// Write out the pivot into the database so a rollback beyond it will
   499  			// reenable fast sync
   500  			rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber)
   501  		}
   502  	}
   503  	d.committed = 1
   504  	if mode == FastSync && pivot.Number.Uint64() != 0 {
   505  		d.committed = 0
   506  	}
   507  	if mode == FastSync {
   508  		// Set the ancient data limitation.
   509  		// If we are running fast sync, all block data older than ancientLimit will be
   510  		// written to the ancient store. More recent data will be written to the active
   511  		// database and will wait for the freezer to migrate.
   512  		//
   513  		// If there is a checkpoint available, then calculate the ancientLimit through
   514  		// that. Otherwise calculate the ancient limit through the advertised height
   515  		// of the remote peer.
   516  		//
    517  		// The reason for picking the checkpoint first is that a malicious peer can give us
    518  		// a fake (very high) height, forcing the ancient limit to also be very high.
    519  		// The peer would then feed us valid blocks up to its head, so that all of the
    520  		// blocks might end up written into the ancient store. A following mini-reorg
    521  		// could then cause issues.
   522  		if d.checkpoint != 0 && d.checkpoint > fullMaxForkAncestry+1 {
   523  			d.ancientLimit = d.checkpoint
   524  		} else if height > fullMaxForkAncestry+1 {
   525  			d.ancientLimit = height - fullMaxForkAncestry - 1
   526  		} else {
   527  			d.ancientLimit = 0
   528  		}
    529  		frozen, _ := d.stateDB.Ancients() // Ignore the error here since a light client can also reach this point.
   530  
    531  		// If part of the blockchain data has already been written into the active
    532  		// store, disable the ancient-style insertion explicitly.
   533  		if origin >= frozen && frozen != 0 {
   534  			d.ancientLimit = 0
   535  			log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1)
   536  		} else if d.ancientLimit > 0 {
   537  			log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit)
   538  		}
    539  		// Rewind the ancient store and blockchain if a reorg happens.
   540  		if origin+1 < frozen {
   541  			if err := d.lightchain.SetHead(origin + 1); err != nil {
   542  				return err
   543  			}
   544  		}
   545  	}
   546  	// Initiate the sync using a concurrent header and content retrieval algorithm
   547  	d.queue.Prepare(origin+1, mode)
   548  	if d.syncInitHook != nil {
   549  		d.syncInitHook(origin, height)
   550  	}
   551  	fetchers := []func() error{
   552  		func() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved
   553  		func() error { return d.fetchBodies(origin + 1) },   // Bodies are retrieved during normal and fast sync
   554  		func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync
   555  		func() error { return d.processHeaders(origin+1, td) },
   556  	}
   557  	if mode == FastSync {
   558  		d.pivotLock.Lock()
   559  		d.pivotHeader = pivot
   560  		d.pivotLock.Unlock()
   561  
   562  		fetchers = append(fetchers, func() error { return d.processFastSyncContent() })
   563  	} else if mode == FullSync {
   564  		fetchers = append(fetchers, d.processFullSyncContent)
   565  	}
   566  	return d.spawnSync(fetchers)
   567  }
   568  
   569  // spawnSync runs d.process and all given fetcher functions to completion in
   570  // separate goroutines, returning the first error that appears.
   571  func (d *Downloader) spawnSync(fetchers []func() error) error {
   572  	errc := make(chan error, len(fetchers))
   573  	d.cancelWg.Add(len(fetchers))
   574  	for _, fn := range fetchers {
    575  		fn := fn // capture the loop variable for the goroutine closure below
   576  		go func() { defer d.cancelWg.Done(); errc <- fn() }()
   577  	}
   578  	// Wait for the first error, then terminate the others.
   579  	var err error
   580  	for i := 0; i < len(fetchers); i++ {
   581  		if i == len(fetchers)-1 {
   582  			// Close the queue when all fetchers have exited.
   583  			// This will cause the block processor to end when
   584  			// it has processed the queue.
   585  			d.queue.Close()
   586  		}
   587  		if err = <-errc; err != nil && err != errCanceled {
   588  			break
   589  		}
   590  	}
   591  	d.queue.Close()
   592  	d.Cancel()
   593  	return err
   594  }
   595  
   596  // cancel aborts all of the operations and resets the queue. However, cancel does
   597  // not wait for the running download goroutines to finish. This method should be
   598  // used when cancelling the downloads from inside the downloader.
   599  func (d *Downloader) cancel() {
   600  	// Close the current cancel channel
   601  	d.cancelLock.Lock()
   602  	defer d.cancelLock.Unlock()
   603  
   604  	if d.cancelCh != nil {
   605  		select {
   606  		case <-d.cancelCh:
   607  			// Channel was already closed
   608  		default:
   609  			close(d.cancelCh)
   610  		}
   611  	}
   612  }
   613  
   614  // Cancel aborts all of the operations and waits for all download goroutines to
   615  // finish before returning.
   616  func (d *Downloader) Cancel() {
   617  	d.cancel()
   618  	d.cancelWg.Wait()
   619  }
   620  
   621  // Terminate interrupts the downloader, canceling all pending operations.
   622  // The downloader cannot be reused after calling Terminate.
   623  func (d *Downloader) Terminate() {
   624  	// Close the termination channel (make sure double close is allowed)
   625  	d.quitLock.Lock()
   626  	select {
   627  	case <-d.quitCh:
   628  	default:
   629  		close(d.quitCh)
   630  	}
   631  	if d.stateBloom != nil {
   632  		d.stateBloom.Close()
   633  	}
   634  	d.quitLock.Unlock()
   635  
   636  	// Cancel any pending download requests
   637  	d.Cancel()
   638  }
   639  
   640  // fetchHead retrieves the head header and prior pivot block (if available) from
   641  // a remote peer.
   642  func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) {
   643  	p.log.Debug("Retrieving remote chain head")
   644  	mode := d.getMode()
   645  
   646  	// Request the advertised remote head block and wait for the response
   647  	latest, _ := p.peer.Head()
   648  	fetch := 1
   649  	if mode == FastSync {
   650  		fetch = 2 // head + pivot headers
   651  	}
   652  	go p.peer.RequestHeadersByHash(latest, fetch, fsMinFullBlocks-1, true)
   653  
   654  	ttl := d.peers.rates.TargetTimeout()
   655  	timeout := time.After(ttl)
   656  	for {
   657  		select {
   658  		case <-d.cancelCh:
   659  			return nil, nil, errCanceled
   660  
   661  		case packet := <-d.headerCh:
   662  			// Discard anything not from the origin peer
   663  			if packet.PeerId() != p.id {
   664  				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
   665  				break
   666  			}
   667  			// Make sure the peer gave us at least one and at most the requested headers
   668  			headers := packet.(*headerPack).headers
   669  			if len(headers) == 0 || len(headers) > fetch {
   670  				return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch)
   671  			}
    672  			// The first header needs to be the head; validate it against the checkpoint
    673  			// and the request. If only 1 header was returned, make sure a pivot was
    674  			// either not requested or not actually needed.
   675  			head := headers[0]
   676  			if (mode == FastSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint {
   677  				return nil, nil, fmt.Errorf("%w: remote head %d below checkpoint %d", errUnsyncedPeer, head.Number, d.checkpoint)
   678  			}
   679  			if len(headers) == 1 {
   680  				if mode == FastSync && head.Number.Uint64() > uint64(fsMinFullBlocks) {
   681  					return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer)
   682  				}
   683  				p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", head.Hash())
   684  				return head, nil, nil
   685  			}
   686  			// At this point we have 2 headers in total and the first is the
    687  			// validated head of the chain. Check the pivot number and return.
   688  			pivot := headers[1]
   689  			if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) {
   690  				return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks))
   691  			}
   692  			return head, pivot, nil
   693  
   694  		case <-timeout:
   695  			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
   696  			return nil, nil, errTimeout
   697  
   698  		case <-d.bodyCh:
   699  		case <-d.receiptCh:
   700  			// Out of bounds delivery, ignore
   701  		}
   702  	}
   703  }
   704  
   705  // calculateRequestSpan calculates what headers to request from a peer when trying to determine the
   706  // common ancestor.
   707  // It returns parameters to be used for peer.RequestHeadersByNumber:
   708  //  from - starting block number
   709  //  count - number of headers to request
   710  //  skip - number of headers to skip
    711  // and also returns 'max', the last block which is expected to be returned by the remote peer,
    712  // given the (from, count, skip).
   713  func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
   714  	var (
   715  		from     int
   716  		count    int
   717  		MaxCount = MaxHeaderFetch / 16
   718  	)
   719  	// requestHead is the highest block that we will ask for. If requestHead is not offset,
   720  	// the highest block that we will get is 16 blocks back from head, which means we
    721  	// will fetch 14 or 15 blocks unnecessarily in case the height difference
    722  	// between us and the peer is 1-2 blocks, which is the most common case.
   723  	requestHead := int(remoteHeight) - 1
   724  	if requestHead < 0 {
   725  		requestHead = 0
   726  	}
   727  	// requestBottom is the lowest block we want included in the query
   728  	// Ideally, we want to include the one just below our own head
   729  	requestBottom := int(localHeight - 1)
   730  	if requestBottom < 0 {
   731  		requestBottom = 0
   732  	}
   733  	totalSpan := requestHead - requestBottom
   734  	span := 1 + totalSpan/MaxCount
   735  	if span < 2 {
   736  		span = 2
   737  	}
   738  	if span > 16 {
   739  		span = 16
   740  	}
   741  
   742  	count = 1 + totalSpan/span
   743  	if count > MaxCount {
   744  		count = MaxCount
   745  	}
   746  	if count < 2 {
   747  		count = 2
   748  	}
   749  	from = requestHead - (count-1)*span
   750  	if from < 0 {
   751  		from = 0
   752  	}
   753  	max := from + (count-1)*span
   754  	return int64(from), count, span - 1, uint64(max)
   755  }
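
// Worked example: for remoteHeight=1600 and localHeight=1500 the defaults give
// MaxCount = 192/16 = 12, requestHead = 1599, requestBottom = 1499,
// totalSpan = 100, span = 9 and count = 12, so the function returns
//
//	from=1500, count=12, skip=8, max=1599
//
// i.e. the peer is asked for headers 1500, 1509, ..., 1599, and any of those we
// already have locally is a candidate common ancestor.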
   756  
   757  // findAncestor tries to locate the common ancestor link of the local chain and
    758  // a remote peer's blockchain. In the general case when our node was in sync and
   759  // on the correct chain, checking the top N links should already get us a match.
   760  // In the rare scenario when we ended up on a long reorganisation (i.e. none of
   761  // the head links match), we do a binary search to find the common ancestor.
   762  func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
   763  	// Figure out the valid ancestor range to prevent rewrite attacks
   764  	var (
   765  		floor        = int64(-1)
   766  		localHeight  uint64
   767  		remoteHeight = remoteHeader.Number.Uint64()
   768  	)
   769  	mode := d.getMode()
   770  	switch mode {
   771  	case FullSync:
   772  		localHeight = d.blockchain.CurrentBlock().NumberU64()
   773  	case FastSync:
   774  		localHeight = d.blockchain.CurrentFastBlock().NumberU64()
   775  	default:
   776  		localHeight = d.lightchain.CurrentHeader().Number.Uint64()
   777  	}
   778  	p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
   779  
    780  	// Determine the floor value for the binary search
   781  	maxForkAncestry := fullMaxForkAncestry
   782  	if d.getMode() == LightSync {
   783  		maxForkAncestry = lightMaxForkAncestry
   784  	}
   785  	if localHeight >= maxForkAncestry {
   786  		// We're above the max reorg threshold, find the earliest fork point
   787  		floor = int64(localHeight - maxForkAncestry)
   788  	}
   789  	// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
   790  	// all headers before that point will be missing.
   791  	if mode == LightSync {
   792  		// If we don't know the current CHT position, find it
   793  		if d.genesis == 0 {
   794  			header := d.lightchain.CurrentHeader()
   795  			for header != nil {
   796  				d.genesis = header.Number.Uint64()
   797  				if floor >= int64(d.genesis)-1 {
   798  					break
   799  				}
   800  				header = d.lightchain.GetHeaderByHash(header.ParentHash)
   801  			}
   802  		}
   803  		// We already know the "genesis" block number, cap floor to that
   804  		if floor < int64(d.genesis)-1 {
   805  			floor = int64(d.genesis) - 1
   806  		}
   807  	}
   808  
   809  	ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor)
   810  	if err == nil {
   811  		return ancestor, nil
   812  	}
   813  	// The returned error was not nil.
   814  	// If the error returned does not reflect that a common ancestor was not found, return it.
   815  	// If the error reflects that a common ancestor was not found, continue to binary search,
   816  	// where the error value will be reassigned.
   817  	if !errors.Is(err, errNoAncestorFound) {
   818  		return 0, err
   819  	}
   820  
   821  	ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor)
   822  	if err != nil {
   823  		return 0, err
   824  	}
   825  	return ancestor, nil
   826  }
   827  
   828  func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (commonAncestor uint64, err error) {
   829  	from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
   830  
   831  	p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
   832  	go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)
   833  
   834  	// Wait for the remote response to the head fetch
   835  	number, hash := uint64(0), common.Hash{}
   836  
   837  	ttl := d.peers.rates.TargetTimeout()
   838  	timeout := time.After(ttl)
   839  
   840  	for finished := false; !finished; {
   841  		select {
   842  		case <-d.cancelCh:
   843  			return 0, errCanceled
   844  
   845  		case packet := <-d.headerCh:
   846  			// Discard anything not from the origin peer
   847  			if packet.PeerId() != p.id {
   848  				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
   849  				break
   850  			}
   851  			// Make sure the peer actually gave something valid
   852  			headers := packet.(*headerPack).headers
   853  			if len(headers) == 0 {
   854  				p.log.Warn("Empty head header set")
   855  				return 0, errEmptyHeaderSet
   856  			}
   857  			// Make sure the peer's reply conforms to the request
   858  			for i, header := range headers {
   859  				expectNumber := from + int64(i)*int64(skip+1)
   860  				if number := header.Number.Int64(); number != expectNumber {
   861  					p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
   862  					return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering"))
   863  				}
   864  			}
   865  			// Check if a common ancestor was found
   866  			finished = true
   867  			for i := len(headers) - 1; i >= 0; i-- {
   868  				// Skip any headers that underflow/overflow our requested set
   869  				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
   870  					continue
   871  				}
   872  				// Otherwise check if we already know the header or not
   873  				h := headers[i].Hash()
   874  				n := headers[i].Number.Uint64()
   875  
   876  				var known bool
   877  				switch mode {
   878  				case FullSync:
   879  					known = d.blockchain.HasBlock(h, n)
   880  				case FastSync:
   881  					known = d.blockchain.HasFastBlock(h, n)
   882  				default:
   883  					known = d.lightchain.HasHeader(h, n)
   884  				}
   885  				if known {
   886  					number, hash = n, h
   887  					break
   888  				}
   889  			}
   890  
   891  		case <-timeout:
   892  			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
   893  			return 0, errTimeout
   894  
   895  		case <-d.bodyCh:
   896  		case <-d.receiptCh:
   897  			// Out of bounds delivery, ignore
   898  		}
   899  	}
   900  	// If the head fetch already found an ancestor, return
   901  	if hash != (common.Hash{}) {
   902  		if int64(number) <= floor {
   903  			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
   904  			return 0, errInvalidAncestor
   905  		}
   906  		p.log.Debug("Found common ancestor", "number", number, "hash", hash)
   907  		return number, nil
   908  	}
   909  	return 0, errNoAncestorFound
   910  }
   911  
   912  func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (commonAncestor uint64, err error) {
   913  	hash := common.Hash{}
   914  
   915  	// Ancestor not found, we need to binary search over our chain
   916  	start, end := uint64(0), remoteHeight
   917  	if floor > 0 {
   918  		start = uint64(floor)
   919  	}
   920  	p.log.Trace("Binary searching for common ancestor", "start", start, "end", end)
   921  
   922  	for start+1 < end {
   923  		// Split our chain interval in two, and request the hash to cross check
   924  		check := (start + end) / 2
   925  
   926  		ttl := d.peers.rates.TargetTimeout()
   927  		timeout := time.After(ttl)
   928  
   929  		go p.peer.RequestHeadersByNumber(check, 1, 0, false)
   930  
   931  		// Wait until a reply arrives to this request
   932  		for arrived := false; !arrived; {
   933  			select {
   934  			case <-d.cancelCh:
   935  				return 0, errCanceled
   936  
   937  			case packet := <-d.headerCh:
   938  				// Discard anything not from the origin peer
   939  				if packet.PeerId() != p.id {
   940  					log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
   941  					break
   942  				}
   943  				// Make sure the peer actually gave something valid
   944  				headers := packet.(*headerPack).headers
   945  				if len(headers) != 1 {
   946  					p.log.Warn("Multiple headers for single request", "headers", len(headers))
   947  					return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers))
   948  				}
   949  				arrived = true
   950  
   951  				// Modify the search interval based on the response
   952  				h := headers[0].Hash()
   953  				n := headers[0].Number.Uint64()
   954  
   955  				var known bool
   956  				switch mode {
   957  				case FullSync:
   958  					known = d.blockchain.HasBlock(h, n)
   959  				case FastSync:
   960  					known = d.blockchain.HasFastBlock(h, n)
   961  				default:
   962  					known = d.lightchain.HasHeader(h, n)
   963  				}
   964  				if !known {
   965  					end = check
   966  					break
   967  				}
   968  				header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
   969  				if header.Number.Uint64() != check {
   970  					p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
   971  					return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number)
   972  				}
   973  				start = check
   974  				hash = h
   975  
   976  			case <-timeout:
   977  				p.log.Debug("Waiting for search header timed out", "elapsed", ttl)
   978  				return 0, errTimeout
   979  
   980  			case <-d.bodyCh:
   981  			case <-d.receiptCh:
   982  				// Out of bounds delivery, ignore
   983  			}
   984  		}
   985  	}
   986  	// Ensure valid ancestry and return
   987  	if int64(start) <= floor {
   988  		p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
   989  		return 0, errInvalidAncestor
   990  	}
   991  	p.log.Debug("Found common ancestor", "number", start, "hash", hash)
   992  	return start, nil
   993  }
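
// Note: the binary search above halves the [start, end] interval on every reply,
// so a window of N blocks needs only about log2(N) single-header requests, e.g.
// about 17 requests for a 100,000-block window.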
   994  
   995  // fetchHeaders keeps retrieving headers concurrently from the number
   996  // requested, until no more are returned, potentially throttling on the way. To
   997  // facilitate concurrency but still protect against malicious nodes sending bad
   998  // headers, we construct a header chain skeleton using the "origin" peer we are
   999  // syncing with, and fill in the missing headers using anyone else. Headers from
  1000  // other peers are only accepted if they map cleanly to the skeleton. If no one
  1001  // can fill in the skeleton - not even the origin peer - it's assumed invalid and
  1002  // the origin is dropped.
  1003  func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error {
  1004  	p.log.Debug("Directing header downloads", "origin", from)
  1005  	defer p.log.Debug("Header download terminated")
  1006  
  1007  	// Create a timeout timer, and the associated header fetcher
  1008  	skeleton := true            // Skeleton assembly phase or finishing up
  1009  	pivoting := false           // Whether the next request is pivot verification
  1010  	request := time.Now()       // time of the last skeleton fetch request
  1011  	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
  1012  	<-timeout.C                 // timeout channel should be initially empty
  1013  	defer timeout.Stop()
  1014  
  1015  	var ttl time.Duration
  1016  	getHeaders := func(from uint64) {
  1017  		request = time.Now()
  1018  
  1019  		ttl = d.peers.rates.TargetTimeout()
  1020  		timeout.Reset(ttl)
  1021  
  1022  		if skeleton {
  1023  			p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
  1024  			go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
  1025  		} else {
  1026  			p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
  1027  			go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
  1028  		}
  1029  	}
  1030  	getNextPivot := func() {
  1031  		pivoting = true
  1032  		request = time.Now()
  1033  
  1034  		ttl = d.peers.rates.TargetTimeout()
  1035  		timeout.Reset(ttl)
  1036  
  1037  		d.pivotLock.RLock()
  1038  		pivot := d.pivotHeader.Number.Uint64()
  1039  		d.pivotLock.RUnlock()
  1040  
  1041  		p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks))
  1042  		go p.peer.RequestHeadersByNumber(pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep
  1043  	}
  1044  	// Start pulling the header chain skeleton until all is done
  1045  	ancestor := from
  1046  	getHeaders(from)
  1047  
  1048  	mode := d.getMode()
  1049  	for {
  1050  		select {
  1051  		case <-d.cancelCh:
  1052  			return errCanceled
  1053  
  1054  		case packet := <-d.headerCh:
  1055  			// Make sure the active peer is giving us the skeleton headers
  1056  			if packet.PeerId() != p.id {
  1057  				log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
  1058  				break
  1059  			}
  1060  			headerReqTimer.UpdateSince(request)
  1061  			timeout.Stop()
  1062  
   1063  			// If the pivot is being checked, move it if it became stale and run the real retrieval
  1064  			var pivot uint64
  1065  
  1066  			d.pivotLock.RLock()
  1067  			if d.pivotHeader != nil {
  1068  				pivot = d.pivotHeader.Number.Uint64()
  1069  			}
  1070  			d.pivotLock.RUnlock()
  1071  
  1072  			if pivoting {
  1073  				if packet.Items() == 2 {
  1074  					// Retrieve the headers and do some sanity checks, just in case
  1075  					headers := packet.(*headerPack).headers
  1076  
  1077  					if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want {
  1078  						log.Warn("Peer sent invalid next pivot", "have", have, "want", want)
  1079  						return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want)
  1080  					}
  1081  					if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want {
  1082  						log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want)
  1083  						return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want)
  1084  					}
  1085  					log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number)
  1086  					pivot = headers[0].Number.Uint64()
  1087  
  1088  					d.pivotLock.Lock()
  1089  					d.pivotHeader = headers[0]
  1090  					d.pivotLock.Unlock()
  1091  
  1092  					// Write out the pivot into the database so a rollback beyond
  1093  					// it will reenable fast sync and update the state root that
  1094  					// the state syncer will be downloading.
  1095  					rawdb.WriteLastPivotNumber(d.stateDB, pivot)
  1096  				}
  1097  				pivoting = false
  1098  				getHeaders(from)
  1099  				continue
  1100  			}
  1101  			// If the skeleton's finished, pull any remaining head headers directly from the origin
  1102  			if skeleton && packet.Items() == 0 {
  1103  				skeleton = false
  1104  				getHeaders(from)
  1105  				continue
  1106  			}
  1107  			// If no more headers are inbound, notify the content fetchers and return
  1108  			if packet.Items() == 0 {
  1109  				// Don't abort header fetches while the pivot is downloading
  1110  				if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
  1111  					p.log.Debug("No headers, waiting for pivot commit")
  1112  					select {
  1113  					case <-time.After(fsHeaderContCheck):
  1114  						getHeaders(from)
  1115  						continue
  1116  					case <-d.cancelCh:
  1117  						return errCanceled
  1118  					}
  1119  				}
  1120  				// Pivot done (or not in fast sync) and no more headers, terminate the process
  1121  				p.log.Debug("No more headers available")
  1122  				select {
  1123  				case d.headerProcCh <- nil:
  1124  					return nil
  1125  				case <-d.cancelCh:
  1126  					return errCanceled
  1127  				}
  1128  			}
  1129  			headers := packet.(*headerPack).headers
  1130  
  1131  			// If we received a skeleton batch, resolve internals concurrently
  1132  			if skeleton {
  1133  				filled, proced, err := d.fillHeaderSkeleton(from, headers)
  1134  				if err != nil {
  1135  					p.log.Debug("Skeleton chain invalid", "err", err)
  1136  					return fmt.Errorf("%w: %v", errInvalidChain, err)
  1137  				}
  1138  				headers = filled[proced:]
  1139  				from += uint64(proced)
  1140  			} else {
  1141  				// If we're closing in on the chain head, but haven't yet reached it, delay
  1142  				// the last few headers so mini reorgs on the head don't cause invalid hash
  1143  				// chain errors.
  1144  				if n := len(headers); n > 0 {
  1145  					// Retrieve the current head we're at
  1146  					var head uint64
  1147  					if mode == LightSync {
  1148  						head = d.lightchain.CurrentHeader().Number.Uint64()
  1149  					} else {
  1150  						head = d.blockchain.CurrentFastBlock().NumberU64()
  1151  						if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
  1152  							head = full
  1153  						}
  1154  					}
  1155  					// If the head is below the common ancestor, we're actually deduplicating
  1156  					// already existing chain segments, so use the ancestor as the fake head.
  1157  					// Otherwise we might end up delaying header deliveries pointlessly.
  1158  					if head < ancestor {
  1159  						head = ancestor
  1160  					}
  1161  					// If the head is way older than this batch, delay the last few headers
  1162  					if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
  1163  						delay := reorgProtHeaderDelay
  1164  						if delay > n {
  1165  							delay = n
  1166  						}
  1167  						headers = headers[:n-delay]
  1168  					}
  1169  				}
  1170  			}
  1171  			// Insert all the new headers and fetch the next batch
  1172  			if len(headers) > 0 {
  1173  				p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
  1174  				select {
  1175  				case d.headerProcCh <- headers:
  1176  				case <-d.cancelCh:
  1177  					return errCanceled
  1178  				}
  1179  				from += uint64(len(headers))
  1180  
   1181  				// If we're still skeleton-filling during fast sync, check pivot staleness
   1182  				// before continuing with the next skeleton fill
  1183  				if skeleton && pivot > 0 {
  1184  					getNextPivot()
  1185  				} else {
  1186  					getHeaders(from)
  1187  				}
  1188  			} else {
   1189  				// No headers delivered, or all of them were delayed; sleep a bit and retry
  1190  				p.log.Trace("All headers delayed, waiting")
  1191  				select {
  1192  				case <-time.After(fsHeaderContCheck):
  1193  					getHeaders(from)
  1194  					continue
  1195  				case <-d.cancelCh:
  1196  					return errCanceled
  1197  				}
  1198  			}
  1199  
  1200  		case <-timeout.C:
  1201  			if d.dropPeer == nil {
  1202  				// The dropPeer method is nil when `--copydb` is used for a local copy.
  1203  				// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
  1204  				p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id)
  1205  				break
  1206  			}
  1207  			// Header retrieval timed out, consider the peer bad and drop
  1208  			p.log.Debug("Header request timed out", "elapsed", ttl)
  1209  			headerTimeoutMeter.Mark(1)
  1210  			d.dropPeer(p.id)
  1211  
  1212  			// Finish the sync gracefully instead of dumping the gathered data though
  1213  			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
  1214  				select {
  1215  				case ch <- false:
  1216  				case <-d.cancelCh:
  1217  				}
  1218  			}
  1219  			select {
  1220  			case d.headerProcCh <- nil:
  1221  			case <-d.cancelCh:
  1222  			}
  1223  			return fmt.Errorf("%w: header request timed out", errBadPeer)
  1224  		}
  1225  	}
  1226  }
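
// Note on the reorg protection above: the last reorgProtHeaderDelay (2) headers
// of a batch are only withheld while the local head is more than
// reorgProtThreshold (48) blocks behind the delivered range; since `from` only
// advances by the number of headers actually scheduled, the withheld headers are
// simply re-requested in the next round.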
  1227  
  1228  // fillHeaderSkeleton concurrently retrieves headers from all our available peers
  1229  // and maps them to the provided skeleton header chain.
  1230  //
   1231  // Any partial results from the beginning of the skeleton are (if possible) forwarded
  1232  // immediately to the header processor to keep the rest of the pipeline full even
  1233  // in the case of header stalls.
  1234  //
  1235  // The method returns the entire filled skeleton and also the number of headers
  1236  // already forwarded for processing.
  1237  func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
  1238  	log.Debug("Filling up skeleton", "from", from)
  1239  	d.queue.ScheduleSkeleton(from, skeleton)
  1240  
  1241  	var (
  1242  		deliver = func(packet dataPack) (int, error) {
  1243  			pack := packet.(*headerPack)
  1244  			return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)
  1245  		}
  1246  		expire  = func() map[string]int { return d.queue.ExpireHeaders(d.peers.rates.TargetTimeout()) }
  1247  		reserve = func(p *peerConnection, count int) (*fetchRequest, bool, bool) {
  1248  			return d.queue.ReserveHeaders(p, count), false, false
  1249  		}
  1250  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
  1251  		capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.peers.rates.TargetRoundTrip()) }
  1252  		setIdle  = func(p *peerConnection, accepted int, deliveryTime time.Time) {
  1253  			p.SetHeadersIdle(accepted, deliveryTime)
  1254  		}
  1255  	)
  1256  	err := d.fetchParts(d.headerCh, deliver, d.queue.headerContCh, expire,
  1257  		d.queue.PendingHeaders, d.queue.InFlightHeaders, reserve,
  1258  		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")
  1259  
  1260  	log.Debug("Skeleton fill terminated", "err", err)
  1261  
  1262  	filled, proced := d.queue.RetrieveHeaders()
  1263  	return filled, proced, err
  1264  }
  1265  
  1266  // fetchBodies iteratively downloads the scheduled block bodies, taking any
  1267  // available peers, reserving a chunk of blocks for each, waiting for delivery
  1268  // and also periodically checking for timeouts.
  1269  func (d *Downloader) fetchBodies(from uint64) error {
  1270  	log.Debug("Downloading block bodies", "origin", from)
  1271  
  1272  	var (
  1273  		deliver = func(packet dataPack) (int, error) {
  1274  			pack := packet.(*bodyPack)
  1275  			return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles)
  1276  		}
  1277  		expire   = func() map[string]int { return d.queue.ExpireBodies(d.peers.rates.TargetTimeout()) }
  1278  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
  1279  		capacity = func(p *peerConnection) int { return p.BlockCapacity(d.peers.rates.TargetRoundTrip()) }
  1280  		setIdle  = func(p *peerConnection, accepted int, deliveryTime time.Time) { p.SetBodiesIdle(accepted, deliveryTime) }
  1281  	)
  1282  	err := d.fetchParts(d.bodyCh, deliver, d.bodyWakeCh, expire,
  1283  		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ReserveBodies,
  1284  		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")
  1285  
  1286  	log.Debug("Block body download terminated", "err", err)
  1287  	return err
  1288  }
  1289  
  1290  // fetchReceipts iteratively downloads the scheduled block receipts, taking any
  1291  // available peers, reserving a chunk of receipts for each, waiting for delivery
  1292  // and also periodically checking for timeouts.
  1293  func (d *Downloader) fetchReceipts(from uint64) error {
  1294  	log.Debug("Downloading transaction receipts", "origin", from)
  1295  
  1296  	var (
  1297  		deliver = func(packet dataPack) (int, error) {
  1298  			pack := packet.(*receiptPack)
  1299  			return d.queue.DeliverReceipts(pack.peerID, pack.receipts)
  1300  		}
  1301  		expire   = func() map[string]int { return d.queue.ExpireReceipts(d.peers.rates.TargetTimeout()) }
  1302  		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
  1303  		capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.peers.rates.TargetRoundTrip()) }
  1304  		setIdle  = func(p *peerConnection, accepted int, deliveryTime time.Time) {
  1305  			p.SetReceiptsIdle(accepted, deliveryTime)
  1306  		}
  1307  	)
  1308  	err := d.fetchParts(d.receiptCh, deliver, d.receiptWakeCh, expire,
  1309  		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ReserveReceipts,
  1310  		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")
  1311  
  1312  	log.Debug("Transaction receipt download terminated", "err", err)
  1313  	return err
  1314  }
  1315  
  1316  // fetchParts iteratively downloads scheduled block parts, taking any available
  1317  // peers, reserving a chunk of fetch requests for each, waiting for delivery and
  1318  // also periodically checking for timeouts.
  1319  //
  1320  // As the scheduling/timeout logic is mostly the same for all downloaded data
  1321  // types, this method is used by each for data gathering and is instrumented with
  1322  // various callbacks to handle the slight differences between processing them.
  1323  //
  1324  // The instrumentation parameters:
  1326  //  - deliveryCh:  channel from which to retrieve downloaded data packets (merged from all concurrent peers)
  1327  //  - deliver:     processing callback to deliver data packets into type specific download queues (usually within `queue`)
  1328  //  - wakeCh:      notification channel for waking the fetcher when new tasks are available (or sync completed)
  1329  //  - expire:      task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
  1330  //  - pending:     task callback for the number of requests still needing download (detect completion/non-completability)
  1331  //  - inFlight:    task callback for the number of in-progress requests (wait for all active downloads to finish)
  1333  //  - reserve:     task callback to reserve new download tasks to a particular peer (also signals partial completions and throttling)
  1334  //  - fetchHook:   tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
  1335  //  - fetch:       network callback to actually send a particular download request to a physical remote peer
  1336  //  - cancel:      task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
  1337  //  - capacity:    network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
  1338  //  - idle:        network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
  1339  //  - setIdle:     network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
  1340  //  - kind:        textual label of the type being downloaded to display in log messages
  1341  func (d *Downloader) fetchParts(deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
  1342  	expire func() map[string]int, pending func() int, inFlight func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, bool),
  1343  	fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
  1344  	idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int, time.Time), kind string) error {
  1345  
  1346  	// Create a ticker to detect expired retrieval tasks
  1347  	ticker := time.NewTicker(100 * time.Millisecond)
  1348  	defer ticker.Stop()
  1349  
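        	// Buffered with capacity one so that multiple wake-up signals collapse into a
        	// single pending scheduling pass instead of queueing up.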
  1350  	update := make(chan struct{}, 1)
  1351  
  1352  	// Prepare the queue and fetch block parts until the block header fetcher's done
  1353  	finished := false
  1354  	for {
  1355  		select {
  1356  		case <-d.cancelCh:
  1357  			return errCanceled
  1358  
  1359  		case packet := <-deliveryCh:
  1360  			deliveryTime := time.Now()
  1361  			// If the peer was previously banned and failed to deliver its pack
  1362  			// in a reasonable time frame, ignore its message.
  1363  			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
  1364  				// Deliver the received chunk of data and check chain validity
  1365  				accepted, err := deliver(packet)
  1366  				if errors.Is(err, errInvalidChain) {
  1367  					return err
  1368  				}
  1369  				// Unless a peer delivered something completely different from what was requested
  1370  				// (usually caused by a timed-out request which came through in the end), set it to
  1371  				// idle. If the delivery's stale, the peer should have already been idled.
  1372  				if !errors.Is(err, errStaleDelivery) {
  1373  					setIdle(peer, accepted, deliveryTime)
  1374  				}
  1375  				// Issue a log to the user to see what's going on
  1376  				switch {
  1377  				case err == nil && packet.Items() == 0:
  1378  					peer.log.Trace("Requested data not delivered", "type", kind)
  1379  				case err == nil:
  1380  					peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
  1381  				default:
  1382  					peer.log.Debug("Failed to deliver retrieved data", "type", kind, "err", err)
  1383  				}
  1384  			}
  1385  			// Blocks assembled, try to update the progress
  1386  			select {
  1387  			case update <- struct{}{}:
  1388  			default:
  1389  			}
  1390  
  1391  		case cont := <-wakeCh:
  1392  			// The header fetcher sent a continuation flag, check if it's done
  1393  			if !cont {
  1394  				finished = true
  1395  			}
  1396  			// Headers arrive, try to update the progress
  1397  			select {
  1398  			case update <- struct{}{}:
  1399  			default:
  1400  			}
  1401  
  1402  		case <-ticker.C:
  1403  			// Sanity check: periodically trigger a progress update
  1404  			select {
  1405  			case update <- struct{}{}:
  1406  			default:
  1407  			}
  1408  
  1409  		case <-update:
  1410  			// Short circuit if we lost all our peers
  1411  			if d.peers.Len() == 0 {
  1412  				return errNoPeers
  1413  			}
  1414  			// Check for fetch request timeouts and demote the responsible peers
  1415  			for pid, fails := range expire() {
  1416  				if peer := d.peers.Peer(pid); peer != nil {
  1417  					// If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
  1418  					// ourselves. Only reset to minimal throughput, but don't drop the peer just yet. If even the minimal
  1419  					// request times out, sync-wise we need to get rid of the peer.
  1420  					//
  1421  					// The reason the minimum threshold is 2 is that the downloader tries to estimate the bandwidth
  1422  					// and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
  1423  					// how response times react to it, so it always requests one more than the minimum (i.e. min 2).
  1424  					if fails > 2 {
  1425  						peer.log.Trace("Data delivery timed out", "type", kind)
  1426  						setIdle(peer, 0, time.Now())
  1427  					} else {
  1428  						peer.log.Debug("Stalling delivery, dropping", "type", kind)
  1429  
  1430  						if d.dropPeer == nil {
  1431  							// The dropPeer method is nil when `--copydb` is used for a local copy.
  1432  							// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
  1433  							peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid)
  1434  						} else {
  1435  							d.dropPeer(pid)
  1436  
  1437  							// If this peer was the master peer, abort sync immediately
  1438  							d.cancelLock.RLock()
  1439  							master := pid == d.cancelPeer
  1440  							d.cancelLock.RUnlock()
  1441  
  1442  							if master {
  1443  								d.cancel()
  1444  								return errTimeout
  1445  							}
  1446  						}
  1447  					}
  1448  				}
  1449  			}
  1450  			// If there's nothing more to fetch, wait or terminate
  1451  			if pending() == 0 {
  1452  				if !inFlight() && finished {
  1453  					log.Debug("Data fetching completed", "type", kind)
  1454  					return nil
  1455  				}
  1456  				break
  1457  			}
  1458  			// Send a download request to all idle peers, until throttled
  1459  			progressed, throttled, running := false, false, inFlight()
  1460  			idles, total := idle()
  1461  			pendCount := pending()
  1462  			for _, peer := range idles {
  1463  				// Short circuit if throttling activated
  1464  				if throttled {
  1465  					break
  1466  				}
  1467  				// Short circuit if there are no more available tasks.
  1468  				if pendCount = pending(); pendCount == 0 {
  1469  					break
  1470  				}
  1471  				// Reserve a chunk of fetches for a peer. A nil can mean either that
  1472  				// no more headers are available, or that the peer is known not to
  1473  				// have them.
  1474  				request, progress, throttle := reserve(peer, capacity(peer))
  1475  				if progress {
  1476  					progressed = true
  1477  				}
  1478  				if throttle {
  1479  					throttled = true
  1480  					throttleCounter.Inc(1)
  1481  				}
  1482  				if request == nil {
  1483  					continue
  1484  				}
  1485  				if request.From > 0 {
  1486  					peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From)
  1487  				} else {
  1488  					peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
  1489  				}
  1490  				// Fetch the chunk and make sure any errors return the hashes to the queue
  1491  				if fetchHook != nil {
  1492  					fetchHook(request.Headers)
  1493  				}
  1494  				if err := fetch(peer, request); err != nil {
  1495  					// Although we could try to fix this, this error really
  1496  					// means that we've double allocated a fetch task to a peer. If that is the
  1497  					// case, the internal state of the downloader and the queue is very wrong, so
  1498  					// better hard crash and note the error instead of silently accumulating into
  1499  					// a much bigger issue.
  1500  					panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
  1501  				}
  1502  				running = true
  1503  			}
  1504  			// Make sure that we have peers available for fetching. If all peers have been tried
  1505  			// and all of them failed, throw an error
  1506  			if !progressed && !throttled && !running && len(idles) == total && pendCount > 0 {
  1507  				return errPeersUnavailable
  1508  			}
  1509  		}
  1510  	}
  1511  }
  1512  
  1513  // processHeaders takes batches of retrieved headers from an input channel and
  1514  // keeps processing and scheduling them into the header chain and downloader's
  1515  // queue until the stream ends or a failure occurs.
  1516  func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
  1517  	// Keep a count of uncertain headers to roll back
  1518  	var (
  1519  		rollback    uint64 // Zero means no rollback (fine as you can't unroll the genesis)
  1520  		rollbackErr error
  1521  		mode        = d.getMode()
  1522  	)
  1523  	defer func() {
  1524  		if rollback > 0 {
  1525  			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
  1526  			if mode != LightSync {
  1527  				lastFastBlock = d.blockchain.CurrentFastBlock().Number()
  1528  				lastBlock = d.blockchain.CurrentBlock().Number()
  1529  			}
  1530  			if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
  1531  				// We're already unwinding the stack, only print the error to make it more visible
  1532  				log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err)
  1533  			}
  1534  			curFastBlock, curBlock := common.Big0, common.Big0
  1535  			if mode != LightSync {
  1536  				curFastBlock = d.blockchain.CurrentFastBlock().Number()
  1537  				curBlock = d.blockchain.CurrentBlock().Number()
  1538  			}
  1539  			log.Warn("Rolled back chain segment",
  1540  				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
  1541  				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
  1542  				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
  1543  		}
  1544  	}()
  1545  	// Wait for batches of headers to process
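        	// gotHeaders flips to true once a non-empty header batch arrives; it is used
        	// below to detect peers that advertised a higher TD but delivered nothing.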
  1546  	gotHeaders := false
  1547  
  1548  	for {
  1549  		select {
  1550  		case <-d.cancelCh:
  1551  			rollbackErr = errCanceled
  1552  			return errCanceled
  1553  
  1554  		case headers := <-d.headerProcCh:
  1555  			// Terminate header processing if we synced up
  1556  			if len(headers) == 0 {
  1557  				// Notify everyone that headers are fully processed
  1558  				for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
  1559  					select {
  1560  					case ch <- false:
  1561  					case <-d.cancelCh:
  1562  					}
  1563  				}
  1564  				// If no headers were retrieved at all, the peer violated its TD promise that it had a
  1565  				// better chain compared to ours. The only exception is if its promised blocks were
  1566  				// already imported by other means (e.g. fetcher):
  1567  				//
  1568  				// R <remote peer>, L <local node>: Both at block 10
  1569  				// R: Mine block 11, and propagate it to L
  1570  				// L: Queue block 11 for import
  1571  				// L: Notice that R's head and TD increased compared to ours, start sync
  1572  				// L: Import of block 11 finishes
  1573  				// L: Sync begins, and finds common ancestor at 11
  1574  				// L: Request new headers up from 11 (R's TD was higher, it must have something)
  1575  				// R: Nothing to give
  1576  				if mode != LightSync {
  1577  					head := d.blockchain.CurrentBlock()
  1578  					if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
  1579  						return errStallingPeer
  1580  					}
  1581  				}
  1582  				// If fast or light syncing, ensure promised headers are indeed delivered. This is
  1583  				// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
  1584  				// of delivering the post-pivot blocks that would flag the invalid content.
  1585  				//
  1586  				// This check cannot be executed "as is" for full imports, since blocks may still be
  1587  				// queued for processing when the header download completes. However, as long as the
  1588  				// peer gave us something useful, we're already happy/progressed (above check).
  1589  				if mode == FastSync || mode == LightSync {
  1590  					head := d.lightchain.CurrentHeader()
  1591  					if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
  1592  						return errStallingPeer
  1593  					}
  1594  				}
  1595  				// Disable any rollback and return
  1596  				rollback = 0
  1597  				return nil
  1598  			}
  1599  			// Otherwise split the chunk of headers into batches and process them
  1600  			gotHeaders = true
  1601  			for len(headers) > 0 {
  1602  				// Terminate if something failed in between processing chunks
  1603  				select {
  1604  				case <-d.cancelCh:
  1605  					rollbackErr = errCanceled
  1606  					return errCanceled
  1607  				default:
  1608  				}
  1609  				// Select the next chunk of headers to import
  1610  				limit := maxHeadersProcess
  1611  				if limit > len(headers) {
  1612  					limit = len(headers)
  1613  				}
  1614  				chunk := headers[:limit]
  1615  
  1616  				// In case of header-only syncing, validate the chunk immediately
  1617  				if mode == FastSync || mode == LightSync {
  1618  					// If we're importing pure headers, verify based on their recentness
  1619  					var pivot uint64
  1620  
  1621  					d.pivotLock.RLock()
  1622  					if d.pivotHeader != nil {
  1623  						pivot = d.pivotHeader.Number.Uint64()
  1624  					}
  1625  					d.pivotLock.RUnlock()
  1626  
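        					// Headers close to (or above) the pivot are verified individually; older
        					// ones only at every fsHeaderCheckFrequency-th header.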
  1627  					frequency := fsHeaderCheckFrequency
  1628  					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
  1629  						frequency = 1
  1630  					}
  1631  					if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
  1632  						rollbackErr = err
  1633  
  1634  						// If some headers were inserted, track them as uncertain
  1635  						if (mode == FastSync || frequency > 1) && n > 0 && rollback == 0 {
  1636  							rollback = chunk[0].Number.Uint64()
  1637  						}
  1638  						log.Warn("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "parent", chunk[n].ParentHash, "err", err)
  1639  						return fmt.Errorf("%w: %v", errInvalidChain, err)
  1640  					}
  1641  					// All verifications passed, track all headers within the allotted limits
  1642  					if mode == FastSync {
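        						// After a successful insert, only the most recent fsHeaderSafetyNet
        						// headers remain subject to a potential rollback.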
  1643  						head := chunk[len(chunk)-1].Number.Uint64()
  1644  						if head-rollback > uint64(fsHeaderSafetyNet) {
  1645  							rollback = head - uint64(fsHeaderSafetyNet)
  1646  						} else {
  1647  							rollback = 1
  1648  						}
  1649  					}
  1650  				}
  1651  				// Unless we're doing light chains, schedule the headers for associated content retrieval
  1652  				if mode == FullSync || mode == FastSync {
  1653  					// If we've reached the allowed number of pending headers, stall a bit
  1654  					for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
  1655  						select {
  1656  						case <-d.cancelCh:
  1657  							rollbackErr = errCanceled
  1658  							return errCanceled
  1659  						case <-time.After(time.Second):
  1660  						}
  1661  					}
  1662  					// Otherwise insert the headers for content retrieval
  1663  					inserts := d.queue.Schedule(chunk, origin)
  1664  					if len(inserts) != len(chunk) {
  1665  						rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunk))
  1666  						return fmt.Errorf("%w: stale headers", errBadPeer)
  1667  					}
  1668  				}
  1669  				headers = headers[limit:]
  1670  				origin += uint64(limit)
  1671  			}
  1672  			// Update the highest block number we know if a higher one is found.
  1673  			d.syncStatsLock.Lock()
  1674  			if d.syncStatsChainHeight < origin {
  1675  				d.syncStatsChainHeight = origin - 1
  1676  			}
  1677  			d.syncStatsLock.Unlock()
  1678  
  1679  			// Signal the content downloaders of the availability of new tasks
  1680  			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
  1681  				select {
  1682  				case ch <- true:
  1683  				default:
  1684  				}
  1685  			}
  1686  		}
  1687  	}
  1688  }
  1689  
  1690  // processFullSyncContent takes fetch results from the queue and imports them into the chain.
  1691  func (d *Downloader) processFullSyncContent() error {
  1692  	for {
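        		// Results(true) blocks until a batch of completed fetch results is available
        		// (or the queue is closed), so an empty slice means the sync is done.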
  1693  		results := d.queue.Results(true)
  1694  		if len(results) == 0 {
  1695  			return nil
  1696  		}
  1697  		if d.chainInsertHook != nil {
  1698  			d.chainInsertHook(results)
  1699  		}
  1700  		if err := d.importBlockResults(results); err != nil {
  1701  			return err
  1702  		}
  1703  	}
  1704  }
  1705  
  1706  func (d *Downloader) importBlockResults(results []*fetchResult) error {
  1707  	// Check for any early termination requests
  1708  	if len(results) == 0 {
  1709  		return nil
  1710  	}
  1711  	select {
  1712  	case <-d.quitCh:
  1713  		return errCancelContentProcessing
  1714  	default:
  1715  	}
  1716  	// Retrieve a batch of results to import
  1717  	first, last := results[0].Header, results[len(results)-1].Header
  1718  	log.Debug("Inserting downloaded chain", "items", len(results),
  1719  		"firstnum", first.Number, "firsthash", first.Hash(),
  1720  		"lastnum", last.Number, "lasthash", last.Hash(),
  1721  	)
  1722  	blocks := make([]*types.Block, len(results))
  1723  	for i, result := range results {
  1724  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1725  	}
  1726  	if index, err := d.blockchain.InsertChain(blocks); err != nil {
  1727  		if index < len(results) {
  1728  			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1729  		} else {
  1730  			// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
  1731  			// when it needs to preprocess blocks to import a sidechain.
  1732  			// The importer will put together a new list of blocks to import, which is a superset
  1733  			// of the blocks delivered from the downloader, and the indexing will be off.
  1734  			log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
  1735  		}
  1736  		return fmt.Errorf("%w: %v", errInvalidChain, err)
  1737  	}
  1738  	return nil
  1739  }
  1740  
  1741  // processFastSyncContent takes fetch results from the queue and writes them to the
  1742  // database. It also controls the synchronisation of state nodes of the pivot block.
  1743  func (d *Downloader) processFastSyncContent() error {
  1744  	// Start syncing state of the reported head block. This should get us most of
  1745  	// the state of the pivot block.
  1746  	d.pivotLock.RLock()
  1747  	sync := d.syncState(d.pivotHeader.Root)
  1748  	d.pivotLock.RUnlock()
  1749  
  1750  	defer func() {
  1751  		// The `sync` object is replaced every time the pivot moves. We need to
  1752  		// close the very last active one when returning, hence the lazy evaluation
  1753  		// here instead of a direct `defer sync.Cancel()`!!!
  1754  		sync.Cancel()
  1755  	}()
  1756  
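        	// closeOnErr tears down the result queue if the state sync fails with a genuine
        	// error, so the blocking Results call below wakes up and the failure propagates.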
  1757  	closeOnErr := func(s *stateSync) {
  1758  		if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled {
  1759  			d.queue.Close() // wake up Results
  1760  		}
  1761  	}
  1762  	go closeOnErr(sync)
  1763  
  1764  	// To cater for moving pivot points, track the pivot block and subsequently
  1765  	// accumulated download results separately.
  1766  	var (
  1767  		oldPivot *fetchResult   // Locked in pivot block, might change eventually
  1768  		oldTail  []*fetchResult // Downloaded content after the pivot
  1769  	)
  1770  	for {
  1771  		// Wait for the next batch of downloaded data to be available, and if the pivot
  1772  		// block became stale, move the goalpost
  1773  		results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
  1774  		if len(results) == 0 {
  1775  			// If pivot sync is done, stop
  1776  			if oldPivot == nil {
  1777  				return sync.Cancel()
  1778  			}
  1779  			// If sync failed, stop
  1780  			select {
  1781  			case <-d.cancelCh:
  1782  				sync.Cancel()
  1783  				return errCanceled
  1784  			default:
  1785  			}
  1786  		}
  1787  		if d.chainInsertHook != nil {
  1788  			d.chainInsertHook(results)
  1789  		}
  1790  		// If we haven't downloaded the pivot block yet, check pivot staleness
  1791  		// notifications from the header downloader
  1792  		d.pivotLock.RLock()
  1793  		pivot := d.pivotHeader
  1794  		d.pivotLock.RUnlock()
  1795  
  1796  		if oldPivot == nil {
  1797  			if pivot.Root != sync.root {
  1798  				sync.Cancel()
  1799  				sync = d.syncState(pivot.Root)
  1800  
  1801  				go closeOnErr(sync)
  1802  			}
  1803  		} else {
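        			// Still waiting on the pivot state sync: re-attach the withheld pivot and its
        			// tail so they are processed together with the freshly delivered results.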
  1804  			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
  1805  		}
  1806  		// If the pivot block hasn't been committed yet, check whether it went stale and needs moving
  1807  		if atomic.LoadInt32(&d.committed) == 0 {
  1808  			latest := results[len(results)-1].Header
  1809  			// If the height is above the pivot block by two full sets, it means the pivot
  1810  			// became stale in the network and was garbage collected, so move to a
  1811  			// new pivot.
  1812  			//
  1813  			// Note, we have `reorgProtHeaderDelay` blocks withheld; those need to be
  1814  			// taken into account, otherwise we're detecting the pivot move too late
  1815  			// and will drop peers due to unavailable state!!!
  1816  			if height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) {
  1817  				log.Warn("Pivot became stale, moving", "old", pivot.Number.Uint64(), "new", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay))
  1818  				pivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted
  1819  
  1820  				d.pivotLock.Lock()
  1821  				d.pivotHeader = pivot
  1822  				d.pivotLock.Unlock()
  1823  
  1824  				// Write out the pivot into the database so a rollback beyond it will
  1825  				// reenable fast sync
  1826  				rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64())
  1827  			}
  1828  		}
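        		// Split the batch around the pivot: blocks before it are committed with their
        		// receipts, the pivot itself waits for state sync, and blocks after it are
        		// imported as regular full blocks.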
  1829  		P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results)
  1830  		if err := d.commitFastSyncData(beforeP, sync); err != nil {
  1831  			return err
  1832  		}
  1833  		if P != nil {
  1834  			// If new pivot block found, cancel old state retrieval and restart
  1835  			if oldPivot != P {
  1836  				sync.Cancel()
  1837  				sync = d.syncState(P.Header.Root)
  1838  
  1839  				go closeOnErr(sync)
  1840  				oldPivot = P
  1841  			}
  1842  			// Wait for completion, occasionally checking for pivot staleness
  1843  			select {
  1844  			case <-sync.done:
  1845  				if sync.err != nil {
  1846  					return sync.err
  1847  				}
  1848  				if err := d.commitPivotBlock(P); err != nil {
  1849  					return err
  1850  				}
  1851  				oldPivot = nil
  1852  
  1853  			case <-time.After(time.Second):
  1854  				oldTail = afterP
  1855  				continue
  1856  			}
  1857  		}
  1858  		// Fast sync done, pivot commit done, full import
  1859  		if err := d.importBlockResults(afterP); err != nil {
  1860  			return err
  1861  		}
  1862  	}
  1863  }
  1864  
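        // splitAroundPivot partitions the fetch results into the ones strictly below the
        // pivot number, the pivot block itself (if present) and the ones above it.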
  1865  func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
  1866  	if len(results) == 0 {
  1867  		return nil, nil, nil
  1868  	}
  1869  	if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {
  1870  		// the pivot is somewhere in the future
  1871  		return nil, results, nil
  1872  	}
  1873  	// This could also be optimized, but it only happens very rarely
  1874  	for _, result := range results {
  1875  		num := result.Header.Number.Uint64()
  1876  		switch {
  1877  		case num < pivot:
  1878  			before = append(before, result)
  1879  		case num == pivot:
  1880  			p = result
  1881  		default:
  1882  			after = append(after, result)
  1883  		}
  1884  	}
  1885  	return p, before, after
  1886  }
  1887  
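        // commitFastSyncData imports the given pre-pivot results as blocks and receipts
        // without executing transactions, aborting early if the state sync has failed.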
  1888  func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
  1889  	// Check for any early termination requests
  1890  	if len(results) == 0 {
  1891  		return nil
  1892  	}
  1893  	select {
  1894  	case <-d.quitCh:
  1895  		return errCancelContentProcessing
  1896  	case <-stateSync.done:
  1897  		if err := stateSync.Wait(); err != nil {
  1898  			return err
  1899  		}
  1900  	default:
  1901  	}
  1902  	// Retrieve a batch of results to import
  1903  	first, last := results[0].Header, results[len(results)-1].Header
  1904  	log.Debug("Inserting fast-sync blocks", "items", len(results),
  1905  		"firstnum", first.Number, "firsthash", first.Hash(),
  1906  		"lastnum", last.Number, "lasthash", last.Hash(),
  1907  	)
  1908  	blocks := make([]*types.Block, len(results))
  1909  	receipts := make([]types.Receipts, len(results))
  1910  	for i, result := range results {
  1911  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1912  		receipts[i] = result.Receipts
  1913  	}
  1914  	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
  1915  		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1916  		return fmt.Errorf("%w: %v", errInvalidChain, err)
  1917  	}
  1918  	return nil
  1919  }
  1920  
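        // commitPivotBlock writes the pivot block and its receipts to the database,
        // promotes it to the current chain head and marks the sync as committed, after
        // which the remaining blocks are imported via full block processing.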
  1921  func (d *Downloader) commitPivotBlock(result *fetchResult) error {
  1922  	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
  1923  	log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
  1924  
  1925  	// Commit the pivot block as the new head; a full sync will be required from here on
  1926  	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {
  1927  		return err
  1928  	}
  1929  	if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
  1930  		return err
  1931  	}
  1932  	atomic.StoreInt32(&d.committed, 1)
  1933  
  1934  	// If we had a bloom filter for the state sync, deallocate it now. Note, we only
  1935  	// deallocate internally, but keep the empty wrapper. This ensures that if we do
  1936  	// a rollback after committing the pivot and restarting fast sync, we don't end
  1937  	// up using a nil bloom. Empty bloom is fine, it just returns that it does not
  1938  	// have the info we need, so reach down to the database instead.
  1939  	if d.stateBloom != nil {
  1940  		d.stateBloom.Close()
  1941  	}
  1942  	return nil
  1943  }
  1944  
  1945  // DeliverHeaders injects a new batch of block headers received from a remote
  1946  // node into the download schedule.
  1947  func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) error {
  1948  	return d.deliver(d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
  1949  }
  1950  
  1951  // DeliverBodies injects a new batch of block bodies received from a remote node.
  1952  func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) error {
  1953  	return d.deliver(d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
  1954  }
  1955  
  1956  // DeliverReceipts injects a new batch of receipts received from a remote node.
  1957  func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) error {
  1958  	return d.deliver(d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
  1959  }
  1960  
  1961  // DeliverNodeData injects a new batch of node state data received from a remote node.
  1962  func (d *Downloader) DeliverNodeData(id string, data [][]byte) error {
  1963  	return d.deliver(d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
  1964  }
  1965  
  1966  // DeliverSnapPacket is invoked from a peer's message handler when it transmits a
  1967  // data packet for the local node to consume.
  1968  func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error {
  1969  	switch packet := packet.(type) {
  1970  	case *snap.AccountRangePacket:
  1971  		hashes, accounts, err := packet.Unpack()
  1972  		if err != nil {
  1973  			return err
  1974  		}
  1975  		return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof)
  1976  
  1977  	case *snap.StorageRangesPacket:
  1978  		hashset, slotset := packet.Unpack()
  1979  		return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof)
  1980  
  1981  	case *snap.ByteCodesPacket:
  1982  		return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)
  1983  
  1984  	case *snap.TrieNodesPacket:
  1985  		return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)
  1986  
  1987  	default:
  1988  		return fmt.Errorf("unexpected snap packet type: %T", packet)
  1989  	}
  1990  }
  1991  
  1992  // deliver injects a new batch of data received from a remote node.
  1993  func (d *Downloader) deliver(destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
  1994  	// Update the delivery metrics for both good and failed deliveries
  1995  	inMeter.Mark(int64(packet.Items()))
  1996  	defer func() {
  1997  		if err != nil {
  1998  			dropMeter.Mark(int64(packet.Items()))
  1999  		}
  2000  	}()
  2001  	// Deliver or abort if the sync is canceled while queuing
  2002  	d.cancelLock.RLock()
  2003  	cancel := d.cancelCh
  2004  	d.cancelLock.RUnlock()
  2005  	if cancel == nil {
  2006  		return errNoSyncActive
  2007  	}
  2008  	select {
  2009  	case destCh <- packet:
  2010  		return nil
  2011  	case <-cancel:
  2012  		return errNoSyncActive
  2013  	}
  2014  }