github.com/theQRL/go-zond@v0.1.1/zond/downloader/downloader.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package downloader contains the manual full chain synchronisation.
    18  package downloader
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"math/big"
    24  	"sync"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	"github.com/theQRL/go-zond"
    29  	"github.com/theQRL/go-zond/common"
    30  	"github.com/theQRL/go-zond/core/rawdb"
    31  	"github.com/theQRL/go-zond/core/state/snapshot"
    32  	"github.com/theQRL/go-zond/core/types"
    33  	"github.com/theQRL/go-zond/event"
    34  	"github.com/theQRL/go-zond/log"
    35  	"github.com/theQRL/go-zond/params"
    36  	"github.com/theQRL/go-zond/trie"
    37  	"github.com/theQRL/go-zond/zond/protocols/snap"
    38  	"github.com/theQRL/go-zond/zonddb"
    39  )
    40  
    41  var (
    42  	MaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request
    43  	MaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request
    44  	MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly
    45  	MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
    46  
    47  	maxQueuedHeaders            = 32 * 1024                         // [zond/62] Maximum number of headers to queue for import (DOS protection)
    48  	maxHeadersProcess           = 2048                              // Number of header download results to import at once into the chain
    49  	maxResultsProcess           = 2048                              // Number of content download results to import at once into the chain
    50  	fullMaxForkAncestry  uint64 = params.FullImmutabilityThreshold  // Maximum chain reorganisation (locally redeclared so tests can reduce it)
    51  	lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
    52  
    53  	reorgProtThreshold   = 48 // Threshold number of recent blocks to disable mini reorg protection
    54  	reorgProtHeaderDelay = 2  // Number of headers to delay delivering to cover mini reorgs
    55  
    56  	fsHeaderSafetyNet = 2048            // Number of headers to discard in case a chain violation is detected
    57  	fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download
    58  	fsMinFullBlocks   = 64              // Number of blocks to retrieve fully even in snap sync
    59  )
    60  
    61  var (
    62  	errBusy                    = errors.New("busy")
    63  	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
    64  	errBadPeer                 = errors.New("action from bad peer ignored")
    65  	errStallingPeer            = errors.New("peer is stalling")
    66  	errUnsyncedPeer            = errors.New("unsynced peer")
    67  	errNoPeers                 = errors.New("no peers to keep download active")
    68  	errTimeout                 = errors.New("timeout")
    69  	errEmptyHeaderSet          = errors.New("empty header set by peer")
    70  	errPeersUnavailable        = errors.New("no peers available or all tried for download")
    71  	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
    72  	errInvalidChain            = errors.New("retrieved hash chain is invalid")
    73  	errInvalidBody             = errors.New("retrieved block body is invalid")
    74  	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
    75  	errCancelStateFetch        = errors.New("state data download canceled (requested)")
    76  	errCancelContentProcessing = errors.New("content processing canceled (requested)")
    77  	errCanceled                = errors.New("syncing canceled (requested)")
    78  	errTooOld                  = errors.New("peer's protocol version too old")
    79  	errNoAncestorFound         = errors.New("no common ancestor found")
    80  	errNoPivotHeader           = errors.New("pivot header is not found")
    81  	ErrMergeTransition         = errors.New("legacy sync reached the merge")
    82  )
    83  
    84  // peerDropFn is a callback type for dropping a peer detected as malicious.
    85  type peerDropFn func(id string)
    86  
    87  // badBlockFn is a callback for the async beacon sync to notify the caller that
    88  // the origin header requested to sync to, produced a chain with a bad block.
    89  type badBlockFn func(invalid *types.Header, origin *types.Header)
    90  
    91  // headerTask is a set of downloaded headers to queue along with their precomputed
    92  // hashes to avoid constant rehashing.
    93  type headerTask struct {
    94  	headers []*types.Header
    95  	hashes  []common.Hash
    96  }
    97  
    98  type Downloader struct {
    99  	mode atomic.Uint32  // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
   100  	mux  *event.TypeMux // Event multiplexer to announce sync operation events
   101  
   102  	genesis uint64   // Genesis block number to limit sync to (e.g. light client CHT)
   103  	queue   *queue   // Scheduler for selecting the hashes to download
   104  	peers   *peerSet // Set of active peers from which download can proceed
   105  
   106  	stateDB zonddb.Database // Database to state sync into (and deduplicate via)
   107  
   108  	// Statistics
   109  	syncStatsChainOrigin uint64       // Origin block number where syncing started at
   110  	syncStatsChainHeight uint64       // Highest block number known when syncing started
   111  	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields
   112  
   113  	lightchain LightChain
   114  	blockchain BlockChain
   115  
   116  	// Callbacks
   117  	dropPeer peerDropFn // Drops a peer for misbehaving
   118  	badBlock badBlockFn // Reports a block as rejected by the chain
   119  
   120  	// Status
   121  	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
   122  	synchronising   atomic.Bool
   123  	notified        atomic.Bool
   124  	committed       atomic.Bool
   125  	ancientLimit    uint64 // The maximum block number which can be regarded as ancient data.
   126  
   127  	// Channels
   128  	headerProcCh chan *headerTask // Channel to feed the header processor new tasks
   129  
   130  	// Skeleton sync
   131  	skeleton *skeleton // Header skeleton to backfill the chain with (eth2 mode)
   132  
   133  	// State sync
   134  	pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
   135  	pivotLock   sync.RWMutex  // Lock protecting pivot header reads from updates
   136  
   137  	SnapSyncer     *snap.Syncer // TODO(karalabe): make private! hack for now
   138  	stateSyncStart chan *stateSync
   139  
   140  	// Cancellation and termination
   141  	cancelPeer string         // Identifier of the peer currently being used as the master (cancel on drop)
   142  	cancelCh   chan struct{}  // Channel to cancel mid-flight syncs
   143  	cancelLock sync.RWMutex   // Lock to protect the cancel channel and peer in delivers
   144  	cancelWg   sync.WaitGroup // Make sure all fetcher goroutines have exited.
   145  
   146  	quitCh   chan struct{} // Quit channel to signal termination
   147  	quitLock sync.Mutex    // Lock to prevent double closes
   148  
   149  	// Testing hooks
   150  	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
   151  	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
   152  	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
   153  	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
   154  
   155  	// Progress reporting metrics
   156  	syncStartBlock uint64    // Head snap block when Geth was started
   157  	syncStartTime  time.Time // Time instance when chain sync started
   158  	syncLogTime    time.Time // Time instance when status was last reported
   159  }
   160  
   161  // LightChain encapsulates functions required to synchronise a light chain.
   162  type LightChain interface {
   163  	// HasHeader verifies a header's presence in the local chain.
   164  	HasHeader(common.Hash, uint64) bool
   165  
   166  	// GetHeaderByHash retrieves a header from the local chain.
   167  	GetHeaderByHash(common.Hash) *types.Header
   168  
   169  	// CurrentHeader retrieves the head header from the local chain.
   170  	CurrentHeader() *types.Header
   171  
   172  	// GetTd returns the total difficulty of a local block.
   173  	GetTd(common.Hash, uint64) *big.Int
   174  
   175  	// InsertHeaderChain inserts a batch of headers into the local chain.
   176  	InsertHeaderChain([]*types.Header) (int, error)
   177  
   178  	// SetHead rewinds the local chain to a new head.
   179  	SetHead(uint64) error
   180  }
   181  
   182  // BlockChain encapsulates functions required to sync a (full or snap) blockchain.
   183  type BlockChain interface {
   184  	LightChain
   185  
   186  	// HasBlock verifies a block's presence in the local chain.
   187  	HasBlock(common.Hash, uint64) bool
   188  
   189  	// HasFastBlock verifies a snap block's presence in the local chain.
   190  	HasFastBlock(common.Hash, uint64) bool
   191  
   192  	// GetBlockByHash retrieves a block from the local chain.
   193  	GetBlockByHash(common.Hash) *types.Block
   194  
   195  	// CurrentBlock retrieves the head block from the local chain.
   196  	CurrentBlock() *types.Header
   197  
   198  	// CurrentSnapBlock retrieves the head snap block from the local chain.
   199  	CurrentSnapBlock() *types.Header
   200  
   201  	// SnapSyncCommitHead directly commits the head block to a certain entity.
   202  	SnapSyncCommitHead(common.Hash) error
   203  
   204  	// InsertChain inserts a batch of blocks into the local chain.
   205  	InsertChain(types.Blocks) (int, error)
   206  
   207  	// InsertReceiptChain inserts a batch of receipts into the local chain.
   208  	InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error)
   209  
   210  	// Snapshots returns the blockchain snapshot tree to paused it during sync.
   211  	Snapshots() *snapshot.Tree
   212  
   213  	// TrieDB retrieves the low level trie database used for interacting
   214  	// with trie nodes.
   215  	TrieDB() *trie.Database
   216  }
   217  
   218  // New creates a new downloader to fetch hashes and blocks from remote peers.
   219  func New(stateDb zonddb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader {
   220  	if lightchain == nil {
   221  		lightchain = chain
   222  	}
   223  	dl := &Downloader{
   224  		stateDB:        stateDb,
   225  		mux:            mux,
   226  		queue:          newQueue(blockCacheMaxItems, blockCacheInitialItems),
   227  		peers:          newPeerSet(),
   228  		blockchain:     chain,
   229  		lightchain:     lightchain,
   230  		dropPeer:       dropPeer,
   231  		headerProcCh:   make(chan *headerTask, 1),
   232  		quitCh:         make(chan struct{}),
   233  		SnapSyncer:     snap.NewSyncer(stateDb, chain.TrieDB().Scheme()),
   234  		stateSyncStart: make(chan *stateSync),
   235  		syncStartBlock: chain.CurrentSnapBlock().Number.Uint64(),
   236  	}
   237  	// Create the post-merge skeleton syncer and start the process
   238  	dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success))
   239  
   240  	go dl.stateFetcher()
   241  	return dl
   242  }
   243  
   244  // Progress retrieves the synchronisation boundaries, specifically the origin
   245  // block where synchronisation started at (may have failed/suspended); the block
   246  // or header sync is currently at; and the latest known block which the sync targets.
   247  //
   248  // In addition, during the state download phase of snap synchronisation the number
   249  // of processed and the total number of known states are also returned. Otherwise
   250  // these are zero.
   251  func (d *Downloader) Progress() zond.SyncProgress {
   252  	// Lock the current stats and return the progress
   253  	d.syncStatsLock.RLock()
   254  	defer d.syncStatsLock.RUnlock()
   255  
   256  	current := uint64(0)
   257  	mode := d.getMode()
   258  	switch {
   259  	case d.blockchain != nil && mode == FullSync:
   260  		current = d.blockchain.CurrentBlock().Number.Uint64()
   261  	case d.blockchain != nil && mode == SnapSync:
   262  		current = d.blockchain.CurrentSnapBlock().Number.Uint64()
   263  	case d.lightchain != nil:
   264  		current = d.lightchain.CurrentHeader().Number.Uint64()
   265  	default:
   266  		log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode)
   267  	}
   268  	progress, pending := d.SnapSyncer.Progress()
   269  
   270  	return zond.SyncProgress{
   271  		StartingBlock:       d.syncStatsChainOrigin,
   272  		CurrentBlock:        current,
   273  		HighestBlock:        d.syncStatsChainHeight,
   274  		SyncedAccounts:      progress.AccountSynced,
   275  		SyncedAccountBytes:  uint64(progress.AccountBytes),
   276  		SyncedBytecodes:     progress.BytecodeSynced,
   277  		SyncedBytecodeBytes: uint64(progress.BytecodeBytes),
   278  		SyncedStorage:       progress.StorageSynced,
   279  		SyncedStorageBytes:  uint64(progress.StorageBytes),
   280  		HealedTrienodes:     progress.TrienodeHealSynced,
   281  		HealedTrienodeBytes: uint64(progress.TrienodeHealBytes),
   282  		HealedBytecodes:     progress.BytecodeHealSynced,
   283  		HealedBytecodeBytes: uint64(progress.BytecodeHealBytes),
   284  		HealingTrienodes:    pending.TrienodeHeal,
   285  		HealingBytecode:     pending.BytecodeHeal,
   286  	}
   287  }
   288  
   289  // Synchronising returns whether the downloader is currently retrieving blocks.
   290  func (d *Downloader) Synchronising() bool {
   291  	return d.synchronising.Load()
   292  }
   293  
   294  // RegisterPeer injects a new download peer into the set of block source to be
   295  // used for fetching hashes and blocks from.
   296  func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
   297  	var logger log.Logger
   298  	if len(id) < 16 {
   299  		// Tests use short IDs, don't choke on them
   300  		logger = log.New("peer", id)
   301  	} else {
   302  		logger = log.New("peer", id[:8])
   303  	}
   304  	logger.Trace("Registering sync peer")
   305  	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
   306  		logger.Error("Failed to register sync peer", "err", err)
   307  		return err
   308  	}
   309  	return nil
   310  }
   311  
   312  // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
   313  func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error {
   314  	return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
   315  }
   316  
   317  // UnregisterPeer remove a peer from the known list, preventing any action from
   318  // the specified peer. An effort is also made to return any pending fetches into
   319  // the queue.
   320  func (d *Downloader) UnregisterPeer(id string) error {
   321  	// Unregister the peer from the active peer set and revoke any fetch tasks
   322  	var logger log.Logger
   323  	if len(id) < 16 {
   324  		// Tests use short IDs, don't choke on them
   325  		logger = log.New("peer", id)
   326  	} else {
   327  		logger = log.New("peer", id[:8])
   328  	}
   329  	logger.Trace("Unregistering sync peer")
   330  	if err := d.peers.Unregister(id); err != nil {
   331  		logger.Error("Failed to unregister sync peer", "err", err)
   332  		return err
   333  	}
   334  	d.queue.Revoke(id)
   335  
   336  	return nil
   337  }
   338  
   339  // LegacySync tries to sync up our local block chain with a remote peer, both
   340  // adding various sanity checks as well as wrapping it with various log entries.
   341  func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, mode SyncMode) error {
   342  	err := d.synchronise(id, head, td, ttd, mode, false, nil)
   343  
   344  	switch err {
   345  	case nil, errBusy, errCanceled:
   346  		return err
   347  	}
   348  	if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) ||
   349  		errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) ||
   350  		errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) {
   351  		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
   352  		if d.dropPeer == nil {
   353  			// The dropPeer method is nil when `--copydb` is used for a local copy.
   354  			// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
   355  			log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
   356  		} else {
   357  			d.dropPeer(id)
   358  		}
   359  		return err
   360  	}
   361  	if errors.Is(err, ErrMergeTransition) {
   362  		return err // This is an expected fault, don't keep printing it in a spin-loop
   363  	}
   364  	log.Warn("Synchronisation failed, retrying", "err", err)
   365  	return err
   366  }
   367  
   368  // synchronise will select the peer and use it for synchronising. If an empty string is given
   369  // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the
   370  // checks fail an error will be returned. This method is synchronous
   371  func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, mode SyncMode, beaconMode bool, beaconPing chan struct{}) error {
   372  	// The beacon header syncer is async. It will start this synchronization and
   373  	// will continue doing other tasks. However, if synchronization needs to be
   374  	// cancelled, the syncer needs to know if we reached the startup point (and
   375  	// inited the cancel channel) or not yet. Make sure that we'll signal even in
   376  	// case of a failure.
   377  	if beaconPing != nil {
   378  		defer func() {
   379  			select {
   380  			case <-beaconPing: // already notified
   381  			default:
   382  				close(beaconPing) // weird exit condition, notify that it's safe to cancel (the nothing)
   383  			}
   384  		}()
   385  	}
   386  	// Mock out the synchronisation if testing
   387  	if d.synchroniseMock != nil {
   388  		return d.synchroniseMock(id, hash)
   389  	}
   390  	// Make sure only one goroutine is ever allowed past this point at once
   391  	if !d.synchronising.CompareAndSwap(false, true) {
   392  		return errBusy
   393  	}
   394  	defer d.synchronising.Store(false)
   395  
   396  	// Post a user notification of the sync (only once per session)
   397  	if d.notified.CompareAndSwap(false, true) {
   398  		log.Info("Block synchronisation started")
   399  	}
   400  	if mode == SnapSync {
   401  		// Snap sync will directly modify the persistent state, making the entire
   402  		// trie database unusable until the state is fully synced. To prevent any
   403  		// subsequent state reads, explicitly disable the trie database and state
   404  		// syncer is responsible to address and correct any state missing.
   405  		if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
   406  			d.blockchain.TrieDB().Reset(types.EmptyRootHash)
   407  		}
   408  		// Snap sync uses the snapshot namespace to store potentially flaky data until
   409  		// sync completely heals and finishes. Pause snapshot maintenance in the mean-
   410  		// time to prevent access.
   411  		if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
   412  			snapshots.Disable()
   413  		}
   414  	}
   415  	// Reset the queue, peer set and wake channels to clean any internal leftover state
   416  	d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)
   417  	d.peers.Reset()
   418  
   419  	for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
   420  		select {
   421  		case <-ch:
   422  		default:
   423  		}
   424  	}
   425  	for empty := false; !empty; {
   426  		select {
   427  		case <-d.headerProcCh:
   428  		default:
   429  			empty = true
   430  		}
   431  	}
   432  	// Create cancel channel for aborting mid-flight and mark the master peer
   433  	d.cancelLock.Lock()
   434  	d.cancelCh = make(chan struct{})
   435  	d.cancelPeer = id
   436  	d.cancelLock.Unlock()
   437  
   438  	defer d.Cancel() // No matter what, we can't leave the cancel channel open
   439  
   440  	// Atomically set the requested sync mode
   441  	d.mode.Store(uint32(mode))
   442  
   443  	// Retrieve the origin peer and initiate the downloading process
   444  	var p *peerConnection
   445  	if !beaconMode { // Beacon mode doesn't need a peer to sync from
   446  		p = d.peers.Peer(id)
   447  		if p == nil {
   448  			return errUnknownPeer
   449  		}
   450  	}
   451  	if beaconPing != nil {
   452  		close(beaconPing)
   453  	}
   454  	return d.syncWithPeer(p, hash, td, ttd, beaconMode)
   455  }
   456  
   457  func (d *Downloader) getMode() SyncMode {
   458  	return SyncMode(d.mode.Load())
   459  }
   460  
   461  // syncWithPeer starts a block synchronization based on the hash chain from the
   462  // specified peer and head hash.
   463  func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *big.Int, beaconMode bool) (err error) {
   464  	d.mux.Post(StartEvent{})
   465  	defer func() {
   466  		// reset on error
   467  		if err != nil {
   468  			d.mux.Post(FailedEvent{err})
   469  		} else {
   470  			latest := d.lightchain.CurrentHeader()
   471  			d.mux.Post(DoneEvent{latest})
   472  		}
   473  	}()
   474  	mode := d.getMode()
   475  
   476  	if !beaconMode {
   477  		log.Debug("Synchronising with the network", "peer", p.id, "zond", p.version, "head", hash, "td", td, "mode", mode)
   478  	} else {
   479  		log.Debug("Backfilling with the network", "mode", mode)
   480  	}
   481  	defer func(start time.Time) {
   482  		log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
   483  	}(time.Now())
   484  
   485  	// Look up the sync boundaries: the common ancestor and the target block
   486  	var latest, pivot, final *types.Header
   487  	if !beaconMode {
   488  		// In legacy mode, use the master peer to retrieve the headers from
   489  		latest, pivot, err = d.fetchHead(p)
   490  		if err != nil {
   491  			return err
   492  		}
   493  	} else {
   494  		// In beacon mode, use the skeleton chain to retrieve the headers from
   495  		latest, _, final, err = d.skeleton.Bounds()
   496  		if err != nil {
   497  			return err
   498  		}
   499  		if latest.Number.Uint64() > uint64(fsMinFullBlocks) {
   500  			number := latest.Number.Uint64() - uint64(fsMinFullBlocks)
   501  
   502  			// Retrieve the pivot header from the skeleton chain segment but
   503  			// fallback to local chain if it's not found in skeleton space.
   504  			if pivot = d.skeleton.Header(number); pivot == nil {
   505  				_, oldest, _, _ := d.skeleton.Bounds() // error is already checked
   506  				if number < oldest.Number.Uint64() {
   507  					count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks
   508  					headers := d.readHeaderRange(oldest, count)
   509  					if len(headers) == count {
   510  						pivot = headers[len(headers)-1]
   511  						log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number)
   512  					}
   513  				}
   514  			}
   515  			// Print an error log and return directly in case the pivot header
   516  			// is still not found. It means the skeleton chain is not linked
   517  			// correctly with local chain.
   518  			if pivot == nil {
   519  				log.Error("Pivot header is not found", "number", number)
   520  				return errNoPivotHeader
   521  			}
   522  		}
   523  	}
   524  	// If no pivot block was returned, the head is below the min full block
   525  	// threshold (i.e. new chain). In that case we won't really snap sync
   526  	// anyway, but still need a valid pivot block to avoid some code hitting
   527  	// nil panics on access.
   528  	if mode == SnapSync && pivot == nil {
   529  		pivot = d.blockchain.CurrentBlock()
   530  	}
   531  	height := latest.Number.Uint64()
   532  
   533  	var origin uint64
   534  	if !beaconMode {
   535  		// In legacy mode, reach out to the network and find the ancestor
   536  		origin, err = d.findAncestor(p, latest)
   537  		if err != nil {
   538  			return err
   539  		}
   540  	} else {
   541  		// In beacon mode, use the skeleton chain for the ancestor lookup
   542  		origin, err = d.findBeaconAncestor()
   543  		if err != nil {
   544  			return err
   545  		}
   546  	}
   547  	d.syncStatsLock.Lock()
   548  	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
   549  		d.syncStatsChainOrigin = origin
   550  	}
   551  	d.syncStatsChainHeight = height
   552  	d.syncStatsLock.Unlock()
   553  
   554  	// Ensure our origin point is below any snap sync pivot point
   555  	if mode == SnapSync {
   556  		if height <= uint64(fsMinFullBlocks) {
   557  			origin = 0
   558  		} else {
   559  			pivotNumber := pivot.Number.Uint64()
   560  			if pivotNumber <= origin {
   561  				origin = pivotNumber - 1
   562  			}
   563  			// Write out the pivot into the database so a rollback beyond it will
   564  			// reenable snap sync
   565  			rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber)
   566  		}
   567  	}
   568  	d.committed.Store(true)
   569  	if mode == SnapSync && pivot.Number.Uint64() != 0 {
   570  		d.committed.Store(false)
   571  	}
   572  	if mode == SnapSync {
   573  		// Set the ancient data limitation. If we are running snap sync, all block
   574  		// data older than ancientLimit will be written to the ancient store. More
   575  		// recent data will be written to the active database and will wait for the
   576  		// freezer to migrate.
   577  		//
   578  		// If the network is post-merge, use either the last announced finalized
   579  		// block as the ancient limit, or if we haven't yet received one, the head-
   580  		// a max fork ancestry limit. One quirky case if we've already passed the
   581  		// finalized block, in which case the skeleton.Bounds will return nil and
   582  		// we'll revert to head - 90K. That's fine, we're finishing sync anyway.
   583  		//
   584  		// For non-merged networks, if there is a checkpoint available, then calculate
   585  		// the ancientLimit through that. Otherwise calculate the ancient limit through
   586  		// the advertised height of the remote peer. This most is mostly a fallback for
   587  		// legacy networks, but should eventually be droppped. TODO(karalabe).
   588  		if beaconMode {
   589  			// Beacon sync, use the latest finalized block as the ancient limit
   590  			// or a reasonable height if no finalized block is yet announced.
   591  			if final != nil {
   592  				d.ancientLimit = final.Number.Uint64()
   593  			} else if height > fullMaxForkAncestry+1 {
   594  				d.ancientLimit = height - fullMaxForkAncestry - 1
   595  			} else {
   596  				d.ancientLimit = 0
   597  			}
   598  		} else {
   599  			// Legacy sync, use the best announcement we have from the remote peer.
   600  			// TODO(karalabe): Drop this pathway.
   601  			if height > fullMaxForkAncestry+1 {
   602  				d.ancientLimit = height - fullMaxForkAncestry - 1
   603  			} else {
   604  				d.ancientLimit = 0
   605  			}
   606  		}
   607  		frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.
   608  
   609  		// If a part of blockchain data has already been written into active store,
   610  		// disable the ancient style insertion explicitly.
   611  		if origin >= frozen && frozen != 0 {
   612  			d.ancientLimit = 0
   613  			log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1)
   614  		} else if d.ancientLimit > 0 {
   615  			log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit)
   616  		}
   617  		// Rewind the ancient store and blockchain if reorg happens.
   618  		if origin+1 < frozen {
   619  			if err := d.lightchain.SetHead(origin); err != nil {
   620  				return err
   621  			}
   622  		}
   623  	}
   624  	// Initiate the sync using a concurrent header and content retrieval algorithm
   625  	d.queue.Prepare(origin+1, mode)
   626  	if d.syncInitHook != nil {
   627  		d.syncInitHook(origin, height)
   628  	}
   629  	var headerFetcher func() error
   630  	if !beaconMode {
   631  		// In legacy mode, headers are retrieved from the network
   632  		headerFetcher = func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) }
   633  	} else {
   634  		// In beacon mode, headers are served by the skeleton syncer
   635  		headerFetcher = func() error { return d.fetchBeaconHeaders(origin + 1) }
   636  	}
   637  	fetchers := []func() error{
   638  		headerFetcher, // Headers are always retrieved
   639  		func() error { return d.fetchBodies(origin+1, beaconMode) },   // Bodies are retrieved during normal and snap sync
   640  		func() error { return d.fetchReceipts(origin+1, beaconMode) }, // Receipts are retrieved during snap sync
   641  		func() error { return d.processHeaders(origin+1, td, ttd, beaconMode) },
   642  	}
   643  	if mode == SnapSync {
   644  		d.pivotLock.Lock()
   645  		d.pivotHeader = pivot
   646  		d.pivotLock.Unlock()
   647  
   648  		fetchers = append(fetchers, func() error { return d.processSnapSyncContent() })
   649  	} else if mode == FullSync {
   650  		fetchers = append(fetchers, func() error { return d.processFullSyncContent(ttd, beaconMode) })
   651  	}
   652  	return d.spawnSync(fetchers)
   653  }
   654  
   655  // spawnSync runs d.process and all given fetcher functions to completion in
   656  // separate goroutines, returning the first error that appears.
   657  func (d *Downloader) spawnSync(fetchers []func() error) error {
   658  	errc := make(chan error, len(fetchers))
   659  	d.cancelWg.Add(len(fetchers))
   660  	for _, fn := range fetchers {
   661  		fn := fn
   662  		go func() { defer d.cancelWg.Done(); errc <- fn() }()
   663  	}
   664  	// Wait for the first error, then terminate the others.
   665  	var err error
   666  	for i := 0; i < len(fetchers); i++ {
   667  		if i == len(fetchers)-1 {
   668  			// Close the queue when all fetchers have exited.
   669  			// This will cause the block processor to end when
   670  			// it has processed the queue.
   671  			d.queue.Close()
   672  		}
   673  		if got := <-errc; got != nil {
   674  			err = got
   675  			if got != errCanceled {
   676  				break // receive a meaningful error, bubble it up
   677  			}
   678  		}
   679  	}
   680  	d.queue.Close()
   681  	d.Cancel()
   682  	return err
   683  }
   684  
   685  // cancel aborts all of the operations and resets the queue. However, cancel does
   686  // not wait for the running download goroutines to finish. This method should be
   687  // used when cancelling the downloads from inside the downloader.
   688  func (d *Downloader) cancel() {
   689  	// Close the current cancel channel
   690  	d.cancelLock.Lock()
   691  	defer d.cancelLock.Unlock()
   692  
   693  	if d.cancelCh != nil {
   694  		select {
   695  		case <-d.cancelCh:
   696  			// Channel was already closed
   697  		default:
   698  			close(d.cancelCh)
   699  		}
   700  	}
   701  }
   702  
   703  // Cancel aborts all of the operations and waits for all download goroutines to
   704  // finish before returning.
   705  func (d *Downloader) Cancel() {
   706  	d.cancel()
   707  	d.cancelWg.Wait()
   708  }
   709  
   710  // Terminate interrupts the downloader, canceling all pending operations.
   711  // The downloader cannot be reused after calling Terminate.
   712  func (d *Downloader) Terminate() {
   713  	// Close the termination channel (make sure double close is allowed)
   714  	d.quitLock.Lock()
   715  	select {
   716  	case <-d.quitCh:
   717  	default:
   718  		close(d.quitCh)
   719  
   720  		// Terminate the internal beacon syncer
   721  		d.skeleton.Terminate()
   722  	}
   723  	d.quitLock.Unlock()
   724  
   725  	// Cancel any pending download requests
   726  	d.Cancel()
   727  }
   728  
   729  // fetchHead retrieves the head header and prior pivot block (if available) from
   730  // a remote peer.
   731  func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) {
   732  	p.log.Debug("Retrieving remote chain head")
   733  	mode := d.getMode()
   734  
   735  	// Request the advertised remote head block and wait for the response
   736  	latest, _ := p.peer.Head()
   737  	fetch := 1
   738  	if mode == SnapSync {
   739  		fetch = 2 // head + pivot headers
   740  	}
   741  	headers, hashes, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true)
   742  	if err != nil {
   743  		return nil, nil, err
   744  	}
   745  	// Make sure the peer gave us at least one and at most the requested headers
   746  	if len(headers) == 0 || len(headers) > fetch {
   747  		return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch)
   748  	}
   749  	// The first header needs to be the head, validate against the request. If
   750  	// only 1 header was returned, make sure there's no pivot or there was not
   751  	// one requested.
   752  	head = headers[0]
   753  	if len(headers) == 1 {
   754  		if mode == SnapSync && head.Number.Uint64() > uint64(fsMinFullBlocks) {
   755  			return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer)
   756  		}
   757  		p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", hashes[0])
   758  		return head, nil, nil
   759  	}
   760  	// At this point we have 2 headers in total and the first is the
   761  	// validated head of the chain. Check the pivot number and return,
   762  	pivot = headers[1]
   763  	if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) {
   764  		return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks))
   765  	}
   766  	return head, pivot, nil
   767  }
   768  
   769  // calculateRequestSpan calculates what headers to request from a peer when trying to determine the
   770  // common ancestor.
   771  // It returns parameters to be used for peer.RequestHeadersByNumber:
   772  //
   773  //	from  - starting block number
   774  //	count - number of headers to request
   775  //	skip  - number of headers to skip
   776  //
   777  // and also returns 'max', the last block which is expected to be returned by the remote peers,
   778  // given the (from,count,skip)
   779  func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
   780  	var (
   781  		from     int
   782  		count    int
   783  		MaxCount = MaxHeaderFetch / 16
   784  	)
   785  	// requestHead is the highest block that we will ask for. If requestHead is not offset,
   786  	// the highest block that we will get is 16 blocks back from head, which means we
   787  	// will fetch 14 or 15 blocks unnecessarily in the case the height difference
   788  	// between us and the peer is 1-2 blocks, which is most common
   789  	requestHead := int(remoteHeight) - 1
   790  	if requestHead < 0 {
   791  		requestHead = 0
   792  	}
   793  	// requestBottom is the lowest block we want included in the query
   794  	// Ideally, we want to include the one just below our own head
   795  	requestBottom := int(localHeight - 1)
   796  	if requestBottom < 0 {
   797  		requestBottom = 0
   798  	}
   799  	totalSpan := requestHead - requestBottom
   800  	span := 1 + totalSpan/MaxCount
   801  	if span < 2 {
   802  		span = 2
   803  	}
   804  	if span > 16 {
   805  		span = 16
   806  	}
   807  
   808  	count = 1 + totalSpan/span
   809  	if count > MaxCount {
   810  		count = MaxCount
   811  	}
   812  	if count < 2 {
   813  		count = 2
   814  	}
   815  	from = requestHead - (count-1)*span
   816  	if from < 0 {
   817  		from = 0
   818  	}
   819  	max := from + (count-1)*span
   820  	return int64(from), count, span - 1, uint64(max)
   821  }
   822  
   823  // findAncestor tries to locate the common ancestor link of the local chain and
   824  // a remote peers blockchain. In the general case when our node was in sync and
   825  // on the correct chain, checking the top N links should already get us a match.
   826  // In the rare scenario when we ended up on a long reorganisation (i.e. none of
   827  // the head links match), we do a binary search to find the common ancestor.
   828  func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
   829  	// Figure out the valid ancestor range to prevent rewrite attacks
   830  	var (
   831  		floor        = int64(-1)
   832  		localHeight  uint64
   833  		remoteHeight = remoteHeader.Number.Uint64()
   834  	)
   835  	mode := d.getMode()
   836  	switch mode {
   837  	case FullSync:
   838  		localHeight = d.blockchain.CurrentBlock().Number.Uint64()
   839  	case SnapSync:
   840  		localHeight = d.blockchain.CurrentSnapBlock().Number.Uint64()
   841  	default:
   842  		localHeight = d.lightchain.CurrentHeader().Number.Uint64()
   843  	}
   844  	p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
   845  
   846  	// Recap floor value for binary search
   847  	maxForkAncestry := fullMaxForkAncestry
   848  	if d.getMode() == LightSync {
   849  		maxForkAncestry = lightMaxForkAncestry
   850  	}
   851  	if localHeight >= maxForkAncestry {
   852  		// We're above the max reorg threshold, find the earliest fork point
   853  		floor = int64(localHeight - maxForkAncestry)
   854  	}
   855  	// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
   856  	// all headers before that point will be missing.
   857  	if mode == LightSync {
   858  		// If we don't know the current CHT position, find it
   859  		if d.genesis == 0 {
   860  			header := d.lightchain.CurrentHeader()
   861  			for header != nil {
   862  				d.genesis = header.Number.Uint64()
   863  				if floor >= int64(d.genesis)-1 {
   864  					break
   865  				}
   866  				header = d.lightchain.GetHeaderByHash(header.ParentHash)
   867  			}
   868  		}
   869  		// We already know the "genesis" block number, cap floor to that
   870  		if floor < int64(d.genesis)-1 {
   871  			floor = int64(d.genesis) - 1
   872  		}
   873  	}
   874  
   875  	ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor)
   876  	if err == nil {
   877  		return ancestor, nil
   878  	}
   879  	// The returned error was not nil.
   880  	// If the error returned does not reflect that a common ancestor was not found, return it.
   881  	// If the error reflects that a common ancestor was not found, continue to binary search,
   882  	// where the error value will be reassigned.
   883  	if !errors.Is(err, errNoAncestorFound) {
   884  		return 0, err
   885  	}
   886  
   887  	ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor)
   888  	if err != nil {
   889  		return 0, err
   890  	}
   891  	return ancestor, nil
   892  }
   893  
   894  func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (uint64, error) {
   895  	from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)
   896  
   897  	p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
   898  	headers, hashes, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false)
   899  	if err != nil {
   900  		return 0, err
   901  	}
   902  	// Wait for the remote response to the head fetch
   903  	number, hash := uint64(0), common.Hash{}
   904  
   905  	// Make sure the peer actually gave something valid
   906  	if len(headers) == 0 {
   907  		p.log.Warn("Empty head header set")
   908  		return 0, errEmptyHeaderSet
   909  	}
   910  	// Make sure the peer's reply conforms to the request
   911  	for i, header := range headers {
   912  		expectNumber := from + int64(i)*int64(skip+1)
   913  		if number := header.Number.Int64(); number != expectNumber {
   914  			p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
   915  			return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering"))
   916  		}
   917  	}
   918  	// Check if a common ancestor was found
   919  	for i := len(headers) - 1; i >= 0; i-- {
   920  		// Skip any headers that underflow/overflow our requested set
   921  		if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
   922  			continue
   923  		}
   924  		// Otherwise check if we already know the header or not
   925  		h := hashes[i]
   926  		n := headers[i].Number.Uint64()
   927  
   928  		var known bool
   929  		switch mode {
   930  		case FullSync:
   931  			known = d.blockchain.HasBlock(h, n)
   932  		case SnapSync:
   933  			known = d.blockchain.HasFastBlock(h, n)
   934  		default:
   935  			known = d.lightchain.HasHeader(h, n)
   936  		}
   937  		if known {
   938  			number, hash = n, h
   939  			break
   940  		}
   941  	}
   942  	// If the head fetch already found an ancestor, return
   943  	if hash != (common.Hash{}) {
   944  		if int64(number) <= floor {
   945  			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
   946  			return 0, errInvalidAncestor
   947  		}
   948  		p.log.Debug("Found common ancestor", "number", number, "hash", hash)
   949  		return number, nil
   950  	}
   951  	return 0, errNoAncestorFound
   952  }
   953  
   954  func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (uint64, error) {
   955  	hash := common.Hash{}
   956  
   957  	// Ancestor not found, we need to binary search over our chain
   958  	start, end := uint64(0), remoteHeight
   959  	if floor > 0 {
   960  		start = uint64(floor)
   961  	}
   962  	p.log.Trace("Binary searching for common ancestor", "start", start, "end", end)
   963  
   964  	for start+1 < end {
   965  		// Split our chain interval in two, and request the hash to cross check
   966  		check := (start + end) / 2
   967  
   968  		headers, hashes, err := d.fetchHeadersByNumber(p, check, 1, 0, false)
   969  		if err != nil {
   970  			return 0, err
   971  		}
   972  		// Make sure the peer actually gave something valid
   973  		if len(headers) != 1 {
   974  			p.log.Warn("Multiple headers for single request", "headers", len(headers))
   975  			return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers))
   976  		}
   977  		// Modify the search interval based on the response
   978  		h := hashes[0]
   979  		n := headers[0].Number.Uint64()
   980  
   981  		var known bool
   982  		switch mode {
   983  		case FullSync:
   984  			known = d.blockchain.HasBlock(h, n)
   985  		case SnapSync:
   986  			known = d.blockchain.HasFastBlock(h, n)
   987  		default:
   988  			known = d.lightchain.HasHeader(h, n)
   989  		}
   990  		if !known {
   991  			end = check
   992  			continue
   993  		}
   994  		header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
   995  		if header.Number.Uint64() != check {
   996  			p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
   997  			return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number)
   998  		}
   999  		start = check
  1000  		hash = h
  1001  	}
  1002  	// Ensure valid ancestry and return
  1003  	if int64(start) <= floor {
  1004  		p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
  1005  		return 0, errInvalidAncestor
  1006  	}
  1007  	p.log.Debug("Found common ancestor", "number", start, "hash", hash)
  1008  	return start, nil
  1009  }
  1010  
  1011  // fetchHeaders keeps retrieving headers concurrently from the number
  1012  // requested, until no more are returned, potentially throttling on the way. To
  1013  // facilitate concurrency but still protect against malicious nodes sending bad
  1014  // headers, we construct a header chain skeleton using the "origin" peer we are
  1015  // syncing with, and fill in the missing headers using anyone else. Headers from
  1016  // other peers are only accepted if they map cleanly to the skeleton. If no one
  1017  // can fill in the skeleton - not even the origin peer - it's assumed invalid and
  1018  // the origin is dropped.
  1019  func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) error {
  1020  	p.log.Debug("Directing header downloads", "origin", from)
  1021  	defer p.log.Debug("Header download terminated")
  1022  
  1023  	// Start pulling the header chain skeleton until all is done
  1024  	var (
  1025  		skeleton = true  // Skeleton assembly phase or finishing up
  1026  		pivoting = false // Whether the next request is pivot verification
  1027  		ancestor = from
  1028  		mode     = d.getMode()
  1029  	)
  1030  	for {
  1031  		// Pull the next batch of headers, it either:
  1032  		//   - Pivot check to see if the chain moved too far
  1033  		//   - Skeleton retrieval to permit concurrent header fetches
  1034  		//   - Full header retrieval if we're near the chain head
  1035  		var (
  1036  			headers []*types.Header
  1037  			hashes  []common.Hash
  1038  			err     error
  1039  		)
  1040  		switch {
  1041  		case pivoting:
  1042  			d.pivotLock.RLock()
  1043  			pivot := d.pivotHeader.Number.Uint64()
  1044  			d.pivotLock.RUnlock()
  1045  
  1046  			p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks))
  1047  			headers, hashes, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep
  1048  
  1049  		case skeleton:
  1050  			p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
  1051  			headers, hashes, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
  1052  
  1053  		default:
  1054  			p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
  1055  			headers, hashes, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false)
  1056  		}
  1057  		switch err {
  1058  		case nil:
  1059  			// Headers retrieved, continue with processing
  1060  
  1061  		case errCanceled:
  1062  			// Sync cancelled, no issue, propagate up
  1063  			return err
  1064  
  1065  		default:
  1066  			// Header retrieval either timed out, or the peer failed in some strange way
  1067  			// (e.g. disconnect). Consider the master peer bad and drop
  1068  			d.dropPeer(p.id)
  1069  
  1070  			// Finish the sync gracefully instead of dumping the gathered data though
  1071  			for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
  1072  				select {
  1073  				case ch <- false:
  1074  				case <-d.cancelCh:
  1075  				}
  1076  			}
  1077  			select {
  1078  			case d.headerProcCh <- nil:
  1079  			case <-d.cancelCh:
  1080  			}
  1081  			return fmt.Errorf("%w: header request failed: %v", errBadPeer, err)
  1082  		}
  1083  		// If the pivot is being checked, move if it became stale and run the real retrieval
  1084  		var pivot uint64
  1085  
  1086  		d.pivotLock.RLock()
  1087  		if d.pivotHeader != nil {
  1088  			pivot = d.pivotHeader.Number.Uint64()
  1089  		}
  1090  		d.pivotLock.RUnlock()
  1091  
  1092  		if pivoting {
  1093  			if len(headers) == 2 {
  1094  				if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want {
  1095  					log.Warn("Peer sent invalid next pivot", "have", have, "want", want)
  1096  					return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want)
  1097  				}
  1098  				if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want {
  1099  					log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want)
  1100  					return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want)
  1101  				}
  1102  				log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number)
  1103  				pivot = headers[0].Number.Uint64()
  1104  
  1105  				d.pivotLock.Lock()
  1106  				d.pivotHeader = headers[0]
  1107  				d.pivotLock.Unlock()
  1108  
  1109  				// Write out the pivot into the database so a rollback beyond
  1110  				// it will reenable snap sync and update the state root that
  1111  				// the state syncer will be downloading.
  1112  				rawdb.WriteLastPivotNumber(d.stateDB, pivot)
  1113  			}
  1114  			// Disable the pivot check and fetch the next batch of headers
  1115  			pivoting = false
  1116  			continue
  1117  		}
  1118  		// If the skeleton's finished, pull any remaining head headers directly from the origin
  1119  		if skeleton && len(headers) == 0 {
  1120  			// A malicious node might withhold advertised headers indefinitely
  1121  			if from+uint64(MaxHeaderFetch)-1 <= head {
  1122  				p.log.Warn("Peer withheld skeleton headers", "advertised", head, "withheld", from+uint64(MaxHeaderFetch)-1)
  1123  				return fmt.Errorf("%w: withheld skeleton headers: advertised %d, withheld #%d", errStallingPeer, head, from+uint64(MaxHeaderFetch)-1)
  1124  			}
  1125  			p.log.Debug("No skeleton, fetching headers directly")
  1126  			skeleton = false
  1127  			continue
  1128  		}
  1129  		// If no more headers are inbound, notify the content fetchers and return
  1130  		if len(headers) == 0 {
  1131  			// Don't abort header fetches while the pivot is downloading
  1132  			if !d.committed.Load() && pivot <= from {
  1133  				p.log.Debug("No headers, waiting for pivot commit")
  1134  				select {
  1135  				case <-time.After(fsHeaderContCheck):
  1136  					continue
  1137  				case <-d.cancelCh:
  1138  					return errCanceled
  1139  				}
  1140  			}
  1141  			// Pivot done (or not in snap sync) and no more headers, terminate the process
  1142  			p.log.Debug("No more headers available")
  1143  			select {
  1144  			case d.headerProcCh <- nil:
  1145  				return nil
  1146  			case <-d.cancelCh:
  1147  				return errCanceled
  1148  			}
  1149  		}
  1150  		// If we received a skeleton batch, resolve internals concurrently
  1151  		var progressed bool
  1152  		if skeleton {
  1153  			filled, hashset, proced, err := d.fillHeaderSkeleton(from, headers)
  1154  			if err != nil {
  1155  				p.log.Debug("Skeleton chain invalid", "err", err)
  1156  				return fmt.Errorf("%w: %v", errInvalidChain, err)
  1157  			}
  1158  			headers = filled[proced:]
  1159  			hashes = hashset[proced:]
  1160  
  1161  			progressed = proced > 0
  1162  			from += uint64(proced)
  1163  		} else {
  1164  			// A malicious node might withhold advertised headers indefinitely
  1165  			if n := len(headers); n < MaxHeaderFetch && headers[n-1].Number.Uint64() < head {
  1166  				p.log.Warn("Peer withheld headers", "advertised", head, "delivered", headers[n-1].Number.Uint64())
  1167  				return fmt.Errorf("%w: withheld headers: advertised %d, delivered %d", errStallingPeer, head, headers[n-1].Number.Uint64())
  1168  			}
  1169  			// If we're closing in on the chain head, but haven't yet reached it, delay
  1170  			// the last few headers so mini reorgs on the head don't cause invalid hash
  1171  			// chain errors.
  1172  			if n := len(headers); n > 0 {
  1173  				// Retrieve the current head we're at
  1174  				var head uint64
  1175  				if mode == LightSync {
  1176  					head = d.lightchain.CurrentHeader().Number.Uint64()
  1177  				} else {
  1178  					head = d.blockchain.CurrentSnapBlock().Number.Uint64()
  1179  					if full := d.blockchain.CurrentBlock().Number.Uint64(); head < full {
  1180  						head = full
  1181  					}
  1182  				}
  1183  				// If the head is below the common ancestor, we're actually deduplicating
  1184  				// already existing chain segments, so use the ancestor as the fake head.
  1185  				// Otherwise, we might end up delaying header deliveries pointlessly.
  1186  				if head < ancestor {
  1187  					head = ancestor
  1188  				}
  1189  				// If the head is way older than this batch, delay the last few headers
  1190  				if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
  1191  					delay := reorgProtHeaderDelay
  1192  					if delay > n {
  1193  						delay = n
  1194  					}
  1195  					headers = headers[:n-delay]
  1196  					hashes = hashes[:n-delay]
  1197  				}
  1198  			}
  1199  		}
  1200  		// If no headers have been delivered, or all of them have been delayed,
  1201  		// sleep a bit and retry. Take care with headers already consumed during
  1202  		// skeleton filling
  1203  		if len(headers) == 0 && !progressed {
  1204  			p.log.Trace("All headers delayed, waiting")
  1205  			select {
  1206  			case <-time.After(fsHeaderContCheck):
  1207  				continue
  1208  			case <-d.cancelCh:
  1209  				return errCanceled
  1210  			}
  1211  		}
  1212  		// Insert any remaining new headers and fetch the next batch
  1213  		if len(headers) > 0 {
  1214  			p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
  1215  			select {
  1216  			case d.headerProcCh <- &headerTask{
  1217  				headers: headers,
  1218  				hashes:  hashes,
  1219  			}:
  1220  			case <-d.cancelCh:
  1221  				return errCanceled
  1222  			}
  1223  			from += uint64(len(headers))
  1224  		}
  1225  		// If we're still skeleton filling snap sync, check pivot staleness
  1226  		// before continuing to the next skeleton filling
  1227  		if skeleton && pivot > 0 {
  1228  			pivoting = true
  1229  		}
  1230  	}
  1231  }
  1232  
  1233  // fillHeaderSkeleton concurrently retrieves headers from all our available peers
  1234  // and maps them to the provided skeleton header chain.
  1235  //
  1236  // Any partial results from the beginning of the skeleton is (if possible) forwarded
  1237  // immediately to the header processor to keep the rest of the pipeline full even
  1238  // in the case of header stalls.
  1239  //
  1240  // The method returns the entire filled skeleton and also the number of headers
  1241  // already forwarded for processing.
  1242  func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, []common.Hash, int, error) {
  1243  	log.Debug("Filling up skeleton", "from", from)
  1244  	d.queue.ScheduleSkeleton(from, skeleton)
  1245  
  1246  	err := d.concurrentFetch((*headerQueue)(d), false)
  1247  	if err != nil {
  1248  		log.Debug("Skeleton fill failed", "err", err)
  1249  	}
  1250  	filled, hashes, proced := d.queue.RetrieveHeaders()
  1251  	if err == nil {
  1252  		log.Debug("Skeleton fill succeeded", "filled", len(filled), "processed", proced)
  1253  	}
  1254  	return filled, hashes, proced, err
  1255  }
  1256  
  1257  // fetchBodies iteratively downloads the scheduled block bodies, taking any
  1258  // available peers, reserving a chunk of blocks for each, waiting for delivery
  1259  // and also periodically checking for timeouts.
  1260  func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error {
  1261  	log.Debug("Downloading block bodies", "origin", from)
  1262  	err := d.concurrentFetch((*bodyQueue)(d), beaconMode)
  1263  
  1264  	log.Debug("Block body download terminated", "err", err)
  1265  	return err
  1266  }
  1267  
  1268  // fetchReceipts iteratively downloads the scheduled block receipts, taking any
  1269  // available peers, reserving a chunk of receipts for each, waiting for delivery
  1270  // and also periodically checking for timeouts.
  1271  func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error {
  1272  	log.Debug("Downloading receipts", "origin", from)
  1273  	err := d.concurrentFetch((*receiptQueue)(d), beaconMode)
  1274  
  1275  	log.Debug("Receipt download terminated", "err", err)
  1276  	return err
  1277  }
  1278  
  1279  // processHeaders takes batches of retrieved headers from an input channel and
  1280  // keeps processing and scheduling them into the header chain and downloader's
  1281  // queue until the stream ends or a failure occurs.
  1282  func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error {
  1283  	// Keep a count of uncertain headers to roll back
  1284  	var (
  1285  		rollback    uint64 // Zero means no rollback (fine as you can't unroll the genesis)
  1286  		rollbackErr error
  1287  		mode        = d.getMode()
  1288  	)
  1289  	defer func() {
  1290  		if rollback > 0 {
  1291  			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
  1292  			if mode != LightSync {
  1293  				lastFastBlock = d.blockchain.CurrentSnapBlock().Number
  1294  				lastBlock = d.blockchain.CurrentBlock().Number
  1295  			}
  1296  			if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
  1297  				// We're already unwinding the stack, only print the error to make it more visible
  1298  				log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err)
  1299  			}
  1300  			curFastBlock, curBlock := common.Big0, common.Big0
  1301  			if mode != LightSync {
  1302  				curFastBlock = d.blockchain.CurrentSnapBlock().Number
  1303  				curBlock = d.blockchain.CurrentBlock().Number
  1304  			}
  1305  			log.Warn("Rolled back chain segment",
  1306  				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
  1307  				"snap", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
  1308  				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
  1309  		}
  1310  	}()
  1311  	// Wait for batches of headers to process
  1312  	gotHeaders := false
  1313  
  1314  	for {
  1315  		select {
  1316  		case <-d.cancelCh:
  1317  			rollbackErr = errCanceled
  1318  			return errCanceled
  1319  
  1320  		case task := <-d.headerProcCh:
  1321  			// Terminate header processing if we synced up
  1322  			if task == nil || len(task.headers) == 0 {
  1323  				// Notify everyone that headers are fully processed
  1324  				for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
  1325  					select {
  1326  					case ch <- false:
  1327  					case <-d.cancelCh:
  1328  					}
  1329  				}
  1330  				// If we're in legacy sync mode, we need to check total difficulty
  1331  				// violations from malicious peers. That is not needed in beacon
  1332  				// mode and we can skip to terminating sync.
  1333  				if !beaconMode {
  1334  					// If no headers were retrieved at all, the peer violated its TD promise that it had a
  1335  					// better chain compared to ours. The only exception is if its promised blocks were
  1336  					// already imported by other means (e.g. fetcher):
  1337  					//
  1338  					// R <remote peer>, L <local node>: Both at block 10
  1339  					// R: Mine block 11, and propagate it to L
  1340  					// L: Queue block 11 for import
  1341  					// L: Notice that R's head and TD increased compared to ours, start sync
  1342  					// L: Import of block 11 finishes
  1343  					// L: Sync begins, and finds common ancestor at 11
  1344  					// L: Request new headers up from 11 (R's TD was higher, it must have something)
  1345  					// R: Nothing to give
  1346  					if mode != LightSync {
  1347  						head := d.blockchain.CurrentBlock()
  1348  						if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
  1349  							return errStallingPeer
  1350  						}
  1351  					}
  1352  					// If snap or light syncing, ensure promised headers are indeed delivered. This is
  1353  					// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
  1354  					// of delivering the post-pivot blocks that would flag the invalid content.
  1355  					//
  1356  					// This check cannot be executed "as is" for full imports, since blocks may still be
  1357  					// queued for processing when the header download completes. However, as long as the
  1358  					// peer gave us something useful, we're already happy/progressed (above check).
  1359  					if mode == SnapSync || mode == LightSync {
  1360  						head := d.lightchain.CurrentHeader()
  1361  						if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
  1362  							return errStallingPeer
  1363  						}
  1364  					}
  1365  				}
  1366  				// Disable any rollback and return
  1367  				rollback = 0
  1368  				return nil
  1369  			}
  1370  			// Otherwise split the chunk of headers into batches and process them
  1371  			headers, hashes := task.headers, task.hashes
  1372  
  1373  			gotHeaders = true
  1374  			for len(headers) > 0 {
  1375  				// Terminate if something failed in between processing chunks
  1376  				select {
  1377  				case <-d.cancelCh:
  1378  					rollbackErr = errCanceled
  1379  					return errCanceled
  1380  				default:
  1381  				}
  1382  				// Select the next chunk of headers to import
  1383  				limit := maxHeadersProcess
  1384  				if limit > len(headers) {
  1385  					limit = len(headers)
  1386  				}
  1387  				chunkHeaders := headers[:limit]
  1388  				chunkHashes := hashes[:limit]
  1389  
  1390  				// In case of header only syncing, validate the chunk immediately
  1391  				if mode == SnapSync || mode == LightSync {
  1392  					// Although the received headers might be all valid, a legacy
  1393  					// PoW/PoA sync must not accept post-merge headers. Make sure
  1394  					// that any transition is rejected at this point.
  1395  					var (
  1396  						rejected []*types.Header
  1397  						td       *big.Int
  1398  					)
  1399  					if !beaconMode && ttd != nil {
  1400  						td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1)
  1401  						if td == nil {
  1402  							// This should never really happen, but handle gracefully for now
  1403  							log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash)
  1404  							return fmt.Errorf("%w: parent TD missing", errInvalidChain)
  1405  						}
  1406  						for i, header := range chunkHeaders {
  1407  							td = new(big.Int).Add(td, header.Difficulty)
  1408  							if td.Cmp(ttd) >= 0 {
  1409  								// Terminal total difficulty reached, allow the last header in
  1410  								if new(big.Int).Sub(td, header.Difficulty).Cmp(ttd) < 0 {
  1411  									chunkHeaders, rejected = chunkHeaders[:i+1], chunkHeaders[i+1:]
  1412  									if len(rejected) > 0 {
  1413  										// Make a nicer user log as to the first TD truly rejected
  1414  										td = new(big.Int).Add(td, rejected[0].Difficulty)
  1415  									}
  1416  								} else {
  1417  									chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:]
  1418  								}
  1419  								break
  1420  							}
  1421  						}
  1422  					}
  1423  					if len(chunkHeaders) > 0 {
  1424  						if n, err := d.lightchain.InsertHeaderChain(chunkHeaders); err != nil {
  1425  							rollbackErr = err
  1426  
  1427  							// If some headers were inserted, track them as uncertain
  1428  							if mode == SnapSync && n > 0 && rollback == 0 {
  1429  								rollback = chunkHeaders[0].Number.Uint64()
  1430  							}
  1431  							log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
  1432  							return fmt.Errorf("%w: %v", errInvalidChain, err)
  1433  						}
  1434  						// All verifications passed, track all headers within the allowed limits
  1435  						if mode == SnapSync {
  1436  							head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
  1437  							if head-rollback > uint64(fsHeaderSafetyNet) {
  1438  								rollback = head - uint64(fsHeaderSafetyNet)
  1439  							} else {
  1440  								rollback = 1
  1441  							}
  1442  						}
  1443  					}
  1444  					if len(rejected) != 0 {
  1445  						// Merge threshold reached, stop importing, but don't roll back
  1446  						rollback = 0
  1447  
  1448  						log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd)
  1449  						return ErrMergeTransition
  1450  					}
  1451  				}
  1452  				// Unless we're doing light chains, schedule the headers for associated content retrieval
  1453  				if mode == FullSync || mode == SnapSync {
  1454  					// If we've reached the allowed number of pending headers, stall a bit
  1455  					for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
  1456  						select {
  1457  						case <-d.cancelCh:
  1458  							rollbackErr = errCanceled
  1459  							return errCanceled
  1460  						case <-time.After(time.Second):
  1461  						}
  1462  					}
  1463  					// Otherwise insert the headers for content retrieval
  1464  					inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin)
  1465  					if len(inserts) != len(chunkHeaders) {
  1466  						rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunkHeaders))
  1467  						return fmt.Errorf("%w: stale headers", errBadPeer)
  1468  					}
  1469  				}
  1470  				headers = headers[limit:]
  1471  				hashes = hashes[limit:]
  1472  				origin += uint64(limit)
  1473  			}
  1474  			// Update the highest block number we know if a higher one is found.
  1475  			d.syncStatsLock.Lock()
  1476  			if d.syncStatsChainHeight < origin {
  1477  				d.syncStatsChainHeight = origin - 1
  1478  			}
  1479  			d.syncStatsLock.Unlock()
  1480  
  1481  			// Signal the content downloaders of the availability of new tasks
  1482  			for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
  1483  				select {
  1484  				case ch <- true:
  1485  				default:
  1486  				}
  1487  			}
  1488  		}
  1489  	}
  1490  }
  1491  
  1492  // processFullSyncContent takes fetch results from the queue and imports them into the chain.
  1493  func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error {
  1494  	for {
  1495  		results := d.queue.Results(true)
  1496  		if len(results) == 0 {
  1497  			return nil
  1498  		}
  1499  		if d.chainInsertHook != nil {
  1500  			d.chainInsertHook(results)
  1501  		}
  1502  		// Although the received blocks might be all valid, a legacy PoW/PoA sync
  1503  		// must not accept post-merge blocks. Make sure that pre-merge blocks are
  1504  		// imported, but post-merge ones are rejected.
  1505  		var (
  1506  			rejected []*fetchResult
  1507  			td       *big.Int
  1508  		)
  1509  		if !beaconMode && ttd != nil {
  1510  			td = d.blockchain.GetTd(results[0].Header.ParentHash, results[0].Header.Number.Uint64()-1)
  1511  			if td == nil {
  1512  				// This should never really happen, but handle gracefully for now
  1513  				log.Error("Failed to retrieve parent block TD", "number", results[0].Header.Number.Uint64()-1, "hash", results[0].Header.ParentHash)
  1514  				return fmt.Errorf("%w: parent TD missing", errInvalidChain)
  1515  			}
  1516  			for i, result := range results {
  1517  				td = new(big.Int).Add(td, result.Header.Difficulty)
  1518  				if td.Cmp(ttd) >= 0 {
  1519  					// Terminal total difficulty reached, allow the last block in
  1520  					if new(big.Int).Sub(td, result.Header.Difficulty).Cmp(ttd) < 0 {
  1521  						results, rejected = results[:i+1], results[i+1:]
  1522  						if len(rejected) > 0 {
  1523  							// Make a nicer user log as to the first TD truly rejected
  1524  							td = new(big.Int).Add(td, rejected[0].Header.Difficulty)
  1525  						}
  1526  					} else {
  1527  						results, rejected = results[:i], results[i:]
  1528  					}
  1529  					break
  1530  				}
  1531  			}
  1532  		}
  1533  		if err := d.importBlockResults(results); err != nil {
  1534  			return err
  1535  		}
  1536  		if len(rejected) != 0 {
  1537  			log.Info("Legacy sync reached merge threshold", "number", rejected[0].Header.Number, "hash", rejected[0].Header.Hash(), "td", td, "ttd", ttd)
  1538  			return ErrMergeTransition
  1539  		}
  1540  	}
  1541  }
  1542  
  1543  func (d *Downloader) importBlockResults(results []*fetchResult) error {
  1544  	// Check for any early termination requests
  1545  	if len(results) == 0 {
  1546  		return nil
  1547  	}
  1548  	select {
  1549  	case <-d.quitCh:
  1550  		return errCancelContentProcessing
  1551  	default:
  1552  	}
  1553  	// Retrieve a batch of results to import
  1554  	first, last := results[0].Header, results[len(results)-1].Header
  1555  	log.Debug("Inserting downloaded chain", "items", len(results),
  1556  		"firstnum", first.Number, "firsthash", first.Hash(),
  1557  		"lastnum", last.Number, "lasthash", last.Hash(),
  1558  	)
  1559  	blocks := make([]*types.Block, len(results))
  1560  	for i, result := range results {
  1561  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals)
  1562  	}
  1563  	// Downloaded blocks are always regarded as trusted after the
  1564  	// transition. Because the downloaded chain is guided by the
  1565  	// consensus-layer.
  1566  	if index, err := d.blockchain.InsertChain(blocks); err != nil {
  1567  		if index < len(results) {
  1568  			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1569  
  1570  			// In post-merge, notify the engine API of encountered bad chains
  1571  			if d.badBlock != nil {
  1572  				head, _, _, err := d.skeleton.Bounds()
  1573  				if err != nil {
  1574  					log.Error("Failed to retrieve beacon bounds for bad block reporting", "err", err)
  1575  				} else {
  1576  					d.badBlock(blocks[index].Header(), head)
  1577  				}
  1578  			}
  1579  		} else {
  1580  			// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
  1581  			// when it needs to preprocess blocks to import a sidechain.
  1582  			// The importer will put together a new list of blocks to import, which is a superset
  1583  			// of the blocks delivered from the downloader, and the indexing will be off.
  1584  			log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
  1585  		}
  1586  		return fmt.Errorf("%w: %v", errInvalidChain, err)
  1587  	}
  1588  	return nil
  1589  }
  1590  
  1591  // processSnapSyncContent takes fetch results from the queue and writes them to the
  1592  // database. It also controls the synchronisation of state nodes of the pivot block.
  1593  func (d *Downloader) processSnapSyncContent() error {
  1594  	// Start syncing state of the reported head block. This should get us most of
  1595  	// the state of the pivot block.
  1596  	d.pivotLock.RLock()
  1597  	sync := d.syncState(d.pivotHeader.Root)
  1598  	d.pivotLock.RUnlock()
  1599  
  1600  	defer func() {
  1601  		// The `sync` object is replaced every time the pivot moves. We need to
  1602  		// defer close the very last active one, hence the lazy evaluation vs.
  1603  		// calling defer sync.Cancel() !!!
  1604  		sync.Cancel()
  1605  	}()
  1606  
  1607  	closeOnErr := func(s *stateSync) {
  1608  		if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled {
  1609  			d.queue.Close() // wake up Results
  1610  		}
  1611  	}
  1612  	go closeOnErr(sync)
  1613  
  1614  	// To cater for moving pivot points, track the pivot block and subsequently
  1615  	// accumulated download results separately.
  1616  	//
  1617  	// These will be nil up to the point where we reach the pivot, and will only
  1618  	// be set temporarily if the synced blocks are piling up, but the pivot is
  1619  	// still busy downloading. In that case, we need to occasionally check for
  1620  	// pivot moves, so need to unblock the loop. These fields will accumulate
  1621  	// the results in the meantime.
  1622  	//
  1623  	// Note, there's no issue with memory piling up since after 64 blocks the
  1624  	// pivot will forcefully move so these accumulators will be dropped.
  1625  	var (
  1626  		oldPivot *fetchResult   // Locked in pivot block, might change eventually
  1627  		oldTail  []*fetchResult // Downloaded content after the pivot
  1628  	)
  1629  	for {
  1630  		// Wait for the next batch of downloaded data to be available. If we have
  1631  		// not yet reached the pivot point, wait blockingly as there's no need to
  1632  		// spin-loop check for pivot moves. If we reached the pivot but have not
  1633  		// yet processed it, check for results async, so we might notice pivot
  1634  		// moves while state syncing. If the pivot was passed fully, block again
  1635  		// as there's no more reason to check for pivot moves at all.
  1636  		results := d.queue.Results(oldPivot == nil)
  1637  		if len(results) == 0 {
  1638  			// If pivot sync is done, stop
  1639  			if d.committed.Load() {
  1640  				d.reportSnapSyncProgress(true)
  1641  				return sync.Cancel()
  1642  			}
  1643  			// If sync failed, stop
  1644  			select {
  1645  			case <-d.cancelCh:
  1646  				sync.Cancel()
  1647  				return errCanceled
  1648  			default:
  1649  			}
  1650  		}
  1651  		if d.chainInsertHook != nil {
  1652  			d.chainInsertHook(results)
  1653  		}
  1654  		d.reportSnapSyncProgress(false)
  1655  
  1656  		// If we haven't downloaded the pivot block yet, check pivot staleness
  1657  		// notifications from the header downloader
  1658  		d.pivotLock.RLock()
  1659  		pivot := d.pivotHeader
  1660  		d.pivotLock.RUnlock()
  1661  
  1662  		if oldPivot == nil { // no results piling up, we can move the pivot
  1663  			if !d.committed.Load() { // not yet passed the pivot, we can move the pivot
  1664  				if pivot.Root != sync.root { // pivot position changed, we can move the pivot
  1665  					sync.Cancel()
  1666  					sync = d.syncState(pivot.Root)
  1667  
  1668  					go closeOnErr(sync)
  1669  				}
  1670  			}
  1671  		} else { // results already piled up, consume before handling pivot move
  1672  			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
  1673  		}
  1674  		// Split around the pivot block and process the two sides via snap/full sync
  1675  		if !d.committed.Load() {
  1676  			latest := results[len(results)-1].Header
  1677  			// If the height is above the pivot block by 2 sets, it means the pivot
  1678  			// become stale in the network, and it was garbage collected, move to a
  1679  			// new pivot.
  1680  			//
  1681  			// Note, we have `reorgProtHeaderDelay` number of blocks withheld, Those
  1682  			// need to be taken into account, otherwise we're detecting the pivot move
  1683  			// late and will drop peers due to unavailable state!!!
  1684  			if height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) {
  1685  				log.Warn("Pivot became stale, moving", "old", pivot.Number.Uint64(), "new", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay))
  1686  				pivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted
  1687  
  1688  				d.pivotLock.Lock()
  1689  				d.pivotHeader = pivot
  1690  				d.pivotLock.Unlock()
  1691  
  1692  				// Write out the pivot into the database so a rollback beyond it will
  1693  				// reenable snap sync
  1694  				rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64())
  1695  			}
  1696  		}
  1697  		P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results)
  1698  		if err := d.commitSnapSyncData(beforeP, sync); err != nil {
  1699  			return err
  1700  		}
  1701  		if P != nil {
  1702  			// If new pivot block found, cancel old state retrieval and restart
  1703  			if oldPivot != P {
  1704  				sync.Cancel()
  1705  				sync = d.syncState(P.Header.Root)
  1706  
  1707  				go closeOnErr(sync)
  1708  				oldPivot = P
  1709  			}
  1710  			// Wait for completion, occasionally checking for pivot staleness
  1711  			select {
  1712  			case <-sync.done:
  1713  				if sync.err != nil {
  1714  					return sync.err
  1715  				}
  1716  				if err := d.commitPivotBlock(P); err != nil {
  1717  					return err
  1718  				}
  1719  				oldPivot = nil
  1720  
  1721  			case <-time.After(time.Second):
  1722  				oldTail = afterP
  1723  				continue
  1724  			}
  1725  		}
  1726  		// Fast sync done, pivot commit done, full import
  1727  		if err := d.importBlockResults(afterP); err != nil {
  1728  			return err
  1729  		}
  1730  	}
  1731  }
  1732  
  1733  func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
  1734  	if len(results) == 0 {
  1735  		return nil, nil, nil
  1736  	}
  1737  	if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {
  1738  		// the pivot is somewhere in the future
  1739  		return nil, results, nil
  1740  	}
  1741  	// This can also be optimized, but only happens very seldom
  1742  	for _, result := range results {
  1743  		num := result.Header.Number.Uint64()
  1744  		switch {
  1745  		case num < pivot:
  1746  			before = append(before, result)
  1747  		case num == pivot:
  1748  			p = result
  1749  		default:
  1750  			after = append(after, result)
  1751  		}
  1752  	}
  1753  	return p, before, after
  1754  }
  1755  
  1756  func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *stateSync) error {
  1757  	// Check for any early termination requests
  1758  	if len(results) == 0 {
  1759  		return nil
  1760  	}
  1761  	select {
  1762  	case <-d.quitCh:
  1763  		return errCancelContentProcessing
  1764  	case <-stateSync.done:
  1765  		if err := stateSync.Wait(); err != nil {
  1766  			return err
  1767  		}
  1768  	default:
  1769  	}
  1770  	// Retrieve the batch of results to import
  1771  	first, last := results[0].Header, results[len(results)-1].Header
  1772  	log.Debug("Inserting snap-sync blocks", "items", len(results),
  1773  		"firstnum", first.Number, "firsthash", first.Hash(),
  1774  		"lastnumn", last.Number, "lasthash", last.Hash(),
  1775  	)
  1776  	blocks := make([]*types.Block, len(results))
  1777  	receipts := make([]types.Receipts, len(results))
  1778  	for i, result := range results {
  1779  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals)
  1780  		receipts[i] = result.Receipts
  1781  	}
  1782  	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
  1783  		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1784  		return fmt.Errorf("%w: %v", errInvalidChain, err)
  1785  	}
  1786  	return nil
  1787  }
  1788  
  1789  func (d *Downloader) commitPivotBlock(result *fetchResult) error {
  1790  	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals)
  1791  	log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash())
  1792  
  1793  	// Commit the pivot block as the new head, will require full sync from here on
  1794  	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {
  1795  		return err
  1796  	}
  1797  	if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil {
  1798  		return err
  1799  	}
  1800  	d.committed.Store(true)
  1801  	return nil
  1802  }
  1803  
  1804  // DeliverSnapPacket is invoked from a peer's message handler when it transmits a
  1805  // data packet for the local node to consume.
  1806  func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error {
  1807  	switch packet := packet.(type) {
  1808  	case *snap.AccountRangePacket:
  1809  		hashes, accounts, err := packet.Unpack()
  1810  		if err != nil {
  1811  			return err
  1812  		}
  1813  		return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof)
  1814  
  1815  	case *snap.StorageRangesPacket:
  1816  		hashset, slotset := packet.Unpack()
  1817  		return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof)
  1818  
  1819  	case *snap.ByteCodesPacket:
  1820  		return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)
  1821  
  1822  	case *snap.TrieNodesPacket:
  1823  		return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)
  1824  
  1825  	default:
  1826  		return fmt.Errorf("unexpected snap packet type: %T", packet)
  1827  	}
  1828  }
  1829  
  1830  // readHeaderRange returns a list of headers, using the given last header as the base,
  1831  // and going backwards towards genesis. This method assumes that the caller already has
  1832  // placed a reasonable cap on count.
  1833  func (d *Downloader) readHeaderRange(last *types.Header, count int) []*types.Header {
  1834  	var (
  1835  		current = last
  1836  		headers []*types.Header
  1837  	)
  1838  	for {
  1839  		parent := d.lightchain.GetHeaderByHash(current.ParentHash)
  1840  		if parent == nil {
  1841  			break // The chain is not continuous, or the chain is exhausted
  1842  		}
  1843  		headers = append(headers, parent)
  1844  		if len(headers) >= count {
  1845  			break
  1846  		}
  1847  		current = parent
  1848  	}
  1849  	return headers
  1850  }
  1851  
  1852  // reportSnapSyncProgress calculates various status reports and provides it to the user.
  1853  func (d *Downloader) reportSnapSyncProgress(force bool) {
  1854  	// Initialize the sync start time if it's the first time we're reporting
  1855  	if d.syncStartTime.IsZero() {
  1856  		d.syncStartTime = time.Now().Add(-time.Millisecond) // -1ms offset to avoid division by zero
  1857  	}
  1858  	// Don't report all the events, just occasionally
  1859  	if !force && time.Since(d.syncLogTime) < 8*time.Second {
  1860  		return
  1861  	}
  1862  	// Don't report anything until we have a meaningful progress
  1863  	var (
  1864  		headerBytes, _  = d.stateDB.AncientSize(rawdb.ChainFreezerHeaderTable)
  1865  		bodyBytes, _    = d.stateDB.AncientSize(rawdb.ChainFreezerBodiesTable)
  1866  		receiptBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerReceiptTable)
  1867  	)
  1868  	syncedBytes := common.StorageSize(headerBytes + bodyBytes + receiptBytes)
  1869  	if syncedBytes == 0 {
  1870  		return
  1871  	}
  1872  	var (
  1873  		header = d.blockchain.CurrentHeader()
  1874  		block  = d.blockchain.CurrentSnapBlock()
  1875  	)
  1876  	syncedBlocks := block.Number.Uint64() - d.syncStartBlock
  1877  	if syncedBlocks == 0 {
  1878  		return
  1879  	}
  1880  	// Retrieve the current chain head and calculate the ETA
  1881  	latest, _, _, err := d.skeleton.Bounds()
  1882  	if err != nil {
  1883  		// We're going to cheat for non-merged networks, but that's fine
  1884  		latest = d.pivotHeader
  1885  	}
  1886  	if latest == nil {
  1887  		// This should really never happen, but add some defensive code for now.
  1888  		// TODO(karalabe): Remove it eventually if we don't see it blow.
  1889  		log.Error("Nil latest block in sync progress report")
  1890  		return
  1891  	}
  1892  	var (
  1893  		left = latest.Number.Uint64() - block.Number.Uint64()
  1894  		eta  = time.Since(d.syncStartTime) / time.Duration(syncedBlocks) * time.Duration(left)
  1895  
  1896  		progress = fmt.Sprintf("%.2f%%", float64(block.Number.Uint64())*100/float64(latest.Number.Uint64()))
  1897  		headers  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(header.Number.Uint64()), common.StorageSize(headerBytes).TerminalString())
  1898  		bodies   = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(bodyBytes).TerminalString())
  1899  		receipts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(receiptBytes).TerminalString())
  1900  	)
  1901  	log.Info("Syncing: chain download in progress", "synced", progress, "chain", syncedBytes, "headers", headers, "bodies", bodies, "receipts", receipts, "eta", common.PrettyDuration(eta))
  1902  	d.syncLogTime = time.Now()
  1903  }