github.com/ethereum/go-ethereum@v1.16.1/eth/downloader/downloader.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package downloader contains the manual full chain synchronisation.
    18  package downloader
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"sort"
    24  	"sync"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	"github.com/ethereum/go-ethereum"
    29  	"github.com/ethereum/go-ethereum/common"
    30  	"github.com/ethereum/go-ethereum/core/rawdb"
    31  	"github.com/ethereum/go-ethereum/core/state/snapshot"
    32  	"github.com/ethereum/go-ethereum/core/types"
    33  	"github.com/ethereum/go-ethereum/eth/ethconfig"
    34  	"github.com/ethereum/go-ethereum/eth/protocols/snap"
    35  	"github.com/ethereum/go-ethereum/ethdb"
    36  	"github.com/ethereum/go-ethereum/event"
    37  	"github.com/ethereum/go-ethereum/log"
    38  	"github.com/ethereum/go-ethereum/params"
    39  	"github.com/ethereum/go-ethereum/rlp"
    40  	"github.com/ethereum/go-ethereum/triedb"
    41  )
    42  
    43  var (
    44  	MaxBlockFetch   = 128 // Number of blocks to be fetched per retrieval request
    45  	MaxHeaderFetch  = 192 // Number of block headers to be fetched per retrieval request
    46  	MaxReceiptFetch = 256 // Number of transaction receipts to allow fetching per request
    47  
    48  	maxQueuedHeaders           = 32 * 1024                        // [eth/62] Maximum number of headers to queue for import (DOS protection)
    49  	maxHeadersProcess          = 2048                             // Number of header download results to import at once into the chain
    50  	maxResultsProcess          = 2048                             // Number of content download results to import at once into the chain
    51  	fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
    52  
    53  	reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
    54  
    55  	fsHeaderSafetyNet = 2048            // Number of headers to discard in case a chain violation is detected
    56  	fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download
    57  	fsMinFullBlocks   = 64              // Number of blocks to retrieve fully even in snap sync
    58  )
    59  
    60  var (
    61  	errBusy    = errors.New("busy")
    62  	errBadPeer = errors.New("action from bad peer ignored")
    63  
    64  	errTimeout                 = errors.New("timeout")
    65  	errInvalidChain            = errors.New("retrieved hash chain is invalid")
    66  	errInvalidBody             = errors.New("retrieved block body is invalid")
    67  	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
    68  	errCancelStateFetch        = errors.New("state data download canceled (requested)")
    69  	errCancelContentProcessing = errors.New("content processing canceled (requested)")
    70  	errCanceled                = errors.New("syncing canceled (requested)")
    71  	errNoPivotHeader           = errors.New("pivot header is not found")
    72  )
    73  
    74  // SyncMode defines the sync method of the downloader.
    75  // Deprecated: use ethconfig.SyncMode instead
    76  type SyncMode = ethconfig.SyncMode
    77  
    78  const (
    79  	// Deprecated: use ethconfig.FullSync
    80  	FullSync = ethconfig.FullSync
    81  	// Deprecated: use ethconfig.SnapSync
    82  	SnapSync = ethconfig.SnapSync
    83  )
    84  
    85  // peerDropFn is a callback type for dropping a peer detected as malicious.
    86  type peerDropFn func(id string)
    87  
    88  // badBlockFn is a callback for the async beacon sync to notify the caller that
     89  // the origin header it was requested to sync to produced a chain with a bad block.
    90  type badBlockFn func(invalid *types.Header, origin *types.Header)
    91  
    92  // headerTask is a set of downloaded headers to queue along with their precomputed
    93  // hashes to avoid constant rehashing.
    94  type headerTask struct {
    95  	headers []*types.Header
    96  	hashes  []common.Hash
    97  }
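
// Editor's sketch (not part of the original file): one way a headerTask could be
// assembled, pairing each header with its precomputed hash so downstream consumers
// never need to rehash. The helper name is an assumption.
func newHeaderTask(headers []*types.Header) *headerTask {
	hashes := make([]common.Hash, len(headers))
	for i, header := range headers {
		hashes[i] = header.Hash() // hash once, reuse everywhere downstream
	}
	return &headerTask{headers: headers, hashes: hashes}
}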
    98  
    99  type Downloader struct {
   100  	mode atomic.Uint32  // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
   101  	mux  *event.TypeMux // Event multiplexer to announce sync operation events
   102  
   103  	queue *queue   // Scheduler for selecting the hashes to download
   104  	peers *peerSet // Set of active peers from which download can proceed
   105  
   106  	stateDB ethdb.Database // Database to state sync into (and deduplicate via)
   107  
   108  	// Statistics
   109  	syncStatsChainOrigin uint64       // Origin block number where syncing started at
   110  	syncStatsChainHeight uint64       // Highest block number known when syncing started
   111  	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields
   112  
   113  	blockchain BlockChain
   114  
   115  	// Callbacks
   116  	dropPeer peerDropFn // Drops a peer for misbehaving
   117  	badBlock badBlockFn // Reports a block as rejected by the chain
   118  
   119  	// Status
   120  	synchronising atomic.Bool
   121  	notified      atomic.Bool
   122  	committed     atomic.Bool
   123  	ancientLimit  uint64 // The maximum block number which can be regarded as ancient data.
   124  
   125  	// The cutoff block number and hash before which chain segments (bodies
   126  	// and receipts) are skipped during synchronization. 0 means the entire
    127  	// chain is synchronized.
   128  	chainCutoffNumber uint64
   129  	chainCutoffHash   common.Hash
   130  
   131  	// Channels
   132  	headerProcCh chan *headerTask // Channel to feed the header processor new tasks
   133  
   134  	// Skeleton sync
   135  	skeleton *skeleton // Header skeleton to backfill the chain with (eth2 mode)
   136  
   137  	// State sync
   138  	pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
   139  	pivotLock   sync.RWMutex  // Lock protecting pivot header reads from updates
   140  
   141  	SnapSyncer     *snap.Syncer // TODO(karalabe): make private! hack for now
   142  	stateSyncStart chan *stateSync
   143  
   144  	// Cancellation and termination
   145  	cancelCh   chan struct{}  // Channel to cancel mid-flight syncs
   146  	cancelLock sync.RWMutex   // Lock to protect the cancel channel and peer in delivers
   147  	cancelWg   sync.WaitGroup // Make sure all fetcher goroutines have exited.
   148  
   149  	quitCh   chan struct{} // Quit channel to signal termination
   150  	quitLock sync.Mutex    // Lock to prevent double closes
   151  
   152  	// Testing hooks
   153  	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
   154  	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
   155  	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
   156  
   157  	// Progress reporting metrics
   158  	syncStartBlock uint64    // Head snap block when Geth was started
   159  	syncStartTime  time.Time // Time instance when chain sync started
   160  	syncLogTime    time.Time // Time instance when status was last reported
   161  }
   162  
   163  // BlockChain encapsulates functions required to sync a (full or snap) blockchain.
   164  type BlockChain interface {
   165  	// HasHeader verifies a header's presence in the local chain.
   166  	HasHeader(common.Hash, uint64) bool
   167  
   168  	// GetHeaderByHash retrieves a header from the local chain.
   169  	GetHeaderByHash(common.Hash) *types.Header
   170  
   171  	// CurrentHeader retrieves the head header from the local chain.
   172  	CurrentHeader() *types.Header
   173  
   174  	// SetHead rewinds the local chain to a new head.
   175  	SetHead(uint64) error
   176  
   177  	// HasBlock verifies a block's presence in the local chain.
   178  	HasBlock(common.Hash, uint64) bool
   179  
   180  	// HasFastBlock verifies a snap block's presence in the local chain.
   181  	HasFastBlock(common.Hash, uint64) bool
   182  
   183  	// GetBlockByHash retrieves a block from the local chain.
   184  	GetBlockByHash(common.Hash) *types.Block
   185  
   186  	// CurrentBlock retrieves the head block from the local chain.
   187  	CurrentBlock() *types.Header
   188  
   189  	// CurrentSnapBlock retrieves the head snap block from the local chain.
   190  	CurrentSnapBlock() *types.Header
   191  
    192  	// SnapSyncCommitHead directly commits the block with the given hash as the new chain head.
   193  	SnapSyncCommitHead(common.Hash) error
   194  
   195  	// InsertHeadersBeforeCutoff inserts a batch of headers before the configured
   196  	// chain cutoff into the ancient store.
   197  	InsertHeadersBeforeCutoff([]*types.Header) (int, error)
   198  
   199  	// InsertChain inserts a batch of blocks into the local chain.
   200  	InsertChain(types.Blocks) (int, error)
   201  
    202  	// InterruptInsert enables or disables the interruption of chain insertion.
   203  	InterruptInsert(on bool)
   204  
   205  	// InsertReceiptChain inserts a batch of blocks along with their receipts
   206  	// into the local chain. Blocks older than the specified `ancientLimit`
   207  	// are stored directly in the ancient store, while newer blocks are stored
   208  	// in the live key-value store.
   209  	InsertReceiptChain(types.Blocks, []rlp.RawValue, uint64) (int, error)
   210  
    211  	// Snapshots returns the blockchain snapshot tree so it can be paused during sync.
   212  	Snapshots() *snapshot.Tree
   213  
   214  	// TrieDB retrieves the low level trie database used for interacting
   215  	// with trie nodes.
   216  	TrieDB() *triedb.Database
   217  
   218  	// HistoryPruningCutoff returns the configured history pruning point.
    219  	// Block bodies and receipts before this point are skipped during synchronization.
   220  	HistoryPruningCutoff() (uint64, common.Hash)
   221  }
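
// Editor's sketch (not part of the original file): in tests, the BlockChain
// interface can be satisfied by embedding it in a stub and overriding only the
// methods the test exercises; anything else panics if accidentally hit. The stub
// below is a hypothetical illustration, not an API of this package.
type stubChain struct {
	BlockChain         // embed the interface to inherit all method signatures
	head *types.Header // header returned as the current chain head
}

func (s *stubChain) CurrentHeader() *types.Header       { return s.head }
func (s *stubChain) HasHeader(common.Hash, uint64) bool { return false }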
   222  
   223  // New creates a new downloader to fetch hashes and blocks from remote peers.
   224  func New(stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, dropPeer peerDropFn, success func()) *Downloader {
   225  	cutoffNumber, cutoffHash := chain.HistoryPruningCutoff()
   226  	dl := &Downloader{
   227  		stateDB:           stateDb,
   228  		mux:               mux,
   229  		queue:             newQueue(blockCacheMaxItems, blockCacheInitialItems),
   230  		peers:             newPeerSet(),
   231  		blockchain:        chain,
   232  		chainCutoffNumber: cutoffNumber,
   233  		chainCutoffHash:   cutoffHash,
   234  		dropPeer:          dropPeer,
   235  		headerProcCh:      make(chan *headerTask, 1),
   236  		quitCh:            make(chan struct{}),
   237  		SnapSyncer:        snap.NewSyncer(stateDb, chain.TrieDB().Scheme()),
   238  		stateSyncStart:    make(chan *stateSync),
   239  		syncStartBlock:    chain.CurrentSnapBlock().Number.Uint64(),
   240  	}
   241  	// Create the post-merge skeleton syncer and start the process
   242  	dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success))
   243  
   244  	go dl.stateFetcher()
   245  	return dl
   246  }
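
// Editor's sketch (not part of the original file) of the typical wiring of New:
// the database, chain, peer-drop callback and sync-success callback are supplied
// by the caller; the parameter and helper names here are assumptions.
func newForNode(db ethdb.Database, chain BlockChain, drop peerDropFn, onSyncDone func()) *Downloader {
	dl := New(db, new(event.TypeMux), chain, drop, onSyncDone)
	// The caller is expected to invoke dl.Terminate() when shutting down.
	return dl
}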
   247  
   248  // Progress retrieves the synchronisation boundaries, specifically the origin
    249  // block where synchronisation started (it may have failed or been suspended); the
    250  // block or header the sync is currently at; and the latest known block the sync targets.
   251  //
   252  // In addition, during the state download phase of snap synchronisation the number
   253  // of processed and the total number of known states are also returned. Otherwise
   254  // these are zero.
   255  func (d *Downloader) Progress() ethereum.SyncProgress {
   256  	// Lock the current stats and return the progress
   257  	d.syncStatsLock.RLock()
   258  	defer d.syncStatsLock.RUnlock()
   259  
   260  	current := uint64(0)
   261  	mode := d.getMode()
   262  	switch mode {
   263  	case ethconfig.FullSync:
   264  		current = d.blockchain.CurrentBlock().Number.Uint64()
   265  	case ethconfig.SnapSync:
   266  		current = d.blockchain.CurrentSnapBlock().Number.Uint64()
   267  	default:
   268  		log.Error("Unknown downloader mode", "mode", mode)
   269  	}
   270  	progress, pending := d.SnapSyncer.Progress()
   271  
   272  	return ethereum.SyncProgress{
   273  		StartingBlock:       d.syncStatsChainOrigin,
   274  		CurrentBlock:        current,
   275  		HighestBlock:        d.syncStatsChainHeight,
   276  		SyncedAccounts:      progress.AccountSynced,
   277  		SyncedAccountBytes:  uint64(progress.AccountBytes),
   278  		SyncedBytecodes:     progress.BytecodeSynced,
   279  		SyncedBytecodeBytes: uint64(progress.BytecodeBytes),
   280  		SyncedStorage:       progress.StorageSynced,
   281  		SyncedStorageBytes:  uint64(progress.StorageBytes),
   282  		HealedTrienodes:     progress.TrienodeHealSynced,
   283  		HealedTrienodeBytes: uint64(progress.TrienodeHealBytes),
   284  		HealedBytecodes:     progress.BytecodeHealSynced,
   285  		HealedBytecodeBytes: uint64(progress.BytecodeHealBytes),
   286  		HealingTrienodes:    pending.TrienodeHeal,
   287  		HealingBytecode:     pending.BytecodeHeal,
   288  	}
   289  }
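
// Editor's sketch (not part of the original file): a hypothetical monitoring loop
// that polls Progress and logs a rough completion percentage. The polling interval
// and log fields are illustrative assumptions.
func logSyncProgress(d *Downloader, interval time.Duration) {
	for range time.Tick(interval) {
		p := d.Progress()
		if p.HighestBlock <= p.StartingBlock || p.CurrentBlock < p.StartingBlock {
			continue // no sync target known yet, or nothing fetched so far
		}
		done := float64(p.CurrentBlock-p.StartingBlock) * 100 / float64(p.HighestBlock-p.StartingBlock)
		log.Info("Sync progress", "current", p.CurrentBlock, "highest", p.HighestBlock, "done", fmt.Sprintf("%.2f%%", done))
	}
}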
   290  
    291  // RegisterPeer injects a new download peer into the set of block sources to be
    292  // used for fetching hashes and blocks from.
   293  func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
   294  	var logger log.Logger
   295  	if len(id) < 16 {
   296  		// Tests use short IDs, don't choke on them
   297  		logger = log.New("peer", id)
   298  	} else {
   299  		logger = log.New("peer", id[:8])
   300  	}
   301  	logger.Trace("Registering sync peer")
   302  	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
   303  		logger.Error("Failed to register sync peer", "err", err)
   304  		return err
   305  	}
   306  	return nil
   307  }
   308  
    309  // UnregisterPeer removes a peer from the known list, preventing any action from
   310  // the specified peer. An effort is also made to return any pending fetches into
   311  // the queue.
   312  func (d *Downloader) UnregisterPeer(id string) error {
   313  	// Unregister the peer from the active peer set and revoke any fetch tasks
   314  	var logger log.Logger
   315  	if len(id) < 16 {
   316  		// Tests use short IDs, don't choke on them
   317  		logger = log.New("peer", id)
   318  	} else {
   319  		logger = log.New("peer", id[:8])
   320  	}
   321  	logger.Trace("Unregistering sync peer")
   322  	if err := d.peers.Unregister(id); err != nil {
   323  		logger.Error("Failed to unregister sync peer", "err", err)
   324  		return err
   325  	}
   326  	d.queue.Revoke(id)
   327  
   328  	return nil
   329  }
   330  
    331  // synchronise runs a single chain synchronisation cycle in the given mode, driven
    332  // by the beacon (skeleton) head. If another sync is already in progress, or any of
    333  // the preliminary checks fail, an error will be returned. This method is synchronous.
   334  func (d *Downloader) synchronise(mode SyncMode, beaconPing chan struct{}) error {
   335  	// The beacon header syncer is async. It will start this synchronization and
   336  	// will continue doing other tasks. However, if synchronization needs to be
   337  	// cancelled, the syncer needs to know if we reached the startup point (and
    338  // initialised the cancel channel) or not yet. Make sure that we'll signal even in
   339  	// case of a failure.
   340  	if beaconPing != nil {
   341  		defer func() {
   342  			select {
   343  			case <-beaconPing: // already notified
   344  			default:
   345  				close(beaconPing) // weird exit condition, notify that it's safe to cancel (the nothing)
   346  			}
   347  		}()
   348  	}
   349  	// Make sure only one goroutine is ever allowed past this point at once
   350  	if !d.synchronising.CompareAndSwap(false, true) {
   351  		return errBusy
   352  	}
   353  	defer d.synchronising.Store(false)
   354  
   355  	// Post a user notification of the sync (only once per session)
   356  	if d.notified.CompareAndSwap(false, true) {
   357  		log.Info("Block synchronisation started")
   358  	}
   359  	if mode == ethconfig.SnapSync {
   360  		// Snap sync will directly modify the persistent state, making the entire
   361  		// trie database unusable until the state is fully synced. To prevent any
    362  		// subsequent state reads, explicitly disable the trie database; the state
    363  		// syncer is responsible for repairing any missing state afterwards.
   364  		if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
   365  			if err := d.blockchain.TrieDB().Disable(); err != nil {
   366  				return err
   367  			}
   368  		}
   369  		// Snap sync uses the snapshot namespace to store potentially flaky data until
   370  		// sync completely heals and finishes. Pause snapshot maintenance in the mean-
   371  		// time to prevent access.
   372  		if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
   373  			snapshots.Disable()
   374  		}
   375  	}
   376  	// Reset the queue, peer set and wake channels to clean any internal leftover state
   377  	d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)
   378  	d.peers.Reset()
   379  
   380  	for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
   381  		select {
   382  		case <-ch:
   383  		default:
   384  		}
   385  	}
   386  	for empty := false; !empty; {
   387  		select {
   388  		case <-d.headerProcCh:
   389  		default:
   390  			empty = true
   391  		}
   392  	}
   393  	// Create cancel channel for aborting mid-flight and mark the master peer
   394  	d.cancelLock.Lock()
   395  	d.cancelCh = make(chan struct{})
   396  	d.cancelLock.Unlock()
   397  
   398  	defer d.Cancel() // No matter what, we can't leave the cancel channel open
   399  
   400  	// Atomically set the requested sync mode
   401  	d.mode.Store(uint32(mode))
   402  
   403  	if beaconPing != nil {
   404  		close(beaconPing)
   405  	}
   406  	return d.syncToHead()
   407  }
   408  
   409  func (d *Downloader) getMode() SyncMode {
   410  	return SyncMode(d.mode.Load())
   411  }
   412  
    413  // syncToHead starts a block synchronization towards the chain head announced
    414  // by the skeleton (beacon) syncer.
   415  func (d *Downloader) syncToHead() (err error) {
   416  	d.mux.Post(StartEvent{})
   417  	defer func() {
   418  		// reset on error
   419  		if err != nil {
   420  			d.mux.Post(FailedEvent{err})
   421  		} else {
   422  			latest := d.blockchain.CurrentHeader()
   423  			d.mux.Post(DoneEvent{latest})
   424  		}
   425  	}()
   426  	mode := d.getMode()
   427  
   428  	log.Debug("Backfilling with the network", "mode", mode)
   429  	defer func(start time.Time) {
   430  		log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
   431  	}(time.Now())
   432  
   433  	// Look up the sync boundaries: the common ancestor and the target block
   434  	var latest, pivot, final *types.Header
   435  	latest, _, final, err = d.skeleton.Bounds()
   436  	if err != nil {
   437  		return err
   438  	}
   439  	if latest.Number.Uint64() > uint64(fsMinFullBlocks) {
   440  		number := latest.Number.Uint64() - uint64(fsMinFullBlocks)
   441  
   442  		// Retrieve the pivot header from the skeleton chain segment but
    443  		// fall back to the local chain if it's not found in skeleton space.
   444  		if pivot = d.skeleton.Header(number); pivot == nil {
   445  			_, oldest, _, _ := d.skeleton.Bounds() // error is already checked
   446  			if number < oldest.Number.Uint64() {
   447  				count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks
   448  				headers := d.readHeaderRange(oldest, count)
   449  				if len(headers) == count {
   450  					pivot = headers[len(headers)-1]
   451  					log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number)
   452  				}
   453  			}
   454  		}
   455  		// Print an error log and return directly in case the pivot header
   456  		// is still not found. It means the skeleton chain is not linked
    457  		// correctly with the local chain.
   458  		if pivot == nil {
   459  			log.Error("Pivot header is not found", "number", number)
   460  			return errNoPivotHeader
   461  		}
   462  	}
   463  	// If no pivot block was returned, the head is below the min full block
   464  	// threshold (i.e. new chain). In that case we won't really snap sync
   465  	// anyway, but still need a valid pivot block to avoid some code hitting
   466  	// nil panics on access.
   467  	if mode == ethconfig.SnapSync && pivot == nil {
   468  		pivot = d.blockchain.CurrentBlock()
   469  	}
   470  	height := latest.Number.Uint64()
   471  
   472  	// In beacon mode, use the skeleton chain for the ancestor lookup
   473  	origin, err := d.findBeaconAncestor()
   474  	if err != nil {
   475  		return err
   476  	}
   477  	d.syncStatsLock.Lock()
   478  	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
   479  		d.syncStatsChainOrigin = origin
   480  	}
   481  	d.syncStatsChainHeight = height
   482  	d.syncStatsLock.Unlock()
   483  
   484  	// Ensure our origin point is below any snap sync pivot point
   485  	if mode == ethconfig.SnapSync {
   486  		if height <= uint64(fsMinFullBlocks) {
   487  			origin = 0
   488  		} else {
   489  			pivotNumber := pivot.Number.Uint64()
   490  			if pivotNumber <= origin {
   491  				origin = pivotNumber - 1
   492  			}
   493  			// Write out the pivot into the database so a rollback beyond it will
   494  			// reenable snap sync
   495  			rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber)
   496  		}
   497  	}
   498  	d.committed.Store(true)
   499  	if mode == ethconfig.SnapSync && pivot.Number.Uint64() != 0 {
   500  		d.committed.Store(false)
   501  	}
   502  	if mode == ethconfig.SnapSync {
   503  		// Set the ancient data limitation. If we are running snap sync, all block
   504  		// data older than ancientLimit will be written to the ancient store. More
   505  		// recent data will be written to the active database and will wait for the
   506  		// freezer to migrate.
   507  		//
    508  		// If the network is post-merge, use either the last announced finalized
    509  		// block as the ancient limit, or, if we haven't yet received one, the head
    510  		// minus the maximum fork ancestry. One quirky case is if we've already passed
    511  		// the finalized block, in which case skeleton.Bounds will return nil and
    512  		// we'll revert to head - 90K. That's fine, we're finishing sync anyway.
    513  		//
    514  		// For non-merged networks, if there is a checkpoint available, then calculate
    515  		// the ancientLimit through that. Otherwise calculate the ancient limit through
    516  		// the advertised height of the remote peer. This is mostly a fallback for
    517  		// legacy networks, but should eventually be dropped. TODO(karalabe).
   518  		//
   519  		// Beacon sync, use the latest finalized block as the ancient limit
   520  		// or a reasonable height if no finalized block is yet announced.
   521  		if final != nil {
   522  			d.ancientLimit = final.Number.Uint64()
   523  		} else if height > fullMaxForkAncestry+1 {
   524  			d.ancientLimit = height - fullMaxForkAncestry - 1
   525  		} else {
   526  			d.ancientLimit = 0
   527  		}
   528  		// Extend the ancient chain segment range if the ancient limit is even
   529  		// below the pre-configured chain cutoff.
   530  		if d.chainCutoffNumber != 0 && d.chainCutoffNumber > d.ancientLimit {
   531  			d.ancientLimit = d.chainCutoffNumber
   532  			log.Info("Extend the ancient range with configured cutoff", "cutoff", d.chainCutoffNumber)
   533  		}
    534  		frozen, _ := d.stateDB.Ancients() // Ignore the error here since a light client can also hit this path.
   535  
   536  		// If a part of blockchain data has already been written into active store,
   537  		// disable the ancient style insertion explicitly.
   538  		if origin >= frozen && frozen != 0 {
   539  			d.ancientLimit = 0
   540  			log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1)
   541  		} else if d.ancientLimit > 0 {
   542  			log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit)
   543  		}
   544  		// Rewind the ancient store and blockchain if reorg happens.
   545  		if origin+1 < frozen {
   546  			if err := d.blockchain.SetHead(origin); err != nil {
   547  				return err
   548  			}
   549  			log.Info("Truncated excess ancient chain segment", "oldhead", frozen-1, "newhead", origin)
   550  		}
   551  	}
   552  	// Skip ancient chain segments if Geth is running with a configured chain cutoff.
   553  	// These segments are not guaranteed to be available in the network.
   554  	chainOffset := origin + 1
   555  	if mode == ethconfig.SnapSync && d.chainCutoffNumber != 0 {
   556  		if chainOffset < d.chainCutoffNumber {
   557  			chainOffset = d.chainCutoffNumber
   558  			log.Info("Skip chain segment before cutoff", "origin", origin, "cutoff", d.chainCutoffNumber)
   559  		}
   560  	}
   561  	// Initiate the sync using a concurrent header and content retrieval algorithm
   562  	d.queue.Prepare(chainOffset, mode)
   563  
   564  	// In beacon mode, headers are served by the skeleton syncer
   565  	fetchers := []func() error{
   566  		func() error { return d.fetchHeaders(origin + 1) },   // Headers are always retrieved
   567  		func() error { return d.fetchBodies(chainOffset) },   // Bodies are retrieved during normal and snap sync
   568  		func() error { return d.fetchReceipts(chainOffset) }, // Receipts are retrieved during snap sync
   569  		func() error { return d.processHeaders(origin + 1) },
   570  	}
   571  	if mode == ethconfig.SnapSync {
   572  		d.pivotLock.Lock()
   573  		d.pivotHeader = pivot
   574  		d.pivotLock.Unlock()
   575  
   576  		fetchers = append(fetchers, func() error { return d.processSnapSyncContent() })
   577  	} else if mode == ethconfig.FullSync {
   578  		fetchers = append(fetchers, func() error { return d.processFullSyncContent() })
   579  	}
   580  	return d.spawnSync(fetchers)
   581  }
   582  
    583  // spawnSync runs all given fetcher functions to completion in separate
    584  // goroutines, returning the first error that appears.
   585  func (d *Downloader) spawnSync(fetchers []func() error) error {
   586  	errc := make(chan error, len(fetchers))
   587  	d.cancelWg.Add(len(fetchers))
   588  	for _, fn := range fetchers {
   589  		go func() { defer d.cancelWg.Done(); errc <- fn() }()
   590  	}
   591  	// Wait for the first error, then terminate the others.
   592  	var err error
   593  	for i := 0; i < len(fetchers); i++ {
   594  		if i == len(fetchers)-1 {
   595  			// Close the queue when all fetchers have exited.
   596  			// This will cause the block processor to end when
   597  			// it has processed the queue.
   598  			d.queue.Close()
   599  		}
   600  		if got := <-errc; got != nil {
   601  			err = got
   602  			if got != errCanceled {
    603  				break // received a meaningful error, bubble it up
   604  			}
   605  		}
   606  	}
   607  	d.queue.Close()
   608  	d.Cancel()
   609  	return err
   610  }
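
// Editor's sketch (not part of the original file): the fan-out/first-error pattern
// used by spawnSync in isolation. All tasks report into a buffered channel, a
// cancellation error is remembered, but only a "meaningful" error short-circuits
// the wait. The helper name is an assumption.
func firstMeaningfulError(tasks []func() error) error {
	errc := make(chan error, len(tasks)) // buffered so late finishers never block
	for _, task := range tasks {
		go func(fn func() error) { errc <- fn() }(task)
	}
	var err error
	for i := 0; i < len(tasks); i++ {
		if got := <-errc; got != nil {
			err = got
			if got != errCanceled {
				break // a meaningful error, stop waiting for the rest
			}
		}
	}
	return err
}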
   611  
    612  // cancel aborts all of the in-flight operations by closing the cancel channel. However, cancel does
   613  // not wait for the running download goroutines to finish. This method should be
   614  // used when cancelling the downloads from inside the downloader.
   615  func (d *Downloader) cancel() {
   616  	// Close the current cancel channel
   617  	d.cancelLock.Lock()
   618  	defer d.cancelLock.Unlock()
   619  
   620  	if d.cancelCh != nil {
   621  		select {
   622  		case <-d.cancelCh:
   623  			// Channel was already closed
   624  		default:
   625  			close(d.cancelCh)
   626  		}
   627  	}
   628  }
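
// Editor's sketch (not part of the original file): the close-once pattern used by
// cancel, extracted as a standalone helper. The non-blocking receive checks whether
// the channel is already closed; under the guarding lock this makes repeated
// cancellations safe. The helper name is an assumption.
func closeOnce(ch chan struct{}) {
	select {
	case <-ch: // already closed, nothing to do
	default:
		close(ch)
	}
}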
   629  
   630  // Cancel aborts all of the operations and waits for all download goroutines to
   631  // finish before returning.
   632  func (d *Downloader) Cancel() {
   633  	d.blockchain.InterruptInsert(true)
   634  	d.cancel()
   635  	d.cancelWg.Wait()
   636  	d.blockchain.InterruptInsert(false)
   637  }
   638  
   639  // Terminate interrupts the downloader, canceling all pending operations.
   640  // The downloader cannot be reused after calling Terminate.
   641  func (d *Downloader) Terminate() {
   642  	// Close the termination channel (make sure double close is allowed)
   643  	d.quitLock.Lock()
   644  	select {
   645  	case <-d.quitCh:
   646  	default:
   647  		close(d.quitCh)
   648  
   649  		// Terminate the internal beacon syncer
   650  		d.skeleton.Terminate()
   651  	}
   652  	d.quitLock.Unlock()
   653  
   654  	// Cancel any pending download requests
   655  	d.Cancel()
   656  }
   657  
   658  // fetchBodies iteratively downloads the scheduled block bodies, taking any
   659  // available peers, reserving a chunk of blocks for each, waiting for delivery
   660  // and also periodically checking for timeouts.
   661  func (d *Downloader) fetchBodies(from uint64) error {
   662  	log.Debug("Downloading block bodies", "origin", from)
   663  	err := d.concurrentFetch((*bodyQueue)(d))
   664  
   665  	log.Debug("Block body download terminated", "err", err)
   666  	return err
   667  }
   668  
   669  // fetchReceipts iteratively downloads the scheduled block receipts, taking any
   670  // available peers, reserving a chunk of receipts for each, waiting for delivery
   671  // and also periodically checking for timeouts.
   672  func (d *Downloader) fetchReceipts(from uint64) error {
   673  	log.Debug("Downloading receipts", "origin", from)
   674  	err := d.concurrentFetch((*receiptQueue)(d))
   675  
   676  	log.Debug("Receipt download terminated", "err", err)
   677  	return err
   678  }
   679  
   680  // processHeaders takes batches of retrieved headers from an input channel and
   681  // keeps processing and scheduling them into the header chain and downloader's
   682  // queue until the stream ends or a failure occurs.
   683  func (d *Downloader) processHeaders(origin uint64) error {
   684  	var (
   685  		mode  = d.getMode()
   686  		timer = time.NewTimer(time.Second)
   687  	)
   688  	defer timer.Stop()
   689  
   690  	for {
   691  		select {
   692  		case <-d.cancelCh:
   693  			return errCanceled
   694  
   695  		case task := <-d.headerProcCh:
   696  			// Terminate header processing if we synced up
   697  			if task == nil || len(task.headers) == 0 {
   698  				// Notify everyone that headers are fully processed
   699  				for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
   700  					select {
   701  					case ch <- false:
   702  					case <-d.cancelCh:
   703  					}
   704  				}
   705  				return nil
   706  			}
   707  			// Otherwise split the chunk of headers into batches and process them
   708  			headers, hashes, scheduled := task.headers, task.hashes, false
   709  
   710  			for len(headers) > 0 {
   711  				// Terminate if something failed in between processing chunks
   712  				select {
   713  				case <-d.cancelCh:
   714  					return errCanceled
   715  				default:
   716  				}
   717  				// Select the next chunk of headers to import
   718  				limit := maxHeadersProcess
   719  				if limit > len(headers) {
   720  					limit = len(headers)
   721  				}
   722  				chunkHeaders := headers[:limit]
   723  				chunkHashes := hashes[:limit]
   724  
   725  				// Split the headers around the chain cutoff
   726  				var cutoff int
   727  				if mode == ethconfig.SnapSync && d.chainCutoffNumber != 0 {
   728  					cutoff = sort.Search(len(chunkHeaders), func(i int) bool {
   729  						return chunkHeaders[i].Number.Uint64() >= d.chainCutoffNumber
   730  					})
   731  				}
   732  				// Insert the header chain into the ancient store (with block bodies and
   733  				// receipts set to nil) if they fall before the cutoff.
   734  				if mode == ethconfig.SnapSync && cutoff != 0 {
   735  					if n, err := d.blockchain.InsertHeadersBeforeCutoff(chunkHeaders[:cutoff]); err != nil {
   736  						log.Warn("Failed to insert ancient header chain", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
   737  						return fmt.Errorf("%w: %v", errInvalidChain, err)
   738  					}
   739  					log.Debug("Inserted headers before cutoff", "number", chunkHeaders[cutoff-1].Number, "hash", chunkHashes[cutoff-1])
   740  				}
   741  				// If we've reached the allowed number of pending headers, stall a bit
   742  				for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
   743  					timer.Reset(time.Second)
   744  					select {
   745  					case <-d.cancelCh:
   746  						return errCanceled
   747  					case <-timer.C:
   748  					}
   749  				}
   750  				// Otherwise, schedule the headers for content retrieval (block bodies and
   751  				// potentially receipts in snap sync).
   752  				//
   753  				// Skip the bodies/receipts retrieval scheduling before the cutoff in snap
   754  				// sync if chain pruning is configured.
   755  				if mode == ethconfig.SnapSync && cutoff != 0 {
   756  					chunkHeaders = chunkHeaders[cutoff:]
   757  					chunkHashes = chunkHashes[cutoff:]
   758  				}
   759  				if len(chunkHeaders) > 0 {
   760  					scheduled = true
   761  					if d.queue.Schedule(chunkHeaders, chunkHashes, origin+uint64(cutoff)) != len(chunkHeaders) {
   762  						return fmt.Errorf("%w: stale headers", errBadPeer)
   763  					}
   764  				}
   765  				headers = headers[limit:]
   766  				hashes = hashes[limit:]
   767  				origin += uint64(limit)
   768  			}
   769  			// Update the highest block number we know if a higher one is found.
   770  			d.syncStatsLock.Lock()
   771  			if d.syncStatsChainHeight < origin {
   772  				d.syncStatsChainHeight = origin - 1
   773  			}
   774  			d.syncStatsLock.Unlock()
   775  
   776  			// Signal the downloader of the availability of new tasks
   777  			if scheduled {
   778  				for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
   779  					select {
   780  					case ch <- true:
   781  					default:
   782  					}
   783  				}
   784  			}
   785  		}
   786  	}
   787  }
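
// Editor's sketch (not part of the original file): the cutoff split performed above,
// in isolation. sort.Search finds the first header at or beyond the cutoff number,
// so headers[:cutoff] can go straight into the ancient store while headers[cutoff:]
// are scheduled for body/receipt retrieval. Assumes the input is sorted by ascending
// block number, as delivered header batches are.
func splitAtCutoff(headers []*types.Header, cutoffNumber uint64) (ancient, scheduled []*types.Header) {
	cutoff := sort.Search(len(headers), func(i int) bool {
		return headers[i].Number.Uint64() >= cutoffNumber
	})
	return headers[:cutoff], headers[cutoff:]
}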
   788  
   789  // processFullSyncContent takes fetch results from the queue and imports them into the chain.
   790  func (d *Downloader) processFullSyncContent() error {
   791  	for {
   792  		results := d.queue.Results(true)
   793  		if len(results) == 0 {
   794  			return nil
   795  		}
   796  		if d.chainInsertHook != nil {
   797  			d.chainInsertHook(results)
   798  		}
   799  		if err := d.importBlockResults(results); err != nil {
   800  			return err
   801  		}
   802  	}
   803  }
   804  
   805  func (d *Downloader) importBlockResults(results []*fetchResult) error {
   806  	// Check for any early termination requests
   807  	if len(results) == 0 {
   808  		return nil
   809  	}
   810  	select {
   811  	case <-d.quitCh:
   812  		return errCancelContentProcessing
   813  	default:
   814  	}
   815  	// Retrieve a batch of results to import
   816  	first, last := results[0].Header, results[len(results)-1].Header
   817  	log.Debug("Inserting downloaded chain", "items", len(results),
   818  		"firstnum", first.Number, "firsthash", first.Hash(),
   819  		"lastnum", last.Number, "lasthash", last.Hash(),
   820  	)
   821  	blocks := make([]*types.Block, len(results))
   822  	for i, result := range results {
   823  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.body())
   824  	}
   825  	// Downloaded blocks are always regarded as trusted after the
    826  	// transition, because the downloaded chain is guided by the
    827  	// consensus layer.
   828  	if index, err := d.blockchain.InsertChain(blocks); err != nil {
   829  		if index < len(results) {
   830  			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
   831  
   832  			// In post-merge, notify the engine API of encountered bad chains
   833  			if d.badBlock != nil {
   834  				head, _, _, err := d.skeleton.Bounds()
   835  				if err != nil {
   836  					log.Error("Failed to retrieve beacon bounds for bad block reporting", "err", err)
   837  				} else {
   838  					d.badBlock(blocks[index].Header(), head)
   839  				}
   840  			}
   841  		} else {
   842  			// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
   843  			// when it needs to preprocess blocks to import a sidechain.
   844  			// The importer will put together a new list of blocks to import, which is a superset
   845  			// of the blocks delivered from the downloader, and the indexing will be off.
   846  			log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
   847  		}
   848  		return fmt.Errorf("%w: %v", errInvalidChain, err)
   849  	}
   850  	return nil
   851  }
   852  
   853  // processSnapSyncContent takes fetch results from the queue and writes them to the
   854  // database. It also controls the synchronisation of state nodes of the pivot block.
   855  func (d *Downloader) processSnapSyncContent() error {
   856  	// Start syncing state of the reported head block. This should get us most of
   857  	// the state of the pivot block.
   858  	d.pivotLock.RLock()
   859  	sync := d.syncState(d.pivotHeader.Root)
   860  	d.pivotLock.RUnlock()
   861  
   862  	defer func() {
   863  		// The `sync` object is replaced every time the pivot moves. We need to
   864  		// defer close the very last active one, hence the lazy evaluation vs.
   865  		// calling defer sync.Cancel() !!!
   866  		sync.Cancel()
   867  	}()
   868  
   869  	closeOnErr := func(s *stateSync) {
   870  		if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled {
   871  			d.queue.Close() // wake up Results
   872  		}
   873  	}
   874  	go closeOnErr(sync)
   875  
   876  	// To cater for moving pivot points, track the pivot block and subsequently
   877  	// accumulated download results separately.
   878  	//
   879  	// These will be nil up to the point where we reach the pivot, and will only
   880  	// be set temporarily if the synced blocks are piling up, but the pivot is
   881  	// still busy downloading. In that case, we need to occasionally check for
   882  	// pivot moves, so need to unblock the loop. These fields will accumulate
   883  	// the results in the meantime.
   884  	//
   885  	// Note, there's no issue with memory piling up since after 64 blocks the
   886  	// pivot will forcefully move so these accumulators will be dropped.
   887  	var (
   888  		oldPivot *fetchResult   // Locked in pivot block, might change eventually
   889  		oldTail  []*fetchResult // Downloaded content after the pivot
   890  		timer    = time.NewTimer(time.Second)
   891  	)
   892  	defer timer.Stop()
   893  
   894  	for {
   895  		// Wait for the next batch of downloaded data to be available. If we have
   896  		// not yet reached the pivot point, wait blockingly as there's no need to
   897  		// spin-loop check for pivot moves. If we reached the pivot but have not
   898  		// yet processed it, check for results async, so we might notice pivot
   899  		// moves while state syncing. If the pivot was passed fully, block again
   900  		// as there's no more reason to check for pivot moves at all.
   901  		results := d.queue.Results(oldPivot == nil)
   902  		if len(results) == 0 {
   903  			// If pivot sync is done, stop
   904  			if d.committed.Load() {
   905  				d.reportSnapSyncProgress(true)
   906  				return sync.Cancel()
   907  			}
   908  			// If sync failed, stop
   909  			select {
   910  			case <-d.cancelCh:
   911  				sync.Cancel()
   912  				return errCanceled
   913  			default:
   914  			}
   915  		}
   916  		if d.chainInsertHook != nil {
   917  			d.chainInsertHook(results)
   918  		}
   919  		d.reportSnapSyncProgress(false)
   920  
   921  		// If we haven't downloaded the pivot block yet, check pivot staleness
   922  		// notifications from the header downloader
   923  		d.pivotLock.RLock()
   924  		pivot := d.pivotHeader
   925  		d.pivotLock.RUnlock()
   926  
   927  		if oldPivot == nil { // no results piling up, we can move the pivot
   928  			if !d.committed.Load() { // not yet passed the pivot, we can move the pivot
   929  				if pivot.Root != sync.root { // pivot position changed, we can move the pivot
   930  					sync.Cancel()
   931  					sync = d.syncState(pivot.Root)
   932  
   933  					go closeOnErr(sync)
   934  				}
   935  			}
   936  		} else { // results already piled up, consume before handling pivot move
   937  			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
   938  		}
   939  		// Split around the pivot block and process the two sides via snap/full sync
   940  		if !d.committed.Load() {
   941  			latest := results[len(results)-1].Header
    942  			// If the height is above the pivot block by two full-block sets, it means
    943  			// the pivot became stale in the network and was garbage collected, so move
    944  			// to a new pivot.
    945  			//
    946  			// Note, we have `reorgProtHeaderDelay` blocks withheld; those
   947  			// need to be taken into account, otherwise we're detecting the pivot move
   948  			// late and will drop peers due to unavailable state!!!
   949  			if height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) {
   950  				log.Warn("Pivot became stale, moving", "old", pivot.Number.Uint64(), "new", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay))
   951  				pivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted
   952  
   953  				d.pivotLock.Lock()
   954  				d.pivotHeader = pivot
   955  				d.pivotLock.Unlock()
   956  
   957  				// Write out the pivot into the database so a rollback beyond it will
   958  				// reenable snap sync
   959  				rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64())
   960  			}
   961  		}
   962  		P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results)
   963  		if err := d.commitSnapSyncData(beforeP, sync); err != nil {
   964  			return err
   965  		}
   966  		if P != nil {
   967  			// If new pivot block found, cancel old state retrieval and restart
   968  			if oldPivot != P {
   969  				sync.Cancel()
   970  				sync = d.syncState(P.Header.Root)
   971  
   972  				go closeOnErr(sync)
   973  				oldPivot = P
   974  			}
   975  			// Wait for completion, occasionally checking for pivot staleness
   976  			timer.Reset(time.Second)
   977  			select {
   978  			case <-sync.done:
   979  				if sync.err != nil {
   980  					return sync.err
   981  				}
   982  				if err := d.commitPivotBlock(P); err != nil {
   983  					return err
   984  				}
   985  				oldPivot = nil
   986  
   987  			case <-timer.C:
   988  				oldTail = afterP
   989  				continue
   990  			}
   991  		}
    992  		// Snap sync done, pivot commit done, full import
   993  		if err := d.importBlockResults(afterP); err != nil {
   994  			return err
   995  		}
   996  	}
   997  }
   998  
   999  func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
  1000  	if len(results) == 0 {
  1001  		return nil, nil, nil
  1002  	}
  1003  	if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {
  1004  		// the pivot is somewhere in the future
  1005  		return nil, results, nil
  1006  	}
   1007  	// This can also be optimized, but it only happens very rarely
  1008  	for _, result := range results {
  1009  		num := result.Header.Number.Uint64()
  1010  		switch {
  1011  		case num < pivot:
  1012  			before = append(before, result)
  1013  		case num == pivot:
  1014  			p = result
  1015  		default:
  1016  			after = append(after, result)
  1017  		}
  1018  	}
  1019  	return p, before, after
  1020  }
  1021  
  1022  func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *stateSync) error {
  1023  	// Check for any early termination requests
  1024  	if len(results) == 0 {
  1025  		return nil
  1026  	}
  1027  	select {
  1028  	case <-d.quitCh:
  1029  		return errCancelContentProcessing
  1030  	case <-stateSync.done:
  1031  		if err := stateSync.Wait(); err != nil {
  1032  			return err
  1033  		}
  1034  	default:
  1035  	}
  1036  	// Retrieve the batch of results to import
  1037  	first, last := results[0].Header, results[len(results)-1].Header
  1038  	log.Debug("Inserting snap-sync blocks", "items", len(results),
  1039  		"firstnum", first.Number, "firsthash", first.Hash(),
  1040  		"lastnumn", last.Number, "lasthash", last.Hash(),
  1041  	)
  1042  	blocks := make([]*types.Block, len(results))
  1043  	receipts := make([]rlp.RawValue, len(results))
  1044  	for i, result := range results {
  1045  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.body())
  1046  		receipts[i] = result.Receipts
  1047  	}
  1048  	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
  1049  		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1050  		return fmt.Errorf("%w: %v", errInvalidChain, err)
  1051  	}
  1052  	return nil
  1053  }
  1054  
  1055  func (d *Downloader) commitPivotBlock(result *fetchResult) error {
  1056  	block := types.NewBlockWithHeader(result.Header).WithBody(result.body())
  1057  	log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash())
  1058  
  1059  	// Commit the pivot block as the new head, will require full sync from here on
  1060  	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []rlp.RawValue{result.Receipts}, d.ancientLimit); err != nil {
  1061  		return err
  1062  	}
  1063  	if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil {
  1064  		return err
  1065  	}
  1066  	d.committed.Store(true)
  1067  	return nil
  1068  }
  1069  
  1070  // DeliverSnapPacket is invoked from a peer's message handler when it transmits a
  1071  // data packet for the local node to consume.
  1072  func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error {
  1073  	switch packet := packet.(type) {
  1074  	case *snap.AccountRangePacket:
  1075  		hashes, accounts, err := packet.Unpack()
  1076  		if err != nil {
  1077  			return err
  1078  		}
  1079  		return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof)
  1080  
  1081  	case *snap.StorageRangesPacket:
  1082  		hashset, slotset := packet.Unpack()
  1083  		return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof)
  1084  
  1085  	case *snap.ByteCodesPacket:
  1086  		return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)
  1087  
  1088  	case *snap.TrieNodesPacket:
  1089  		return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)
  1090  
  1091  	default:
  1092  		return fmt.Errorf("unexpected snap packet type: %T", packet)
  1093  	}
  1094  }
  1095  
  1096  // readHeaderRange returns a list of headers, using the given last header as the base,
  1097  // and going backwards towards genesis. This method assumes that the caller already has
  1098  // placed a reasonable cap on count.
  1099  func (d *Downloader) readHeaderRange(last *types.Header, count int) []*types.Header {
  1100  	var (
  1101  		current = last
  1102  		headers []*types.Header
  1103  	)
  1104  	for {
  1105  		parent := d.blockchain.GetHeaderByHash(current.ParentHash)
  1106  		if parent == nil {
  1107  			break // The chain is not continuous, or the chain is exhausted
  1108  		}
  1109  		headers = append(headers, parent)
  1110  		if len(headers) >= count {
  1111  			break
  1112  		}
  1113  		current = parent
  1114  	}
  1115  	return headers
  1116  }
  1117  
   1118  // reportSnapSyncProgress calculates various status reports and provides them to the user.
  1119  func (d *Downloader) reportSnapSyncProgress(force bool) {
  1120  	// Initialize the sync start time if it's the first time we're reporting
  1121  	if d.syncStartTime.IsZero() {
  1122  		d.syncStartTime = time.Now().Add(-time.Millisecond) // -1ms offset to avoid division by zero
  1123  	}
  1124  	// Don't report all the events, just occasionally
  1125  	if !force && time.Since(d.syncLogTime) < 8*time.Second {
  1126  		return
  1127  	}
   1128  	// Don't report anything until we have meaningful progress
  1129  	var (
  1130  		headerBytes, _  = d.stateDB.AncientSize(rawdb.ChainFreezerHeaderTable)
  1131  		bodyBytes, _    = d.stateDB.AncientSize(rawdb.ChainFreezerBodiesTable)
  1132  		receiptBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerReceiptTable)
  1133  	)
  1134  	syncedBytes := common.StorageSize(headerBytes + bodyBytes + receiptBytes)
  1135  	if syncedBytes == 0 {
  1136  		return
  1137  	}
  1138  	var (
  1139  		header = d.blockchain.CurrentHeader()
  1140  		block  = d.blockchain.CurrentSnapBlock()
  1141  	)
  1142  	// Prevent reporting if nothing has been synchronized yet
  1143  	if block.Number.Uint64() <= d.syncStartBlock {
  1144  		return
  1145  	}
  1146  	// Prevent reporting noise if the actual chain synchronization (headers
  1147  	// and bodies) hasn't started yet. Inserting the ancient header chain is
  1148  	// fast enough and would introduce significant bias if included in the count.
  1149  	if d.chainCutoffNumber != 0 && block.Number.Uint64() <= d.chainCutoffNumber {
  1150  		return
  1151  	}
  1152  	fetchedBlocks := block.Number.Uint64() - d.syncStartBlock
  1153  	if d.chainCutoffNumber != 0 && d.chainCutoffNumber > d.syncStartBlock {
  1154  		fetchedBlocks = block.Number.Uint64() - d.chainCutoffNumber
  1155  	}
  1156  	// Retrieve the current chain head and calculate the ETA
  1157  	latest, _, _, err := d.skeleton.Bounds()
  1158  	if err != nil {
  1159  		// We're going to cheat for non-merged networks, but that's fine
  1160  		latest = d.pivotHeader
  1161  	}
  1162  	if latest == nil {
  1163  		// This should really never happen, but add some defensive code for now.
  1164  		// TODO(karalabe): Remove it eventually if we don't see it blow.
  1165  		log.Error("Nil latest block in sync progress report")
  1166  		return
  1167  	}
  1168  	var (
  1169  		left = latest.Number.Uint64() - block.Number.Uint64()
  1170  		eta  = time.Since(d.syncStartTime) / time.Duration(fetchedBlocks) * time.Duration(left)
  1171  
  1172  		progress = fmt.Sprintf("%.2f%%", float64(block.Number.Uint64())*100/float64(latest.Number.Uint64()))
  1173  		headers  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(header.Number.Uint64()), common.StorageSize(headerBytes).TerminalString())
  1174  		bodies   = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(bodyBytes).TerminalString())
  1175  		receipts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(receiptBytes).TerminalString())
  1176  	)
  1177  	log.Info("Syncing: chain download in progress", "synced", progress, "chain", syncedBytes, "headers", headers, "bodies", bodies, "receipts", receipts, "eta", common.PrettyDuration(eta))
  1178  	d.syncLogTime = time.Now()
  1179  }