github.com/ethereum/go-ethereum@v1.14.3/eth/downloader/downloader.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package downloader contains the manual full chain synchronisation.
    18  package downloader
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"math/big"
    24  	"sync"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	"github.com/ethereum/go-ethereum"
    29  	"github.com/ethereum/go-ethereum/common"
    30  	"github.com/ethereum/go-ethereum/core/rawdb"
    31  	"github.com/ethereum/go-ethereum/core/state/snapshot"
    32  	"github.com/ethereum/go-ethereum/core/types"
    33  	"github.com/ethereum/go-ethereum/eth/protocols/snap"
    34  	"github.com/ethereum/go-ethereum/ethdb"
    35  	"github.com/ethereum/go-ethereum/event"
    36  	"github.com/ethereum/go-ethereum/log"
    37  	"github.com/ethereum/go-ethereum/params"
    38  	"github.com/ethereum/go-ethereum/triedb"
    39  )
    40  
    41  var (
    42  	MaxBlockFetch   = 128 // Number of blocks to be fetched per retrieval request
    43  	MaxHeaderFetch  = 192 // Number of block headers to be fetched per retrieval request
    44  	MaxReceiptFetch = 256 // Number of transaction receipts to allow fetching per request
    45  
    46  	maxQueuedHeaders           = 32 * 1024                        // [eth/62] Maximum number of headers to queue for import (DOS protection)
    47  	maxHeadersProcess          = 2048                             // Number of header download results to import at once into the chain
    48  	maxResultsProcess          = 2048                             // Number of content download results to import at once into the chain
    49  	fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
    50  
    51  	reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
    52  
    53  	fsHeaderSafetyNet = 2048            // Number of headers to discard in case a chain violation is detected
    54  	fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download
    55  	fsMinFullBlocks   = 64              // Number of blocks to retrieve fully even in snap sync
    56  )
    57  
    58  var (
    59  	errBusy    = errors.New("busy")
    60  	errBadPeer = errors.New("action from bad peer ignored")
    61  
    62  	errTimeout                 = errors.New("timeout")
    63  	errInvalidChain            = errors.New("retrieved hash chain is invalid")
    64  	errInvalidBody             = errors.New("retrieved block body is invalid")
    65  	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
    66  	errCancelStateFetch        = errors.New("state data download canceled (requested)")
    67  	errCancelContentProcessing = errors.New("content processing canceled (requested)")
    68  	errCanceled                = errors.New("syncing canceled (requested)")
    69  	errNoPivotHeader           = errors.New("pivot header is not found")
    70  	ErrMergeTransition         = errors.New("legacy sync reached the merge")
    71  )
    72  
    73  // peerDropFn is a callback type for dropping a peer detected as malicious.
    74  type peerDropFn func(id string)
    75  
    76  // badBlockFn is a callback for the async beacon sync to notify the caller that
    77  // the origin header requested to sync to produced a chain with a bad block.
    78  type badBlockFn func(invalid *types.Header, origin *types.Header)
    79  
    80  // headerTask is a set of downloaded headers to queue along with their precomputed
    81  // hashes to avoid constant rehashing.
    82  type headerTask struct {
    83  	headers []*types.Header
    84  	hashes  []common.Hash
    85  }
    86  
    87  type Downloader struct {
    88  	mode atomic.Uint32  // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
    89  	mux  *event.TypeMux // Event multiplexer to announce sync operation events
    90  
    91  	queue *queue   // Scheduler for selecting the hashes to download
    92  	peers *peerSet // Set of active peers from which download can proceed
    93  
    94  	stateDB ethdb.Database // Database to state sync into (and deduplicate via)
    95  
    96  	// Statistics
    97  	syncStatsChainOrigin uint64       // Origin block number where syncing started
    98  	syncStatsChainHeight uint64       // Highest block number known when syncing started
    99  	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields
   100  
   101  	lightchain LightChain
   102  	blockchain BlockChain
   103  
   104  	// Callbacks
   105  	dropPeer peerDropFn // Drops a peer for misbehaving
   106  	badBlock badBlockFn // Reports a block as rejected by the chain
   107  
   108  	// Status
   109  	synchronising atomic.Bool
   110  	notified      atomic.Bool
   111  	committed     atomic.Bool
   112  	ancientLimit  uint64 // The maximum block number which can be regarded as ancient data.
   113  
   114  	// Channels
   115  	headerProcCh chan *headerTask // Channel to feed the header processor new tasks
   116  
   117  	// Skeleton sync
   118  	skeleton *skeleton // Header skeleton to backfill the chain with (eth2 mode)
   119  
   120  	// State sync
   121  	pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
   122  	pivotLock   sync.RWMutex  // Lock protecting pivot header reads from updates
   123  
   124  	SnapSyncer     *snap.Syncer // TODO(karalabe): make private! hack for now
   125  	stateSyncStart chan *stateSync
   126  
   127  	// Cancellation and termination
   128  	cancelCh   chan struct{}  // Channel to cancel mid-flight syncs
   129  	cancelLock sync.RWMutex   // Lock to protect the cancel channel and peer during deliveries
   130  	cancelWg   sync.WaitGroup // Make sure all fetcher goroutines have exited.
   131  
   132  	quitCh   chan struct{} // Quit channel to signal termination
   133  	quitLock sync.Mutex    // Lock to prevent double closes
   134  
   135  	// Testing hooks
   136  	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
   137  	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
   138  	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
   139  
   140  	// Progress reporting metrics
   141  	syncStartBlock uint64    // Head snap block when Geth was started
   142  	syncStartTime  time.Time // Time instance when chain sync started
   143  	syncLogTime    time.Time // Time instance when status was last reported
   144  }
   145  
   146  // LightChain encapsulates functions required to synchronise a light chain.
   147  type LightChain interface {
   148  	// HasHeader verifies a header's presence in the local chain.
   149  	HasHeader(common.Hash, uint64) bool
   150  
   151  	// GetHeaderByHash retrieves a header from the local chain.
   152  	GetHeaderByHash(common.Hash) *types.Header
   153  
   154  	// CurrentHeader retrieves the head header from the local chain.
   155  	CurrentHeader() *types.Header
   156  
   157  	// GetTd returns the total difficulty of a local block.
   158  	GetTd(common.Hash, uint64) *big.Int
   159  
   160  	// InsertHeaderChain inserts a batch of headers into the local chain.
   161  	InsertHeaderChain([]*types.Header) (int, error)
   162  
   163  	// SetHead rewinds the local chain to a new head.
   164  	SetHead(uint64) error
   165  }
   166  
   167  // BlockChain encapsulates functions required to sync a (full or snap) blockchain.
   168  type BlockChain interface {
   169  	LightChain
   170  
   171  	// HasBlock verifies a block's presence in the local chain.
   172  	HasBlock(common.Hash, uint64) bool
   173  
   174  	// HasFastBlock verifies a snap block's presence in the local chain.
   175  	HasFastBlock(common.Hash, uint64) bool
   176  
   177  	// GetBlockByHash retrieves a block from the local chain.
   178  	GetBlockByHash(common.Hash) *types.Block
   179  
   180  	// CurrentBlock retrieves the head block from the local chain.
   181  	CurrentBlock() *types.Header
   182  
   183  	// CurrentSnapBlock retrieves the head snap block from the local chain.
   184  	CurrentSnapBlock() *types.Header
   185  
   186  	// SnapSyncCommitHead directly commits the block with the given hash as the new snap-synced head.
   187  	SnapSyncCommitHead(common.Hash) error
   188  
   189  	// InsertChain inserts a batch of blocks into the local chain.
   190  	InsertChain(types.Blocks) (int, error)
   191  
   192  	// InsertReceiptChain inserts a batch of receipts into the local chain.
   193  	InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error)
   194  
   195  	// Snapshots returns the blockchain snapshot tree to pause it during sync.
   196  	Snapshots() *snapshot.Tree
   197  
   198  	// TrieDB retrieves the low level trie database used for interacting
   199  	// with trie nodes.
   200  	TrieDB() *triedb.Database
   201  }
   202  
   203  // New creates a new downloader to fetch hashes and blocks from remote peers.
   204  func New(stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader {
   205  	if lightchain == nil {
   206  		lightchain = chain
   207  	}
   208  	dl := &Downloader{
   209  		stateDB:        stateDb,
   210  		mux:            mux,
   211  		queue:          newQueue(blockCacheMaxItems, blockCacheInitialItems),
   212  		peers:          newPeerSet(),
   213  		blockchain:     chain,
   214  		lightchain:     lightchain,
   215  		dropPeer:       dropPeer,
   216  		headerProcCh:   make(chan *headerTask, 1),
   217  		quitCh:         make(chan struct{}),
   218  		SnapSyncer:     snap.NewSyncer(stateDb, chain.TrieDB().Scheme()),
   219  		stateSyncStart: make(chan *stateSync),
   220  		syncStartBlock: chain.CurrentSnapBlock().Number.Uint64(),
   221  	}
   222  	// Create the post-merge skeleton syncer and start the process
   223  	dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success))
   224  
   225  	go dl.stateFetcher()
   226  	return dl
   227  }
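        // A minimal wiring sketch (illustrative only, not part of this file): `chain` is
        // assumed to be any BlockChain implementation and `dropPeer` a func(id string)
        // supplied by the caller; the `success` callback fires once a beacon-triggered
        // sync cycle completes.
        //
        //	db := rawdb.NewMemoryDatabase()
        //	dl := New(db, new(event.TypeMux), chain, nil, dropPeer, func() {})
        //	defer dl.Terminate()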
   228  
   229  // Progress retrieves the synchronisation boundaries, specifically the origin
   230  // block where synchronisation started (it may have failed or been suspended); the
   231  // block or header the sync is currently at; and the latest known block the sync targets.
   232  //
   233  // In addition, during the state download phase of snap synchronisation the number
   234  // of processed and the total number of known states are also returned. Otherwise
   235  // these are zero.
   236  func (d *Downloader) Progress() ethereum.SyncProgress {
   237  	// Lock the current stats and return the progress
   238  	d.syncStatsLock.RLock()
   239  	defer d.syncStatsLock.RUnlock()
   240  
   241  	current := uint64(0)
   242  	mode := d.getMode()
   243  	switch {
   244  	case d.blockchain != nil && mode == FullSync:
   245  		current = d.blockchain.CurrentBlock().Number.Uint64()
   246  	case d.blockchain != nil && mode == SnapSync:
   247  		current = d.blockchain.CurrentSnapBlock().Number.Uint64()
   248  	case d.lightchain != nil:
   249  		current = d.lightchain.CurrentHeader().Number.Uint64()
   250  	default:
   251  		log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode)
   252  	}
   253  	progress, pending := d.SnapSyncer.Progress()
   254  
   255  	return ethereum.SyncProgress{
   256  		StartingBlock:       d.syncStatsChainOrigin,
   257  		CurrentBlock:        current,
   258  		HighestBlock:        d.syncStatsChainHeight,
   259  		SyncedAccounts:      progress.AccountSynced,
   260  		SyncedAccountBytes:  uint64(progress.AccountBytes),
   261  		SyncedBytecodes:     progress.BytecodeSynced,
   262  		SyncedBytecodeBytes: uint64(progress.BytecodeBytes),
   263  		SyncedStorage:       progress.StorageSynced,
   264  		SyncedStorageBytes:  uint64(progress.StorageBytes),
   265  		HealedTrienodes:     progress.TrienodeHealSynced,
   266  		HealedTrienodeBytes: uint64(progress.TrienodeHealBytes),
   267  		HealedBytecodes:     progress.BytecodeHealSynced,
   268  		HealedBytecodeBytes: uint64(progress.BytecodeHealBytes),
   269  		HealingTrienodes:    pending.TrienodeHeal,
   270  		HealingBytecode:     pending.BytecodeHeal,
   271  	}
   272  }
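        // Caller-side sketch (hypothetical, for illustration only): the reported
        // boundaries can be turned into a rough completion percentage, guarding against
        // the pre-sync case where HighestBlock is still zero.
        //
        //	p := dl.Progress()
        //	if p.HighestBlock > 0 {
        //		pct := float64(p.CurrentBlock) * 100 / float64(p.HighestBlock)
        //		fmt.Printf("synced %.2f%% (%d/%d)\n", pct, p.CurrentBlock, p.HighestBlock)
        //	}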
   273  
   274  // RegisterPeer injects a new download peer into the set of block sources to be
   275  // used for fetching hashes and blocks from.
   276  func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
   277  	var logger log.Logger
   278  	if len(id) < 16 {
   279  		// Tests use short IDs, don't choke on them
   280  		logger = log.New("peer", id)
   281  	} else {
   282  		logger = log.New("peer", id[:8])
   283  	}
   284  	logger.Trace("Registering sync peer")
   285  	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
   286  		logger.Error("Failed to register sync peer", "err", err)
   287  		return err
   288  	}
   289  	return nil
   290  }
   291  
   292  // UnregisterPeer removes a peer from the known list, preventing any action from
   293  // the specified peer. An effort is also made to return any pending fetches into
   294  // the queue.
   295  func (d *Downloader) UnregisterPeer(id string) error {
   296  	// Unregister the peer from the active peer set and revoke any fetch tasks
   297  	var logger log.Logger
   298  	if len(id) < 16 {
   299  		// Tests use short IDs, don't choke on them
   300  		logger = log.New("peer", id)
   301  	} else {
   302  		logger = log.New("peer", id[:8])
   303  	}
   304  	logger.Trace("Unregistering sync peer")
   305  	if err := d.peers.Unregister(id); err != nil {
   306  		logger.Error("Failed to unregister sync peer", "err", err)
   307  		return err
   308  	}
   309  	d.queue.Revoke(id)
   310  
   311  	return nil
   312  }
   313  
   314  // synchronise runs a single sync cycle against the network using the given mode.
   315  // Only one cycle may be active at a time; concurrent attempts return errBusy. If
   316  // any of the preparation steps fail, an error is returned. This method is synchronous.
   317  func (d *Downloader) synchronise(mode SyncMode, beaconPing chan struct{}) error {
   318  	// The beacon header syncer is async. It will start this synchronization and
   319  	// will continue doing other tasks. However, if synchronization needs to be
   320  	// cancelled, the syncer needs to know if we reached the startup point (and
   321  	// inited the cancel channel) or not yet. Make sure that we'll signal even in
   322  	// case of a failure.
   323  	if beaconPing != nil {
   324  		defer func() {
   325  			select {
   326  			case <-beaconPing: // already notified
   327  			default:
   328  				close(beaconPing) // unusual exit path, notify that it's safe to cancel (nothing was started)
   329  			}
   330  		}()
   331  	}
   332  	// Make sure only one goroutine is ever allowed past this point at once
   333  	if !d.synchronising.CompareAndSwap(false, true) {
   334  		return errBusy
   335  	}
   336  	defer d.synchronising.Store(false)
   337  
   338  	// Post a user notification of the sync (only once per session)
   339  	if d.notified.CompareAndSwap(false, true) {
   340  		log.Info("Block synchronisation started")
   341  	}
   342  	if mode == SnapSync {
   343  		// Snap sync will directly modify the persistent state, making the entire
   344  		// trie database unusable until the state is fully synced. To prevent any
   345  		// subsequent state reads, explicitly disable the trie database; the state
   346  		// syncer is responsible for detecting and filling in any missing state.
   347  		if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
   348  			if err := d.blockchain.TrieDB().Disable(); err != nil {
   349  				return err
   350  			}
   351  		}
   352  		// Snap sync uses the snapshot namespace to store potentially flaky data until
   353  		// sync completely heals and finishes. Pause snapshot maintenance in the mean-
   354  		// time to prevent access.
   355  		if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
   356  			snapshots.Disable()
   357  		}
   358  	}
   359  	// Reset the queue, peer set and wake channels to clean any internal leftover state
   360  	d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)
   361  	d.peers.Reset()
   362  
   363  	for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
   364  		select {
   365  		case <-ch:
   366  		default:
   367  		}
   368  	}
   369  	for empty := false; !empty; {
   370  		select {
   371  		case <-d.headerProcCh:
   372  		default:
   373  			empty = true
   374  		}
   375  	}
   376  	// Create cancel channel for aborting mid-flight and mark the master peer
   377  	d.cancelLock.Lock()
   378  	d.cancelCh = make(chan struct{})
   379  	d.cancelLock.Unlock()
   380  
   381  	defer d.Cancel() // No matter what, we can't leave the cancel channel open
   382  
   383  	// Atomically set the requested sync mode
   384  	d.mode.Store(uint32(mode))
   385  
   386  	if beaconPing != nil {
   387  		close(beaconPing)
   388  	}
   389  	return d.syncToHead()
   390  }
   391  
   392  func (d *Downloader) getMode() SyncMode {
   393  	return SyncMode(d.mode.Load())
   394  }
   395  
   396  // syncToHead starts a block synchronization based on the hash chain from
   397  // the specified head hash.
   398  func (d *Downloader) syncToHead() (err error) {
   399  	d.mux.Post(StartEvent{})
   400  	defer func() {
   401  		// reset on error
   402  		if err != nil {
   403  			d.mux.Post(FailedEvent{err})
   404  		} else {
   405  			latest := d.lightchain.CurrentHeader()
   406  			d.mux.Post(DoneEvent{latest})
   407  		}
   408  	}()
   409  	mode := d.getMode()
   410  
   411  	log.Debug("Backfilling with the network", "mode", mode)
   412  	defer func(start time.Time) {
   413  		log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
   414  	}(time.Now())
   415  
   416  	// Look up the sync boundaries: the common ancestor and the target block
   417  	var latest, pivot, final *types.Header
   418  	latest, _, final, err = d.skeleton.Bounds()
   419  	if err != nil {
   420  		return err
   421  	}
   422  	if latest.Number.Uint64() > uint64(fsMinFullBlocks) {
   423  		number := latest.Number.Uint64() - uint64(fsMinFullBlocks)
   424  
   425  		// Retrieve the pivot header from the skeleton chain segment but
   426  		// fallback to local chain if it's not found in skeleton space.
   427  		if pivot = d.skeleton.Header(number); pivot == nil {
   428  			_, oldest, _, _ := d.skeleton.Bounds() // error is already checked
   429  			if number < oldest.Number.Uint64() {
   430  				count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks
   431  				headers := d.readHeaderRange(oldest, count)
   432  				if len(headers) == count {
   433  					pivot = headers[len(headers)-1]
   434  					log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number)
   435  				}
   436  			}
   437  		}
   438  		// Print an error log and return directly in case the pivot header
   439  		// is still not found. It means the skeleton chain is not linked
   440  		// correctly with local chain.
   441  		if pivot == nil {
   442  			log.Error("Pivot header is not found", "number", number)
   443  			return errNoPivotHeader
   444  		}
   445  	}
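        	// Worked example of the pivot selection above (illustrative numbers, using the
        	// package default fsMinFullBlocks = 64): a skeleton head at block 1_000_000
        	// yields a pivot at 999_936; the 64 most recent blocks are always executed
        	// fully so the head region ends up with locally produced state and receipts.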
   446  	// If no pivot block was returned, the head is below the min full block
   447  	// threshold (i.e. new chain). In that case we won't really snap sync
   448  	// anyway, but still need a valid pivot block to avoid some code hitting
   449  	// nil panics on access.
   450  	if mode == SnapSync && pivot == nil {
   451  		pivot = d.blockchain.CurrentBlock()
   452  	}
   453  	height := latest.Number.Uint64()
   454  
   455  	// In beacon mode, use the skeleton chain for the ancestor lookup
   456  	origin, err := d.findBeaconAncestor()
   457  	if err != nil {
   458  		return err
   459  	}
   460  	d.syncStatsLock.Lock()
   461  	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
   462  		d.syncStatsChainOrigin = origin
   463  	}
   464  	d.syncStatsChainHeight = height
   465  	d.syncStatsLock.Unlock()
   466  
   467  	// Ensure our origin point is below any snap sync pivot point
   468  	if mode == SnapSync {
   469  		if height <= uint64(fsMinFullBlocks) {
   470  			origin = 0
   471  		} else {
   472  			pivotNumber := pivot.Number.Uint64()
   473  			if pivotNumber <= origin {
   474  				origin = pivotNumber - 1
   475  			}
   476  			// Write out the pivot into the database so a rollback beyond it will
   477  			// reenable snap sync
   478  			rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber)
   479  		}
   480  	}
   481  	d.committed.Store(true)
   482  	if mode == SnapSync && pivot.Number.Uint64() != 0 {
   483  		d.committed.Store(false)
   484  	}
   485  	if mode == SnapSync {
   486  		// Set the ancient data limitation. If we are running snap sync, all block
   487  		// data older than ancientLimit will be written to the ancient store. More
   488  		// recent data will be written to the active database and will wait for the
   489  		// freezer to migrate.
   490  		//
   491  		// If the network is post-merge, use either the last announced finalized
   492  		// block as the ancient limit, or if we haven't yet received one, the head
   493  		// minus a max fork ancestry limit. One quirky case is if we've already passed
   494  		// the finalized block, in which case skeleton.Bounds will return nil and
   495  		// we'll revert to head - 90K. That's fine, we're finishing sync anyway.
   496  		//
   497  		// For non-merged networks, if there is a checkpoint available, then calculate
   498  		// the ancientLimit through that. Otherwise calculate the ancient limit through
   499  		// the advertised height of the remote peer. This is mostly a fallback for
   500  		// legacy networks, but should eventually be dropped. TODO(karalabe).
   501  		//
   502  		// For beacon sync, use the latest finalized block as the ancient limit,
   503  		// or a reasonable height if no finalized block is yet announced.
   504  		if final != nil {
   505  			d.ancientLimit = final.Number.Uint64()
   506  		} else if height > fullMaxForkAncestry+1 {
   507  			d.ancientLimit = height - fullMaxForkAncestry - 1
   508  		} else {
   509  			d.ancientLimit = 0
   510  		}
   511  		frozen, _ := d.stateDB.Ancients() // Ignore the error here since the light client can also hit this path.
   512  
   513  		// If a part of blockchain data has already been written into active store,
   514  		// disable the ancient style insertion explicitly.
   515  		if origin >= frozen && frozen != 0 {
   516  			d.ancientLimit = 0
   517  			log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1)
   518  		} else if d.ancientLimit > 0 {
   519  			log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit)
   520  		}
   521  		// Rewind the ancient store and blockchain if reorg happens.
   522  		if origin+1 < frozen {
   523  			if err := d.lightchain.SetHead(origin); err != nil {
   524  				return err
   525  			}
   526  			log.Info("Truncated excess ancient chain segment", "oldhead", frozen-1, "newhead", origin)
   527  		}
   528  	}
   529  	// Initiate the sync using a concurrent header and content retrieval algorithm
   530  	d.queue.Prepare(origin+1, mode)
   531  
   532  	// In beacon mode, headers are served by the skeleton syncer
   533  	fetchers := []func() error{
   534  		func() error { return d.fetchHeaders(origin + 1) },  // Headers are always retrieved
   535  		func() error { return d.fetchBodies(origin + 1) },   // Bodies are retrieved during normal and snap sync
   536  		func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during snap sync
   537  		func() error { return d.processHeaders(origin + 1) },
   538  	}
   539  	if mode == SnapSync {
   540  		d.pivotLock.Lock()
   541  		d.pivotHeader = pivot
   542  		d.pivotLock.Unlock()
   543  
   544  		fetchers = append(fetchers, func() error { return d.processSnapSyncContent() })
   545  	} else if mode == FullSync {
   546  		fetchers = append(fetchers, func() error { return d.processFullSyncContent() })
   547  	}
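        	// Pipeline note: the fetchers above run concurrently once spawned — headers
        	// flow from the skeleton via headerProcCh into processHeaders, which schedules
        	// body and receipt retrievals in the queue for fetchBodies/fetchReceipts, and
        	// the mode-specific processor imports the assembled results.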
   548  	return d.spawnSync(fetchers)
   549  }
   550  
   551  // spawnSync runs all the given fetcher functions to completion in separate
   552  // goroutines, returning the first error that appears.
   553  func (d *Downloader) spawnSync(fetchers []func() error) error {
   554  	errc := make(chan error, len(fetchers))
   555  	d.cancelWg.Add(len(fetchers))
   556  	for _, fn := range fetchers {
   557  		fn := fn
   558  		go func() { defer d.cancelWg.Done(); errc <- fn() }()
   559  	}
   560  	// Wait for the first error, then terminate the others.
   561  	var err error
   562  	for i := 0; i < len(fetchers); i++ {
   563  		if i == len(fetchers)-1 {
   564  			// Close the queue when all fetchers have exited.
   565  			// This will cause the block processor to end when
   566  			// it has processed the queue.
   567  			d.queue.Close()
   568  		}
   569  		if got := <-errc; got != nil {
   570  			err = got
   571  			if got != errCanceled {
   572  				break // received a meaningful error, bubble it up
   573  			}
   574  		}
   575  	}
   576  	d.queue.Close()
   577  	d.Cancel()
   578  	return err
   579  }
   580  
   581  // cancel aborts all of the operations and resets the queue. However, cancel does
   582  // not wait for the running download goroutines to finish. This method should be
   583  // used when cancelling the downloads from inside the downloader.
   584  func (d *Downloader) cancel() {
   585  	// Close the current cancel channel
   586  	d.cancelLock.Lock()
   587  	defer d.cancelLock.Unlock()
   588  
   589  	if d.cancelCh != nil {
   590  		select {
   591  		case <-d.cancelCh:
   592  			// Channel was already closed
   593  		default:
   594  			close(d.cancelCh)
   595  		}
   596  	}
   597  }
   598  
   599  // Cancel aborts all of the operations and waits for all download goroutines to
   600  // finish before returning.
   601  func (d *Downloader) Cancel() {
   602  	d.cancel()
   603  	d.cancelWg.Wait()
   604  }
   605  
   606  // Terminate interrupts the downloader, canceling all pending operations.
   607  // The downloader cannot be reused after calling Terminate.
   608  func (d *Downloader) Terminate() {
   609  	// Close the termination channel (make sure double close is allowed)
   610  	d.quitLock.Lock()
   611  	select {
   612  	case <-d.quitCh:
   613  	default:
   614  		close(d.quitCh)
   615  
   616  		// Terminate the internal beacon syncer
   617  		d.skeleton.Terminate()
   618  	}
   619  	d.quitLock.Unlock()
   620  
   621  	// Cancel any pending download requests
   622  	d.Cancel()
   623  }
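        // Lifecycle sketch (hypothetical caller code): Cancel only aborts the current
        // sync cycle and leaves the downloader reusable, whereas Terminate is final and
        // also tears down the internal skeleton syncer.
        //
        //	dl.Cancel()    // abort the in-flight cycle; a new sync may be started later
        //	dl.Terminate() // permanent shutdown, e.g. when the node is closing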
   624  
   625  // fetchBodies iteratively downloads the scheduled block bodies, taking any
   626  // available peers, reserving a chunk of blocks for each, waiting for delivery
   627  // and also periodically checking for timeouts.
   628  func (d *Downloader) fetchBodies(from uint64) error {
   629  	log.Debug("Downloading block bodies", "origin", from)
   630  	err := d.concurrentFetch((*bodyQueue)(d))
   631  
   632  	log.Debug("Block body download terminated", "err", err)
   633  	return err
   634  }
   635  
   636  // fetchReceipts iteratively downloads the scheduled block receipts, taking any
   637  // available peers, reserving a chunk of receipts for each, waiting for delivery
   638  // and also periodically checking for timeouts.
   639  func (d *Downloader) fetchReceipts(from uint64) error {
   640  	log.Debug("Downloading receipts", "origin", from)
   641  	err := d.concurrentFetch((*receiptQueue)(d))
   642  
   643  	log.Debug("Receipt download terminated", "err", err)
   644  	return err
   645  }
   646  
   647  // processHeaders takes batches of retrieved headers from an input channel and
   648  // keeps processing and scheduling them into the header chain and downloader's
   649  // queue until the stream ends or a failure occurs.
   650  func (d *Downloader) processHeaders(origin uint64) error {
   651  	var (
   652  		mode  = d.getMode()
   653  		timer = time.NewTimer(time.Second)
   654  	)
   655  	defer timer.Stop()
   656  
   657  	for {
   658  		select {
   659  		case <-d.cancelCh:
   660  			return errCanceled
   661  
   662  		case task := <-d.headerProcCh:
   663  			// Terminate header processing if we synced up
   664  			if task == nil || len(task.headers) == 0 {
   665  				// Notify everyone that headers are fully processed
   666  				for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
   667  					select {
   668  					case ch <- false:
   669  					case <-d.cancelCh:
   670  					}
   671  				}
   672  				return nil
   673  			}
   674  			// Otherwise split the chunk of headers into batches and process them
   675  			headers, hashes := task.headers, task.hashes
   676  
   677  			for len(headers) > 0 {
   678  				// Terminate if something failed in between processing chunks
   679  				select {
   680  				case <-d.cancelCh:
   681  					return errCanceled
   682  				default:
   683  				}
   684  				// Select the next chunk of headers to import
   685  				limit := maxHeadersProcess
   686  				if limit > len(headers) {
   687  					limit = len(headers)
   688  				}
   689  				chunkHeaders := headers[:limit]
   690  				chunkHashes := hashes[:limit]
   691  
   692  				// In case of header only syncing, validate the chunk immediately
   693  				if mode == SnapSync || mode == LightSync {
   694  					// Although the received headers might be all valid, a legacy
   695  					// PoW/PoA sync must not accept post-merge headers. Make sure
   696  					// that any transition is rejected at this point.
   697  					if len(chunkHeaders) > 0 {
   698  						if n, err := d.lightchain.InsertHeaderChain(chunkHeaders); err != nil {
   699  							log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
   700  							return fmt.Errorf("%w: %v", errInvalidChain, err)
   701  						}
   702  					}
   703  				}
   704  				// Unless we're doing light chains, schedule the headers for associated content retrieval
   705  				if mode == FullSync || mode == SnapSync {
   706  					// If we've reached the allowed number of pending headers, stall a bit
   707  					for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
   708  						timer.Reset(time.Second)
   709  						select {
   710  						case <-d.cancelCh:
   711  							return errCanceled
   712  						case <-timer.C:
   713  						}
   714  					}
   715  					// Otherwise insert the headers for content retrieval
   716  					inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin)
   717  					if len(inserts) != len(chunkHeaders) {
   718  						return fmt.Errorf("%w: stale headers", errBadPeer)
   719  					}
   720  				}
   721  				headers = headers[limit:]
   722  				hashes = hashes[limit:]
   723  				origin += uint64(limit)
   724  			}
   725  			// Update the highest block number we know if a higher one is found.
   726  			d.syncStatsLock.Lock()
   727  			if d.syncStatsChainHeight < origin {
   728  				d.syncStatsChainHeight = origin - 1
   729  			}
   730  			d.syncStatsLock.Unlock()
   731  
   732  			// Signal the content downloaders of the availability of new tasks
   733  			for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
   734  				select {
   735  				case ch <- true:
   736  				default:
   737  				}
   738  			}
   739  		}
   740  	}
   741  }
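        // Batching note (illustrative numbers): with maxHeadersProcess = 2048, a task of
        // 5000 delivered headers is imported in chunks of 2048, 2048 and 904, and body or
        // receipt scheduling stalls whenever maxQueuedHeaders (32768) or more items are
        // already pending in the queue.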
   742  
   743  // processFullSyncContent takes fetch results from the queue and imports them into the chain.
   744  func (d *Downloader) processFullSyncContent() error {
   745  	for {
   746  		results := d.queue.Results(true)
   747  		if len(results) == 0 {
   748  			return nil
   749  		}
   750  		if d.chainInsertHook != nil {
   751  			d.chainInsertHook(results)
   752  		}
   753  		if err := d.importBlockResults(results); err != nil {
   754  			return err
   755  		}
   756  	}
   757  }
   758  
   759  func (d *Downloader) importBlockResults(results []*fetchResult) error {
   760  	// Check for any early termination requests
   761  	if len(results) == 0 {
   762  		return nil
   763  	}
   764  	select {
   765  	case <-d.quitCh:
   766  		return errCancelContentProcessing
   767  	default:
   768  	}
   769  	// Retrieve a batch of results to import
   770  	first, last := results[0].Header, results[len(results)-1].Header
   771  	log.Debug("Inserting downloaded chain", "items", len(results),
   772  		"firstnum", first.Number, "firsthash", first.Hash(),
   773  		"lastnum", last.Number, "lasthash", last.Hash(),
   774  	)
   775  	blocks := make([]*types.Block, len(results))
   776  	for i, result := range results {
   777  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.body())
   778  	}
   779  	// Downloaded blocks are always regarded as trusted after the
   780  	// transition, because the downloaded chain is guided by the
   781  	// consensus layer.
   782  	if index, err := d.blockchain.InsertChain(blocks); err != nil {
   783  		if index < len(results) {
   784  			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
   785  
   786  			// In post-merge, notify the engine API of encountered bad chains
   787  			if d.badBlock != nil {
   788  				head, _, _, err := d.skeleton.Bounds()
   789  				if err != nil {
   790  					log.Error("Failed to retrieve beacon bounds for bad block reporting", "err", err)
   791  				} else {
   792  					d.badBlock(blocks[index].Header(), head)
   793  				}
   794  			}
   795  		} else {
   796  			// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
   797  			// when it needs to preprocess blocks to import a sidechain.
   798  			// The importer will put together a new list of blocks to import, which is a superset
   799  			// of the blocks delivered from the downloader, and the indexing will be off.
   800  			log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
   801  		}
   802  		return fmt.Errorf("%w: %v", errInvalidChain, err)
   803  	}
   804  	return nil
   805  }
   806  
   807  // processSnapSyncContent takes fetch results from the queue and writes them to the
   808  // database. It also controls the synchronisation of state nodes of the pivot block.
   809  func (d *Downloader) processSnapSyncContent() error {
   810  	// Start syncing state of the reported head block. This should get us most of
   811  	// the state of the pivot block.
   812  	d.pivotLock.RLock()
   813  	sync := d.syncState(d.pivotHeader.Root)
   814  	d.pivotLock.RUnlock()
   815  
   816  	defer func() {
   817  		// The `sync` object is replaced every time the pivot moves. We need to
   818  		// cancel the very last active one on exit, hence the lazy evaluation
   819  		// inside the closure instead of a direct `defer sync.Cancel()`.
   820  		sync.Cancel()
   821  	}()
   822  
   823  	closeOnErr := func(s *stateSync) {
   824  		if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled {
   825  			d.queue.Close() // wake up Results
   826  		}
   827  	}
   828  	go closeOnErr(sync)
   829  
   830  	// To cater for moving pivot points, track the pivot block and subsequently
   831  	// accumulated download results separately.
   832  	//
   833  	// These will be nil up to the point where we reach the pivot, and will only
   834  	// be set temporarily if the synced blocks are piling up, but the pivot is
   835  	// still busy downloading. In that case, we need to occasionally check for
   836  	// pivot moves, so we need to unblock the loop. These fields will accumulate
   837  	// the results in the meantime.
   838  	//
   839  	// Note, there's no issue with memory piling up since after 64 blocks the
   840  	// pivot will forcefully move so these accumulators will be dropped.
   841  	var (
   842  		oldPivot *fetchResult   // Locked in pivot block, might change eventually
   843  		oldTail  []*fetchResult // Downloaded content after the pivot
   844  		timer    = time.NewTimer(time.Second)
   845  	)
   846  	defer timer.Stop()
   847  
   848  	for {
   849  		// Wait for the next batch of downloaded data to be available. If we have
   850  		// not yet reached the pivot point, wait blockingly as there's no need to
   851  		// spin-loop check for pivot moves. If we reached the pivot but have not
   852  		// yet processed it, check for results async, so we might notice pivot
   853  		// moves while state syncing. If the pivot was passed fully, block again
   854  		// as there's no more reason to check for pivot moves at all.
   855  		results := d.queue.Results(oldPivot == nil)
   856  		if len(results) == 0 {
   857  			// If pivot sync is done, stop
   858  			if d.committed.Load() {
   859  				d.reportSnapSyncProgress(true)
   860  				return sync.Cancel()
   861  			}
   862  			// If sync failed, stop
   863  			select {
   864  			case <-d.cancelCh:
   865  				sync.Cancel()
   866  				return errCanceled
   867  			default:
   868  			}
   869  		}
   870  		if d.chainInsertHook != nil {
   871  			d.chainInsertHook(results)
   872  		}
   873  		d.reportSnapSyncProgress(false)
   874  
   875  		// If we haven't downloaded the pivot block yet, check pivot staleness
   876  		// notifications from the header downloader
   877  		d.pivotLock.RLock()
   878  		pivot := d.pivotHeader
   879  		d.pivotLock.RUnlock()
   880  
   881  		if oldPivot == nil { // no results piling up, we can move the pivot
   882  			if !d.committed.Load() { // not yet passed the pivot, we can move the pivot
   883  				if pivot.Root != sync.root { // pivot position changed, we can move the pivot
   884  					sync.Cancel()
   885  					sync = d.syncState(pivot.Root)
   886  
   887  					go closeOnErr(sync)
   888  				}
   889  			}
   890  		} else { // results already piled up, consume before handling pivot move
   891  			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
   892  		}
   893  		// Split around the pivot block and process the two sides via snap/full sync
   894  		if !d.committed.Load() {
   895  			latest := results[len(results)-1].Header
   896  			// If the height is above the pivot block by 2 sets, it means the pivot has
   897  			// become stale in the network and was garbage collected; move to a
   898  			// new pivot.
   899  			//
   900  			// Note, we have `reorgProtHeaderDelay` blocks withheld. Those need to be
   901  			// taken into account, otherwise we're detecting the pivot move too late
   902  			// and will drop peers due to unavailable state!!!
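        			//
        			// Illustrative arithmetic with the package defaults (fsMinFullBlocks = 64,
        			// reorgProtHeaderDelay = 2): the pivot is declared stale once the newest
        			// queued block is at least 2*64-2 = 126 blocks ahead of it, and the
        			// replacement pivot is picked 64-2 = 62 blocks behind that newest block.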
   903  			if height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) {
   904  				log.Warn("Pivot became stale, moving", "old", pivot.Number.Uint64(), "new", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay))
   905  				pivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted
   906  
   907  				d.pivotLock.Lock()
   908  				d.pivotHeader = pivot
   909  				d.pivotLock.Unlock()
   910  
   911  				// Write out the pivot into the database so a rollback beyond it will
   912  				// reenable snap sync
   913  				rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64())
   914  			}
   915  		}
   916  		P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results)
   917  		if err := d.commitSnapSyncData(beforeP, sync); err != nil {
   918  			return err
   919  		}
   920  		if P != nil {
   921  			// If new pivot block found, cancel old state retrieval and restart
   922  			if oldPivot != P {
   923  				sync.Cancel()
   924  				sync = d.syncState(P.Header.Root)
   925  
   926  				go closeOnErr(sync)
   927  				oldPivot = P
   928  			}
   929  			// Wait for completion, occasionally checking for pivot staleness
   930  			timer.Reset(time.Second)
   931  			select {
   932  			case <-sync.done:
   933  				if sync.err != nil {
   934  					return sync.err
   935  				}
   936  				if err := d.commitPivotBlock(P); err != nil {
   937  					return err
   938  				}
   939  				oldPivot = nil
   940  
   941  			case <-timer.C:
   942  				oldTail = afterP
   943  				continue
   944  			}
   945  		}
   946  		// Snap sync done, pivot commit done, full import
   947  		if err := d.importBlockResults(afterP); err != nil {
   948  			return err
   949  		}
   950  	}
   951  }
   952  
   953  func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
   954  	if len(results) == 0 {
   955  		return nil, nil, nil
   956  	}
   957  	if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {
   958  		// the pivot is somewhere in the future
   959  		return nil, results, nil
   960  	}
   961  	// This can also be optimized, but it only happens very rarely
   962  	for _, result := range results {
   963  		num := result.Header.Number.Uint64()
   964  		switch {
   965  		case num < pivot:
   966  			before = append(before, result)
   967  		case num == pivot:
   968  			p = result
   969  		default:
   970  			after = append(after, result)
   971  		}
   972  	}
   973  	return p, before, after
   974  }
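        // Illustration (hypothetical numbers): with pivot = 100 and results covering
        // blocks 97..103, splitAroundPivot yields before = [97, 98, 99], p = 100 and
        // after = [101, 102, 103]; the caller commits `before` with receipts only and
        // fully executes `after` once the pivot state is in place.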
   975  
   976  func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *stateSync) error {
   977  	// Check for any early termination requests
   978  	if len(results) == 0 {
   979  		return nil
   980  	}
   981  	select {
   982  	case <-d.quitCh:
   983  		return errCancelContentProcessing
   984  	case <-stateSync.done:
   985  		if err := stateSync.Wait(); err != nil {
   986  			return err
   987  		}
   988  	default:
   989  	}
   990  	// Retrieve the batch of results to import
   991  	first, last := results[0].Header, results[len(results)-1].Header
   992  	log.Debug("Inserting snap-sync blocks", "items", len(results),
   993  		"firstnum", first.Number, "firsthash", first.Hash(),
   994  		"lastnum", last.Number, "lasthash", last.Hash(),
   995  	)
   996  	blocks := make([]*types.Block, len(results))
   997  	receipts := make([]types.Receipts, len(results))
   998  	for i, result := range results {
   999  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.body())
  1000  		receipts[i] = result.Receipts
  1001  	}
  1002  	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
  1003  		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
  1004  		return fmt.Errorf("%w: %v", errInvalidChain, err)
  1005  	}
  1006  	return nil
  1007  }
  1008  
  1009  func (d *Downloader) commitPivotBlock(result *fetchResult) error {
  1010  	block := types.NewBlockWithHeader(result.Header).WithBody(result.body())
  1011  	log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash())
  1012  
  1013  	// Commit the pivot block as the new head, will require full sync from here on
  1014  	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {
  1015  		return err
  1016  	}
  1017  	if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil {
  1018  		return err
  1019  	}
  1020  	d.committed.Store(true)
  1021  	return nil
  1022  }
  1023  
  1024  // DeliverSnapPacket is invoked from a peer's message handler when it transmits a
  1025  // data packet for the local node to consume.
  1026  func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error {
  1027  	switch packet := packet.(type) {
  1028  	case *snap.AccountRangePacket:
  1029  		hashes, accounts, err := packet.Unpack()
  1030  		if err != nil {
  1031  			return err
  1032  		}
  1033  		return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof)
  1034  
  1035  	case *snap.StorageRangesPacket:
  1036  		hashset, slotset := packet.Unpack()
  1037  		return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof)
  1038  
  1039  	case *snap.ByteCodesPacket:
  1040  		return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)
  1041  
  1042  	case *snap.TrieNodesPacket:
  1043  		return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)
  1044  
  1045  	default:
  1046  		return fmt.Errorf("unexpected snap packet type: %T", packet)
  1047  	}
  1048  }
  1049  
  1050  // readHeaderRange returns a list of headers, using the given last header as the base,
  1051  // and going backwards towards genesis. This method assumes that the caller already has
  1052  // placed a reasonable cap on count.
  1053  func (d *Downloader) readHeaderRange(last *types.Header, count int) []*types.Header {
  1054  	var (
  1055  		current = last
  1056  		headers []*types.Header
  1057  	)
  1058  	for {
  1059  		parent := d.lightchain.GetHeaderByHash(current.ParentHash)
  1060  		if parent == nil {
  1061  			break // The chain is not continuous, or the chain is exhausted
  1062  		}
  1063  		headers = append(headers, parent)
  1064  		if len(headers) >= count {
  1065  			break
  1066  		}
  1067  		current = parent
  1068  	}
  1069  	return headers
  1070  }
  1071  
  1072  // reportSnapSyncProgress calculates various status reports and provides them to the user.
  1073  func (d *Downloader) reportSnapSyncProgress(force bool) {
  1074  	// Initialize the sync start time if it's the first time we're reporting
  1075  	if d.syncStartTime.IsZero() {
  1076  		d.syncStartTime = time.Now().Add(-time.Millisecond) // -1ms offset to avoid division by zero
  1077  	}
  1078  	// Don't report all the events, just occasionally
  1079  	if !force && time.Since(d.syncLogTime) < 8*time.Second {
  1080  		return
  1081  	}
  1082  	// Don't report anything until we have meaningful progress
  1083  	var (
  1084  		headerBytes, _  = d.stateDB.AncientSize(rawdb.ChainFreezerHeaderTable)
  1085  		bodyBytes, _    = d.stateDB.AncientSize(rawdb.ChainFreezerBodiesTable)
  1086  		receiptBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerReceiptTable)
  1087  	)
  1088  	syncedBytes := common.StorageSize(headerBytes + bodyBytes + receiptBytes)
  1089  	if syncedBytes == 0 {
  1090  		return
  1091  	}
  1092  	var (
  1093  		header = d.blockchain.CurrentHeader()
  1094  		block  = d.blockchain.CurrentSnapBlock()
  1095  	)
  1096  	syncedBlocks := block.Number.Uint64() - d.syncStartBlock
  1097  	if syncedBlocks == 0 {
  1098  		return
  1099  	}
  1100  	// Retrieve the current chain head and calculate the ETA
  1101  	latest, _, _, err := d.skeleton.Bounds()
  1102  	if err != nil {
  1103  		// We're going to cheat for non-merged networks, but that's fine
  1104  		latest = d.pivotHeader
  1105  	}
  1106  	if latest == nil {
  1107  		// This should really never happen, but add some defensive code for now.
  1108  		// TODO(karalabe): Remove it eventually if we don't see it blow.
  1109  		log.Error("Nil latest block in sync progress report")
  1110  		return
  1111  	}
  1112  	var (
  1113  		left = latest.Number.Uint64() - block.Number.Uint64()
  1114  		eta  = time.Since(d.syncStartTime) / time.Duration(syncedBlocks) * time.Duration(left)
  1115  
  1116  		progress = fmt.Sprintf("%.2f%%", float64(block.Number.Uint64())*100/float64(latest.Number.Uint64()))
  1117  		headers  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(header.Number.Uint64()), common.StorageSize(headerBytes).TerminalString())
  1118  		bodies   = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(bodyBytes).TerminalString())
  1119  		receipts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(receiptBytes).TerminalString())
  1120  	)
  1121  	log.Info("Syncing: chain download in progress", "synced", progress, "chain", syncedBytes, "headers", headers, "bodies", bodies, "receipts", receipts, "eta", common.PrettyDuration(eta))
  1122  	d.syncLogTime = time.Now()
  1123  }
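        // ETA arithmetic sketch (illustrative numbers): the estimate above scales the
        // elapsed time by the remaining work, e.g. if 200_000 blocks were synced in one
        // hour and 400_000 remain, the per-block cost is 18ms and the reported ETA is
        // roughly two hours.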