github.com/theQRL/go-zond@v0.2.1/zond/downloader/downloader.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package downloader contains the manual full chain synchronisation.
    18  package downloader
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"sync"
    24  	"sync/atomic"
    25  	"time"
    26  
    27  	"github.com/theQRL/go-zond"
    28  	"github.com/theQRL/go-zond/common"
    29  	"github.com/theQRL/go-zond/core/rawdb"
    30  	"github.com/theQRL/go-zond/core/state/snapshot"
    31  	"github.com/theQRL/go-zond/core/types"
    32  	"github.com/theQRL/go-zond/event"
    33  	"github.com/theQRL/go-zond/log"
    34  	"github.com/theQRL/go-zond/params"
    35  	"github.com/theQRL/go-zond/trie"
    36  	"github.com/theQRL/go-zond/zond/protocols/snap"
    37  	"github.com/theQRL/go-zond/zonddb"
    38  )
    39  
    40  var (
    41  	MaxBlockFetch   = 128 // Number of blocks to be fetched per retrieval request
    42  	MaxHeaderFetch  = 192 // Number of block headers to be fetched per retrieval request
    43  	MaxReceiptFetch = 256 // Number of transaction receipts to allow fetching per request
    44  
    45  	maxQueuedHeaders           = 32 * 1024                        // [zond/62] Maximum number of headers to queue for import (DOS protection)
    46  	maxHeadersProcess          = 2048                             // Number of header download results to import at once into the chain
    47  	maxResultsProcess          = 2048                             // Number of content download results to import at once into the chain
    48  	fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)
    49  
    50  	reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs
    51  
    52  	fsHeaderSafetyNet = 2048            // Number of headers to discard in case a chain violation is detected
    53  	fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download
    54  	fsMinFullBlocks   = 64              // Number of blocks to retrieve fully even in snap sync
    55  )
    56  
    57  var (
    58  	errBusy                    = errors.New("busy")
    59  	errBadPeer                 = errors.New("action from bad peer ignored")
    60  	errTimeout                 = errors.New("timeout")
    61  	errInvalidChain            = errors.New("retrieved hash chain is invalid")
    62  	errInvalidBody             = errors.New("retrieved block body is invalid")
    63  	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
    64  	errCancelStateFetch        = errors.New("state data download canceled (requested)")
    65  	errCancelContentProcessing = errors.New("content processing canceled (requested)")
    66  	errCanceled                = errors.New("syncing canceled (requested)")
    67  	errNoPivotHeader           = errors.New("pivot header is not found")
    68  )
    69  
    70  // peerDropFn is a callback type for dropping a peer detected as malicious.
    71  type peerDropFn func(id string)
    72  
    73  // badBlockFn is a callback for the async beacon sync to notify the caller that
    74  // the origin header requested to sync to produced a chain with a bad block.
    75  type badBlockFn func(invalid *types.Header, origin *types.Header)
    76  
    77  // headerTask is a set of downloaded headers to queue along with their precomputed
    78  // hashes to avoid constant rehashing.
    79  type headerTask struct {
    80  	headers []*types.Header
    81  	hashes  []common.Hash
    82  }
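
        // Illustrative sketch (not part of the original source) of building such a
        // task with the hashes computed once up front, so later consumers never have
        // to rehash the headers; `headers` is an assumed []*types.Header batch:
        //
        //	hashes := make([]common.Hash, len(headers))
        //	for i, header := range headers {
        //		hashes[i] = header.Hash()
        //	}
        //	task := &headerTask{headers: headers, hashes: hashes}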
    83  
    84  type Downloader struct {
    85  	mode atomic.Uint32  // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode
    86  	mux  *event.TypeMux // Event multiplexer to announce sync operation events
    87  
    88  	queue *queue   // Scheduler for selecting the hashes to download
    89  	peers *peerSet // Set of active peers from which download can proceed
    90  
    91  	stateDB zonddb.Database // Database to state sync into (and deduplicate via)
    92  
    93  	// Statistics
    94  	syncStatsChainOrigin uint64       // Origin block number where syncing started at
    95  	syncStatsChainHeight uint64       // Highest block number known when syncing started
    96  	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields
    97  
    98  	blockchain BlockChain
    99  
   100  	// Callbacks
   101  	dropPeer peerDropFn // Drops a peer for misbehaving
   102  	badBlock badBlockFn // Reports a block as rejected by the chain
   103  
   104  	// Status
   105  	synchronising atomic.Bool
   106  	notified      atomic.Bool
   107  	committed     atomic.Bool
   108  	ancientLimit  uint64 // The maximum block number which can be regarded as ancient data.
   109  
   110  	// Channels
   111  	headerProcCh chan *headerTask // Channel to feed the header processor new tasks
   112  
   113  	// Skeleton sync
   114  	skeleton *skeleton // Header skeleton to backfill the chain with (eth2 mode)
   115  
   116  	// State sync
   117  	pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root
   118  	pivotLock   sync.RWMutex  // Lock protecting pivot header reads from updates
   119  
   120  	SnapSyncer     *snap.Syncer // TODO(karalabe): make private! hack for now
   121  	stateSyncStart chan *stateSync
   122  
   123  	// Cancellation and termination
   124  	cancelCh   chan struct{}  // Channel to cancel mid-flight syncs
   125  	cancelLock sync.RWMutex   // Lock to protect the cancel channel and peer in delivers
   126  	cancelWg   sync.WaitGroup // Make sure all fetcher goroutines have exited.
   127  
   128  	quitCh   chan struct{} // Quit channel to signal termination
   129  	quitLock sync.Mutex    // Lock to prevent double closes
   130  
   131  	// Testing hooks
   132  	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
   133  	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
   134  	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
   135  
   136  	// Progress reporting metrics
   137  	syncStartBlock uint64    // Head snap block when Gzond was started
   138  	syncStartTime  time.Time // Time instance when chain sync started
   139  	syncLogTime    time.Time // Time instance when status was last reported
   140  }
   141  
   142  // BlockChain encapsulates functions required to sync a (full or snap) blockchain.
   143  type BlockChain interface {
   144  	// HasHeader verifies a header's presence in the local chain.
   145  	HasHeader(common.Hash, uint64) bool
   146  
   147  	// GetHeaderByHash retrieves a header from the local chain.
   148  	GetHeaderByHash(common.Hash) *types.Header
   149  
   150  	// CurrentHeader retrieves the head header from the local chain.
   151  	CurrentHeader() *types.Header
   152  
   153  	// InsertHeaderChain inserts a batch of headers into the local chain.
   154  	InsertHeaderChain([]*types.Header) (int, error)
   155  
   156  	// SetHead rewinds the local chain to a new head.
   157  	SetHead(uint64) error
   158  
   159  	// HasBlock verifies a block's presence in the local chain.
   160  	HasBlock(common.Hash, uint64) bool
   161  
   162  	// HasFastBlock verifies a snap block's presence in the local chain.
   163  	HasFastBlock(common.Hash, uint64) bool
   164  
   165  	// GetBlockByHash retrieves a block from the local chain.
   166  	GetBlockByHash(common.Hash) *types.Block
   167  
   168  	// CurrentBlock retrieves the head block from the local chain.
   169  	CurrentBlock() *types.Header
   170  
   171  	// CurrentSnapBlock retrieves the head snap block from the local chain.
   172  	CurrentSnapBlock() *types.Header
   173  
   174  	// SnapSyncCommitHead directly commits the snap-synced head block as the new chain head.
   175  	SnapSyncCommitHead(common.Hash) error
   176  
   177  	// InsertChain inserts a batch of blocks into the local chain.
   178  	InsertChain(types.Blocks) (int, error)
   179  
   180  	// InsertReceiptChain inserts a batch of receipts into the local chain.
   181  	InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error)
   182  
   183  	// Snapshots returns the blockchain snapshot tree to pause it during sync.
   184  	Snapshots() *snapshot.Tree
   185  
   186  	// TrieDB retrieves the low level trie database used for interacting
   187  	// with trie nodes.
   188  	TrieDB() *trie.Database
   189  }
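
        // Illustrative compile-time assertion (not part of the original source): the
        // node is expected to wire in a concrete chain, assumed here to be
        // core.BlockChain, which must satisfy this interface.
        //
        //	var _ BlockChain = (*core.BlockChain)(nil)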
   190  
   191  // New creates a new downloader to fetch hashes and blocks from remote peers.
   192  func New(stateDb zonddb.Database, mux *event.TypeMux, chain BlockChain, dropPeer peerDropFn, success func()) *Downloader {
   193  	dl := &Downloader{
   194  		stateDB:        stateDb,
   195  		mux:            mux,
   196  		queue:          newQueue(blockCacheMaxItems, blockCacheInitialItems),
   197  		peers:          newPeerSet(),
   198  		blockchain:     chain,
   199  		dropPeer:       dropPeer,
   200  		headerProcCh:   make(chan *headerTask, 1),
   201  		quitCh:         make(chan struct{}),
   202  		SnapSyncer:     snap.NewSyncer(stateDb, chain.TrieDB().Scheme()),
   203  		stateSyncStart: make(chan *stateSync),
   204  		syncStartBlock: chain.CurrentSnapBlock().Number.Uint64(),
   205  	}
   206  	// Create the post-merge skeleton syncer and start the process
   207  	dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success))
   208  
   209  	go dl.stateFetcher()
   210  	return dl
   211  }
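
        // Illustrative construction sketch (not part of the original source; `db`,
        // `mux`, `chain` and `dropPeer` are assumed to be supplied by the node wiring,
        // and the success callback fires once the beacon backfiller finishes):
        //
        //	dl := New(db, mux, chain, dropPeer, func() {
        //		log.Info("Chain backfill completed")
        //	})
        //	defer dl.Terminate()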
   212  
   213  // Progress retrieves the synchronisation boundaries, specifically the origin
   214  // block where synchronisation started at (may have failed/suspended); the block
   215  // or header the sync is currently at; and the latest known block which the sync targets.
   216  //
   217  // In addition, during the state download phase of snap synchronisation the number
   218  // of processed and the total number of known states are also returned. Otherwise
   219  // these are zero.
   220  func (d *Downloader) Progress() zond.SyncProgress {
   221  	// Lock the current stats and return the progress
   222  	d.syncStatsLock.RLock()
   223  	defer d.syncStatsLock.RUnlock()
   224  
   225  	current := uint64(0)
   226  	mode := d.getMode()
   227  	switch mode {
   228  	case FullSync:
   229  		current = d.blockchain.CurrentBlock().Number.Uint64()
   230  	case SnapSync:
   231  		current = d.blockchain.CurrentSnapBlock().Number.Uint64()
   232  	default:
   233  		log.Error("Unknown downloader mode", "mode", mode)
   234  	}
   235  	progress, pending := d.SnapSyncer.Progress()
   236  
   237  	return zond.SyncProgress{
   238  		StartingBlock:       d.syncStatsChainOrigin,
   239  		CurrentBlock:        current,
   240  		HighestBlock:        d.syncStatsChainHeight,
   241  		SyncedAccounts:      progress.AccountSynced,
   242  		SyncedAccountBytes:  uint64(progress.AccountBytes),
   243  		SyncedBytecodes:     progress.BytecodeSynced,
   244  		SyncedBytecodeBytes: uint64(progress.BytecodeBytes),
   245  		SyncedStorage:       progress.StorageSynced,
   246  		SyncedStorageBytes:  uint64(progress.StorageBytes),
   247  		HealedTrienodes:     progress.TrienodeHealSynced,
   248  		HealedTrienodeBytes: uint64(progress.TrienodeHealBytes),
   249  		HealedBytecodes:     progress.BytecodeHealSynced,
   250  		HealedBytecodeBytes: uint64(progress.BytecodeHealBytes),
   251  		HealingTrienodes:    pending.TrienodeHeal,
   252  		HealingBytecode:     pending.BytecodeHeal,
   253  	}
   254  }
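
        // Illustrative sketch of consuming the report (not part of the original
        // source; `dl` is an assumed *Downloader instance):
        //
        //	p := dl.Progress()
        //	if p.CurrentBlock < p.HighestBlock {
        //		log.Info("Still syncing", "current", p.CurrentBlock,
        //			"highest", p.HighestBlock, "healing", p.HealingTrienodes)
        //	}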
   255  
   256  // RegisterPeer injects a new download peer into the set of block sources to be
   257  // used for fetching hashes and blocks from.
   258  func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
   259  	var logger log.Logger
   260  	if len(id) < 16 {
   261  		// Tests use short IDs, don't choke on them
   262  		logger = log.New("peer", id)
   263  	} else {
   264  		logger = log.New("peer", id[:8])
   265  	}
   266  	logger.Trace("Registering sync peer")
   267  	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
   268  		logger.Error("Failed to register sync peer", "err", err)
   269  		return err
   270  	}
   271  	return nil
   272  }
   273  
   274  // UnregisterPeer removes a peer from the known list, preventing any action from
   275  // the specified peer. An effort is also made to return any pending fetches into
   276  // the queue.
   277  func (d *Downloader) UnregisterPeer(id string) error {
   278  	// Unregister the peer from the active peer set and revoke any fetch tasks
   279  	var logger log.Logger
   280  	if len(id) < 16 {
   281  		// Tests use short IDs, don't choke on them
   282  		logger = log.New("peer", id)
   283  	} else {
   284  		logger = log.New("peer", id[:8])
   285  	}
   286  	logger.Trace("Unregistering sync peer")
   287  	if err := d.peers.Unregister(id); err != nil {
   288  		logger.Error("Failed to unregister sync peer", "err", err)
   289  		return err
   290  	}
   291  	d.queue.Revoke(id)
   292  
   293  	return nil
   294  }
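
        // Illustrative peer lifecycle sketch (not part of the original source; `peer`
        // is assumed to implement the downloader's Peer interface and `version` to be
        // the negotiated protocol version):
        //
        //	if err := dl.RegisterPeer(id, version, peer); err != nil {
        //		return err
        //	}
        //	defer dl.UnregisterPeer(id)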
   295  
   296  // synchronise runs a single chain synchronisation cycle in the requested mode, ensuring only one
   297  // cycle is ever active at a time. If beaconPing is non-nil, it is closed once the cancel channel has
   298  // been set up (or on an early failure) so the async beacon syncer knows it is safe to cancel. This method is synchronous.
   299  func (d *Downloader) synchronise(mode SyncMode, beaconPing chan struct{}) error {
   300  	// The beacon header syncer is async. It will start this synchronization and
   301  	// will continue doing other tasks. However, if synchronization needs to be
   302  	// cancelled, the syncer needs to know if we reached the startup point (and
   303  	// inited the cancel channel) or not yet. Make sure that we'll signal even in
   304  	// case of a failure.
   305  	if beaconPing != nil {
   306  		defer func() {
   307  			select {
   308  			case <-beaconPing: // already notified
   309  			default:
   310  				close(beaconPing) // weird exit condition, notify that it's safe to cancel (nothing was started)
   311  			}
   312  		}()
   313  	}
   314  	// Make sure only one goroutine is ever allowed past this point at once
   315  	if !d.synchronising.CompareAndSwap(false, true) {
   316  		return errBusy
   317  	}
   318  	defer d.synchronising.Store(false)
   319  
   320  	// Post a user notification of the sync (only once per session)
   321  	if d.notified.CompareAndSwap(false, true) {
   322  		log.Info("Block synchronisation started")
   323  	}
   324  	if mode == SnapSync {
   325  		// Snap sync will directly modify the persistent state, making the entire
   326  		// trie database unusable until the state is fully synced. To prevent any
   327  		// subsequent state reads, explicitly disable the trie database; the state
   328  		// syncer is responsible for addressing and correcting any missing state.
   329  		if d.blockchain.TrieDB().Scheme() == rawdb.PathScheme {
   330  			d.blockchain.TrieDB().Reset(types.EmptyRootHash)
   331  		}
   332  		// Snap sync uses the snapshot namespace to store potentially flaky data until
   333  		// sync completely heals and finishes. Pause snapshot maintenance in the mean-
   334  		// time to prevent access.
   335  		if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
   336  			snapshots.Disable()
   337  		}
   338  	}
   339  	// Reset the queue, peer set and wake channels to clean any internal leftover state
   340  	d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)
   341  	d.peers.Reset()
   342  
   343  	for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
   344  		select {
   345  		case <-ch:
   346  		default:
   347  		}
   348  	}
   349  	for empty := false; !empty; {
   350  		select {
   351  		case <-d.headerProcCh:
   352  		default:
   353  			empty = true
   354  		}
   355  	}
   356  	// Create a cancel channel for aborting mid-flight syncs
   357  	d.cancelLock.Lock()
   358  	d.cancelCh = make(chan struct{})
   359  	d.cancelLock.Unlock()
   360  
   361  	defer d.Cancel() // No matter what, we can't leave the cancel channel open
   362  
   363  	// Atomically set the requested sync mode
   364  	d.mode.Store(uint32(mode))
   365  
   366  	if beaconPing != nil {
   367  		close(beaconPing)
   368  	}
   369  	return d.syncToHead()
   370  }
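
        // Illustrative sketch of the beaconPing handshake described above (not part of
        // the original source; `d` is the downloader): the asynchronous caller waits
        // for the ping before it may safely cancel the cycle.
        //
        //	ping := make(chan struct{})
        //	done := make(chan error, 1)
        //	go func() { done <- d.synchronise(SnapSync, ping) }()
        //	<-ping // the cancel channel now exists, so d.Cancel() is safe to call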
   371  
   372  func (d *Downloader) getMode() SyncMode {
   373  	return SyncMode(d.mode.Load())
   374  }
   375  
   376  // syncToHead starts a block synchronization based on the hash chain from
   377  // the specified head hash.
   378  func (d *Downloader) syncToHead() (err error) {
   379  	d.mux.Post(StartEvent{})
   380  	defer func() {
   381  		// reset on error
   382  		if err != nil {
   383  			d.mux.Post(FailedEvent{err})
   384  		} else {
   385  			latest := d.blockchain.CurrentHeader()
   386  			d.mux.Post(DoneEvent{latest})
   387  		}
   388  	}()
   389  	mode := d.getMode()
   390  
   391  	log.Debug("Backfilling with the network", "mode", mode)
   392  	defer func(start time.Time) {
   393  		log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
   394  	}(time.Now())
   395  
   396  	// Look up the sync boundaries: the common ancestor and the target block
   397  	var latest, pivot, final *types.Header
   398  
   399  	latest, _, final, err = d.skeleton.Bounds()
   400  	if err != nil {
   401  		return err
   402  	}
   403  	if latest.Number.Uint64() > uint64(fsMinFullBlocks) {
   404  		number := latest.Number.Uint64() - uint64(fsMinFullBlocks)
   405  
   406  		// Retrieve the pivot header from the skeleton chain segment but
   407  		// fallback to local chain if it's not found in skeleton space.
   408  		if pivot = d.skeleton.Header(number); pivot == nil {
   409  			_, oldest, _, _ := d.skeleton.Bounds() // error is already checked
   410  			if number < oldest.Number.Uint64() {
   411  				count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks
   412  				headers := d.readHeaderRange(oldest, count)
   413  				if len(headers) == count {
   414  					pivot = headers[len(headers)-1]
   415  					log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number)
   416  				}
   417  			}
   418  		}
   419  		// Print an error log and return directly in case the pivot header
   420  		// is still not found. It means the skeleton chain is not linked
   421  		// correctly with local chain.
   422  		if pivot == nil {
   423  			log.Error("Pivot header is not found", "number", number)
   424  			return errNoPivotHeader
   425  		}
   426  	}
   427  	// If no pivot block was returned, the head is below the min full block
   428  	// threshold (i.e. new chain). In that case we won't really snap sync
   429  	// anyway, but still need a valid pivot block to avoid some code hitting
   430  	// nil panics on access.
   431  	if mode == SnapSync && pivot == nil {
   432  		pivot = d.blockchain.CurrentBlock()
   433  	}
   434  	height := latest.Number.Uint64()
   435  
   436  	// In beacon mode, use the skeleton chain for the ancestor lookup
   437  	origin, err := d.findBeaconAncestor()
   438  	if err != nil {
   439  		return err
   440  	}
   441  	d.syncStatsLock.Lock()
   442  	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
   443  		d.syncStatsChainOrigin = origin
   444  	}
   445  	d.syncStatsChainHeight = height
   446  	d.syncStatsLock.Unlock()
   447  
   448  	// Ensure our origin point is below any snap sync pivot point
   449  	if mode == SnapSync {
   450  		if height <= uint64(fsMinFullBlocks) {
   451  			origin = 0
   452  		} else {
   453  			pivotNumber := pivot.Number.Uint64()
   454  			if pivotNumber <= origin {
   455  				origin = pivotNumber - 1
   456  			}
   457  			// Write out the pivot into the database so a rollback beyond it will
   458  			// reenable snap sync
   459  			rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber)
   460  		}
   461  	}
   462  	d.committed.Store(true)
   463  	if mode == SnapSync && pivot.Number.Uint64() != 0 {
   464  		d.committed.Store(false)
   465  	}
   466  	if mode == SnapSync {
   467  		// Set the ancient data limitation. If we are running snap sync, all block
   468  		// data older than ancientLimit will be written to the ancient store. More
   469  		// recent data will be written to the active database and will wait for the
   470  		// freezer to migrate.
   471  		//
   472  		// If the network is post-merge, use either the last announced finalized
   473  		// block as the ancient limit, or if we haven't yet received one, the head-
   474  		// block as the ancient limit, or if we haven't yet received one, the head
   475  		// minus a max fork ancestry limit. One quirky case is if we've already passed the
   476  		// we'll revert to head - 90K. That's fine, we're finishing sync anyway.
   477  		//
   478  		// For non-merged networks, if there is a checkpoint available, then calculate
   479  		// the ancientLimit through that. Otherwise calculate the ancient limit through
   480  		// the advertised height of the remote peer. This is mostly a fallback for
   481  		// legacy networks, but should eventually be dropped. TODO(karalabe).
   482  		//
   483  		// Beacon sync, use the latest finalized block as the ancient limit
   484  		// or a reasonable height if no finalized block is yet announced.
   485  		if final != nil {
   486  			d.ancientLimit = final.Number.Uint64()
   487  		} else if height > fullMaxForkAncestry+1 {
   488  			d.ancientLimit = height - fullMaxForkAncestry - 1
   489  		} else {
   490  			d.ancientLimit = 0
   491  		}
   492  
   493  		frozen, _ := d.stateDB.Ancients() // Ignore the error here since a light client can also hit this path.
   494  
   495  		// If a part of the blockchain data has already been written into the active store,
   496  		// disable the ancient style insertion explicitly.
   497  		if origin >= frozen && frozen != 0 {
   498  			d.ancientLimit = 0
   499  			log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1)
   500  		} else if d.ancientLimit > 0 {
   501  			log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit)
   502  		}
   503  		// Rewind the ancient store and blockchain if reorg happens.
   504  		if origin+1 < frozen {
   505  			if err := d.blockchain.SetHead(origin); err != nil {
   506  				return err
   507  			}
   508  			log.Info("Truncated excess ancient chain segment", "oldhead", frozen-1, "newhead", origin)
   509  		}
   510  	}
   511  	// Initiate the sync using a concurrent header and content retrieval algorithm
   512  	d.queue.Prepare(origin+1, mode)
   513  	// In beacon mode, headers are served by the skeleton syncer
   514  	fetchers := []func() error{
   515  		func() error { return d.fetchHeaders(origin + 1) },  // Headers are always retrieved
   516  		func() error { return d.fetchBodies(origin + 1) },   // Bodies are retrieved during normal and snap sync
   517  		func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during snap sync
   518  		func() error { return d.processHeaders(origin + 1) },
   519  	}
   520  	if mode == SnapSync {
   521  		d.pivotLock.Lock()
   522  		d.pivotHeader = pivot
   523  		d.pivotLock.Unlock()
   524  
   525  		fetchers = append(fetchers, func() error { return d.processSnapSyncContent() })
   526  	} else if mode == FullSync {
   527  		fetchers = append(fetchers, func() error { return d.processFullSyncContent() })
   528  	}
   529  	return d.spawnSync(fetchers)
   530  }
   531  
   532  // spawnSync runs all the given fetcher functions to completion in separate
   533  // goroutines, returning the first error that appears.
   534  func (d *Downloader) spawnSync(fetchers []func() error) error {
   535  	errc := make(chan error, len(fetchers))
   536  	d.cancelWg.Add(len(fetchers))
   537  	for _, fn := range fetchers {
   538  		fn := fn
   539  		go func() { defer d.cancelWg.Done(); errc <- fn() }()
   540  	}
   541  	// Wait for the first error, then terminate the others.
   542  	var err error
   543  	for i := 0; i < len(fetchers); i++ {
   544  		if i == len(fetchers)-1 {
   545  			// Close the queue when all fetchers have exited.
   546  			// This will cause the block processor to end when
   547  			// it has processed the queue.
   548  			d.queue.Close()
   549  		}
   550  		if got := <-errc; got != nil {
   551  			err = got
   552  			if got != errCanceled {
   553  				break // received a meaningful error, bubble it up
   554  			}
   555  		}
   556  	}
   557  	d.queue.Close()
   558  	d.Cancel()
   559  	return err
   560  }
   561  
   562  // cancel aborts all of the operations and resets the queue. However, cancel does
   563  // not wait for the running download goroutines to finish. This method should be
   564  // used when cancelling the downloads from inside the downloader.
   565  func (d *Downloader) cancel() {
   566  	// Close the current cancel channel
   567  	d.cancelLock.Lock()
   568  	defer d.cancelLock.Unlock()
   569  
   570  	if d.cancelCh != nil {
   571  		select {
   572  		case <-d.cancelCh:
   573  			// Channel was already closed
   574  		default:
   575  			close(d.cancelCh)
   576  		}
   577  	}
   578  }
   579  
   580  // Cancel aborts all of the operations and waits for all download goroutines to
   581  // finish before returning.
   582  func (d *Downloader) Cancel() {
   583  	d.cancel()
   584  	d.cancelWg.Wait()
   585  }
   586  
   587  // Terminate interrupts the downloader, canceling all pending operations.
   588  // The downloader cannot be reused after calling Terminate.
   589  func (d *Downloader) Terminate() {
   590  	// Close the termination channel (make sure double close is allowed)
   591  	d.quitLock.Lock()
   592  	select {
   593  	case <-d.quitCh:
   594  	default:
   595  		close(d.quitCh)
   596  
   597  		// Terminate the internal beacon syncer
   598  		d.skeleton.Terminate()
   599  	}
   600  	d.quitLock.Unlock()
   601  
   602  	// Cancel any pending download requests
   603  	d.Cancel()
   604  }
   605  
   606  // fetchBodies iteratively downloads the scheduled block bodies, taking any
   607  // available peers, reserving a chunk of blocks for each, waiting for delivery
   608  // and also periodically checking for timeouts.
   609  func (d *Downloader) fetchBodies(from uint64) error {
   610  	log.Debug("Downloading block bodies", "origin", from)
   611  	err := d.concurrentFetch((*bodyQueue)(d))
   612  
   613  	log.Debug("Block body download terminated", "err", err)
   614  	return err
   615  }
   616  
   617  // fetchReceipts iteratively downloads the scheduled block receipts, taking any
   618  // available peers, reserving a chunk of receipts for each, waiting for delivery
   619  // and also periodically checking for timeouts.
   620  func (d *Downloader) fetchReceipts(from uint64) error {
   621  	log.Debug("Downloading receipts", "origin", from)
   622  	err := d.concurrentFetch((*receiptQueue)(d))
   623  
   624  	log.Debug("Receipt download terminated", "err", err)
   625  	return err
   626  }
   627  
   628  // processHeaders takes batches of retrieved headers from an input channel and
   629  // keeps processing and scheduling them into the header chain and downloader's
   630  // queue until the stream ends or a failure occurs.
   631  func (d *Downloader) processHeaders(origin uint64) error {
   632  	var (
   633  		mode  = d.getMode()
   634  		timer = time.NewTimer(time.Second)
   635  	)
   636  	defer timer.Stop()
   637  
   638  	for {
   639  		select {
   640  		case <-d.cancelCh:
   641  			return errCanceled
   642  
   643  		case task := <-d.headerProcCh:
   644  			// Terminate header processing if we synced up
   645  			if task == nil || len(task.headers) == 0 {
   646  				// Notify everyone that headers are fully processed
   647  				for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
   648  					select {
   649  					case ch <- false:
   650  					case <-d.cancelCh:
   651  					}
   652  				}
   653  				return nil
   654  			}
   655  			// Otherwise split the chunk of headers into batches and process them
   656  			headers, hashes := task.headers, task.hashes
   657  
   658  			for len(headers) > 0 {
   659  				// Terminate if something failed in between processing chunks
   660  				select {
   661  				case <-d.cancelCh:
   662  					return errCanceled
   663  				default:
   664  				}
   665  				// Select the next chunk of headers to import
   666  				limit := maxHeadersProcess
   667  				if limit > len(headers) {
   668  					limit = len(headers)
   669  				}
   670  				chunkHeaders := headers[:limit]
   671  				chunkHashes := hashes[:limit]
   672  
   673  				// In snap sync mode, import the header chunk into the header chain immediately
   674  				if mode == SnapSync {
   675  					// Although the received headers might be all valid, a legacy
   676  					// PoW/PoA sync must not accept post-merge headers. Make sure
   677  					// that any transition is rejected at this point.
   678  					if len(chunkHeaders) > 0 {
   679  						if n, err := d.blockchain.InsertHeaderChain(chunkHeaders); err != nil {
   680  							log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
   681  							return fmt.Errorf("%w: %v", errInvalidChain, err)
   682  						}
   683  					}
   684  				}
   685  				// If we've reached the allowed number of pending headers, stall a bit
   686  				for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
   687  					timer.Reset(time.Second)
   688  					select {
   689  					case <-d.cancelCh:
   690  						return errCanceled
   691  					case <-timer.C:
   692  					}
   693  				}
   694  				// Otherwise insert the headers for content retrieval
   695  				inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin)
   696  				if len(inserts) != len(chunkHeaders) {
   697  					return fmt.Errorf("%w: stale headers", errBadPeer)
   698  				}
   699  
   700  				headers = headers[limit:]
   701  				hashes = hashes[limit:]
   702  				origin += uint64(limit)
   703  			}
   704  			// Update the highest block number we know if a higher one is found.
   705  			d.syncStatsLock.Lock()
   706  			if d.syncStatsChainHeight < origin {
   707  				d.syncStatsChainHeight = origin - 1
   708  			}
   709  			d.syncStatsLock.Unlock()
   710  
   711  			// Signal the content downloaders of the availability of new tasks
   712  			for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
   713  				select {
   714  				case ch <- true:
   715  				default:
   716  				}
   717  			}
   718  		}
   719  	}
   720  }
   721  
   722  // processFullSyncContent takes fetch results from the queue and imports them into the chain.
   723  func (d *Downloader) processFullSyncContent() error {
   724  	for {
   725  		results := d.queue.Results(true)
   726  		if len(results) == 0 {
   727  			return nil
   728  		}
   729  		if d.chainInsertHook != nil {
   730  			d.chainInsertHook(results)
   731  		}
   732  		if err := d.importBlockResults(results); err != nil {
   733  			return err
   734  		}
   735  	}
   736  }
   737  
   738  func (d *Downloader) importBlockResults(results []*fetchResult) error {
   739  	// Check for any early termination requests
   740  	if len(results) == 0 {
   741  		return nil
   742  	}
   743  	select {
   744  	case <-d.quitCh:
   745  		return errCancelContentProcessing
   746  	default:
   747  	}
   748  	// Retrieve a batch of results to import
   749  	first, last := results[0].Header, results[len(results)-1].Header
   750  	log.Debug("Inserting downloaded chain", "items", len(results),
   751  		"firstnum", first.Number, "firsthash", first.Hash(),
   752  		"lastnum", last.Number, "lasthash", last.Hash(),
   753  	)
   754  	blocks := make([]*types.Block, len(results))
   755  	for i, result := range results {
   756  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.body())
   757  	}
   758  	// Downloaded blocks are always regarded as trusted after the
   759  	// transition, because the downloaded chain is guided by the
   760  	// consensus layer.
   761  	if index, err := d.blockchain.InsertChain(blocks); err != nil {
   762  		if index < len(results) {
   763  			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
   764  
   765  			// In post-merge, notify the engine API of encountered bad chains
   766  			if d.badBlock != nil {
   767  				head, _, _, err := d.skeleton.Bounds()
   768  				if err != nil {
   769  					log.Error("Failed to retrieve beacon bounds for bad block reporting", "err", err)
   770  				} else {
   771  					d.badBlock(blocks[index].Header(), head)
   772  				}
   773  			}
   774  		} else {
   775  			// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
   776  			// when it needs to preprocess blocks to import a sidechain.
   777  			// The importer will put together a new list of blocks to import, which is a superset
   778  			// of the blocks delivered from the downloader, and the indexing will be off.
   779  			log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
   780  		}
   781  		return fmt.Errorf("%w: %v", errInvalidChain, err)
   782  	}
   783  	return nil
   784  }
   785  
   786  // processSnapSyncContent takes fetch results from the queue and writes them to the
   787  // database. It also controls the synchronisation of state nodes of the pivot block.
   788  func (d *Downloader) processSnapSyncContent() error {
   789  	// Start syncing state of the reported head block. This should get us most of
   790  	// the state of the pivot block.
   791  	d.pivotLock.RLock()
   792  	sync := d.syncState(d.pivotHeader.Root)
   793  	d.pivotLock.RUnlock()
   794  
   795  	defer func() {
   796  		// The `sync` object is replaced every time the pivot moves. We need to
   797  		// cancel the very last active one when returning, hence the lazy evaluation
   798  		// inside this deferred closure instead of a direct `defer sync.Cancel()`.
   799  		sync.Cancel()
   800  	}()
   801  
   802  	closeOnErr := func(s *stateSync) {
   803  		if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled {
   804  			d.queue.Close() // wake up Results
   805  		}
   806  	}
   807  	go closeOnErr(sync)
   808  
   809  	// To cater for moving pivot points, track the pivot block and subsequently
   810  	// accumulated download results separately.
   811  	//
   812  	// These will be nil up to the point where we reach the pivot, and will only
   813  	// be set temporarily if the synced blocks are piling up, but the pivot is
   814  	// still busy downloading. In that case, we need to occasionally check for
   815  	// pivot moves, so we need to unblock the loop. These fields will accumulate
   816  	// the results in the meantime.
   817  	//
   818  	// Note, there's no issue with memory piling up since after 64 blocks the
   819  	// pivot will forcefully move so these accumulators will be dropped.
   820  	var (
   821  		oldPivot *fetchResult   // Locked in pivot block, might change eventually
   822  		oldTail  []*fetchResult // Downloaded content after the pivot
   823  		timer    = time.NewTimer(time.Second)
   824  	)
   825  	defer timer.Stop()
   826  
   827  	for {
   828  		// Wait for the next batch of downloaded data to be available. If we have
   829  		// not yet reached the pivot point, wait blockingly as there's no need to
   830  		// spin-loop check for pivot moves. If we reached the pivot but have not
   831  		// yet processed it, check for results async, so we might notice pivot
   832  		// moves while state syncing. If the pivot was passed fully, block again
   833  		// as there's no more reason to check for pivot moves at all.
   834  		results := d.queue.Results(oldPivot == nil)
   835  		if len(results) == 0 {
   836  			// If pivot sync is done, stop
   837  			if d.committed.Load() {
   838  				d.reportSnapSyncProgress(true)
   839  				return sync.Cancel()
   840  			}
   841  			// If sync failed, stop
   842  			select {
   843  			case <-d.cancelCh:
   844  				sync.Cancel()
   845  				return errCanceled
   846  			default:
   847  			}
   848  		}
   849  		if d.chainInsertHook != nil {
   850  			d.chainInsertHook(results)
   851  		}
   852  		d.reportSnapSyncProgress(false)
   853  
   854  		// If we haven't downloaded the pivot block yet, check pivot staleness
   855  		// notifications from the header downloader
   856  		d.pivotLock.RLock()
   857  		pivot := d.pivotHeader
   858  		d.pivotLock.RUnlock()
   859  
   860  		if oldPivot == nil { // no results piling up, we can move the pivot
   861  			if !d.committed.Load() { // not yet passed the pivot, we can move the pivot
   862  				if pivot.Root != sync.root { // pivot position changed, we can move the pivot
   863  					sync.Cancel()
   864  					sync = d.syncState(pivot.Root)
   865  
   866  					go closeOnErr(sync)
   867  				}
   868  			}
   869  		} else { // results already piled up, consume before handling pivot move
   870  			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
   871  		}
   872  		// Split around the pivot block and process the two sides via snap/full sync
   873  		if !d.committed.Load() {
   874  			latest := results[len(results)-1].Header
   875  			// If the height is above the pivot block by 2 sets, it means the pivot has
   876  			// become stale in the network and was garbage collected, so move to a
   877  			// new pivot.
   878  			//
   879  			// Note, we have `reorgProtHeaderDelay` blocks withheld; those
   880  			// need to be taken into account, otherwise we're detecting the pivot move
   881  			// late and will drop peers due to unavailable state!!!
   882  			if height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) {
   883  				log.Warn("Pivot became stale, moving", "old", pivot.Number.Uint64(), "new", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay))
   884  				pivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted
   885  
   886  				d.pivotLock.Lock()
   887  				d.pivotHeader = pivot
   888  				d.pivotLock.Unlock()
   889  
   890  				// Write out the pivot into the database so a rollback beyond it will
   891  				// reenable snap sync
   892  				rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64())
   893  			}
   894  		}
   895  		P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results)
   896  		if err := d.commitSnapSyncData(beforeP, sync); err != nil {
   897  			return err
   898  		}
   899  		if P != nil {
   900  			// If new pivot block found, cancel old state retrieval and restart
   901  			if oldPivot != P {
   902  				sync.Cancel()
   903  				sync = d.syncState(P.Header.Root)
   904  
   905  				go closeOnErr(sync)
   906  				oldPivot = P
   907  			}
   908  			// Wait for completion, occasionally checking for pivot staleness
   909  			timer.Reset(time.Second)
   910  			select {
   911  			case <-sync.done:
   912  				if sync.err != nil {
   913  					return sync.err
   914  				}
   915  				if err := d.commitPivotBlock(P); err != nil {
   916  					return err
   917  				}
   918  				oldPivot = nil
   919  
   920  			case <-timer.C:
   921  				oldTail = afterP
   922  				continue
   923  			}
   924  		}
   925  		// Snap sync done, pivot commit done, full import
   926  		if err := d.importBlockResults(afterP); err != nil {
   927  			return err
   928  		}
   929  	}
   930  }
   931  
   932  func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
   933  	if len(results) == 0 {
   934  		return nil, nil, nil
   935  	}
   936  	if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {
   937  		// the pivot is somewhere in the future
   938  		return nil, results, nil
   939  	}
   940  	// This can also be optimized, but only happens very seldom
   941  	for _, result := range results {
   942  		num := result.Header.Number.Uint64()
   943  		switch {
   944  		case num < pivot:
   945  			before = append(before, result)
   946  		case num == pivot:
   947  			p = result
   948  		default:
   949  			after = append(after, result)
   950  		}
   951  	}
   952  	return p, before, after
   953  }
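
        // Illustrative behaviour sketch (not part of the original source; r8..r12 are
        // assumed fetch results whose headers are numbered 8 through 12):
        //
        //	p, before, after := splitAroundPivot(10, []*fetchResult{r8, r9, r10, r11, r12})
        //	// p == r10, before == [r8 r9], after == [r11 r12]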
   954  
   955  func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *stateSync) error {
   956  	// Check for any early termination requests
   957  	if len(results) == 0 {
   958  		return nil
   959  	}
   960  	select {
   961  	case <-d.quitCh:
   962  		return errCancelContentProcessing
   963  	case <-stateSync.done:
   964  		if err := stateSync.Wait(); err != nil {
   965  			return err
   966  		}
   967  	default:
   968  	}
   969  	// Retrieve the batch of results to import
   970  	first, last := results[0].Header, results[len(results)-1].Header
   971  	log.Debug("Inserting snap-sync blocks", "items", len(results),
   972  		"firstnum", first.Number, "firsthash", first.Hash(),
   973  		"lastnum", last.Number, "lasthash", last.Hash(),
   974  	)
   975  	blocks := make([]*types.Block, len(results))
   976  	receipts := make([]types.Receipts, len(results))
   977  	for i, result := range results {
   978  		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.body())
   979  		receipts[i] = result.Receipts
   980  	}
   981  	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
   982  		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
   983  		return fmt.Errorf("%w: %v", errInvalidChain, err)
   984  	}
   985  	return nil
   986  }
   987  
   988  func (d *Downloader) commitPivotBlock(result *fetchResult) error {
   989  	block := types.NewBlockWithHeader(result.Header).WithBody(result.body())
   990  	log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash())
   991  
   992  	// Commit the pivot block as the new head, will require full sync from here on
   993  	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {
   994  		return err
   995  	}
   996  	if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil {
   997  		return err
   998  	}
   999  	d.committed.Store(true)
  1000  	return nil
  1001  }
  1002  
  1003  // DeliverSnapPacket is invoked from a peer's message handler when it transmits a
  1004  // data packet for the local node to consume.
  1005  func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error {
  1006  	switch packet := packet.(type) {
  1007  	case *snap.AccountRangePacket:
  1008  		hashes, accounts, err := packet.Unpack()
  1009  		if err != nil {
  1010  			return err
  1011  		}
  1012  		return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof)
  1013  
  1014  	case *snap.StorageRangesPacket:
  1015  		hashset, slotset := packet.Unpack()
  1016  		return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof)
  1017  
  1018  	case *snap.ByteCodesPacket:
  1019  		return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)
  1020  
  1021  	case *snap.TrieNodesPacket:
  1022  		return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)
  1023  
  1024  	default:
  1025  		return fmt.Errorf("unexpected snap packet type: %T", packet)
  1026  	}
  1027  }
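
        // Illustrative dispatch sketch (not part of the original source; `pkt` is an
        // assumed decoded snap response and `p` the matching *snap.Peer from the
        // protocol handler):
        //
        //	if err := dl.DeliverSnapPacket(p, pkt); err != nil {
        //		log.Debug("Failed to deliver snap packet", "type", fmt.Sprintf("%T", pkt), "err", err)
        //	}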
  1028  
  1029  // readHeaderRange returns a list of headers, using the given last header as the base,
  1030  // and going backwards towards genesis. This method assumes that the caller already has
  1031  // placed a reasonable cap on count.
  1032  func (d *Downloader) readHeaderRange(last *types.Header, count int) []*types.Header {
  1033  	var (
  1034  		current = last
  1035  		headers []*types.Header
  1036  	)
  1037  	for {
  1038  		parent := d.blockchain.GetHeaderByHash(current.ParentHash)
  1039  		if parent == nil {
  1040  			break // The chain is not continuous, or the chain is exhausted
  1041  		}
  1042  		headers = append(headers, parent)
  1043  		if len(headers) >= count {
  1044  			break
  1045  		}
  1046  		current = parent
  1047  	}
  1048  	return headers
  1049  }
  1050  
  1051  // reportSnapSyncProgress calculates various status reports and provides them to the user.
  1052  func (d *Downloader) reportSnapSyncProgress(force bool) {
  1053  	// Initialize the sync start time if it's the first time we're reporting
  1054  	if d.syncStartTime.IsZero() {
  1055  		d.syncStartTime = time.Now().Add(-time.Millisecond) // -1ms offset to avoid division by zero
  1056  	}
  1057  	// Don't report all the events, just occasionally
  1058  	if !force && time.Since(d.syncLogTime) < 8*time.Second {
  1059  		return
  1060  	}
  1061  	// Don't report anything until we have a meaningful progress
  1062  	var (
  1063  		headerBytes, _  = d.stateDB.AncientSize(rawdb.ChainFreezerHeaderTable)
  1064  		bodyBytes, _    = d.stateDB.AncientSize(rawdb.ChainFreezerBodiesTable)
  1065  		receiptBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerReceiptTable)
  1066  	)
  1067  	syncedBytes := common.StorageSize(headerBytes + bodyBytes + receiptBytes)
  1068  	if syncedBytes == 0 {
  1069  		return
  1070  	}
  1071  	var (
  1072  		header = d.blockchain.CurrentHeader()
  1073  		block  = d.blockchain.CurrentSnapBlock()
  1074  	)
  1075  	syncedBlocks := block.Number.Uint64() - d.syncStartBlock
  1076  	if syncedBlocks == 0 {
  1077  		return
  1078  	}
  1079  	// Retrieve the current chain head and calculate the ETA
  1080  	latest, _, _, err := d.skeleton.Bounds()
  1081  	if err != nil {
  1082  		// We're going to cheat for non-merged networks, but that's fine
  1083  		latest = d.pivotHeader
  1084  	}
  1085  	if latest == nil {
  1086  		// This should really never happen, but add some defensive code for now.
  1087  		// TODO(karalabe): Remove it eventually if we don't see it blow.
  1088  		log.Error("Nil latest block in sync progress report")
  1089  		return
  1090  	}
  1091  	var (
  1092  		left = latest.Number.Uint64() - block.Number.Uint64()
  1093  		eta  = time.Since(d.syncStartTime) / time.Duration(syncedBlocks) * time.Duration(left)
  1094  
  1095  		progress = fmt.Sprintf("%.2f%%", float64(block.Number.Uint64())*100/float64(latest.Number.Uint64()))
  1096  		headers  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(header.Number.Uint64()), common.StorageSize(headerBytes).TerminalString())
  1097  		bodies   = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(bodyBytes).TerminalString())
  1098  		receipts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(receiptBytes).TerminalString())
  1099  	)
  1100  	log.Info("Syncing: chain download in progress", "synced", progress, "chain", syncedBytes, "headers", headers, "bodies", bodies, "receipts", receipts, "eta", common.PrettyDuration(eta))
  1101  	d.syncLogTime = time.Now()
  1102  }
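
        // Illustrative arithmetic check of the ETA estimate above (not part of the
        // original source): with 2h elapsed over 100,000 synced blocks and 300,000
        // blocks still missing, the estimate works out to 6h.
        //
        //	eta := (2 * time.Hour) / time.Duration(100000) * time.Duration(300000) // 6h0m0s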