github.com/aidoskuneen/adk-node@v0.0.0-20220315131952-2e32567cb7f4/eth/fetcher/block_fetcher.go

     1  // Copyright 2021 The adkgo Authors
     2  // This file is part of the adkgo library (adapted for adkgo from go-ethereum v1.10.8).
     3  //
     4  // the adkgo library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // the adkgo library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the adkgo library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package fetcher contains the announcement based header, block and transaction synchronisation.
    18  package fetcher
    19  
    20  import (
    21  	"errors"
    22  	"math/rand"
    23  	"time"
    24  
    25  	"github.com/aidoskuneen/adk-node/common"
    26  	"github.com/aidoskuneen/adk-node/common/prque"
    27  	"github.com/aidoskuneen/adk-node/consensus"
    28  	"github.com/aidoskuneen/adk-node/core/types"
    29  	"github.com/aidoskuneen/adk-node/log"
    30  	"github.com/aidoskuneen/adk-node/metrics"
    31  	"github.com/aidoskuneen/adk-node/trie"
    32  )
    33  
    34  const (
    35  	lightTimeout  = time.Millisecond       // Time allowance before an announced header is explicitly requested
    36  	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
    37  	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
    38  	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block/transaction
    39  )
    40  
    41  const (
    42  	maxUncleDist = 7   // Maximum allowed backward distance from the chain head
    43  	maxQueueDist = 32  // Maximum allowed distance from the chain head to queue
    44  	hashLimit    = 256 // Maximum number of unique blocks or headers a peer may have announced
    45  	blockLimit   = 64  // Maximum number of unique blocks a peer may have delivered
    46  )
    47  
    48  var (
    49  	blockAnnounceInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil)
    50  	blockAnnounceOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil)
    51  	blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil)
    52  	blockAnnounceDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil)
    53  
    54  	blockBroadcastInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil)
    55  	blockBroadcastOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil)
    56  	blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil)
    57  	blockBroadcastDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/dos", nil)
    58  
    59  	headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil)
    60  	bodyFetchMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil)
    61  
    62  	headerFilterInMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil)
    63  	headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil)
    64  	bodyFilterInMeter    = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil)
    65  	bodyFilterOutMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil)
    66  )
    67  
    68  var errTerminated = errors.New("terminated")
    69  
    70  // HeaderRetrievalFn is a callback type for retrieving a header from the local chain.
    71  type HeaderRetrievalFn func(common.Hash) *types.Header
    72  
    73  // blockRetrievalFn is a callback type for retrieving a block from the local chain.
    74  type blockRetrievalFn func(common.Hash) *types.Block
    75  
    76  // headerRequesterFn is a callback type for sending a header retrieval request.
    77  type headerRequesterFn func(common.Hash) error
    78  
    79  // bodyRequesterFn is a callback type for sending a body retrieval request.
    80  type bodyRequesterFn func([]common.Hash) error
    81  
    82  // headerVerifierFn is a callback type to verify a block's header for fast propagation.
    83  type headerVerifierFn func(header *types.Header) error
    84  
    85  // blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
    86  type blockBroadcasterFn func(block *types.Block, propagate bool)
    87  
    88  // chainHeightFn is a callback type to retrieve the current chain height.
    89  type chainHeightFn func() uint64
    90  
    91  // headersInsertFn is a callback type to insert a batch of headers into the local chain.
    92  type headersInsertFn func(headers []*types.Header) (int, error)
    93  
    94  // chainInsertFn is a callback type to insert a batch of blocks into the local chain.
    95  type chainInsertFn func(types.Blocks) (int, error)
    96  
    97  // peerDropFn is a callback type for dropping a peer detected as malicious.
    98  type peerDropFn func(id string)
    99  
   100  // blockAnnounce is the hash notification of the availability of a new block in the
   101  // network.
   102  type blockAnnounce struct {
   103  	hash   common.Hash   // Hash of the block being announced
   104  	number uint64        // Number of the block being announced (0 = unknown | old protocol)
   105  	header *types.Header // Header of the block partially reassembled (new protocol)
   106  	time   time.Time     // Timestamp of the announcement
   107  
   108  	origin string // Identifier of the peer originating the notification
   109  
   110  	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
   111  	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
   112  }
   113  
   114  // headerFilterTask represents a batch of headers needing fetcher filtering.
   115  type headerFilterTask struct {
   116  	peer    string          // The source peer of block headers
   117  	headers []*types.Header // Collection of headers to filter
   118  	time    time.Time       // Arrival time of the headers
   119  }
   120  
   121  // bodyFilterTask represents a batch of block bodies (transactions and uncles)
   122  // needing fetcher filtering.
   123  type bodyFilterTask struct {
   124  	peer         string                 // The source peer of block bodies
   125  	transactions [][]*types.Transaction // Collection of transactions per block body
   126  	uncles       [][]*types.Header      // Collection of uncles per block body
   127  	time         time.Time              // Arrival time of the blocks' contents
   128  }
   129  
   130  // blockOrHeaderInject represents a scheduled import operation.
   131  type blockOrHeaderInject struct {
   132  	origin string
   133  
   134  	header *types.Header // Used for light mode fetcher which only cares about header.
   135  	block  *types.Block  // Used for normal mode fetcher which imports full block.
   136  }
   137  
   138  // number returns the block number of the injected object.
   139  func (inject *blockOrHeaderInject) number() uint64 {
   140  	if inject.header != nil {
   141  		return inject.header.Number.Uint64()
   142  	}
   143  	return inject.block.NumberU64()
   144  }
   145  
   146  // hash returns the block hash of the injected object.
   147  func (inject *blockOrHeaderInject) hash() common.Hash {
   148  	if inject.header != nil {
   149  		return inject.header.Hash()
   150  	}
   151  	return inject.block.Hash()
   152  }
   153  
   154  // BlockFetcher is responsible for accumulating block announcements from various peers
   155  // and scheduling them for retrieval.
   156  type BlockFetcher struct {
   157  	light bool // The indicator whether it's a light fetcher or a normal one.
   158  
   159  	// Various event channels
   160  	notify chan *blockAnnounce
   161  	inject chan *blockOrHeaderInject
   162  
   163  	headerFilter chan chan *headerFilterTask
   164  	bodyFilter   chan chan *bodyFilterTask
   165  
   166  	done chan common.Hash
   167  	quit chan struct{}
   168  
   169  	// Announce states
   170  	announces  map[string]int                   // Per peer blockAnnounce counts to prevent memory exhaustion
   171  	announced  map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
   172  	fetching   map[common.Hash]*blockAnnounce   // Announced blocks, currently fetching
   173  	fetched    map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
   174  	completing map[common.Hash]*blockAnnounce   // Blocks with headers, currently body-completing
   175  
   176  	// Block cache
   177  	queue  *prque.Prque                         // Queue containing the import operations (block number sorted)
   178  	queues map[string]int                       // Per peer block counts to prevent memory exhaustion
   179  	queued map[common.Hash]*blockOrHeaderInject // Set of already queued blocks (to dedup imports)
   180  
   181  	// Callbacks
   182  	getHeader      HeaderRetrievalFn  // Retrieves a header from the local chain
   183  	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
   184  	verifyHeader   headerVerifierFn   // Checks if a block's header has a valid proof of work
   185  	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
   186  	chainHeight    chainHeightFn      // Retrieves the current chain's height
   187  	insertHeaders  headersInsertFn    // Injects a batch of headers into the chain
   188  	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
   189  	dropPeer       peerDropFn         // Drops a peer for misbehaving
   190  
   191  	// Testing hooks
   192  	announceChangeHook func(common.Hash, bool)           // Method to call upon adding or deleting a hash from the blockAnnounce list
   193  	queueChangeHook    func(common.Hash, bool)           // Method to call upon adding or deleting a block from the import queue
   194  	fetchingHook       func([]common.Hash)               // Method to call upon starting a block (eth/61) or header (eth/62) fetch
   195  	completingHook     func([]common.Hash)               // Method to call upon starting a block body fetch (eth/62)
   196  	importedHook       func(*types.Header, *types.Block) // Method to call upon successful header or block import (both eth/61 and eth/62)
   197  }
   198  
   199  // NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
   200  func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
   201  	return &BlockFetcher{
   202  		light:          light,
   203  		notify:         make(chan *blockAnnounce),
   204  		inject:         make(chan *blockOrHeaderInject),
   205  		headerFilter:   make(chan chan *headerFilterTask),
   206  		bodyFilter:     make(chan chan *bodyFilterTask),
   207  		done:           make(chan common.Hash),
   208  		quit:           make(chan struct{}),
   209  		announces:      make(map[string]int),
   210  		announced:      make(map[common.Hash][]*blockAnnounce),
   211  		fetching:       make(map[common.Hash]*blockAnnounce),
   212  		fetched:        make(map[common.Hash][]*blockAnnounce),
   213  		completing:     make(map[common.Hash]*blockAnnounce),
   214  		queue:          prque.New(nil),
   215  		queues:         make(map[string]int),
   216  		queued:         make(map[common.Hash]*blockOrHeaderInject),
   217  		getHeader:      getHeader,
   218  		getBlock:       getBlock,
   219  		verifyHeader:   verifyHeader,
   220  		broadcastBlock: broadcastBlock,
   221  		chainHeight:    chainHeight,
   222  		insertHeaders:  insertHeaders,
   223  		insertChain:    insertChain,
   224  		dropPeer:       dropPeer,
   225  	}
   226  }
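
// exampleWireBlockFetcher is an editorial sketch, not part of the upstream
// file: it shows how the callbacks above might be wired together for a full
// (non-light) fetcher. The in-memory maps and no-op closures are placeholders
// standing in for a real blockchain backend, consensus engine and peer set.
func exampleWireBlockFetcher() *BlockFetcher {
	var (
		headers = make(map[common.Hash]*types.Header) // stand-in header store
		blocks  = make(map[common.Hash]*types.Block)  // stand-in block store
		height  uint64                                // stand-in chain height
	)
	f := NewBlockFetcher(false,
		func(hash common.Hash) *types.Header { return headers[hash] }, // getHeader
		func(hash common.Hash) *types.Block { return blocks[hash] },   // getBlock
		func(header *types.Header) error { return nil },               // verifyHeader: accept everything
		func(block *types.Block, propagate bool) {},                   // broadcastBlock: no-op
		func() uint64 { return height },                               // chainHeight
		func(hs []*types.Header) (int, error) { return 0, nil },       // insertHeaders: pretend success
		func(bs types.Blocks) (int, error) { return 0, nil },          // insertChain: pretend success
		func(id string) {},                                            // dropPeer: no-op
	)
	f.Start() // spin up the event loop; call f.Stop() to terminate it
	return f
}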
   227  
   228  // Start boots up the announcement based synchroniser, accepting and processing
   229  // hash notifications and block fetches until termination is requested.
   230  func (f *BlockFetcher) Start() {
   231  	go f.loop()
   232  }
   233  
   234  // Stop terminates the announcement based synchroniser, canceling all pending
   235  // operations.
   236  func (f *BlockFetcher) Stop() {
   237  	close(f.quit)
   238  }
   239  
   240  // Notify announces to the fetcher the potential availability of a new block in
   241  // the network.
   242  func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
   243  	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
   244  	block := &blockAnnounce{
   245  		hash:        hash,
   246  		number:      number,
   247  		time:        time,
   248  		origin:      peer,
   249  		fetchHeader: headerFetcher,
   250  		fetchBodies: bodyFetcher,
   251  	}
   252  	select {
   253  	case f.notify <- block:
   254  		return nil
   255  	case <-f.quit:
   256  		return errTerminated
   257  	}
   258  }
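
// exampleAnnounceBlock is an editorial sketch, not part of the upstream file:
// it shows how a protocol handler might forward an inbound block announcement
// to the fetcher. The two stub closures stand in for real peer request methods
// matching the headerRequesterFn and bodyRequesterFn signatures.
func exampleAnnounceBlock(f *BlockFetcher, peerID string, hash common.Hash, number uint64) error {
	fetchHeader := func(h common.Hash) error {
		// A real implementation would send a header retrieval request for h to the peer.
		return nil
	}
	fetchBodies := func(hashes []common.Hash) error {
		// A real implementation would send a body retrieval request for hashes to the peer.
		return nil
	}
	return f.Notify(peerID, hash, number, time.Now(), fetchHeader, fetchBodies)
}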
   259  
   260  // Enqueue tries to fill gaps in the fetcher's future import queue.
   261  func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error {
   262  	op := &blockOrHeaderInject{
   263  		origin: peer,
   264  		block:  block,
   265  	}
   266  	select {
   267  	case f.inject <- op:
   268  		return nil
   269  	case <-f.quit:
   270  		return errTerminated
   271  	}
   272  }
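
// exampleEnqueuePropagatedBlock is an editorial sketch, not part of the
// upstream file: a handler that received a full block broadcast would inject
// it directly instead of going through the announce/fetch cycle.
func exampleEnqueuePropagatedBlock(f *BlockFetcher, peerID string, block *types.Block) {
	if err := f.Enqueue(peerID, block); err != nil {
		// The only error returned here is errTerminated, i.e. the fetcher was stopped.
		log.Debug("Block fetcher stopped, dropping propagated block", "peer", peerID, "hash", block.Hash(), "err", err)
	}
}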
   273  
   274  // FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
   275  // returning those that should be handled differently.
   276  func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
   277  	log.Trace("Filtering headers", "peer", peer, "headers", len(headers))
   278  
   279  	// Send the filter channel to the fetcher
   280  	filter := make(chan *headerFilterTask)
   281  
   282  	select {
   283  	case f.headerFilter <- filter:
   284  	case <-f.quit:
   285  		return nil
   286  	}
   287  	// Request the filtering of the header list
   288  	select {
   289  	case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:
   290  	case <-f.quit:
   291  		return nil
   292  	}
   293  	// Retrieve the headers remaining after filtering
   294  	select {
   295  	case task := <-filter:
   296  		return task.headers
   297  	case <-f.quit:
   298  		return nil
   299  	}
   300  }
   301  
   302  // FilterBodies extracts all the block bodies that were explicitly requested by
   303  // the fetcher, returning those that should be handled differently.
   304  func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
   305  	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))
   306  
   307  	// Send the filter channel to the fetcher
   308  	filter := make(chan *bodyFilterTask)
   309  
   310  	select {
   311  	case f.bodyFilter <- filter:
   312  	case <-f.quit:
   313  		return nil, nil
   314  	}
   315  	// Request the filtering of the body list
   316  	select {
   317  	case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}:
   318  	case <-f.quit:
   319  		return nil, nil
   320  	}
   321  	// Retrieve the bodies remaining after filtering
   322  	select {
   323  	case task := <-filter:
   324  		return task.transactions, task.uncles
   325  	case <-f.quit:
   326  		return nil, nil
   327  	}
   328  }
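
// exampleFilterDeliveries is an editorial sketch, not part of the upstream
// file: it shows how a handler might run inbound header and body packets
// through the fetcher before handing the leftovers to another component
// (such as the downloader). Whatever the filters return was not claimed by
// the fetcher.
func exampleFilterDeliveries(f *BlockFetcher, peerID string, headers []*types.Header,
	txs [][]*types.Transaction, uncles [][]*types.Header) ([]*types.Header, [][]*types.Transaction, [][]*types.Header) {
	// Headers the fetcher explicitly requested are consumed; the rest come back.
	unclaimedHeaders := f.FilterHeaders(peerID, headers, time.Now())
	// Likewise for bodies: the returned slices exclude fetcher-claimed entries.
	unclaimedTxs, unclaimedUncles := f.FilterBodies(peerID, txs, uncles, time.Now())
	return unclaimedHeaders, unclaimedTxs, unclaimedUncles
}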
   329  
   330  // loop is the main fetcher loop, checking and processing various notification
   331  // events.
   332  func (f *BlockFetcher) loop() {
   333  	// Iterate the block fetching until a quit is requested
   334  	var (
   335  		fetchTimer    = time.NewTimer(0)
   336  		completeTimer = time.NewTimer(0)
   337  	)
   338  	<-fetchTimer.C // clear out the channel
   339  	<-completeTimer.C
   340  	defer fetchTimer.Stop()
   341  	defer completeTimer.Stop()
   342  
   343  	for {
   344  		// Clean up any expired block fetches
   345  		for hash, announce := range f.fetching {
   346  			if time.Since(announce.time) > fetchTimeout {
   347  				f.forgetHash(hash)
   348  			}
   349  		}
   350  		// Import any queued blocks that could potentially fit
   351  		height := f.chainHeight()
   352  		for !f.queue.Empty() {
   353  			op := f.queue.PopItem().(*blockOrHeaderInject)
   354  			hash := op.hash()
   355  			if f.queueChangeHook != nil {
   356  				f.queueChangeHook(hash, false)
   357  			}
   358  			// If too high up the chain or phase, continue later
   359  			number := op.number()
   360  			if number > height+1 {
   361  				f.queue.Push(op, -int64(number))
   362  				if f.queueChangeHook != nil {
   363  					f.queueChangeHook(hash, true)
   364  				}
   365  				break
   366  			}
   367  			// Otherwise if fresh and still unknown, try and import
   368  			if (number+maxUncleDist < height) || (f.light && f.getHeader(hash) != nil) || (!f.light && f.getBlock(hash) != nil) {
   369  				f.forgetBlock(hash)
   370  				continue
   371  			}
   372  			if f.light {
   373  				f.importHeaders(op.origin, op.header)
   374  			} else {
   375  				f.importBlocks(op.origin, op.block)
   376  			}
   377  		}
   378  		// Wait for an outside event to occur
   379  		select {
   380  		case <-f.quit:
   381  			// BlockFetcher terminating, abort all operations
   382  			return
   383  
   384  		case notification := <-f.notify:
   385  			// A block was announced, make sure the peer isn't DOSing us
   386  			blockAnnounceInMeter.Mark(1)
   387  
   388  			count := f.announces[notification.origin] + 1
   389  			if count > hashLimit {
   390  				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
   391  				blockAnnounceDOSMeter.Mark(1)
   392  				break
   393  			}
   394  			// If we have a valid block number, check that it's potentially useful
   395  			if notification.number > 0 {
   396  				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
   397  					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
   398  					blockAnnounceDropMeter.Mark(1)
   399  					break
   400  				}
   401  			}
   402  			// All is well, schedule the announce if block's not yet downloading
   403  			if _, ok := f.fetching[notification.hash]; ok {
   404  				break
   405  			}
   406  			if _, ok := f.completing[notification.hash]; ok {
   407  				break
   408  			}
   409  			f.announces[notification.origin] = count
   410  			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
   411  			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
   412  				f.announceChangeHook(notification.hash, true)
   413  			}
   414  			if len(f.announced) == 1 {
   415  				f.rescheduleFetch(fetchTimer)
   416  			}
   417  
   418  		case op := <-f.inject:
   419  			// A direct block insertion was requested, try and fill any pending gaps
   420  			blockBroadcastInMeter.Mark(1)
   421  
   422  			// Only direct block injection is allowed now; silently drop any header
   423  			// injection we receive.
   424  			if f.light {
   425  				continue
   426  			}
   427  			f.enqueue(op.origin, nil, op.block)
   428  
   429  		case hash := <-f.done:
   430  			// A pending import finished, remove all traces of the notification
   431  			f.forgetHash(hash)
   432  			f.forgetBlock(hash)
   433  
   434  		case <-fetchTimer.C:
   435  			// At least one block's timer ran out, check for needing retrieval
   436  			request := make(map[string][]common.Hash)
   437  
   438  			for hash, announces := range f.announced {
   439  				// In the current LES protocol (les2/les3), only header announcements are
   440  				// available, so there is no need to wait long for a header broadcast.
   441  				timeout := arriveTimeout - gatherSlack
   442  				if f.light {
   443  					timeout = 0
   444  				}
   445  				if time.Since(announces[0].time) > timeout {
   446  					// Pick a random peer to retrieve from, reset all others
   447  					announce := announces[rand.Intn(len(announces))]
   448  					f.forgetHash(hash)
   449  
   450  					// If the block still didn't arrive, queue for fetching
   451  					if (f.light && f.getHeader(hash) == nil) || (!f.light && f.getBlock(hash) == nil) {
   452  						request[announce.origin] = append(request[announce.origin], hash)
   453  						f.fetching[hash] = announce
   454  					}
   455  				}
   456  			}
   457  			// Send out all block header requests
   458  			for peer, hashes := range request {
   459  				log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)
   460  
   461  				// Create a closure of the fetch and schedule it on a new thread
   462  				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
   463  				go func() {
   464  					if f.fetchingHook != nil {
   465  						f.fetchingHook(hashes)
   466  					}
   467  					for _, hash := range hashes {
   468  						headerFetchMeter.Mark(1)
   469  						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
   470  					}
   471  				}()
   472  			}
   473  			// Schedule the next fetch if blocks are still pending
   474  			f.rescheduleFetch(fetchTimer)
   475  
   476  		case <-completeTimer.C:
   477  			// At least one header's timer ran out, retrieve everything
   478  			request := make(map[string][]common.Hash)
   479  
   480  			for hash, announces := range f.fetched {
   481  				// Pick a random peer to retrieve from, reset all others
   482  				announce := announces[rand.Intn(len(announces))]
   483  				f.forgetHash(hash)
   484  
   485  				// If the block still didn't arrive, queue for completion
   486  				if f.getBlock(hash) == nil {
   487  					request[announce.origin] = append(request[announce.origin], hash)
   488  					f.completing[hash] = announce
   489  				}
   490  			}
   491  			// Send out all block body requests
   492  			for peer, hashes := range request {
   493  				log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)
   494  
   495  				// Create a closure of the fetch and schedule it on a new thread
   496  				if f.completingHook != nil {
   497  					f.completingHook(hashes)
   498  				}
   499  				bodyFetchMeter.Mark(int64(len(hashes)))
   500  				go f.completing[hashes[0]].fetchBodies(hashes)
   501  			}
   502  			// Schedule the next fetch if blocks are still pending
   503  			f.rescheduleComplete(completeTimer)
   504  
   505  		case filter := <-f.headerFilter:
   506  			// Headers arrived from a remote peer. Extract those that were explicitly
   507  			// requested by the fetcher, and return everything else so it's delivered
   508  			// to other parts of the system.
   509  			var task *headerFilterTask
   510  			select {
   511  			case task = <-filter:
   512  			case <-f.quit:
   513  				return
   514  			}
   515  			headerFilterInMeter.Mark(int64(len(task.headers)))
   516  
   517  			// Split the batch of headers into unknown ones (to return to the caller),
   518  			// known incomplete ones (requiring body retrievals) and completed blocks.
   519  			unknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{}
   520  			for _, header := range task.headers {
   521  				hash := header.Hash()
   522  
   523  				// Filter fetcher-requested headers from other synchronisation algorithms
   524  				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
   525  					// If the delivered header does not match the promised number, drop the announcer
   526  					if header.Number.Uint64() != announce.number {
   527  						log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
   528  						f.dropPeer(announce.origin)
   529  						f.forgetHash(hash)
   530  						continue
   531  					}
   532  					// Collect all headers only if we are running in light
   533  					// mode and the headers are not imported by other means.
   534  					if f.light {
   535  						if f.getHeader(hash) == nil {
   536  							announce.header = header
   537  							lightHeaders = append(lightHeaders, announce)
   538  						}
   539  						f.forgetHash(hash)
   540  						continue
   541  					}
   542  					// Only keep if not imported by other means
   543  					if f.getBlock(hash) == nil {
   544  						announce.header = header
   545  						announce.time = task.time
   546  
   547  						// If the block is empty (header only), short circuit into the final import queue
   548  						if header.TxHash == types.EmptyRootHash && header.UncleHash == types.EmptyUncleHash {
   549  							log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
   550  
   551  							block := types.NewBlockWithHeader(header)
   552  							block.ReceivedAt = task.time
   553  
   554  							complete = append(complete, block)
   555  							f.completing[hash] = announce
   556  							continue
   557  						}
   558  						// Otherwise add to the list of blocks needing completion
   559  						incomplete = append(incomplete, announce)
   560  					} else {
   561  						log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
   562  						f.forgetHash(hash)
   563  					}
   564  				} else {
   565  					// BlockFetcher doesn't know about it, add to the return list
   566  					unknown = append(unknown, header)
   567  				}
   568  			}
   569  			headerFilterOutMeter.Mark(int64(len(unknown)))
   570  			select {
   571  			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
   572  			case <-f.quit:
   573  				return
   574  			}
   575  			// Schedule the retrieved headers for body completion
   576  			for _, announce := range incomplete {
   577  				hash := announce.header.Hash()
   578  				if _, ok := f.completing[hash]; ok {
   579  					continue
   580  				}
   581  				f.fetched[hash] = append(f.fetched[hash], announce)
   582  				if len(f.fetched) == 1 {
   583  					f.rescheduleComplete(completeTimer)
   584  				}
   585  			}
   586  			// Schedule the header for light fetcher import
   587  			for _, announce := range lightHeaders {
   588  				f.enqueue(announce.origin, announce.header, nil)
   589  			}
   590  			// Schedule the header-only blocks for import
   591  			for _, block := range complete {
   592  				if announce := f.completing[block.Hash()]; announce != nil {
   593  					f.enqueue(announce.origin, nil, block)
   594  				}
   595  			}
   596  
   597  		case filter := <-f.bodyFilter:
   598  			// Block bodies arrived, extract any explicitly requested blocks, return the rest
   599  			var task *bodyFilterTask
   600  			select {
   601  			case task = <-filter:
   602  			case <-f.quit:
   603  				return
   604  			}
   605  			bodyFilterInMeter.Mark(int64(len(task.transactions)))
   606  			blocks := []*types.Block{}
   607  			// Abort early if there's nothing explicitly requested
   608  			if len(f.completing) > 0 {
   609  				for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
   610  					// Match up a body to any possible completion request
   611  					var (
   612  						matched   = false
   613  						uncleHash common.Hash // calculated lazily and reused
   614  						txnHash   common.Hash // calculated lazily and reused
   615  					)
   616  					for hash, announce := range f.completing {
   617  						if f.queued[hash] != nil || announce.origin != task.peer {
   618  							continue
   619  						}
   620  						if uncleHash == (common.Hash{}) {
   621  							uncleHash = types.CalcUncleHash(task.uncles[i])
   622  						}
   623  						if uncleHash != announce.header.UncleHash {
   624  							continue
   625  						}
   626  						if txnHash == (common.Hash{}) {
   627  							txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), trie.NewStackTrie(nil))
   628  						}
   629  						if txnHash != announce.header.TxHash {
   630  							continue
   631  						}
   632  						// Mark the body matched, reassemble if still unknown
   633  						matched = true
   634  						if f.getBlock(hash) == nil {
   635  							block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
   636  							block.ReceivedAt = task.time
   637  							blocks = append(blocks, block)
   638  						} else {
   639  							f.forgetHash(hash)
   640  						}
   641  
   642  					}
   643  					if matched {
   644  						task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
   645  						task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
   646  						i--
   647  						continue
   648  					}
   649  				}
   650  			}
   651  			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
   652  			select {
   653  			case filter <- task:
   654  			case <-f.quit:
   655  				return
   656  			}
   657  			// Schedule the retrieved blocks for ordered import
   658  			for _, block := range blocks {
   659  				if announce := f.completing[block.Hash()]; announce != nil {
   660  					f.enqueue(announce.origin, nil, block)
   661  				}
   662  			}
   663  		}
   664  	}
   665  }
   666  
   667  // rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.
   668  func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
   669  	// Short circuit if no blocks are announced
   670  	if len(f.announced) == 0 {
   671  		return
   672  	}
   673  	// Schedule announcement retrieval quickly for light mode
   674  	// since the server won't send any headers to the client.
   675  	if f.light {
   676  		fetch.Reset(lightTimeout)
   677  		return
   678  	}
   679  	// Otherwise find the earliest expiring announcement
   680  	earliest := time.Now()
   681  	for _, announces := range f.announced {
   682  		if earliest.After(announces[0].time) {
   683  			earliest = announces[0].time
   684  		}
   685  	}
   686  	fetch.Reset(arriveTimeout - time.Since(earliest))
   687  }
   688  
   689  // rescheduleComplete resets the specified completion timer to the next fetch timeout.
   690  func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
   691  	// Short circuit if no headers are fetched
   692  	if len(f.fetched) == 0 {
   693  		return
   694  	}
   695  	// Otherwise find the earliest expiring announcement
   696  	earliest := time.Now()
   697  	for _, announces := range f.fetched {
   698  		if earliest.After(announces[0].time) {
   699  			earliest = announces[0].time
   700  		}
   701  	}
   702  	complete.Reset(gatherSlack - time.Since(earliest))
   703  }
   704  
   705  // enqueue schedules a new header or block import operation, if the component
   706  // to be imported has not yet been seen.
   707  func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.Block) {
   708  	var (
   709  		hash   common.Hash
   710  		number uint64
   711  	)
   712  	if header != nil {
   713  		hash, number = header.Hash(), header.Number.Uint64()
   714  	} else {
   715  		hash, number = block.Hash(), block.NumberU64()
   716  	}
   717  	// Ensure the peer isn't DOSing us
   718  	count := f.queues[peer] + 1
   719  	if count > blockLimit {
   720  		log.Debug("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit)
   721  		blockBroadcastDOSMeter.Mark(1)
   722  		f.forgetHash(hash)
   723  		return
   724  	}
   725  	// Discard any past or too distant blocks
   726  	if dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
   727  		log.Debug("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist)
   728  		blockBroadcastDropMeter.Mark(1)
   729  		f.forgetHash(hash)
   730  		return
   731  	}
   732  	// Schedule the block for future importing
   733  	if _, ok := f.queued[hash]; !ok {
   734  		op := &blockOrHeaderInject{origin: peer}
   735  		if header != nil {
   736  			op.header = header
   737  		} else {
   738  			op.block = block
   739  		}
   740  		f.queues[peer] = count
   741  		f.queued[hash] = op
   742  		f.queue.Push(op, -int64(number))
   743  		if f.queueChangeHook != nil {
   744  			f.queueChangeHook(hash, true)
   745  		}
   746  		log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size())
   747  	}
   748  }
   749  
   750  // importHeaders spawns a new goroutine to run a header insertion into the chain.
   751  // If the header's number is at the same height as the current import phase, it
   752  // updates the phase states accordingly.
   753  func (f *BlockFetcher) importHeaders(peer string, header *types.Header) {
   754  	hash := header.Hash()
   755  	log.Debug("Importing propagated header", "peer", peer, "number", header.Number, "hash", hash)
   756  
   757  	go func() {
   758  		defer func() { f.done <- hash }()
   759  		// If the parent's unknown, abort insertion
   760  		parent := f.getHeader(header.ParentHash)
   761  		if parent == nil {
   762  			log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash)
   763  			return
   764  		}
   765  		// Validate the header and if something went wrong, drop the peer
   766  		if err := f.verifyHeader(header); err != nil && err != consensus.ErrFutureBlock {
   767  			log.Debug("Propagated header verification failed", "peer", peer, "number", header.Number, "hash", hash, "err", err)
   768  			f.dropPeer(peer)
   769  			return
   770  		}
   771  		// Run the actual import and log any issues
   772  		if _, err := f.insertHeaders([]*types.Header{header}); err != nil {
   773  			log.Debug("Propagated header import failed", "peer", peer, "number", header.Number, "hash", hash, "err", err)
   774  			return
   775  		}
   776  		// Invoke the testing hook if needed
   777  		if f.importedHook != nil {
   778  			f.importedHook(header, nil)
   779  		}
   780  	}()
   781  }
   782  
   783  // importBlocks spawns a new goroutine to run a block insertion into the chain. If the
   784  // block's number is at the same height as the current import phase, it updates
   785  // the phase states accordingly.
   786  func (f *BlockFetcher) importBlocks(peer string, block *types.Block) {
   787  	hash := block.Hash()
   788  
   789  	// Run the import on a new thread
   790  	log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
   791  	go func() {
   792  		defer func() { f.done <- hash }()
   793  
   794  		// If the parent's unknown, abort insertion
   795  		parent := f.getBlock(block.ParentHash())
   796  		if parent == nil {
   797  			log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
   798  			return
   799  		}
   800  		// Quickly validate the header and propagate the block if it passes
   801  		switch err := f.verifyHeader(block.Header()); err {
   802  		case nil:
   803  			// All ok, quickly propagate to our peers
   804  			blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
   805  			go f.broadcastBlock(block, true)
   806  
   807  		case consensus.ErrFutureBlock:
   808  			// Weird future block, don't fail, but don't propagate it either
   809  
   810  		default:
   811  			// Something went very wrong, drop the peer
   812  			log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
   813  			f.dropPeer(peer)
   814  			return
   815  		}
   816  		// Run the actual import and log any issues
   817  		if _, err := f.insertChain(types.Blocks{block}); err != nil {
   818  			log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
   819  			return
   820  		}
   821  		// If import succeeded, broadcast the block
   822  		blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
   823  		go f.broadcastBlock(block, false)
   824  
   825  		// Invoke the testing hook if needed
   826  		if f.importedHook != nil {
   827  			f.importedHook(nil, block)
   828  		}
   829  	}()
   830  }
   831  
   832  // forgetHash removes all traces of a block announcement from the fetcher's
   833  // internal state.
   834  func (f *BlockFetcher) forgetHash(hash common.Hash) {
   835  	// Remove all pending announces and decrement DOS counters
   836  	if announceMap, ok := f.announced[hash]; ok {
   837  		for _, announce := range announceMap {
   838  			f.announces[announce.origin]--
   839  			if f.announces[announce.origin] <= 0 {
   840  				delete(f.announces, announce.origin)
   841  			}
   842  		}
   843  		delete(f.announced, hash)
   844  		if f.announceChangeHook != nil {
   845  			f.announceChangeHook(hash, false)
   846  		}
   847  	}
   848  	// Remove any pending fetches and decrement the DOS counters
   849  	if announce := f.fetching[hash]; announce != nil {
   850  		f.announces[announce.origin]--
   851  		if f.announces[announce.origin] <= 0 {
   852  			delete(f.announces, announce.origin)
   853  		}
   854  		delete(f.fetching, hash)
   855  	}
   856  
   857  	// Remove any pending completion requests and decrement the DOS counters
   858  	for _, announce := range f.fetched[hash] {
   859  		f.announces[announce.origin]--
   860  		if f.announces[announce.origin] <= 0 {
   861  			delete(f.announces, announce.origin)
   862  		}
   863  	}
   864  	delete(f.fetched, hash)
   865  
   866  	// Remove any pending completions and decrement the DOS counters
   867  	if announce := f.completing[hash]; announce != nil {
   868  		f.announces[announce.origin]--
   869  		if f.announces[announce.origin] <= 0 {
   870  			delete(f.announces, announce.origin)
   871  		}
   872  		delete(f.completing, hash)
   873  	}
   874  }
   875  
   876  // forgetBlock removes all traces of a queued block from the fetcher's internal
   877  // state.
   878  func (f *BlockFetcher) forgetBlock(hash common.Hash) {
   879  	if insert := f.queued[hash]; insert != nil {
   880  		f.queues[insert.origin]--
   881  		if f.queues[insert.origin] == 0 {
   882  			delete(f.queues, insert.origin)
   883  		}
   884  		delete(f.queued, hash)
   885  	}
   886  }