github.com/dominant-strategies/go-quai@v0.28.2/eth/fetcher/block_fetcher.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package fetcher contains the announcement-based header, block and transaction synchronisation.
    18  package fetcher
    19  
    20  import (
    21  	"errors"
    22  	"math/big"
    23  	"math/rand"
    24  	"time"
    25  
    26  	"github.com/dominant-strategies/go-quai/common"
    27  	"github.com/dominant-strategies/go-quai/common/prque"
    28  	"github.com/dominant-strategies/go-quai/consensus"
    29  	"github.com/dominant-strategies/go-quai/core/types"
    30  	"github.com/dominant-strategies/go-quai/log"
    31  	"github.com/dominant-strategies/go-quai/metrics"
    32  	"github.com/dominant-strategies/go-quai/trie"
    33  )
    34  
    35  const (
    36  	lightTimeout  = time.Millisecond       // Time allowance before an announced header is explicitly requested
    37  	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
    38  	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
    39  	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block/transaction
    40  )
    41  
    42  const (
    43  	maxUncleDist              = 100  // Maximum allowed backward distance from the chain head
    44  	maxQueueDist              = 32   // Maximum allowed distance from the chain head to queue
    45  	hashLimit                 = 256  // Maximum number of unique blocks or headers a peer may have announced
    46  	blockLimit                = 64   // Maximum number of unique blocks a peer may have delivered
    47  	c_maxAllowableEntropyDist = 3500 // Maximum allowed entropy distance from the current head, in multiples of the zone's intrinsic logS
    48  )
    49  
    50  var (
    51  	blockAnnounceInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil)
    52  	blockAnnounceOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil)
    53  	blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil)
    54  	blockAnnounceDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil)
    55  
    56  	blockBroadcastInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil)
    57  	blockBroadcastOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil)
    58  	blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil)
    59  
    60  	headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil)
    61  	bodyFetchMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil)
    62  
    63  	headerFilterInMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil)
    64  	headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil)
    65  	bodyFilterInMeter    = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil)
    66  	bodyFilterOutMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil)
    67  )
    68  
    69  var errTerminated = errors.New("terminated")
    70  
    71  // blockRetrievalFn is a callback type for retrieving a block from the local chain.
    72  type blockRetrievalFn func(common.Hash) *types.Block
    73  
    74  // blockWriteFn is a callback type for writing a block to the local chain.
    75  type blockWriteFn func(*types.Block)
    76  
    77  // headerRequesterFn is a callback type for sending a header retrieval request.
    78  type headerRequesterFn func(common.Hash) error
    79  
    80  // bodyRequesterFn is a callback type for sending a body retrieval request.
    81  type bodyRequesterFn func([]common.Hash) error
    82  
    83  // headerVerifierFn is a callback type to verify a block's header for fast propagation.
    84  type headerVerifierFn func(header *types.Header) error
    85  
    86  // verifySealFn is a callback type to verify the seal of a block header and return its PowHash
    87  type verifySealFn func(header *types.Header) (common.Hash, error)
    88  
    89  // blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
    90  type blockBroadcasterFn func(block *types.Block, propagate bool)
    91  
    92  // chainHeightFn is a callback type to retrieve the current chain height.
    93  type chainHeightFn func() uint64
    94  
    95  // currentIntrinsicSFn is a callback type to retrieve the current chain header's intrinsic logS.
    96  type currentIntrinsicSFn func() *big.Int
    97  
    98  // currentSFn is a callback type to retrieve the current chain head's entropy
    99  type currentSFn func() *big.Int
   100  
   101  // currentDifficultyFn is a callback type to retrieve the current chain head's difficulty
   102  type currentDifficultyFn func() *big.Int
   103  
   104  // peerDropFn is a callback type for dropping a peer detected as malicious.
   105  type peerDropFn func(id string)
   106  
   107  // badHashCheckFn is a callback type for checking if a block given by the peer exists in the badHashes list
   108  type badHashCheckFn func(hash common.Hash) bool
   109  
   110  // blockAnnounce is the hash notification of the availability of a new block in the
   111  // network.
   112  type blockAnnounce struct {
   113  	hash   common.Hash   // Hash of the block being announced
   114  	number uint64        // Number of the block being announced (0 = unknown | old protocol)
   115  	header *types.Header // Header of the block partially reassembled (new protocol)
   116  	time   time.Time     // Timestamp of the announcement
   117  
   118  	origin string // Identifier of the peer originating the notification
   119  
   120  	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
   121  	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
   122  }
   123  
   124  // headerFilterTask represents a batch of headers needing fetcher filtering.
   125  type headerFilterTask struct {
   126  	peer    string          // The source peer of block headers
   127  	headers []*types.Header // Collection of headers to filter
   128  	time    time.Time       // Arrival time of the headers
   129  }
   130  
   131  // bodyFilterTask represents a batch of block bodies (transactions and uncles)
   132  // needing fetcher filtering.
   133  type bodyFilterTask struct {
   134  	peer            string                 // The source peer of block bodies
   135  	transactions    [][]*types.Transaction // Collection of transactions per block body
   136  	uncles          [][]*types.Header      // Collection of uncles per block body
   137  	extTransactions [][]*types.Transaction // Collection of external transactions per block body
   138  	subManifest     []types.BlockManifest  // Collection of manifests per block body
   139  	time            time.Time              // Arrival time of the blocks' contents
   140  }
   141  
   142  // blockOrHeaderInject represents a scheduled import operation.
   143  type blockOrHeaderInject struct {
   144  	origin string
   145  
   146  	header *types.Header // Used for light mode fetcher which only cares about header.
   147  	block  *types.Block  // Used for normal mode fetcher which imports full block.
   148  }
   149  
   150  // number returns the block number of the injected object.
   151  func (inject *blockOrHeaderInject) number() uint64 {
   152  	if inject.header != nil {
   153  		return inject.header.Number().Uint64()
   154  	}
   155  	return inject.block.NumberU64()
   156  }
   157  
   158  // hash returns the block hash of the injected object.
   159  func (inject *blockOrHeaderInject) hash() common.Hash {
   160  	if inject.header != nil {
   161  		return inject.header.Hash()
   162  	}
   163  	return inject.block.Hash()
   164  }
   165  
   166  // BlockFetcher is responsible for accumulating block announcements from various peers
   167  // and scheduling them for retrieval.
   168  type BlockFetcher struct {
   169  	// Various event channels
   170  	notify chan *blockAnnounce
   171  	inject chan *blockOrHeaderInject
   172  
   173  	headerFilter chan chan *headerFilterTask
   174  	bodyFilter   chan chan *bodyFilterTask
   175  
   176  	done chan common.Hash
   177  	quit chan struct{}
   178  
   179  	// Announce states
   180  	announces  map[string]int                   // Per peer blockAnnounce counts to prevent memory exhaustion
   181  	announced  map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
   182  	fetching   map[common.Hash]*blockAnnounce   // Announced blocks, currently fetching
   183  	fetched    map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
   184  	completing map[common.Hash]*blockAnnounce   // Blocks with headers, currently body-completing
   185  
   186  	// Block cache
   187  	queue  *prque.Prque                         // Queue containing the import operations (block number sorted)
   188  	queued map[common.Hash]*blockOrHeaderInject // Set of already queued blocks (to dedup imports)
   189  
   190  	// Callbacks
   191  	getBlock            blockRetrievalFn    // Retrieves a block from the local chain
   192  	writeBlock          blockWriteFn        // Writes the block to the DB
   193  	verifyHeader        headerVerifierFn    // Checks if a block's headers have a valid proof of work
   194  	verifySeal          verifySealFn        // Checks if a block's PowHash meets the difficulty requirement
   195  	broadcastBlock      blockBroadcasterFn  // Broadcasts a block to connected peers
   196  	chainHeight         chainHeightFn       // Retrieves the current chain's height
   197  	currentIntrinsicS   currentIntrinsicSFn // Retrieves the current header's intrinsic logS
   198  	currentS            currentSFn          // Retrieves the current head's logS
   199  	currentDifficulty   currentDifficultyFn // Retrieves the current difficulty
   200  	dropPeer            peerDropFn          // Drops a peer for misbehaving
   201  	isBlockHashABadHash badHashCheckFn      // Checks if the block hash exists in the bad hashes list
   202  
   203  	// Testing hooks
   204  	announceChangeHook func(common.Hash, bool)           // Method to call upon adding or deleting a hash from the blockAnnounce list
   205  	queueChangeHook    func(common.Hash, bool)           // Method to call upon adding or deleting a block from the import queue
   206  	fetchingHook       func([]common.Hash)               // Method to call upon starting a block (eth/61) or header (eth/62) fetch
   207  	completingHook     func([]common.Hash)               // Method to call upon starting a block body fetch (eth/62)
   208  	importedHook       func(*types.Header, *types.Block) // Method to call upon successful header or block import (both eth/61 and eth/62)
   209  }
   210  
   211  // NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
   212  func NewBlockFetcher(getBlock blockRetrievalFn, writeBlock blockWriteFn, verifyHeader headerVerifierFn, verifySeal verifySealFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, currentIntrinsicS currentIntrinsicSFn, currentS currentSFn, currentDifficulty currentDifficultyFn, dropPeer peerDropFn, isBlockHashABadHash badHashCheckFn) *BlockFetcher {
   213  	return &BlockFetcher{
   214  		notify:              make(chan *blockAnnounce),
   215  		inject:              make(chan *blockOrHeaderInject),
   216  		headerFilter:        make(chan chan *headerFilterTask),
   217  		bodyFilter:          make(chan chan *bodyFilterTask),
   218  		done:                make(chan common.Hash),
   219  		quit:                make(chan struct{}),
   220  		announces:           make(map[string]int),
   221  		announced:           make(map[common.Hash][]*blockAnnounce),
   222  		fetching:            make(map[common.Hash]*blockAnnounce),
   223  		fetched:             make(map[common.Hash][]*blockAnnounce),
   224  		completing:          make(map[common.Hash]*blockAnnounce),
   225  		queue:               prque.New(nil),
   226  		queued:              make(map[common.Hash]*blockOrHeaderInject),
   227  		getBlock:            getBlock,
   228  		writeBlock:          writeBlock,
   229  		verifyHeader:        verifyHeader,
   230  		verifySeal:          verifySeal,
   231  		broadcastBlock:      broadcastBlock,
   232  		chainHeight:         chainHeight,
   233  		currentIntrinsicS:   currentIntrinsicS,
   234  		currentDifficulty:   currentDifficulty,
   235  		currentS:            currentS,
   236  		dropPeer:            dropPeer,
   237  		isBlockHashABadHash: isBlockHashABadHash,
   238  	}
   239  }
   240  
   241  // Start boots up the announcement based synchroniser, accepting and processing
   242  // hash notifications and block fetches until termination requested.
   243  func (f *BlockFetcher) Start() {
   244  	go f.loop()
   245  }
   246  
   247  // Stop terminates the announcement based synchroniser, canceling all pending
   248  // operations.
   249  func (f *BlockFetcher) Stop() {
   250  	close(f.quit)
   251  }
   252  
   253  // Notify announces the fetcher of the potential availability of a new block in
   254  // the network.
   255  func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
   256  	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
   257  	block := &blockAnnounce{
   258  		hash:        hash,
   259  		number:      number,
   260  		time:        time,
   261  		origin:      peer,
   262  		fetchHeader: headerFetcher,
   263  		fetchBodies: bodyFetcher,
   264  	}
   265  	select {
   266  	case f.notify <- block:
   267  		return nil
   268  	case <-f.quit:
   269  		return errTerminated
   270  	}
   271  }
   272  
   273  // FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
   274  // returning those that should be handled differently.
   275  func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
   276  	log.Trace("Filtering headers", "peer", peer, "headers", len(headers))
   277  
   278  	// Send the filter channel to the fetcher
   279  	filter := make(chan *headerFilterTask)
   280  
   281  	select {
   282  	case f.headerFilter <- filter:
   283  	case <-f.quit:
   284  		return nil
   285  	}
   286  	// Request the filtering of the header list
   287  	select {
   288  	case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:
   289  	case <-f.quit:
   290  		return nil
   291  	}
   292  	// Retrieve the headers remaining after filtering
   293  	select {
   294  	case task := <-filter:
   295  		return task.headers
   296  	case <-f.quit:
   297  		return nil
   298  	}
   299  }
   300  
   301  // FilterBodies extracts all the block bodies that were explicitly requested by
   302  // the fetcher, returning those that should be handled differently.
   303  func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, etxs [][]*types.Transaction, manifest []types.BlockManifest, time time.Time) ([][]*types.Transaction, [][]*types.Header, [][]*types.Transaction, []types.BlockManifest) {
   304  	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles), "etxs", len(etxs), "manifest", len(manifest))
   305  
   306  	// Send the filter channel to the fetcher
   307  	filter := make(chan *bodyFilterTask)
   308  
   309  	select {
   310  	case f.bodyFilter <- filter:
   311  	case <-f.quit:
   312  		return nil, nil, nil, nil
   313  	}
   314  	// Request the filtering of the body list
   315  	select {
   316  	case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, extTransactions: etxs, subManifest: manifest, time: time}:
   317  	case <-f.quit:
   318  		return nil, nil, nil, nil
   319  	}
   320  	// Retrieve the bodies remaining after filtering
   321  	select {
   322  	case task := <-filter:
   323  		return task.transactions, task.uncles, task.extTransactions, task.subManifest
   324  	case <-f.quit:
   325  		return nil, nil, nil, nil
   326  	}
   327  }
   328  
   329  // loop is the main fetcher loop, checking and processing various notification
   330  // events.
   331  func (f *BlockFetcher) loop() {
   332  	// Iterate the block fetching until a quit is requested
   333  	var (
   334  		fetchTimer    = time.NewTimer(0)
   335  		completeTimer = time.NewTimer(0)
   336  	)
   337  	<-fetchTimer.C // clear out the channel
   338  	<-completeTimer.C
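        	// time.NewTimer(0) fires immediately, so both channels are drained above to leave
        	// the timers disarmed until rescheduleFetch/rescheduleComplete arm them again.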
   339  	defer fetchTimer.Stop()
   340  	defer completeTimer.Stop()
   341  
   342  	for {
   343  		// Clean up any expired block fetches
   344  		for hash, announce := range f.fetching {
   345  			if time.Since(announce.time) > fetchTimeout {
   346  				f.forgetHash(hash)
   347  			}
   348  		}
   349  		// Import any queued blocks
   350  		for !f.queue.Empty() {
   351  			op := f.queue.PopItem().(*blockOrHeaderInject)
   352  			hash := op.hash()
   353  			if f.queueChangeHook != nil {
   354  				f.queueChangeHook(hash, false)
   355  			}
   356  			// If the block is still unknown locally, try to import it
   357  			if f.getBlock(hash) != nil {
   358  				f.forgetBlock(hash)
   359  				continue
   360  			}
   361  
   362  			f.ImportBlocks(op.origin, op.block, true)
   363  		}
   364  		// Wait for an outside event to occur
   365  		select {
   366  		case <-f.quit:
   367  			// BlockFetcher terminating, abort all operations
   368  			return
   369  
   370  		case notification := <-f.notify:
   371  			// A block was announced, make sure the peer isn't DOSing us
   372  			blockAnnounceInMeter.Mark(1)
   373  
   374  			count := f.announces[notification.origin] + 1
   375  			if count > hashLimit {
   376  				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
   377  				blockAnnounceDOSMeter.Mark(1)
   378  				break
   379  			}
   380  			// If we have a valid block number, check that it's potentially useful
   381  			if notification.number > 0 {
   382  				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
   383  					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
   384  					blockAnnounceDropMeter.Mark(1)
   385  					break
   386  				}
   387  			}
   388  			// All is well, schedule the announce if block's not yet downloading
   389  			if _, ok := f.fetching[notification.hash]; ok {
   390  				break
   391  			}
   392  			if _, ok := f.completing[notification.hash]; ok {
   393  				break
   394  			}
   395  			f.announces[notification.origin] = count
   396  			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
   397  			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
   398  				f.announceChangeHook(notification.hash, true)
   399  			}
   400  			if len(f.announced) == 1 {
   401  				f.rescheduleFetch(fetchTimer)
   402  			}
   403  
   404  		case op := <-f.inject:
   405  			// A direct block insertion was requested, try and fill any pending gaps
   406  			blockBroadcastInMeter.Mark(1)
   407  
   408  			f.enqueue(op.origin, nil, op.block)
   409  
   410  		case hash := <-f.done:
   411  			// A pending import finished, remove all traces of the notification
   412  			f.forgetHash(hash)
   413  			f.forgetBlock(hash)
   414  
   415  		case <-fetchTimer.C:
   416  			// At least one block's timer ran out, check for needing retrieval
   417  			request := make(map[string][]common.Hash)
   418  
   419  			for hash, announces := range f.announced {
   420  				// In the current LES protocol (les2/les3), only header announces are
   421  				// available, so there is no need to wait long for the header broadcast.
   422  				timeout := arriveTimeout - gatherSlack
   423  				if time.Since(announces[0].time) > timeout {
   424  					// Pick a random peer to retrieve from, reset all others
   425  					announce := announces[rand.Intn(len(announces))]
   426  					f.forgetHash(hash)
   427  
   428  					// If the block still didn't arrive, queue for fetching
   429  					if f.getBlock(hash) == nil {
   430  						request[announce.origin] = append(request[announce.origin], hash)
   431  						f.fetching[hash] = announce
   432  					}
   433  				}
   434  			}
   435  			// Send out all block header requests
   436  			for peer, hashes := range request {
   437  				log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)
   438  
   439  				// Create a closure of the fetch and schedule it on a new thread
   440  				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
   441  				go func() {
   442  					if f.fetchingHook != nil {
   443  						f.fetchingHook(hashes)
   444  					}
   445  					for _, hash := range hashes {
   446  						headerFetchMeter.Mark(1)
   447  						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
   448  					}
   449  				}()
   450  			}
   451  			// Schedule the next fetch if blocks are still pending
   452  			f.rescheduleFetch(fetchTimer)
   453  
   454  		case <-completeTimer.C:
   455  			// At least one header's timer ran out, retrieve everything
   456  			request := make(map[string][]common.Hash)
   457  
   458  			for hash, announces := range f.fetched {
   459  				// Pick a random peer to retrieve from, reset all others
   460  				announce := announces[rand.Intn(len(announces))]
   461  				f.forgetHash(hash)
   462  
   463  				// If the block still didn't arrive, queue for completion
   464  				if f.getBlock(hash) == nil {
   465  					request[announce.origin] = append(request[announce.origin], hash)
   466  					f.completing[hash] = announce
   467  				}
   468  			}
   469  			// Send out all block body requests
   470  			for peer, hashes := range request {
   471  				log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)
   472  
   473  				// Create a closure of the fetch and schedule it on a new thread
   474  				if f.completingHook != nil {
   475  					f.completingHook(hashes)
   476  				}
   477  				bodyFetchMeter.Mark(int64(len(hashes)))
   478  				go f.completing[hashes[0]].fetchBodies(hashes)
   479  			}
   480  			// Schedule the next fetch if blocks are still pending
   481  			f.rescheduleComplete(completeTimer)
   482  
   483  		case filter := <-f.headerFilter:
   484  			// Headers arrived from a remote peer. Extract those that were explicitly
   485  			// requested by the fetcher, and return everything else so it's delivered
   486  			// to other parts of the system.
   487  			var task *headerFilterTask
   488  			select {
   489  			case task = <-filter:
   490  			case <-f.quit:
   491  				return
   492  			}
   493  			headerFilterInMeter.Mark(int64(len(task.headers)))
   494  
   495  			// Split the batch of headers into unknown ones (to return to the caller),
   496  			// known incomplete ones (requiring body retrievals) and completed blocks.
   497  			unknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{}
   498  			for _, header := range task.headers {
   499  				hash := header.Hash()
   500  
   501  				// Filter fetcher-requested headers from other synchronisation algorithms
   502  				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
   503  					// If the delivered header does not match the promised number, drop the announcer
   504  					if header.Number().Uint64() != announce.number {
   505  						log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number())
   506  						f.dropPeer(announce.origin)
   507  						f.forgetHash(hash)
   508  						continue
   509  					}
   510  					// Only keep if not imported by other means
   511  					if f.getBlock(hash) == nil {
   512  						announce.header = header
   513  						announce.time = task.time
   514  
   515  						// If the block is empty (header only), short circuit into the final import queue
   516  						if header.TxHash() == types.EmptyRootHash && header.UncleHash() == types.EmptyUncleHash && header.EtxHash() == types.EmptyRootHash && header.ManifestHash() == types.EmptyRootHash {
   517  							log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number(), "hash", header.Hash())
   518  
   519  							block := types.NewBlockWithHeader(header)
   520  							block.ReceivedAt = task.time
   521  
   522  							complete = append(complete, block)
   523  							f.completing[hash] = announce
   524  							continue
   525  						}
   526  						// Otherwise add to the list of blocks needing completion
   527  						incomplete = append(incomplete, announce)
   528  					} else {
   529  						log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number(), "hash", header.Hash())
   530  						f.forgetHash(hash)
   531  					}
   532  				} else {
   533  					// BlockFetcher doesn't know about it, add to the return list
   534  					unknown = append(unknown, header)
   535  				}
   536  			}
   537  			headerFilterOutMeter.Mark(int64(len(unknown)))
   538  			select {
   539  			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
   540  			case <-f.quit:
   541  				return
   542  			}
   543  			// Schedule the retrieved headers for body completion
   544  			for _, announce := range incomplete {
   545  				hash := announce.header.Hash()
   546  				if _, ok := f.completing[hash]; ok {
   547  					continue
   548  				}
   549  				f.fetched[hash] = append(f.fetched[hash], announce)
   550  				if len(f.fetched) == 1 {
   551  					f.rescheduleComplete(completeTimer)
   552  				}
   553  			}
   554  			// Schedule the header for light fetcher import
   555  			for _, announce := range lightHeaders {
   556  				f.enqueue(announce.origin, announce.header, nil)
   557  			}
   558  			// Schedule the header-only blocks for import
   559  			for _, block := range complete {
   560  				if announce := f.completing[block.Hash()]; announce != nil {
   561  					f.enqueue(announce.origin, nil, block)
   562  				}
   563  			}
   564  
   565  		case filter := <-f.bodyFilter:
   566  			// Block bodies arrived, extract any explicitly requested blocks, return the rest
   567  			var task *bodyFilterTask
   568  			select {
   569  			case task = <-filter:
   570  			case <-f.quit:
   571  				return
   572  			}
   573  			bodyFilterInMeter.Mark(int64(len(task.transactions)))
   574  			blocks := []*types.Block{}
   575  			// abort early if there's nothing explicitly requested
   576  			if len(f.completing) > 0 {
   577  				for i := 0; i < len(task.transactions) && i < len(task.uncles) && i < len(task.extTransactions) && i < len(task.subManifest); i++ {
   578  					// Match up a body to any possible completion request
   579  					var (
   580  						matched      = false
   581  						uncleHash    common.Hash // calculated lazily and reused
   582  						txnHash      common.Hash // calculated lazily and reused
   583  						etxnHash     common.Hash // calculated lazily and reused
   584  						manifestHash common.Hash // calculated lazily and reused
   585  					)
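        					// A body matches a pending completion only if all four derived hashes
        					// (uncles, transactions, external transactions and manifest) equal the
        					// corresponding fields of the announced header.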
   586  					for hash, announce := range f.completing {
   587  						if f.queued[hash] != nil || announce.origin != task.peer {
   588  							continue
   589  						}
   590  						if uncleHash == (common.Hash{}) {
   591  							uncleHash = types.CalcUncleHash(task.uncles[i])
   592  						}
   593  						if uncleHash != announce.header.UncleHash() {
   594  							continue
   595  						}
   596  						if txnHash == (common.Hash{}) {
   597  							txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), trie.NewStackTrie(nil))
   598  						}
   599  						if txnHash != announce.header.TxHash() {
   600  							continue
   601  						}
   602  						if etxnHash == (common.Hash{}) {
   603  							etxnHash = types.DeriveSha(types.Transactions(task.extTransactions[i]), trie.NewStackTrie(nil))
   604  						}
   605  						if etxnHash != announce.header.EtxHash() {
   606  							continue
   607  						}
   608  						if manifestHash == (common.Hash{}) {
   609  							manifestHash = types.DeriveSha(task.subManifest[i], trie.NewStackTrie(nil))
   610  						}
   611  						if manifestHash != announce.header.ManifestHash() {
   612  							continue
   613  						}
   614  						// Mark the body matched, reassemble if still unknown
   615  						matched = true
   616  						if f.getBlock(hash) == nil {
   617  							block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i], task.extTransactions[i], task.subManifest[i])
   618  							block.ReceivedAt = task.time
   619  							blocks = append(blocks, block)
   620  						} else {
   621  							f.forgetHash(hash)
   622  						}
   623  
   624  					}
   625  					if matched {
   626  						task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
   627  						task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
   628  						task.extTransactions = append(task.extTransactions[:i], task.extTransactions[i+1:]...)
   629  						task.subManifest = append(task.subManifest[:i], task.subManifest[i+1:]...)
   630  						i--
   631  						continue
   632  					}
   633  				}
   634  			}
   635  			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
   636  			select {
   637  			case filter <- task:
   638  			case <-f.quit:
   639  				return
   640  			}
   641  			// Schedule the retrieved blocks for ordered import
   642  			for _, block := range blocks {
   643  				if announce := f.completing[block.Hash()]; announce != nil {
   644  					f.enqueue(announce.origin, nil, block)
   645  				}
   646  			}
   647  		}
   648  	}
   649  }
   650  
   651  // rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.
   652  func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
   653  	// Short circuit if no blocks are announced
   654  	if len(f.announced) == 0 {
   655  		return
   656  	}
   657  	// Otherwise find the earliest expiring announcement
   658  	earliest := time.Now()
   659  	for _, announces := range f.announced {
   660  		if earliest.After(announces[0].time) {
   661  			earliest = announces[0].time
   662  		}
   663  	}
   664  	fetch.Reset(arriveTimeout - time.Since(earliest))
   665  }
   666  
   667  // rescheduleComplete resets the specified completion timer to the next fetch timeout.
   668  func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
   669  	// Short circuit if no headers are fetched
   670  	if len(f.fetched) == 0 {
   671  		return
   672  	}
   673  	// Otherwise find the earliest expiring announcement
   674  	earliest := time.Now()
   675  	for _, announces := range f.fetched {
   676  		if earliest.After(announces[0].time) {
   677  			earliest = announces[0].time
   678  		}
   679  	}
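        	// Note: completions are scheduled gatherSlack (rather than arriveTimeout) after the
        	// earliest header arrival, collating almost-simultaneous body fetches into one batch.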
   680  	complete.Reset(gatherSlack - time.Since(earliest))
   681  }
   682  
   683  // enqueue schedules a new header or block import operation, if the component
   684  // to be imported has not yet been seen.
   685  func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.Block) {
   686  	var (
   687  		hash   common.Hash
   688  		number uint64
   689  	)
   690  	if header != nil {
   691  		hash, number = header.Hash(), header.Number().Uint64()
   692  	} else {
   693  		hash, number = block.Hash(), block.NumberU64()
   694  	}
   695  
   696  	// Schedule the block for future importing
   697  	if _, ok := f.queued[hash]; !ok {
   698  		op := &blockOrHeaderInject{origin: peer}
   699  		if header != nil {
   700  			op.header = header
   701  		} else {
   702  			op.block = block
   703  		}
   704  		f.queued[hash] = op
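        		// prque pops the highest priority first, so pushing the negated block number
        		// makes lower-numbered blocks import before higher ones.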
   705  		f.queue.Push(op, -int64(number))
   706  		if f.queueChangeHook != nil {
   707  			f.queueChangeHook(hash, true)
   708  		}
   709  		log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size())
   710  	}
   711  }
   712  
   713  // ImportBlocks verifies the seal and entropy distance of a propagated block and, if it
   714  // is acceptable, spawns a new goroutine to validate the header, write the block to the
   715  // chain and relay it to connected peers.
   716  func (f *BlockFetcher) ImportBlocks(peer string, block *types.Block, relay bool) {
   717  	hash := block.Hash()
   718  	nodeCtx := common.NodeLocation.Context()
   719  
   720  	powhash, err := f.verifySeal(block.Header())
   721  	if err != nil {
   722  		return
   723  	}
   724  	// In the zone context, check that the block's PowHash is at least half the current
   725  	// difficulty. This makes sure that nodes don't listen to forks whose PowHash
   726  	// corresponds to less than 50% of the current difficulty.
   727  	if nodeCtx == common.ZONE_CTX && new(big.Int).SetBytes(powhash.Bytes()).Cmp(new(big.Int).Div(f.currentDifficulty(), big.NewInt(2))) < 0 {
   728  		return
   729  	}
   730  
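        	// The entropy window: MaxAllowableEntropyDist is c_maxAllowableEntropyDist multiples
        	// of the zone's intrinsic logS, and looseSyncEntropyDist adds 1% of slack on top of it.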
   731  	currentIntrinsicS := f.currentIntrinsicS()
   732  	MaxAllowableEntropyDist := new(big.Int).Mul(currentIntrinsicS, big.NewInt(c_maxAllowableEntropyDist))
   733  	looseMaxAllowableEntropy := new(big.Int).Div(MaxAllowableEntropyDist, big.NewInt(100))
   734  	looseSyncEntropyDist := new(big.Int).Add(MaxAllowableEntropyDist, looseMaxAllowableEntropy)
   735  
   736  	broadCastEntropy := block.ParentEntropy()
   737  
   738  	// If the block was mined outside the MaxAllowableEntropyDist window of the current entropy, don't broadcast it
   739  	if relay && f.currentS().Cmp(new(big.Int).Add(broadCastEntropy, MaxAllowableEntropyDist)) > 0 {
   740  		return
   741  	}
   742  	// But don't drop the peer if the block is within 1% of that distance
   743  	if relay && f.currentS().Cmp(new(big.Int).Add(broadCastEntropy, looseSyncEntropyDist)) > 0 {
   744  		if nodeCtx != common.PRIME_CTX {
   745  			f.dropPeer(peer)
   746  		}
   747  		return
   748  	}
   749  
   750  	// Run the import on a new thread
   751  	log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
   752  	go func() {
   753  		defer func() { f.done <- hash }()
   754  
   755  		// If the block broadcast by the peer exists in the bad block list, drop the peer
   756  		if f.isBlockHashABadHash(block.Hash()) {
   757  			f.dropPeer(peer)
   758  			return
   759  		}
   760  		// Quickly validate the header and propagate the block if it passes
   761  		err := f.verifyHeader(block.Header())
   762  
   763  		// ErrUnknownAncestor is accepted as well because a filter has already
   764  		// been applied to all the blocks that reach this point. Since there
   765  		// is a timed cache from which the blocks expire, it is okay to let this
   766  		// block through and broadcast it.
   767  		if err == nil || err.Error() == consensus.ErrUnknownAncestor.Error() {
   768  			// All ok, quickly propagate to our peers
   769  			blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
   770  
   771  			// Only relay the Mined Blocks that meet the depth criteria
   772  			if relay {
   773  				go f.broadcastBlock(block, true)
   774  			}
   775  		} else if err.Error() == consensus.ErrFutureBlock.Error() {
   776  			// Weird future block, don't fail, but neither propagate
   777  		} else {
   778  			// Something went very wrong, drop the peer
   779  			log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
   780  			f.dropPeer(peer)
   781  			return
   782  		}
   783  		// TODO: verify the Headers work to be in a certain threshold window
   784  		f.writeBlock(block)
   785  		// If import succeeded, broadcast the block
   786  		blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
   787  
   788  		// Only relay the Mined Blocks that meet the depth criteria
   789  		if relay {
   790  			go f.broadcastBlock(block, false)
   791  		}
   792  
   793  		// Invoke the testing hook if needed
   794  		if f.importedHook != nil {
   795  			f.importedHook(nil, block)
   796  		}
   797  	}()
   798  }
   799  
   800  // forgetHash removes all traces of a block announcement from the fetcher's
   801  // internal state.
   802  func (f *BlockFetcher) forgetHash(hash common.Hash) {
   803  	// Remove all pending announces and decrement DOS counters
   804  	if announceMap, ok := f.announced[hash]; ok {
   805  		for _, announce := range announceMap {
   806  			f.announces[announce.origin]--
   807  			if f.announces[announce.origin] <= 0 {
   808  				delete(f.announces, announce.origin)
   809  			}
   810  		}
   811  		delete(f.announced, hash)
   812  		if f.announceChangeHook != nil {
   813  			f.announceChangeHook(hash, false)
   814  		}
   815  	}
   816  	// Remove any pending fetches and decrement the DOS counters
   817  	if announce := f.fetching[hash]; announce != nil {
   818  		f.announces[announce.origin]--
   819  		if f.announces[announce.origin] <= 0 {
   820  			delete(f.announces, announce.origin)
   821  		}
   822  		delete(f.fetching, hash)
   823  	}
   824  
   825  	// Remove any pending completion requests and decrement the DOS counters
   826  	for _, announce := range f.fetched[hash] {
   827  		f.announces[announce.origin]--
   828  		if f.announces[announce.origin] <= 0 {
   829  			delete(f.announces, announce.origin)
   830  		}
   831  	}
   832  	delete(f.fetched, hash)
   833  
   834  	// Remove any pending completions and decrement the DOS counters
   835  	if announce := f.completing[hash]; announce != nil {
   836  		f.announces[announce.origin]--
   837  		if f.announces[announce.origin] <= 0 {
   838  			delete(f.announces, announce.origin)
   839  		}
   840  		delete(f.completing, hash)
   841  	}
   842  }
   843  
   844  // forgetBlock removes all traces of a queued block from the fetcher's internal
   845  // state.
   846  func (f *BlockFetcher) forgetBlock(hash common.Hash) {
   847  	if insert := f.queued[hash]; insert != nil {
   848  		delete(f.queued, hash)
   849  	}
   850  }