github.com/theQRL/go-zond@v0.1.1/zond/fetcher/block_fetcher.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package fetcher contains the announcement-based header, block and transaction synchronisation.
    18  package fetcher
    19  
    20  import (
    21  	"errors"
    22  	"math/rand"
    23  	"time"
    24  
    25  	"github.com/theQRL/go-zond/common"
    26  	"github.com/theQRL/go-zond/common/prque"
    27  	"github.com/theQRL/go-zond/consensus"
    28  	"github.com/theQRL/go-zond/core/types"
    29  	"github.com/theQRL/go-zond/log"
    30  	"github.com/theQRL/go-zond/metrics"
    31  	"github.com/theQRL/go-zond/trie"
    32  	"github.com/theQRL/go-zond/zond/protocols/zond"
    33  )
    34  
    35  const (
    36  	lightTimeout  = time.Millisecond       // Time allowance before an announced header is explicitly requested
    37  	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
    38  	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
    39  	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block/transaction
    40  )
    41  
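        // With the local chain head at height H, only announcements and deliveries for
        // block numbers in [H-maxUncleDist, H+maxQueueDist] are considered useful; see
        // the distance checks in loop and enqueue below.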
    42  const (
    43  	maxUncleDist = 7   // Maximum allowed backward distance from the chain head
    44  	maxQueueDist = 32  // Maximum allowed distance from the chain head to queue
    45  	hashLimit    = 256 // Maximum number of unique blocks or headers a peer may have announced
    46  	blockLimit   = 64  // Maximum number of unique blocks a peer may have delivered
    47  )
    48  
    49  var (
    50  	blockAnnounceInMeter   = metrics.NewRegisteredMeter("zond/fetcher/block/announces/in", nil)
    51  	blockAnnounceOutTimer  = metrics.NewRegisteredTimer("zond/fetcher/block/announces/out", nil)
    52  	blockAnnounceDropMeter = metrics.NewRegisteredMeter("zond/fetcher/block/announces/drop", nil)
    53  	blockAnnounceDOSMeter  = metrics.NewRegisteredMeter("zond/fetcher/block/announces/dos", nil)
    54  
    55  	blockBroadcastInMeter   = metrics.NewRegisteredMeter("zond/fetcher/block/broadcasts/in", nil)
    56  	blockBroadcastOutTimer  = metrics.NewRegisteredTimer("zond/fetcher/block/broadcasts/out", nil)
    57  	blockBroadcastDropMeter = metrics.NewRegisteredMeter("zond/fetcher/block/broadcasts/drop", nil)
    58  	blockBroadcastDOSMeter  = metrics.NewRegisteredMeter("zond/fetcher/block/broadcasts/dos", nil)
    59  
    60  	headerFetchMeter = metrics.NewRegisteredMeter("zond/fetcher/block/headers", nil)
    61  	bodyFetchMeter   = metrics.NewRegisteredMeter("zond/fetcher/block/bodies", nil)
    62  
    63  	headerFilterInMeter  = metrics.NewRegisteredMeter("zond/fetcher/block/filter/headers/in", nil)
    64  	headerFilterOutMeter = metrics.NewRegisteredMeter("zond/fetcher/block/filter/headers/out", nil)
    65  	bodyFilterInMeter    = metrics.NewRegisteredMeter("zond/fetcher/block/filter/bodies/in", nil)
    66  	bodyFilterOutMeter   = metrics.NewRegisteredMeter("zond/fetcher/block/filter/bodies/out", nil)
    67  )
    68  
    69  var errTerminated = errors.New("terminated")
    70  
    71  // HeaderRetrievalFn is a callback type for retrieving a header from the local chain.
    72  type HeaderRetrievalFn func(common.Hash) *types.Header
    73  
    74  // blockRetrievalFn is a callback type for retrieving a block from the local chain.
    75  type blockRetrievalFn func(common.Hash) *types.Block
    76  
    77  // headerRequesterFn is a callback type for sending a header retrieval request.
    78  type headerRequesterFn func(common.Hash, chan *zond.Response) (*zond.Request, error)
    79  
    80  // bodyRequesterFn is a callback type for sending a body retrieval request.
    81  type bodyRequesterFn func([]common.Hash, chan *zond.Response) (*zond.Request, error)
    82  
    83  // headerVerifierFn is a callback type to verify a block's header for fast propagation.
    84  type headerVerifierFn func(header *types.Header) error
    85  
    86  // blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
    87  type blockBroadcasterFn func(block *types.Block, propagate bool)
    88  
    89  // chainHeightFn is a callback type to retrieve the current chain height.
    90  type chainHeightFn func() uint64
    91  
    92  // headersInsertFn is a callback type to insert a batch of headers into the local chain.
    93  type headersInsertFn func(headers []*types.Header) (int, error)
    94  
    95  // chainInsertFn is a callback type to insert a batch of blocks into the local chain.
    96  type chainInsertFn func(types.Blocks) (int, error)
    97  
    98  // peerDropFn is a callback type for dropping a peer detected as malicious.
    99  type peerDropFn func(id string)
   100  
   101  // blockAnnounce is the hash notification of the availability of a new block in the
   102  // network.
   103  type blockAnnounce struct {
   104  	hash   common.Hash   // Hash of the block being announced
   105  	number uint64        // Number of the block being announced (0 = unknown | old protocol)
   106  	header *types.Header // Header of the block partially reassembled (new protocol)
   107  	time   time.Time     // Timestamp of the announcement
   108  
   109  	origin string // Identifier of the peer originating the notification
   110  
   111  	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
   112  	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
   113  }
   114  
   115  // headerFilterTask represents a batch of headers needing fetcher filtering.
   116  type headerFilterTask struct {
   117  	peer    string          // The source peer of block headers
   118  	headers []*types.Header // Collection of headers to filter
   119  	time    time.Time       // Arrival time of the headers
   120  }
   121  
   122  // bodyFilterTask represents a batch of block bodies (transactions and uncles)
   123  // needing fetcher filtering.
   124  type bodyFilterTask struct {
   125  	peer         string                 // The source peer of block bodies
   126  	transactions [][]*types.Transaction // Collection of transactions per block body
   127  	uncles       [][]*types.Header      // Collection of uncles per block body
   128  	time         time.Time              // Arrival time of the blocks' contents
   129  }
   130  
   131  // blockOrHeaderInject represents a scheduled import operation.
   132  type blockOrHeaderInject struct {
   133  	origin string
   134  
   135  	header *types.Header // Used by the light-mode fetcher, which only cares about the header.
   136  	block  *types.Block  // Used by the normal-mode fetcher, which imports the full block.
   137  }
   138  
   139  // number returns the block number of the injected object.
   140  func (inject *blockOrHeaderInject) number() uint64 {
   141  	if inject.header != nil {
   142  		return inject.header.Number.Uint64()
   143  	}
   144  	return inject.block.NumberU64()
   145  }
   146  
   147  // hash returns the block hash of the injected object.
   148  func (inject *blockOrHeaderInject) hash() common.Hash {
   149  	if inject.header != nil {
   150  		return inject.header.Hash()
   151  	}
   152  	return inject.block.Hash()
   153  }
   154  
   155  // BlockFetcher is responsible for accumulating block announcements from various peers
   156  // and scheduling them for retrieval.
   157  type BlockFetcher struct {
   158  	light bool // Indicator of whether this is a light fetcher or a normal one.
   159  
   160  	// Various event channels
   161  	notify chan *blockAnnounce
   162  	inject chan *blockOrHeaderInject
   163  
   164  	headerFilter chan chan *headerFilterTask
   165  	bodyFilter   chan chan *bodyFilterTask
   166  
   167  	done chan common.Hash
   168  	quit chan struct{}
   169  
   170  	// Announce states
   171  	announces  map[string]int                   // Per peer blockAnnounce counts to prevent memory exhaustion
   172  	announced  map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
   173  	fetching   map[common.Hash]*blockAnnounce   // Announced blocks, currently fetching
   174  	fetched    map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
   175  	completing map[common.Hash]*blockAnnounce   // Blocks with headers, currently body-completing
   176  
   177  	// Block cache
   178  	queue  *prque.Prque[int64, *blockOrHeaderInject] // Queue containing the import operations (block number sorted)
   179  	queues map[string]int                            // Per peer block counts to prevent memory exhaustion
   180  	queued map[common.Hash]*blockOrHeaderInject      // Set of already queued blocks (to dedup imports)
   181  
   182  	// Callbacks
   183  	getHeader      HeaderRetrievalFn  // Retrieves a header from the local chain
   184  	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
   185  	verifyHeader   headerVerifierFn   // Checks if a block's header conforms to the consensus rules
   186  	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
   187  	chainHeight    chainHeightFn      // Retrieves the current chain's height
   188  	insertHeaders  headersInsertFn    // Injects a batch of headers into the chain
   189  	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
   190  	dropPeer       peerDropFn         // Drops a peer for misbehaving
   191  
   192  	// Testing hooks
   193  	announceChangeHook func(common.Hash, bool)           // Method to call upon adding or deleting a hash from the blockAnnounce list
   194  	queueChangeHook    func(common.Hash, bool)           // Method to call upon adding or deleting a block from the import queue
   195  	fetchingHook       func([]common.Hash)               // Method to call upon starting a block (zond/61) or header (zond/62) fetch
   196  	completingHook     func([]common.Hash)               // Method to call upon starting a block body fetch (zond/62)
   197  	importedHook       func(*types.Header, *types.Block) // Method to call upon successful header or block import (both zond/61 and zond/62)
   198  }
   199  
   200  // NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
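        // A minimal wiring sketch (the callback and variable names below are assumed
        // caller-side helpers, not part of this package):
        //
        //	f := NewBlockFetcher(false, getHeader, getBlock, verifyHeader,
        //		broadcastBlock, chainHeight, insertHeaders, insertChain, dropPeer)
        //	f.Start()
        //	defer f.Stop()
        //	// announce a block heard about on the network
        //	_ = f.Notify(peerID, blockHash, blockNumber, time.Now(), requestHeader, requestBodies)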
   201  func NewBlockFetcher(light bool, getHeader HeaderRetrievalFn, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertHeaders headersInsertFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
   202  	return &BlockFetcher{
   203  		light:          light,
   204  		notify:         make(chan *blockAnnounce),
   205  		inject:         make(chan *blockOrHeaderInject),
   206  		headerFilter:   make(chan chan *headerFilterTask),
   207  		bodyFilter:     make(chan chan *bodyFilterTask),
   208  		done:           make(chan common.Hash),
   209  		quit:           make(chan struct{}),
   210  		announces:      make(map[string]int),
   211  		announced:      make(map[common.Hash][]*blockAnnounce),
   212  		fetching:       make(map[common.Hash]*blockAnnounce),
   213  		fetched:        make(map[common.Hash][]*blockAnnounce),
   214  		completing:     make(map[common.Hash]*blockAnnounce),
   215  		queue:          prque.New[int64, *blockOrHeaderInject](nil),
   216  		queues:         make(map[string]int),
   217  		queued:         make(map[common.Hash]*blockOrHeaderInject),
   218  		getHeader:      getHeader,
   219  		getBlock:       getBlock,
   220  		verifyHeader:   verifyHeader,
   221  		broadcastBlock: broadcastBlock,
   222  		chainHeight:    chainHeight,
   223  		insertHeaders:  insertHeaders,
   224  		insertChain:    insertChain,
   225  		dropPeer:       dropPeer,
   226  	}
   227  }
   228  
   229  // Start boots up the announcement based synchroniser, accepting and processing
   230  // hash notifications and block fetches until termination is requested.
   231  func (f *BlockFetcher) Start() {
   232  	go f.loop()
   233  }
   234  
   235  // Stop terminates the announcement based synchroniser, canceling all pending
   236  // operations.
   237  func (f *BlockFetcher) Stop() {
   238  	close(f.quit)
   239  }
   240  
   241  // Notify announces to the fetcher the potential availability of a new block in
   242  // the network.
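        // It is typically invoked by the protocol handler when a remote peer announces
        // a new block hash.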
   243  func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
   244  	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
   245  	block := &blockAnnounce{
   246  		hash:        hash,
   247  		number:      number,
   248  		time:        time,
   249  		origin:      peer,
   250  		fetchHeader: headerFetcher,
   251  		fetchBodies: bodyFetcher,
   252  	}
   253  	select {
   254  	case f.notify <- block:
   255  		return nil
   256  	case <-f.quit:
   257  		return errTerminated
   258  	}
   259  }
   260  
   261  // Enqueue tries to fill gaps in the fetcher's future import queue.
   262  func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error {
   263  	op := &blockOrHeaderInject{
   264  		origin: peer,
   265  		block:  block,
   266  	}
   267  	select {
   268  	case f.inject <- op:
   269  		return nil
   270  	case <-f.quit:
   271  		return errTerminated
   272  	}
   273  }
   274  
   275  // FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
   276  // returning those that should be handled differently.
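        // The exchange with the fetcher loop is a two-step handshake: the reply channel
        // is handed over first, then the filter task, and finally the headers left over
        // after filtering are read back (nil if the fetcher is shutting down).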
   277  func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
   278  	log.Trace("Filtering headers", "peer", peer, "headers", len(headers))
   279  
   280  	// Send the filter channel to the fetcher
   281  	filter := make(chan *headerFilterTask)
   282  
   283  	select {
   284  	case f.headerFilter <- filter:
   285  	case <-f.quit:
   286  		return nil
   287  	}
   288  	// Request the filtering of the header list
   289  	select {
   290  	case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:
   291  	case <-f.quit:
   292  		return nil
   293  	}
   294  	// Retrieve the headers remaining after filtering
   295  	select {
   296  	case task := <-filter:
   297  		return task.headers
   298  	case <-f.quit:
   299  		return nil
   300  	}
   301  }
   302  
   303  // FilterBodies extracts all the block bodies that were explicitly requested by
   304  // the fetcher, returning those that should be handled differently.
   305  func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
   306  	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))
   307  
   308  	// Send the filter channel to the fetcher
   309  	filter := make(chan *bodyFilterTask)
   310  
   311  	select {
   312  	case f.bodyFilter <- filter:
   313  	case <-f.quit:
   314  		return nil, nil
   315  	}
   316  	// Request the filtering of the body list
   317  	select {
   318  	case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}:
   319  	case <-f.quit:
   320  		return nil, nil
   321  	}
   322  	// Retrieve the bodies remaining after filtering
   323  	select {
   324  	case task := <-filter:
   325  		return task.transactions, task.uncles
   326  	case <-f.quit:
   327  		return nil, nil
   328  	}
   329  }
   330  
   331  // loop is the main fetcher loop, checking and processing various notification
   332  // events.
   333  func (f *BlockFetcher) loop() {
   334  	// Iterate the block fetching until a quit is requested
   335  	var (
   336  		fetchTimer    = time.NewTimer(0)
   337  		completeTimer = time.NewTimer(0)
   338  	)
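        	// Both timers fire immediately (zero duration); drain them so that the first
        	// Reset in rescheduleFetch/rescheduleComplete starts from a clean state.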
   339  	<-fetchTimer.C // clear out the channel
   340  	<-completeTimer.C
   341  	defer fetchTimer.Stop()
   342  	defer completeTimer.Stop()
   343  
   344  	for {
   345  		// Clean up any expired block fetches
   346  		for hash, announce := range f.fetching {
   347  			if time.Since(announce.time) > fetchTimeout {
   348  				f.forgetHash(hash)
   349  			}
   350  		}
   351  		// Import any queued blocks that could potentially fit
   352  		height := f.chainHeight()
   353  		for !f.queue.Empty() {
   354  			op := f.queue.PopItem()
   355  			hash := op.hash()
   356  			if f.queueChangeHook != nil {
   357  				f.queueChangeHook(hash, false)
   358  			}
   359  			// If too high up the chain or phase, continue later
   360  			number := op.number()
   361  			if number > height+1 {
   362  				f.queue.Push(op, -int64(number))
   363  				if f.queueChangeHook != nil {
   364  					f.queueChangeHook(hash, true)
   365  				}
   366  				break
   367  			}
   368  			// Otherwise if fresh and still unknown, try and import
   369  			if (number+maxUncleDist < height) || (f.light && f.getHeader(hash) != nil) || (!f.light && f.getBlock(hash) != nil) {
   370  				f.forgetBlock(hash)
   371  				continue
   372  			}
   373  			if f.light {
   374  				f.importHeaders(op.origin, op.header)
   375  			} else {
   376  				f.importBlocks(op.origin, op.block)
   377  			}
   378  		}
   379  		// Wait for an outside event to occur
   380  		select {
   381  		case <-f.quit:
   382  			// BlockFetcher terminating, abort all operations
   383  			return
   384  
   385  		case notification := <-f.notify:
   386  			// A block was announced, make sure the peer isn't DOSing us
   387  			blockAnnounceInMeter.Mark(1)
   388  
   389  			count := f.announces[notification.origin] + 1
   390  			if count > hashLimit {
   391  				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
   392  				blockAnnounceDOSMeter.Mark(1)
   393  				break
   394  			}
   395  			if notification.number == 0 {
   396  				break
   397  			}
   398  			// If we have a valid block number, check that it's potentially useful
   399  			if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
   400  				log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
   401  				blockAnnounceDropMeter.Mark(1)
   402  				break
   403  			}
   404  			// All is well, schedule the announce if the block's not yet downloading
   405  			if _, ok := f.fetching[notification.hash]; ok {
   406  				break
   407  			}
   408  			if _, ok := f.completing[notification.hash]; ok {
   409  				break
   410  			}
   411  			f.announces[notification.origin] = count
   412  			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
   413  			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
   414  				f.announceChangeHook(notification.hash, true)
   415  			}
   416  			if len(f.announced) == 1 {
   417  				f.rescheduleFetch(fetchTimer)
   418  			}
   419  
   420  		case op := <-f.inject:
   421  			// A direct block insertion was requested, try and fill any pending gaps
   422  			blockBroadcastInMeter.Mark(1)
   423  
   424  			// Only direct block injection is allowed now; silently drop any header
   425  			// injection we receive here.
   426  			if f.light {
   427  				continue
   428  			}
   429  			f.enqueue(op.origin, nil, op.block)
   430  
   431  		case hash := <-f.done:
   432  			// A pending import finished, remove all traces of the notification
   433  			f.forgetHash(hash)
   434  			f.forgetBlock(hash)
   435  
   436  		case <-fetchTimer.C:
   437  			// At least one block's timer ran out, check for needing retrieval
   438  			request := make(map[string][]common.Hash)
   439  
   440  			for hash, announces := range f.announced {
   441  				// In the current LES protocol (les2/les3), only header announcements are
   442  				// available, so there is no need to wait long for header broadcasts.
   443  				timeout := arriveTimeout - gatherSlack
   444  				if f.light {
   445  					timeout = 0
   446  				}
   447  				if time.Since(announces[0].time) > timeout {
   448  					// Pick a random peer to retrieve from, reset all others
   449  					announce := announces[rand.Intn(len(announces))]
   450  					f.forgetHash(hash)
   451  
   452  					// If the block still didn't arrive, queue for fetching
   453  					if (f.light && f.getHeader(hash) == nil) || (!f.light && f.getBlock(hash) == nil) {
   454  						request[announce.origin] = append(request[announce.origin], hash)
   455  						f.fetching[hash] = announce
   456  					}
   457  				}
   458  			}
   459  			// Send out all block header requests
   460  			for peer, hashes := range request {
   461  				log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)
   462  
   463  				// Create a closure of the fetch and schedule it on a new thread
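        				// (hashes is rebound here so the goroutines spawned below capture this
        				// iteration's slice rather than the shared loop variable)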
   464  				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
   465  				go func(peer string) {
   466  					if f.fetchingHook != nil {
   467  						f.fetchingHook(hashes)
   468  					}
   469  					for _, hash := range hashes {
   470  						headerFetchMeter.Mark(1)
   471  						go func(hash common.Hash) {
   472  							resCh := make(chan *zond.Response)
   473  
   474  							req, err := fetchHeader(hash, resCh)
   475  							if err != nil {
   476  								return // Legacy code, yolo
   477  							}
   478  							defer req.Close()
   479  
   480  							timeout := time.NewTimer(2 * fetchTimeout) // 2x leeway before dropping the peer
   481  							defer timeout.Stop()
   482  
   483  							select {
   484  							case res := <-resCh:
   485  								res.Done <- nil
   486  								f.FilterHeaders(peer, *res.Res.(*zond.BlockHeadersPacket), time.Now().Add(res.Time))
   487  
   488  							case <-timeout.C:
   489  								// The peer didn't respond in time. The request
   490  								// was already rescheduled at this point, we were
   491  								// waiting for a catchup. With an unresponsive
   492  								// peer however, it's a protocol violation.
   493  								f.dropPeer(peer)
   494  							}
   495  						}(hash)
   496  					}
   497  				}(peer)
   498  			}
   499  			// Schedule the next fetch if blocks are still pending
   500  			f.rescheduleFetch(fetchTimer)
   501  
   502  		case <-completeTimer.C:
   503  			// At least one header's timer ran out, retrieve everything
   504  			request := make(map[string][]common.Hash)
   505  
   506  			for hash, announces := range f.fetched {
   507  				// Pick a random peer to retrieve from, reset all others
   508  				announce := announces[rand.Intn(len(announces))]
   509  				f.forgetHash(hash)
   510  
   511  				// If the block still didn't arrive, queue for completion
   512  				if f.getBlock(hash) == nil {
   513  					request[announce.origin] = append(request[announce.origin], hash)
   514  					f.completing[hash] = announce
   515  				}
   516  			}
   517  			// Send out all block body requests
   518  			for peer, hashes := range request {
   519  				log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)
   520  
   521  				// Create a closure of the fetch and schedule it on a new thread
   522  				if f.completingHook != nil {
   523  					f.completingHook(hashes)
   524  				}
   525  				fetchBodies := f.completing[hashes[0]].fetchBodies
   526  				bodyFetchMeter.Mark(int64(len(hashes)))
   527  
   528  				go func(peer string, hashes []common.Hash) {
   529  					resCh := make(chan *zond.Response)
   530  
   531  					req, err := fetchBodies(hashes, resCh)
   532  					if err != nil {
   533  						return // Legacy code, yolo
   534  					}
   535  					defer req.Close()
   536  
   537  					timeout := time.NewTimer(2 * fetchTimeout) // 2x leeway before dropping the peer
   538  					defer timeout.Stop()
   539  
   540  					select {
   541  					case res := <-resCh:
   542  						res.Done <- nil
   543  						// Ignoring withdrawals here, since the block fetcher is not used post-merge.
   544  						txs, uncles, _ := res.Res.(*zond.BlockBodiesPacket).Unpack()
   545  						f.FilterBodies(peer, txs, uncles, time.Now())
   546  
   547  					case <-timeout.C:
   548  						// The peer didn't respond in time. The request
   549  						// was already rescheduled at this point, we were
   550  						// waiting for a catchup. With an unresponsive
   551  						// peer however, it's a protocol violation.
   552  						f.dropPeer(peer)
   553  					}
   554  				}(peer, hashes)
   555  			}
   556  			// Schedule the next body completion if headers are still pending
   557  			f.rescheduleComplete(completeTimer)
   558  
   559  		case filter := <-f.headerFilter:
   560  			// Headers arrived from a remote peer. Extract those that were explicitly
   561  			// requested by the fetcher, and return everything else so it's delivered
   562  			// to other parts of the system.
   563  			var task *headerFilterTask
   564  			select {
   565  			case task = <-filter:
   566  			case <-f.quit:
   567  				return
   568  			}
   569  			headerFilterInMeter.Mark(int64(len(task.headers)))
   570  
   571  			// Split the batch of headers into unknown ones (to return to the caller),
   572  			// known incomplete ones (requiring body retrievals) and completed blocks.
   573  			unknown, incomplete, complete, lightHeaders := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}, []*blockAnnounce{}
   574  			for _, header := range task.headers {
   575  				hash := header.Hash()
   576  
   577  				// Filter fetcher-requested headers from other synchronisation algorithms
   578  				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
   579  					// If the delivered header does not match the promised number, drop the announcer
   580  					if header.Number.Uint64() != announce.number {
   581  						log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
   582  						f.dropPeer(announce.origin)
   583  						f.forgetHash(hash)
   584  						continue
   585  					}
   586  					// Collect all headers only if we are running in light
   587  					// mode and the headers are not imported by other means.
   588  					if f.light {
   589  						if f.getHeader(hash) == nil {
   590  							announce.header = header
   591  							lightHeaders = append(lightHeaders, announce)
   592  						}
   593  						f.forgetHash(hash)
   594  						continue
   595  					}
   596  					// Only keep if not imported by other means
   597  					if f.getBlock(hash) == nil {
   598  						announce.header = header
   599  						announce.time = task.time
   600  
   601  						// If the block is empty (header only), short circuit into the final import queue
   602  						if header.TxHash == types.EmptyTxsHash && header.UncleHash == types.EmptyUncleHash {
   603  							log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
   604  
   605  							block := types.NewBlockWithHeader(header)
   606  							block.ReceivedAt = task.time
   607  
   608  							complete = append(complete, block)
   609  							f.completing[hash] = announce
   610  							continue
   611  						}
   612  						// Otherwise add to the list of blocks needing completion
   613  						incomplete = append(incomplete, announce)
   614  					} else {
   615  						log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
   616  						f.forgetHash(hash)
   617  					}
   618  				} else {
   619  					// BlockFetcher doesn't know about it, add to the return list
   620  					unknown = append(unknown, header)
   621  				}
   622  			}
   623  			headerFilterOutMeter.Mark(int64(len(unknown)))
   624  			select {
   625  			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
   626  			case <-f.quit:
   627  				return
   628  			}
   629  			// Schedule the retrieved headers for body completion
   630  			for _, announce := range incomplete {
   631  				hash := announce.header.Hash()
   632  				if _, ok := f.completing[hash]; ok {
   633  					continue
   634  				}
   635  				f.fetched[hash] = append(f.fetched[hash], announce)
   636  				if len(f.fetched) == 1 {
   637  					f.rescheduleComplete(completeTimer)
   638  				}
   639  			}
   640  			// Schedule the header for light fetcher import
   641  			for _, announce := range lightHeaders {
   642  				f.enqueue(announce.origin, announce.header, nil)
   643  			}
   644  			// Schedule the header-only blocks for import
   645  			for _, block := range complete {
   646  				if announce := f.completing[block.Hash()]; announce != nil {
   647  					f.enqueue(announce.origin, nil, block)
   648  				}
   649  			}
   650  
   651  		case filter := <-f.bodyFilter:
   652  			// Block bodies arrived, extract any explicitly requested blocks, return the rest
   653  			var task *bodyFilterTask
   654  			select {
   655  			case task = <-filter:
   656  			case <-f.quit:
   657  				return
   658  			}
   659  			bodyFilterInMeter.Mark(int64(len(task.transactions)))
   660  			blocks := []*types.Block{}
   661  			// Only attempt matching if there are outstanding completion requests
   662  			if len(f.completing) > 0 {
   663  				for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
   664  					// Match up a body to any possible completion request
   665  					var (
   666  						matched   = false
   667  						uncleHash common.Hash // calculated lazily and reused
   668  						txnHash   common.Hash // calculated lazily and reused
   669  					)
   670  					for hash, announce := range f.completing {
   671  						if f.queued[hash] != nil || announce.origin != task.peer {
   672  							continue
   673  						}
   674  						if uncleHash == (common.Hash{}) {
   675  							uncleHash = types.CalcUncleHash(task.uncles[i])
   676  						}
   677  						if uncleHash != announce.header.UncleHash {
   678  							continue
   679  						}
   680  						if txnHash == (common.Hash{}) {
   681  							txnHash = types.DeriveSha(types.Transactions(task.transactions[i]), trie.NewStackTrie(nil))
   682  						}
   683  						if txnHash != announce.header.TxHash {
   684  							continue
   685  						}
   686  						// Mark the body matched, reassemble if still unknown
   687  						matched = true
   688  						if f.getBlock(hash) == nil {
   689  							block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
   690  							block.ReceivedAt = task.time
   691  							blocks = append(blocks, block)
   692  						} else {
   693  							f.forgetHash(hash)
   694  						}
   695  					}
   696  					if matched {
   697  						task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
   698  						task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
   699  						i--
   700  						continue
   701  					}
   702  				}
   703  			}
   704  			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
   705  			select {
   706  			case filter <- task:
   707  			case <-f.quit:
   708  				return
   709  			}
   710  			// Schedule the retrieved blocks for ordered import
   711  			for _, block := range blocks {
   712  				if announce := f.completing[block.Hash()]; announce != nil {
   713  					f.enqueue(announce.origin, nil, block)
   714  				}
   715  			}
   716  		}
   717  	}
   718  }
   719  
   720  // rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.
   721  func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
   722  	// Short circuit if no blocks are announced
   723  	if len(f.announced) == 0 {
   724  		return
   725  	}
   726  	// Schedule announcement retrieval quickly for light mode
   727  	// since the server won't send any headers to the client.
   728  	if f.light {
   729  		fetch.Reset(lightTimeout)
   730  		return
   731  	}
   732  	// Otherwise find the earliest expiring announcement
   733  	earliest := time.Now()
   734  	for _, announces := range f.announced {
   735  		if earliest.After(announces[0].time) {
   736  			earliest = announces[0].time
   737  		}
   738  	}
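        	// Fire when the oldest pending announcement becomes arriveTimeout old.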
   739  	fetch.Reset(arriveTimeout - time.Since(earliest))
   740  }
   741  
   742  // rescheduleComplete resets the specified completion timer to the next fetch timeout.
   743  func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
   744  	// Short circuit if no headers are fetched
   745  	if len(f.fetched) == 0 {
   746  		return
   747  	}
   748  	// Otherwise find the earliest expiring announcement
   749  	earliest := time.Now()
   750  	for _, announces := range f.fetched {
   751  		if earliest.After(announces[0].time) {
   752  			earliest = announces[0].time
   753  		}
   754  	}
   755  	complete.Reset(gatherSlack - time.Since(earliest))
   756  }
   757  
   758  // enqueue schedules a new header or block import operation, if the component
   759  // to be imported has not yet been seen.
   760  func (f *BlockFetcher) enqueue(peer string, header *types.Header, block *types.Block) {
   761  	var (
   762  		hash   common.Hash
   763  		number uint64
   764  	)
   765  	if header != nil {
   766  		hash, number = header.Hash(), header.Number.Uint64()
   767  	} else {
   768  		hash, number = block.Hash(), block.NumberU64()
   769  	}
   770  	// Ensure the peer isn't DOSing us
   771  	count := f.queues[peer] + 1
   772  	if count > blockLimit {
   773  		log.Debug("Discarded delivered header or block, exceeded allowance", "peer", peer, "number", number, "hash", hash, "limit", blockLimit)
   774  		blockBroadcastDOSMeter.Mark(1)
   775  		f.forgetHash(hash)
   776  		return
   777  	}
   778  	// Discard any past or too distant blocks
   779  	if dist := int64(number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
   780  		log.Debug("Discarded delivered header or block, too far away", "peer", peer, "number", number, "hash", hash, "distance", dist)
   781  		blockBroadcastDropMeter.Mark(1)
   782  		f.forgetHash(hash)
   783  		return
   784  	}
   785  	// Schedule the block for future importing
   786  	if _, ok := f.queued[hash]; !ok {
   787  		op := &blockOrHeaderInject{origin: peer}
   788  		if header != nil {
   789  			op.header = header
   790  		} else {
   791  			op.block = block
   792  		}
   793  		f.queues[peer] = count
   794  		f.queued[hash] = op
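        		// Negative priority so that the lowest block numbers are popped (and thus
        		// imported) first.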
   795  		f.queue.Push(op, -int64(number))
   796  		if f.queueChangeHook != nil {
   797  			f.queueChangeHook(hash, true)
   798  		}
   799  		log.Debug("Queued delivered header or block", "peer", peer, "number", number, "hash", hash, "queued", f.queue.Size())
   800  	}
   801  }
   802  
   803  // importHeaders spawns a new goroutine to run a header insertion into the chain.
   804  // If the header's number is at the same height as the current import phase, it
   805  // updates the phase states accordingly.
   806  func (f *BlockFetcher) importHeaders(peer string, header *types.Header) {
   807  	hash := header.Hash()
   808  	log.Debug("Importing propagated header", "peer", peer, "number", header.Number, "hash", hash)
   809  
   810  	go func() {
   811  		defer func() { f.done <- hash }()
   812  		// If the parent's unknown, abort insertion
   813  		parent := f.getHeader(header.ParentHash)
   814  		if parent == nil {
   815  			log.Debug("Unknown parent of propagated header", "peer", peer, "number", header.Number, "hash", hash, "parent", header.ParentHash)
   816  			return
   817  		}
   818  		// Validate the header and if something went wrong, drop the peer
   819  		if err := f.verifyHeader(header); err != nil && err != consensus.ErrFutureBlock {
   820  			log.Debug("Propagated header verification failed", "peer", peer, "number", header.Number, "hash", hash, "err", err)
   821  			f.dropPeer(peer)
   822  			return
   823  		}
   824  		// Run the actual import and log any issues
   825  		if _, err := f.insertHeaders([]*types.Header{header}); err != nil {
   826  			log.Debug("Propagated header import failed", "peer", peer, "number", header.Number, "hash", hash, "err", err)
   827  			return
   828  		}
   829  		// Invoke the testing hook if needed
   830  		if f.importedHook != nil {
   831  			f.importedHook(header, nil)
   832  		}
   833  	}()
   834  }
   835  
   836  // importBlocks spawns a new goroutine to run a block insertion into the chain. If the
   837  // block's number is at the same height as the current import phase, it updates
   838  // the phase states accordingly.
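        // The block is handed to the broadcaster twice: right after its header passes
        // verification (with propagate=true), and again once it has been successfully
        // imported into the local chain (with propagate=false).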
   839  func (f *BlockFetcher) importBlocks(peer string, block *types.Block) {
   840  	hash := block.Hash()
   841  
   842  	// Run the import on a new thread
   843  	log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
   844  	go func() {
   845  		defer func() { f.done <- hash }()
   846  
   847  		// If the parent's unknown, abort insertion
   848  		parent := f.getBlock(block.ParentHash())
   849  		if parent == nil {
   850  			log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
   851  			return
   852  		}
   853  		// Quickly validate the header and propagate the block if it passes
   854  		switch err := f.verifyHeader(block.Header()); err {
   855  		case nil:
   856  			// All ok, quickly propagate to our peers
   857  			blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
   858  			go f.broadcastBlock(block, true)
   859  
   860  		case consensus.ErrFutureBlock:
   861  			// Weird future block, don't fail, but don't propagate it either
   862  
   863  		default:
   864  			// Something went very wrong, drop the peer
   865  			log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
   866  			f.dropPeer(peer)
   867  			return
   868  		}
   869  		// Run the actual import and log any issues
   870  		if _, err := f.insertChain(types.Blocks{block}); err != nil {
   871  			log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
   872  			return
   873  		}
   874  		// If import succeeded, broadcast the block
   875  		blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
   876  		go f.broadcastBlock(block, false)
   877  
   878  		// Invoke the testing hook if needed
   879  		if f.importedHook != nil {
   880  			f.importedHook(nil, block)
   881  		}
   882  	}()
   883  }
   884  
   885  // forgetHash removes all traces of a block announcement from the fetcher's
   886  // internal state.
   887  func (f *BlockFetcher) forgetHash(hash common.Hash) {
   888  	// Remove all pending announces and decrement DOS counters
   889  	if announceMap, ok := f.announced[hash]; ok {
   890  		for _, announce := range announceMap {
   891  			f.announces[announce.origin]--
   892  			if f.announces[announce.origin] <= 0 {
   893  				delete(f.announces, announce.origin)
   894  			}
   895  		}
   896  		delete(f.announced, hash)
   897  		if f.announceChangeHook != nil {
   898  			f.announceChangeHook(hash, false)
   899  		}
   900  	}
   901  	// Remove any pending fetches and decrement the DOS counters
   902  	if announce := f.fetching[hash]; announce != nil {
   903  		f.announces[announce.origin]--
   904  		if f.announces[announce.origin] <= 0 {
   905  			delete(f.announces, announce.origin)
   906  		}
   907  		delete(f.fetching, hash)
   908  	}
   909  
   910  	// Remove any pending completion requests and decrement the DOS counters
   911  	for _, announce := range f.fetched[hash] {
   912  		f.announces[announce.origin]--
   913  		if f.announces[announce.origin] <= 0 {
   914  			delete(f.announces, announce.origin)
   915  		}
   916  	}
   917  	delete(f.fetched, hash)
   918  
   919  	// Remove any pending completions and decrement the DOS counters
   920  	if announce := f.completing[hash]; announce != nil {
   921  		f.announces[announce.origin]--
   922  		if f.announces[announce.origin] <= 0 {
   923  			delete(f.announces, announce.origin)
   924  		}
   925  		delete(f.completing, hash)
   926  	}
   927  }
   928  
   929  // forgetBlock removes all traces of a queued block from the fetcher's internal
   930  // state.
   931  func (f *BlockFetcher) forgetBlock(hash common.Hash) {
   932  	if insert := f.queued[hash]; insert != nil {
   933  		f.queues[insert.origin]--
   934  		if f.queues[insert.origin] == 0 {
   935  			delete(f.queues, insert.origin)
   936  		}
   937  		delete(f.queued, hash)
   938  	}
   939  }