github.com/ebakus/go-ebakus@v1.0.5-0.20200520105415-dbccef9ec421/eth/fetcher/fetcher.go

     1  // Copyright 2019 The ebakus/go-ebakus Authors
     2  // This file is part of the ebakus/go-ebakus library.
     3  //
     4  // The ebakus/go-ebakus library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The ebakus/go-ebakus library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the ebakus/go-ebakus library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package fetcher contains the block announcement based synchronisation.
    18  package fetcher
    19  
    20  import (
    21  	"errors"
    22  	"math/rand"
    23  	"time"
    24  
    25  	"github.com/ebakus/go-ebakus/common"
    26  	"github.com/ebakus/go-ebakus/common/prque"
    27  	"github.com/ebakus/go-ebakus/consensus"
    28  	"github.com/ebakus/go-ebakus/core/types"
    29  	"github.com/ebakus/go-ebakus/log"
    30  )
    31  
    32  const (
    33  	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
    34  	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
    35  	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block
    36  	maxQueueDist  = 256                    // Maximum allowed distance from the chain head to queue
    37  	hashLimit     = 256                    // Maximum number of unique blocks a peer may have announced
    38  	blockLimit    = 64                     // Maximum number of unique blocks a peer may have delivered
    39  )
    40  
    41  var (
    42  	errTerminated = errors.New("terminated")
    43  )
    44  
    45  // blockRetrievalFn is a callback type for retrieving a block from the local chain.
    46  type blockRetrievalFn func(common.Hash) *types.Block
    47  
    48  // headerRequesterFn is a callback type for sending a header retrieval request.
    49  type headerRequesterFn func(common.Hash) error
    50  
    51  // bodyRequesterFn is a callback type for sending a body retrieval request.
    52  type bodyRequesterFn func([]common.Hash) error
    53  
    54  // headerVerifierFn is a callback type to verify a block's header for fast propagation.
    55  type headerVerifierFn func(header *types.Header) error
    56  
    57  // blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
    58  type blockBroadcasterFn func(block *types.Block, propagate bool)
    59  
    60  // chainHeightFn is a callback type to retrieve the current chain height.
    61  type chainHeightFn func() uint64
    62  
    63  // chainInsertFn is a callback type to insert a batch of blocks into the local chain.
    64  type chainInsertFn func(types.Blocks) (int, error)
    65  
    66  // peerDropFn is a callback type for dropping a peer detected as malicious.
    67  type peerDropFn func(id string)
    68  
    69  // announce is the hash notification of the availability of a new block in the
    70  // network.
    71  type announce struct {
    72  	hash   common.Hash   // Hash of the block being announced
    73  	number uint64        // Number of the block being announced (0 = unknown | old protocol)
    74  	header *types.Header // Header of the block partially reassembled (new protocol)
    75  	time   time.Time     // Timestamp of the announcement
    76  
    77  	origin string // Identifier of the peer originating the notification
    78  
    79  	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
    80  	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
    81  }
    82  
    83  // headerFilterTask represents a batch of headers needing fetcher filtering.
    84  type headerFilterTask struct {
    85  	peer    string          // The source peer of block headers
    86  	headers []*types.Header // Collection of headers to filter
    87  	time    time.Time       // Arrival time of the headers
    88  }
    89  
     90  // bodyFilterTask represents a batch of block bodies (transactions) needing
     91  // fetcher filtering.
    92  type bodyFilterTask struct {
    93  	peer         string                 // The source peer of block bodies
    94  	transactions [][]*types.Transaction // Collection of transactions per block bodies
    95  	time         time.Time              // Arrival time of the blocks' contents
    96  }
    97  
     98  // inject represents a scheduled import operation.
    99  type inject struct {
   100  	origin string
   101  	block  *types.Block
   102  }
   103  
   104  // Fetcher is responsible for accumulating block announcements from various peers
   105  // and scheduling them for retrieval.
   106  type Fetcher struct {
   107  	// Various event channels
   108  	notify chan *announce
   109  	inject chan *inject
   110  
   111  	headerFilter chan chan *headerFilterTask
   112  	bodyFilter   chan chan *bodyFilterTask
   113  
   114  	done chan common.Hash
   115  	quit chan struct{}
   116  
   117  	// Announce states
   118  	announces  map[string]int              // Per peer announce counts to prevent memory exhaustion
   119  	announced  map[common.Hash][]*announce // Announced blocks, scheduled for fetching
   120  	fetching   map[common.Hash]*announce   // Announced blocks, currently fetching
   121  	fetched    map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
   122  	completing map[common.Hash]*announce   // Blocks with headers, currently body-completing
   123  
   124  	// Block cache
   125  	queue  *prque.Prque            // Queue containing the import operations (block number sorted)
   126  	queues map[string]int          // Per peer block counts to prevent memory exhaustion
   127  	queued map[common.Hash]*inject // Set of already queued blocks (to dedupe imports)
   128  
   129  	// Callbacks
   130  	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
    131  	verifyHeader   headerVerifierFn   // Checks if a block's header conforms to the consensus rules
   132  	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
   133  	chainHeight    chainHeightFn      // Retrieves the current chain's height
   134  	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
   135  	dropPeer       peerDropFn         // Drops a peer for misbehaving
   136  
   137  	// Testing hooks
   138  	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list
   139  	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
   140  	fetchingHook       func([]common.Hash)     // Method to call upon starting a block (eth/61) or header (eth/62) fetch
   141  	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (eth/62)
   142  	importedHook       func(*types.Block)      // Method to call upon successful block import (both eth/61 and eth/62)
   143  }
   144  
   145  // New creates a block fetcher to retrieve blocks based on hash announcements.
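         //
         // A rough wiring sketch; the blockchain and protocol-manager handles used
         // below are hypothetical placeholders, not part of this package:
         //
         //	f := fetcher.New(
         //		blockchain.GetBlockByHash, // blockRetrievalFn
         //		verifyHeader,              // headerVerifierFn, e.g. backed by the consensus engine
         //		broadcastBlock,            // blockBroadcasterFn
         //		chainHeight,               // chainHeightFn
         //		blockchain.InsertChain,    // chainInsertFn
         //		removePeer,                // peerDropFn
         //	)
         //	f.Start()
         //	defer f.Stop()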
   146  func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
   147  	return &Fetcher{
   148  		notify:         make(chan *announce),
   149  		inject:         make(chan *inject),
   150  		headerFilter:   make(chan chan *headerFilterTask),
   151  		bodyFilter:     make(chan chan *bodyFilterTask),
   152  		done:           make(chan common.Hash),
   153  		quit:           make(chan struct{}),
   154  		announces:      make(map[string]int),
   155  		announced:      make(map[common.Hash][]*announce),
   156  		fetching:       make(map[common.Hash]*announce),
   157  		fetched:        make(map[common.Hash][]*announce),
   158  		completing:     make(map[common.Hash]*announce),
   159  		queue:          prque.New(nil),
   160  		queues:         make(map[string]int),
   161  		queued:         make(map[common.Hash]*inject),
   162  		getBlock:       getBlock,
   163  		verifyHeader:   verifyHeader,
   164  		broadcastBlock: broadcastBlock,
   165  		chainHeight:    chainHeight,
   166  		insertChain:    insertChain,
   167  		dropPeer:       dropPeer,
   168  	}
   169  }
   170  
    171  // Start boots up the announcement based synchroniser, accepting and processing
    172  // hash notifications and block fetches until termination is requested.
   173  func (f *Fetcher) Start() {
   174  	go f.loop()
   175  }
   176  
   177  // Stop terminates the announcement based synchroniser, canceling all pending
   178  // operations.
   179  func (f *Fetcher) Stop() {
   180  	close(f.quit)
   181  }
   182  
    183  // Notify announces to the fetcher the potential availability of a new block
    184  // in the network.
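         //
         // A sketch of how a protocol handler might forward an incoming announcement;
         // the peer object and its request methods are hypothetical placeholders:
         //
         //	err := f.Notify(p.ID(), hash, number, time.Now(),
         //		p.RequestOneHeader, p.RequestBodies)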
   185  func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
   186  	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
   187  	block := &announce{
   188  		hash:        hash,
   189  		number:      number,
   190  		time:        time,
   191  		origin:      peer,
   192  		fetchHeader: headerFetcher,
   193  		fetchBodies: bodyFetcher,
   194  	}
   195  	select {
   196  	case f.notify <- block:
   197  		return nil
   198  	case <-f.quit:
   199  		return errTerminated
   200  	}
   201  }
   202  
    203  // Enqueue tries to fill gaps in the fetcher's future import queue.
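         //
         // Typically called from the protocol handler when a peer propagates a full
         // block directly (hypothetical handler code):
         //
         //	f.Enqueue(p.ID(), block)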
   204  func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
   205  	op := &inject{
   206  		origin: peer,
   207  		block:  block,
   208  	}
   209  	select {
   210  	case f.inject <- op:
   211  		return nil
   212  	case <-f.quit:
   213  		return errTerminated
   214  	}
   215  }
   216  
   217  // FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
   218  // returning those that should be handled differently.
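         //
         // Typically called from the protocol handler when a batch of headers arrives,
         // so the fetcher can consume the ones it asked for and the remainder can be
         // handed to other consumers such as the downloader (hypothetical handler code):
         //
         //	headers = f.FilterHeaders(p.ID(), headers, time.Now())
         //	if len(headers) > 0 {
         //		// deliver the remaining headers elsewhere
         //	}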
   219  func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
   220  	log.Trace("Filtering headers", "peer", peer, "headers", len(headers))
   221  
   222  	// Send the filter channel to the fetcher
   223  	filter := make(chan *headerFilterTask)
   224  
   225  	select {
   226  	case f.headerFilter <- filter:
   227  	case <-f.quit:
   228  		return nil
   229  	}
   230  	// Request the filtering of the header list
   231  	select {
   232  	case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:
   233  	case <-f.quit:
   234  		return nil
   235  	}
   236  	// Retrieve the headers remaining after filtering
   237  	select {
   238  	case task := <-filter:
   239  		return task.headers
   240  	case <-f.quit:
   241  		return nil
   242  	}
   243  }
   244  
   245  // FilterBodies extracts all the block bodies that were explicitly requested by
   246  // the fetcher, returning those that should be handled differently.
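         //
         // The body-side counterpart of FilterHeaders: transaction lists consumed by
         // the fetcher are removed from the returned slice (hypothetical handler code):
         //
         //	transactions = f.FilterBodies(p.ID(), transactions, time.Now())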
   247  func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction, time time.Time) [][]*types.Transaction {
   248  	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions))
   249  
   250  	// Send the filter channel to the fetcher
   251  	filter := make(chan *bodyFilterTask)
   252  
   253  	select {
   254  	case f.bodyFilter <- filter:
   255  	case <-f.quit:
   256  		return nil
   257  	}
   258  	// Request the filtering of the body list
   259  	select {
   260  	case filter <- &bodyFilterTask{peer: peer, transactions: transactions, time: time}:
   261  	case <-f.quit:
   262  		return nil
   263  	}
   264  	// Retrieve the bodies remaining after filtering
   265  	select {
   266  	case task := <-filter:
   267  		return task.transactions
   268  	case <-f.quit:
   269  		return nil
   270  	}
   271  }
   272  
    273  // loop is the main fetcher loop, checking and processing various notification
    274  // events.
   275  func (f *Fetcher) loop() {
   276  	// Iterate the block fetching until a quit is requested
   277  	fetchTimer := time.NewTimer(0)
   278  	completeTimer := time.NewTimer(0)
   279  
   280  	for {
   281  		// Clean up any expired block fetches
   282  		for hash, announce := range f.fetching {
   283  			if time.Since(announce.time) > fetchTimeout {
   284  				f.forgetHash(hash)
   285  			}
   286  		}
   287  		// Import any queued blocks that could potentially fit
   288  		height := f.chainHeight()
   289  		for !f.queue.Empty() {
   290  			op := f.queue.PopItem().(*inject)
   291  			hash := op.block.Hash()
   292  			if f.queueChangeHook != nil {
   293  				f.queueChangeHook(hash, false)
   294  			}
    295  			// If too far ahead of the chain head, push it back and revisit later
   296  			number := op.block.NumberU64()
   297  			if number > height+1 {
   298  				f.queue.Push(op, -int64(number))
   299  				if f.queueChangeHook != nil {
   300  					f.queueChangeHook(hash, true)
   301  				}
   302  				break
   303  			}
   304  			// Otherwise if fresh and still unknown, try and import
   305  			if number < height || f.getBlock(hash) != nil {
   306  				f.forgetBlock(hash)
   307  				continue
   308  			}
   309  			f.insert(op.origin, op.block)
   310  		}
   311  		// Wait for an outside event to occur
   312  		select {
   313  		case <-f.quit:
   314  			// Fetcher terminating, abort all operations
   315  			return
   316  
   317  		case notification := <-f.notify:
   318  			// A block was announced, make sure the peer isn't DOSing us
   319  			propAnnounceInMeter.Mark(1)
   320  
   321  			count := f.announces[notification.origin] + 1
   322  			if count > hashLimit {
   323  				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
   324  				propAnnounceDOSMeter.Mark(1)
   325  				break
   326  			}
   327  			// If we have a valid block number, check that it's potentially useful
   328  			if notification.number > 0 {
   329  				if dist := int64(notification.number) - int64(f.chainHeight()); dist > maxQueueDist {
   330  					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
   331  					propAnnounceDropMeter.Mark(1)
   332  					break
   333  				}
   334  			}
   335  			// All is well, schedule the announce if block's not yet downloading
   336  			if _, ok := f.fetching[notification.hash]; ok {
   337  				break
   338  			}
   339  			if _, ok := f.completing[notification.hash]; ok {
   340  				break
   341  			}
   342  			f.announces[notification.origin] = count
   343  			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
   344  			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
   345  				f.announceChangeHook(notification.hash, true)
   346  			}
   347  			if len(f.announced) == 1 {
   348  				f.rescheduleFetch(fetchTimer)
   349  			}
   350  
   351  		case op := <-f.inject:
   352  			// A direct block insertion was requested, try and fill any pending gaps
   353  			propBroadcastInMeter.Mark(1)
   354  			f.enqueue(op.origin, op.block)
   355  
   356  		case hash := <-f.done:
   357  			// A pending import finished, remove all traces of the notification
   358  			f.forgetHash(hash)
   359  			f.forgetBlock(hash)
   360  
   361  		case <-fetchTimer.C:
    362  			// At least one block's timer ran out, check which announcements need retrieval
   363  			request := make(map[string][]common.Hash)
   364  
   365  			for hash, announces := range f.announced {
   366  				if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
   367  					// Pick a random peer to retrieve from, reset all others
   368  					announce := announces[rand.Intn(len(announces))]
   369  					f.forgetHash(hash)
   370  
   371  					// If the block still didn't arrive, queue for fetching
   372  					if f.getBlock(hash) == nil {
   373  						request[announce.origin] = append(request[announce.origin], hash)
   374  						f.fetching[hash] = announce
   375  					}
   376  				}
   377  			}
   378  			// Send out all block header requests
   379  			for peer, hashes := range request {
   380  				log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)
   381  
    382  				// Create a closure of the fetch and schedule it on a new thread
   383  				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
   384  				go func() {
   385  					if f.fetchingHook != nil {
   386  						f.fetchingHook(hashes)
   387  					}
   388  					for _, hash := range hashes {
   389  						headerFetchMeter.Mark(1)
   390  						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
   391  					}
   392  				}()
   393  			}
   394  			// Schedule the next fetch if blocks are still pending
   395  			f.rescheduleFetch(fetchTimer)
   396  
   397  		case <-completeTimer.C:
   398  			// At least one header's timer ran out, retrieve everything
   399  			request := make(map[string][]common.Hash)
   400  
   401  			for hash, announces := range f.fetched {
   402  				// Pick a random peer to retrieve from, reset all others
   403  				announce := announces[rand.Intn(len(announces))]
   404  				f.forgetHash(hash)
   405  
   406  				// If the block still didn't arrive, queue for completion
   407  				if f.getBlock(hash) == nil {
   408  					request[announce.origin] = append(request[announce.origin], hash)
   409  					f.completing[hash] = announce
   410  				}
   411  			}
   412  			// Send out all block body requests
   413  			for peer, hashes := range request {
   414  				log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)
   415  
    416  				// Create a closure of the fetch and schedule it on a new thread
   417  				if f.completingHook != nil {
   418  					f.completingHook(hashes)
   419  				}
   420  				bodyFetchMeter.Mark(int64(len(hashes)))
   421  				go f.completing[hashes[0]].fetchBodies(hashes)
   422  			}
   423  			// Schedule the next fetch if blocks are still pending
   424  			f.rescheduleComplete(completeTimer)
   425  
   426  		case filter := <-f.headerFilter:
   427  			// Headers arrived from a remote peer. Extract those that were explicitly
   428  			// requested by the fetcher, and return everything else so it's delivered
   429  			// to other parts of the system.
   430  			var task *headerFilterTask
   431  			select {
   432  			case task = <-filter:
   433  			case <-f.quit:
   434  				return
   435  			}
   436  			headerFilterInMeter.Mark(int64(len(task.headers)))
   437  
   438  			// Split the batch of headers into unknown ones (to return to the caller),
   439  			// known incomplete ones (requiring body retrievals) and completed blocks.
   440  			unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
   441  			for _, header := range task.headers {
   442  				hash := header.Hash()
   443  
   444  				// Filter fetcher-requested headers from other synchronisation algorithms
   445  				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
   446  					// If the delivered header does not match the promised number, drop the announcer
   447  					if header.Number.Uint64() != announce.number {
   448  						log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
   449  						f.dropPeer(announce.origin)
   450  						f.forgetHash(hash)
   451  						continue
   452  					}
   453  					// Only keep if not imported by other means
   454  					if f.getBlock(hash) == nil {
   455  						announce.header = header
   456  						announce.time = task.time
   457  
   458  						// If the block is empty (header only), short circuit into the final import queue
   459  						if header.TxHash == types.DeriveSha(types.Transactions{}) {
   460  							log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
   461  
   462  							block := types.NewBlockWithHeader(header)
   463  							block.ReceivedAt = task.time
   464  
   465  							complete = append(complete, block)
   466  							f.completing[hash] = announce
   467  							continue
   468  						}
   469  						// Otherwise add to the list of blocks needing completion
   470  						incomplete = append(incomplete, announce)
   471  					} else {
   472  						log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
   473  						f.forgetHash(hash)
   474  					}
   475  				} else {
   476  					// Fetcher doesn't know about it, add to the return list
   477  					unknown = append(unknown, header)
   478  				}
   479  			}
   480  			headerFilterOutMeter.Mark(int64(len(unknown)))
   481  			select {
   482  			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
   483  			case <-f.quit:
   484  				return
   485  			}
   486  			// Schedule the retrieved headers for body completion
   487  			for _, announce := range incomplete {
   488  				hash := announce.header.Hash()
   489  				if _, ok := f.completing[hash]; ok {
   490  					continue
   491  				}
   492  				f.fetched[hash] = append(f.fetched[hash], announce)
   493  				if len(f.fetched) == 1 {
   494  					f.rescheduleComplete(completeTimer)
   495  				}
   496  			}
   497  			// Schedule the header-only blocks for import
   498  			for _, block := range complete {
   499  				if announce := f.completing[block.Hash()]; announce != nil {
   500  					f.enqueue(announce.origin, block)
   501  				}
   502  			}
   503  
   504  		case filter := <-f.bodyFilter:
   505  			// Block bodies arrived, extract any explicitly requested blocks, return the rest
   506  			var task *bodyFilterTask
   507  			select {
   508  			case task = <-filter:
   509  			case <-f.quit:
   510  				return
   511  			}
   512  			bodyFilterInMeter.Mark(int64(len(task.transactions)))
   513  
   514  			blocks := []*types.Block{}
   515  			for i := 0; i < len(task.transactions); i++ {
   516  				// Match up a body to any possible completion request
   517  				matched := false
   518  
   519  				for hash, announce := range f.completing {
   520  					if f.queued[hash] == nil {
   521  						txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
   522  
   523  						if txnHash == announce.header.TxHash && announce.origin == task.peer {
   524  							// Mark the body matched, reassemble if still unknown
   525  							matched = true
   526  
   527  							if f.getBlock(hash) == nil {
   528  								block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i])
   529  								block.ReceivedAt = task.time
   530  
   531  								blocks = append(blocks, block)
   532  							} else {
   533  								f.forgetHash(hash)
   534  							}
   535  						}
   536  					}
   537  				}
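         				// A matched body was consumed by a pending completion request; drop it
         				// from the task so it is not also returned to the caller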
   538  				if matched {
   539  					task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
   540  					i--
   541  					continue
   542  				}
   543  			}
   544  
   545  			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
   546  			select {
   547  			case filter <- task:
   548  			case <-f.quit:
   549  				return
   550  			}
   551  			// Schedule the retrieved blocks for ordered import
   552  			for _, block := range blocks {
   553  				if announce := f.completing[block.Hash()]; announce != nil {
   554  					f.enqueue(announce.origin, block)
   555  				}
   556  			}
   557  		}
   558  	}
   559  }
   560  
   561  // rescheduleFetch resets the specified fetch timer to the next announce timeout.
   562  func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
   563  	// Short circuit if no blocks are announced
   564  	if len(f.announced) == 0 {
   565  		return
   566  	}
   567  	// Otherwise find the earliest expiring announcement
   568  	earliest := time.Now()
   569  	for _, announces := range f.announced {
   570  		if earliest.After(announces[0].time) {
   571  			earliest = announces[0].time
   572  		}
   573  	}
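         	// Fire once the oldest pending announcement reaches the arrival timeout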
   574  	fetch.Reset(arriveTimeout - time.Since(earliest))
   575  }
   576  
   577  // rescheduleComplete resets the specified completion timer to the next fetch timeout.
   578  func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
   579  	// Short circuit if no headers are fetched
   580  	if len(f.fetched) == 0 {
   581  		return
   582  	}
   583  	// Otherwise find the earliest expiring announcement
   584  	earliest := time.Now()
   585  	for _, announces := range f.fetched {
   586  		if earliest.After(announces[0].time) {
   587  			earliest = announces[0].time
   588  		}
   589  	}
   590  	complete.Reset(gatherSlack - time.Since(earliest))
   591  }
   592  
   593  // enqueue schedules a new future import operation, if the block to be imported
   594  // has not yet been seen.
   595  func (f *Fetcher) enqueue(peer string, block *types.Block) {
   596  	hash := block.Hash()
   597  
   598  	// Ensure the peer isn't DOSing us
   599  	count := f.queues[peer] + 1
   600  	if count > blockLimit {
   601  		log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
   602  		propBroadcastDOSMeter.Mark(1)
   603  		f.forgetHash(hash)
   604  		return
   605  	}
    606  	// Discard any blocks too far ahead of the chain head
   607  	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist > maxQueueDist {
   608  		log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist)
   609  		propBroadcastDropMeter.Mark(1)
   610  		f.forgetHash(hash)
   611  		return
   612  	}
   613  	// Schedule the block for future importing
   614  	if _, ok := f.queued[hash]; !ok {
   615  		op := &inject{
   616  			origin: peer,
   617  			block:  block,
   618  		}
   619  		f.queues[peer] = count
   620  		f.queued[hash] = op
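         		// Negate the block number as priority so the lowest block pops first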
   621  		f.queue.Push(op, -int64(block.NumberU64()))
   622  		if f.queueChangeHook != nil {
   623  			f.queueChangeHook(op.block.Hash(), true)
   624  		}
   625  		log.Debug("Queued propagated block", "peer", peer, "number", block.Number(), "hash", hash, "queued", f.queue.Size())
   626  	}
   627  }
   628  
   629  // insert spawns a new goroutine to run a block insertion into the chain. If the
   630  // block's number is at the same height as the current import phase, it updates
   631  // the phase states accordingly.
   632  func (f *Fetcher) insert(peer string, block *types.Block) {
   633  	hash := block.Hash()
   634  
   635  	// Run the import on a new thread
   636  	log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
   637  	go func() {
   638  		defer func() { f.done <- hash }()
   639  
   640  		// If the parent's unknown, abort insertion
   641  		parent := f.getBlock(block.ParentHash())
   642  		if parent == nil {
   643  			log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
   644  			return
   645  		}
   646  		// Quickly validate the header and propagate the block if it passes
   647  		switch err := f.verifyHeader(block.Header()); err {
   648  		case nil:
   649  			// All ok, quickly propagate to our peers
   650  			propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
   651  			go f.broadcastBlock(block, true)
   652  
   653  		case consensus.ErrFutureBlock:
   654  			// Weird future block, don't fail, but neither propagate
   655  
   656  		default:
   657  			// Something went very wrong, drop the peer
   658  			log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
   659  			f.dropPeer(peer)
   660  			return
   661  		}
   662  		// Run the actual import and log any issues
   663  		if _, err := f.insertChain(types.Blocks{block}); err != nil {
   664  			log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
   665  			return
   666  		}
   667  		// If import succeeded, broadcast the block
   668  		propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
   669  		go f.broadcastBlock(block, false)
   670  
   671  		// Invoke the testing hook if needed
   672  		if f.importedHook != nil {
   673  			f.importedHook(block)
   674  		}
   675  	}()
   676  }
   677  
   678  // forgetHash removes all traces of a block announcement from the fetcher's
   679  // internal state.
   680  func (f *Fetcher) forgetHash(hash common.Hash) {
   681  	// Remove all pending announces and decrement DOS counters
   682  	for _, announce := range f.announced[hash] {
   683  		f.announces[announce.origin]--
   684  		if f.announces[announce.origin] <= 0 {
   685  			delete(f.announces, announce.origin)
   686  		}
   687  	}
   688  	delete(f.announced, hash)
   689  	if f.announceChangeHook != nil {
   690  		f.announceChangeHook(hash, false)
   691  	}
   692  	// Remove any pending fetches and decrement the DOS counters
   693  	if announce := f.fetching[hash]; announce != nil {
   694  		f.announces[announce.origin]--
   695  		if f.announces[announce.origin] <= 0 {
   696  			delete(f.announces, announce.origin)
   697  		}
   698  		delete(f.fetching, hash)
   699  	}
   700  
   701  	// Remove any pending completion requests and decrement the DOS counters
   702  	for _, announce := range f.fetched[hash] {
   703  		f.announces[announce.origin]--
   704  		if f.announces[announce.origin] <= 0 {
   705  			delete(f.announces, announce.origin)
   706  		}
   707  	}
   708  	delete(f.fetched, hash)
   709  
   710  	// Remove any pending completions and decrement the DOS counters
   711  	if announce := f.completing[hash]; announce != nil {
   712  		f.announces[announce.origin]--
   713  		if f.announces[announce.origin] <= 0 {
   714  			delete(f.announces, announce.origin)
   715  		}
   716  		delete(f.completing, hash)
   717  	}
   718  }
   719  
   720  // forgetBlock removes all traces of a queued block from the fetcher's internal
   721  // state.
   722  func (f *Fetcher) forgetBlock(hash common.Hash) {
   723  	if insert := f.queued[hash]; insert != nil {
   724  		f.queues[insert.origin]--
   725  		if f.queues[insert.origin] == 0 {
   726  			delete(f.queues, insert.origin)
   727  		}
   728  		delete(f.queued, hash)
   729  	}
   730  }