github.com/arieschain/arieschain@v0.0.0-20191023063405-37c074544356/qct/fetcher/fetcher.go

     1  // Package fetcher contains the block announcement based synchronisation.
     2  package fetcher
     3  
     4  import (
     5  	"errors"
     6  	"math/rand"
     7  	"time"
     8  
     9  	"github.com/quickchainproject/quickchain/common"
    10  	"github.com/quickchainproject/quickchain/consensus"
    11  	"github.com/quickchainproject/quickchain/core/types"
    12  	"github.com/quickchainproject/quickchain/log"
    13  	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
    14  )
    15  
    16  const (
    17  	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
    18  	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
    19  	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block
    20  	maxUncleDist  = 7                      // Maximum allowed backward distance from the chain head
    21  	maxQueueDist  = 32                     // Maximum allowed distance from the chain head to queue
    22  	hashLimit     = 256                    // Maximum number of unique blocks a peer may have announced
    23  	blockLimit    = 64                     // Maximum number of unique blocks a peer may have delivered
    24  )
    25  
    26  var (
    27  	errTerminated = errors.New("terminated")
    28  )
    29  
    30  // blockRetrievalFn is a callback type for retrieving a block from the local chain.
    31  type blockRetrievalFn func(common.Hash) *types.Block
    32  
    33  // headerRequesterFn is a callback type for sending a header retrieval request.
    34  type headerRequesterFn func(common.Hash) error
    35  
    36  // bodyRequesterFn is a callback type for sending a body retrieval request.
    37  type bodyRequesterFn func([]common.Hash) error
    38  
    39  // headerVerifierFn is a callback type to verify a block's header for fast propagation.
    40  type headerVerifierFn func(header *types.Header) error
    41  
    42  // blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
    43  type blockBroadcasterFn func(block *types.Block, propagate bool)
    44  
    45  // chainHeightFn is a callback type to retrieve the current chain height.
    46  type chainHeightFn func() uint64
    47  
    48  // chainInsertFn is a callback type to insert a batch of blocks into the local chain.
    49  type chainInsertFn func(types.Blocks) (int, error)
    50  
    51  // peerDropFn is a callback type for dropping a peer detected as malicious.
    52  type peerDropFn func(id string)
    53  
    54  // announce is the hash notification of the availability of a new block in the
    55  // network.
    56  type announce struct {
    57  	hash   common.Hash   // Hash of the block being announced
    58  	number uint64        // Number of the block being announced (0 = unknown | old protocol)
    59  	header *types.Header // Header of the block partially reassembled (new protocol)
    60  	time   time.Time     // Timestamp of the announcement
    61  
    62  	origin string // Identifier of the peer originating the notification
    63  
    64  	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
    65  	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
    66  }
    67  
    68  // headerFilterTask represents a batch of headers needing fetcher filtering.
    69  type headerFilterTask struct {
    70  	peer    string          // The source peer of block headers
    71  	headers []*types.Header // Collection of headers to filter
    72  	time    time.Time       // Arrival time of the headers
    73  }
    74  
    75  // bodyFilterTask represents a batch of block bodies (transactions and uncles)
    76  // needing fetcher filtering.
    77  type bodyFilterTask struct {
    78  	peer         string                 // The source peer of block bodies
    79  	transactions [][]*types.Transaction // Collection of transactions per block body
    80  	uncles       [][]*types.Header      // Collection of uncles per block body
    81  	time         time.Time              // Arrival time of the blocks' contents
    82  }
    83  
    84  // inject represents a scheduled import operation.
    85  type inject struct {
    86  	origin string
    87  	block  *types.Block
    88  }
    89  
    90  // Fetcher is responsible for accumulating block announcements from various peers
    91  // and scheduling them for retrieval.
    92  type Fetcher struct {
    93  	// Various event channels
    94  	notify chan *announce
    95  	inject chan *inject
    96  
    97  	blockFilter  chan chan []*types.Block
    98  	headerFilter chan chan *headerFilterTask
    99  	bodyFilter   chan chan *bodyFilterTask
   100  
   101  	done chan common.Hash
   102  	quit chan struct{}
   103  
   104  	// Announce states
   105  	announces  map[string]int              // Per peer announce counts to prevent memory exhaustion
   106  	announced  map[common.Hash][]*announce // Announced blocks, scheduled for fetching
   107  	fetching   map[common.Hash]*announce   // Announced blocks, currently fetching
   108  	fetched    map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
   109  	completing map[common.Hash]*announce   // Blocks with headers, currently body-completing
   110  
   111  	// Block cache
   112  	queue  *prque.Prque            // Queue containing the import operations (block number sorted)
   113  	queues map[string]int          // Per peer block counts to prevent memory exhaustion
   114  	queued map[common.Hash]*inject // Set of already queued blocks (to dedupe imports)
   115  
   116  	// Callbacks
   117  	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
   118  	verifyHeader   headerVerifierFn   // Checks if a block's header has a valid proof of work
   119  	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
   120  	chainHeight    chainHeightFn      // Retrieves the current chain's height
   121  	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
   122  	dropPeer       peerDropFn         // Drops a peer for misbehaving
   123  
   124  	// Testing hooks
   125  	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list
   126  	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
   127  	fetchingHook       func([]common.Hash)     // Method to call upon starting a block (eth/61) or header (eth/62) fetch
   128  	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (eth/62)
   129  	importedHook       func(*types.Block)      // Method to call upon successful block import (both eth/61 and eth/62)
   130  }
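        // Lifecycle of an announcement through the state maps above (informal summary;
        // the map names are the ones defined in this struct):
        //
        //	Notify         -> announced   waiting for the arrive timeout to elapse
        //	fetch timer    -> fetching    header request sent to one randomly chosen peer
        //	FilterHeaders  -> fetched     header received, body still outstanding
        //	complete timer -> completing  body request sent
        //	FilterBodies   -> queued      full block waiting for in-order import
        //
        // forgetHash and forgetBlock clear an entry from these maps once the block is
        // imported, times out, or is discarded as useless.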
   131  
   132  // New creates a block fetcher to retrieve blocks based on hash announcements.
   133  func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
   134  	return &Fetcher{
   135  		notify:         make(chan *announce),
   136  		inject:         make(chan *inject),
   137  		blockFilter:    make(chan chan []*types.Block),
   138  		headerFilter:   make(chan chan *headerFilterTask),
   139  		bodyFilter:     make(chan chan *bodyFilterTask),
   140  		done:           make(chan common.Hash),
   141  		quit:           make(chan struct{}),
   142  		announces:      make(map[string]int),
   143  		announced:      make(map[common.Hash][]*announce),
   144  		fetching:       make(map[common.Hash]*announce),
   145  		fetched:        make(map[common.Hash][]*announce),
   146  		completing:     make(map[common.Hash]*announce),
   147  		queue:          prque.New(),
   148  		queues:         make(map[string]int),
   149  		queued:         make(map[common.Hash]*inject),
   150  		getBlock:       getBlock,
   151  		verifyHeader:   verifyHeader,
   152  		broadcastBlock: broadcastBlock,
   153  		chainHeight:    chainHeight,
   154  		insertChain:    insertChain,
   155  		dropPeer:       dropPeer,
   156  	}
   157  }
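        // Illustrative wiring sketch for New (assumptions: pm, pm.blockchain, pm.engine,
        // pm.BroadcastBlock and pm.removePeer are hypothetical protocol-manager fields,
        // not names defined in this file):
        //
        //	heighter := func() uint64 { return pm.blockchain.CurrentBlock().NumberU64() }
        //	inserter := func(blocks types.Blocks) (int, error) { return pm.blockchain.InsertChain(blocks) }
        //	validator := func(header *types.Header) error {
        //		return pm.engine.VerifyHeader(pm.blockchain, header, true) // signature assumed
        //	}
        //	f := New(pm.blockchain.GetBlockByHash, validator, pm.BroadcastBlock, heighter, inserter, pm.removePeer)
        //	f.Start()      // runs the event loop in its own goroutine
        //	defer f.Stop() // closes the quit channel, unblocking all pending calls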
   158  
   159  // Start boots up the announcement based synchroniser, accepting and processing
   160  // hash notifications and block fetches until termination requested.
   161  func (f *Fetcher) Start() {
   162  	go f.loop()
   163  }
   164  
   165  // Stop terminates the announcement based synchroniser, canceling all pending
   166  // operations.
   167  func (f *Fetcher) Stop() {
   168  	close(f.quit)
   169  }
   170  
   171  // Notify announces to the fetcher the potential availability of a new block in
   172  // the network.
   173  func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
   174  	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
   175  	block := &announce{
   176  		hash:        hash,
   177  		number:      number,
   178  		time:        time,
   179  		origin:      peer,
   180  		fetchHeader: headerFetcher,
   181  		fetchBodies: bodyFetcher,
   182  	}
   183  	select {
   184  	case f.notify <- block:
   185  		return nil
   186  	case <-f.quit:
   187  		return errTerminated
   188  	}
   189  }
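        // Usage sketch (assumptions: peer.ID, peer.RequestOneHeader and peer.RequestBodies
        // are hypothetical helpers of the calling protocol handler): on receiving a block
        // hash announcement, the handler passes closures bound to the announcing peer so
        // that any follow-up header/body requests go back to that same peer.
        //
        //	err := f.Notify(peer.ID(), hash, number, time.Now(),
        //		peer.RequestOneHeader, peer.RequestBodies)
        //	if err != nil {
        //		return // the fetcher has been stopped
        //	}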
   190  
   191  // Enqueue tries to fill gaps in the fetcher's future import queue.
   192  func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
   193  	op := &inject{
   194  		origin: peer,
   195  		block:  block,
   196  	}
   197  	select {
   198  	case f.inject <- op:
   199  		return nil
   200  	case <-f.quit:
   201  		return errTerminated
   202  	}
   203  }
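        // Usage sketch (peer.ID is a hypothetical helper): a block received through direct
        // propagation bypasses the announcement path and is handed straight to the import
        // queue.
        //
        //	if err := f.Enqueue(peer.ID(), block); err != nil {
        //		return // the fetcher has been stopped
        //	}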
   204  
   205  // FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
   206  // returning those that should be handled differently.
   207  func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
   208  	log.Trace("Filtering headers", "peer", peer, "headers", len(headers))
   209  
   210  	// Send the filter channel to the fetcher
   211  	filter := make(chan *headerFilterTask)
   212  
   213  	select {
   214  	case f.headerFilter <- filter:
   215  	case <-f.quit:
   216  		return nil
   217  	}
   218  	// Request the filtering of the header list
   219  	select {
   220  	case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:
   221  	case <-f.quit:
   222  		return nil
   223  	}
   224  	// Retrieve the headers remaining after filtering
   225  	select {
   226  	case task := <-filter:
   227  		return task.headers
   228  	case <-f.quit:
   229  		return nil
   230  	}
   231  }
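        // FilterHeaders above performs a three-step handshake with the event loop: it hands
        // the loop a private reply channel, sends the task on that channel, and then waits
        // for the loop to return whatever it did not claim. A handler would typically pass
        // the remainder on to its regular header processing. Sketch only; the call
        // pm.downloader.DeliverHeaders is an assumed downstream hook, not defined here:
        //
        //	unclaimed := f.FilterHeaders(peer.ID(), headers, time.Now())
        //	if len(unclaimed) > 0 {
        //		pm.downloader.DeliverHeaders(peer.ID(), unclaimed) // hypothetical
        //	}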
   232  
   233  // FilterBodies extracts all the block bodies that were explicitly requested by
   234  // the fetcher, returning those that should be handled differently.
   235  func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
   236  	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))
   237  
   238  	// Send the filter channel to the fetcher
   239  	filter := make(chan *bodyFilterTask)
   240  
   241  	select {
   242  	case f.bodyFilter <- filter:
   243  	case <-f.quit:
   244  		return nil, nil
   245  	}
   246  	// Request the filtering of the body list
   247  	select {
   248  	case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}:
   249  	case <-f.quit:
   250  		return nil, nil
   251  	}
   252  	// Retrieve the bodies remaining after filtering
   253  	select {
   254  	case task := <-filter:
   255  		return task.transactions, task.uncles
   256  	case <-f.quit:
   257  		return nil, nil
   258  	}
   259  }
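        // Note on FilterBodies: the event loop splices every matched body out of
        // task.transactions and task.uncles before replying, so the slices returned here
        // contain only the bodies the fetcher did not request; callers can forward them to
        // other synchronisation code unchanged.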
   260  
   261  // loop is the main fetcher loop, checking and processing various notification
   262  // events.
   263  func (f *Fetcher) loop() {
   264  	// Iterate the block fetching until a quit is requested
   265  	fetchTimer := time.NewTimer(0)
   266  	completeTimer := time.NewTimer(0)
   267  
   268  	for {
   269  		// Clean up any expired block fetches
   270  		for hash, announce := range f.fetching {
   271  			if time.Since(announce.time) > fetchTimeout {
   272  				f.forgetHash(hash)
   273  			}
   274  		}
   275  		// Import any queued blocks that could potentially fit
   276  		height := f.chainHeight()
   277  		for !f.queue.Empty() {
   278  			op := f.queue.PopItem().(*inject)
   279  			if f.queueChangeHook != nil {
   280  				f.queueChangeHook(op.block.Hash(), false)
   281  			}
   282  			// If too high up the chain or phase, continue later
   283  			number := op.block.NumberU64()
   284  			if number > height+1 {
   285  				f.queue.Push(op, -float32(op.block.NumberU64()))
   286  				if f.queueChangeHook != nil {
   287  					f.queueChangeHook(op.block.Hash(), true)
   288  				}
   289  				break
   290  			}
   291  			// Otherwise if fresh and still unknown, try and import
   292  			hash := op.block.Hash()
   293  			if number+maxUncleDist < height || f.getBlock(hash) != nil {
   294  				f.forgetBlock(hash)
   295  				continue
   296  			}
   297  			f.insert(op.origin, op.block)
   298  		}
   299  		// Wait for an outside event to occur
   300  		select {
   301  		case <-f.quit:
   302  			// Fetcher terminating, abort all operations
   303  			return
   304  
   305  		case notification := <-f.notify:
   306  			// A block was announced, make sure the peer isn't DOSing us
   307  			propAnnounceInMeter.Mark(1)
   308  
   309  			count := f.announces[notification.origin] + 1
   310  			if count > hashLimit {
   311  				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
   312  				propAnnounceDOSMeter.Mark(1)
   313  				break
   314  			}
   315  			// If we have a valid block number, check that it's potentially useful
   316  			if notification.number > 0 {
   317  				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
   318  					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
   319  					propAnnounceDropMeter.Mark(1)
   320  					break
   321  				}
   322  			}
   323  			// All is well, schedule the announce if block's not yet downloading
   324  			if _, ok := f.fetching[notification.hash]; ok {
   325  				break
   326  			}
   327  			if _, ok := f.completing[notification.hash]; ok {
   328  				break
   329  			}
   330  			f.announces[notification.origin] = count
   331  			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
   332  			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
   333  				f.announceChangeHook(notification.hash, true)
   334  			}
   335  			if len(f.announced) == 1 {
   336  				f.rescheduleFetch(fetchTimer)
   337  			}
   338  
   339  		case op := <-f.inject:
   340  			// A direct block insertion was requested, try and fill any pending gaps
   341  			propBroadcastInMeter.Mark(1)
   342  			f.enqueue(op.origin, op.block)
   343  
   344  		case hash := <-f.done:
   345  			// A pending import finished, remove all traces of the notification
   346  			f.forgetHash(hash)
   347  			f.forgetBlock(hash)
   348  
   349  		case <-fetchTimer.C:
   350  			// At least one block's timer ran out, check for needing retrieval
   351  			request := make(map[string][]common.Hash)
   352  
   353  			for hash, announces := range f.announced {
   354  				if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
   355  					// Pick a random peer to retrieve from, reset all others
   356  					announce := announces[rand.Intn(len(announces))]
   357  					f.forgetHash(hash)
   358  
   359  					// If the block still didn't arrive, queue for fetching
   360  					if f.getBlock(hash) == nil {
   361  						request[announce.origin] = append(request[announce.origin], hash)
   362  						f.fetching[hash] = announce
   363  					}
   364  				}
   365  			}
   366  			// Send out all block header requests
   367  			for peer, hashes := range request {
   368  				log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)
   369  
   370  				// Create a closure of the fetch and schedule it on a new goroutine
   371  				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
   372  				go func() {
   373  					if f.fetchingHook != nil {
   374  						f.fetchingHook(hashes)
   375  					}
   376  					for _, hash := range hashes {
   377  						headerFetchMeter.Mark(1)
   378  						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
   379  					}
   380  				}()
   381  			}
   382  			// Schedule the next fetch if blocks are still pending
   383  			f.rescheduleFetch(fetchTimer)
   384  
   385  		case <-completeTimer.C:
   386  			// At least one header's timer ran out, retrieve everything
   387  			request := make(map[string][]common.Hash)
   388  
   389  			for hash, announces := range f.fetched {
   390  				// Pick a random peer to retrieve from, reset all others
   391  				announce := announces[rand.Intn(len(announces))]
   392  				f.forgetHash(hash)
   393  
   394  				// If the block still didn't arrive, queue for completion
   395  				if f.getBlock(hash) == nil {
   396  					request[announce.origin] = append(request[announce.origin], hash)
   397  					f.completing[hash] = announce
   398  				}
   399  			}
   400  			// Send out all block body requests
   401  			for peer, hashes := range request {
   402  				log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)
   403  
   404  				// Create a closure of the fetch and schedule it on a new goroutine
   405  				if f.completingHook != nil {
   406  					f.completingHook(hashes)
   407  				}
   408  				bodyFetchMeter.Mark(int64(len(hashes)))
   409  				go f.completing[hashes[0]].fetchBodies(hashes)
   410  			}
   411  			// Schedule the next fetch if blocks are still pending
   412  			f.rescheduleComplete(completeTimer)
   413  
   414  		case filter := <-f.headerFilter:
   415  			// Headers arrived from a remote peer. Extract those that were explicitly
   416  			// requested by the fetcher, and return everything else so it's delivered
   417  			// to other parts of the system.
   418  			var task *headerFilterTask
   419  			select {
   420  			case task = <-filter:
   421  			case <-f.quit:
   422  				return
   423  			}
   424  			headerFilterInMeter.Mark(int64(len(task.headers)))
   425  
   426  			// Split the batch of headers into unknown ones (to return to the caller),
   427  			// known incomplete ones (requiring body retrievals) and completed blocks.
   428  			unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
   429  			for _, header := range task.headers {
   430  				hash := header.Hash()
   431  
   432  				// Filter fetcher-requested headers from other synchronisation algorithms
   433  				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
   434  					// If the delivered header does not match the promised number, drop the announcer
   435  					if header.Number.Uint64() != announce.number {
   436  						log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
   437  						f.dropPeer(announce.origin)
   438  						f.forgetHash(hash)
   439  						continue
   440  					}
   441  					// Only keep if not imported by other means
   442  					if f.getBlock(hash) == nil {
   443  						announce.header = header
   444  						announce.time = task.time
   445  
   446  						// If the block is empty (header only), short circuit into the final import queue
   447  						if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
   448  							log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
   449  
   450  							block := types.NewBlockWithHeader(header)
   451  							block.ReceivedAt = task.time
   452  
   453  							complete = append(complete, block)
   454  							f.completing[hash] = announce
   455  							continue
   456  						}
   457  						// Otherwise add to the list of blocks needing completion
   458  						incomplete = append(incomplete, announce)
   459  					} else {
   460  						log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
   461  						f.forgetHash(hash)
   462  					}
   463  				} else {
   464  					// Fetcher doesn't know about it, add to the return list
   465  					unknown = append(unknown, header)
   466  				}
   467  			}
   468  			headerFilterOutMeter.Mark(int64(len(unknown)))
   469  			select {
   470  			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
   471  			case <-f.quit:
   472  				return
   473  			}
   474  			// Schedule the retrieved headers for body completion
   475  			for _, announce := range incomplete {
   476  				hash := announce.header.Hash()
   477  				if _, ok := f.completing[hash]; ok {
   478  					continue
   479  				}
   480  				f.fetched[hash] = append(f.fetched[hash], announce)
   481  				if len(f.fetched) == 1 {
   482  					f.rescheduleComplete(completeTimer)
   483  				}
   484  			}
   485  			// Schedule the header-only blocks for import
   486  			for _, block := range complete {
   487  				if announce := f.completing[block.Hash()]; announce != nil {
   488  					f.enqueue(announce.origin, block)
   489  				}
   490  			}
   491  
   492  		case filter := <-f.bodyFilter:
   493  			// Block bodies arrived, extract any explicitly requested blocks, return the rest
   494  			var task *bodyFilterTask
   495  			select {
   496  			case task = <-filter:
   497  			case <-f.quit:
   498  				return
   499  			}
   500  			bodyFilterInMeter.Mark(int64(len(task.transactions)))
   501  
   502  			blocks := []*types.Block{}
   503  			for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
   504  				// Match up a body to any possible completion request
   505  				matched := false
   506  
   507  				for hash, announce := range f.completing {
   508  					if f.queued[hash] == nil {
   509  						txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
   510  						uncleHash := types.CalcUncleHash(task.uncles[i])
   511  
   512  						if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash && announce.origin == task.peer {
   513  							// Mark the body matched, reassemble if still unknown
   514  							matched = true
   515  
   516  							if f.getBlock(hash) == nil {
   517  								block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
   518  								block.ReceivedAt = task.time
   519  
   520  								blocks = append(blocks, block)
   521  							} else {
   522  								f.forgetHash(hash)
   523  							}
   524  						}
   525  					}
   526  				}
   527  				if matched {
   528  					task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
   529  					task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
   530  					i--
   531  					continue
   532  				}
   533  			}
   534  
   535  			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
   536  			select {
   537  			case filter <- task:
   538  			case <-f.quit:
   539  				return
   540  			}
   541  			// Schedule the retrieved blocks for ordered import
   542  			for _, block := range blocks {
   543  				if announce := f.completing[block.Hash()]; announce != nil {
   544  					f.enqueue(announce.origin, block)
   545  				}
   546  			}
   547  		}
   548  	}
   549  }
   550  
   551  // rescheduleFetch resets the specified fetch timer to the next announce timeout.
   552  func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
   553  	// Short circuit if no blocks are announced
   554  	if len(f.announced) == 0 {
   555  		return
   556  	}
   557  	// Otherwise find the earliest expiring announcement
   558  	earliest := time.Now()
   559  	for _, announces := range f.announced {
   560  		if earliest.After(announces[0].time) {
   561  			earliest = announces[0].time
   562  		}
   563  	}
   564  	fetch.Reset(arriveTimeout - time.Since(earliest))
   565  }
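        // Worked example for rescheduleFetch: with arriveTimeout = 500ms, if the oldest
        // pending announcement was received 150ms ago, the timer is reset to fire in
        // 500ms - 150ms = 350ms, i.e. exactly when that announcement becomes eligible for
        // an explicit header fetch.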
   566  
   567  // rescheduleComplete resets the specified completion timer to the next fetch timeout.
   568  func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
   569  	// Short circuit if no headers are fetched
   570  	if len(f.fetched) == 0 {
   571  		return
   572  	}
   573  	// Otherwise find the earliest expiring announcement
   574  	earliest := time.Now()
   575  	for _, announces := range f.fetched {
   576  		if earliest.After(announces[0].time) {
   577  			earliest = announces[0].time
   578  		}
   579  	}
   580  	complete.Reset(gatherSlack - time.Since(earliest))
   581  }
   582  
   583  // enqueue schedules a new future import operation, if the block to be imported
   584  // has not yet been seen.
   585  func (f *Fetcher) enqueue(peer string, block *types.Block) {
   586  	hash := block.Hash()
   587  
   588  	// Ensure the peer isn't DOSing us
   589  	count := f.queues[peer] + 1
   590  	if count > blockLimit {
   591  		log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
   592  		propBroadcastDOSMeter.Mark(1)
   593  		f.forgetHash(hash)
   594  		return
   595  	}
   596  	// Discard any past or too distant blocks
   597  	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
   598  		log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist)
   599  		propBroadcastDropMeter.Mark(1)
   600  		f.forgetHash(hash)
   601  		return
   602  	}
   603  	// Schedule the block for future importing
   604  	if _, ok := f.queued[hash]; !ok {
   605  		op := &inject{
   606  			origin: peer,
   607  			block:  block,
   608  		}
   609  		f.queues[peer] = count
   610  		f.queued[hash] = op
   611  		f.queue.Push(op, -float32(block.NumberU64()))
   612  		if f.queueChangeHook != nil {
   613  			f.queueChangeHook(op.block.Hash(), true)
   614  		}
   615  		log.Debug("Queued propagated block", "peer", peer, "number", block.Number(), "hash", hash, "queued", f.queue.Size())
   616  	}
   617  }
   618  
   619  // insert spawns a new goroutine to run a block insertion into the chain. If the
   620  // block's number is at the same height as the current import phase, it updates
   621  // the phase states accordingly.
   622  func (f *Fetcher) insert(peer string, block *types.Block) {
   623  	hash := block.Hash()
   624  
   625  	// Run the import on a new thread
   626  	log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
   627  	go func() {
   628  		defer func() { f.done <- hash }()
   629  
   630  		// If the parent's unknown, abort insertion
   631  		parent := f.getBlock(block.ParentHash())
   632  		if parent == nil {
   633  			log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
   634  			return
   635  		}
   636  		// Quickly validate the header and propagate the block if it passes
   637  		switch err := f.verifyHeader(block.Header()); err {
   638  		case nil:
   639  			// All ok, quickly propagate to our peers
   640  			propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
   641  			go f.broadcastBlock(block, true)
   642  
   643  		case consensus.ErrFutureBlock:
   644  			// Weird future block, don't fail, but don't propagate it either
   645  
   646  		default:
   647  			// Something went very wrong, drop the peer
   648  			log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
   649  			f.dropPeer(peer)
   650  			return
   651  		}
   652  		// Run the actual import and log any issues
   653  		if _, err := f.insertChain(types.Blocks{block}); err != nil {
   654  			log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
   655  			return
   656  		}
   657  		// If import succeeded, broadcast the block
   658  		propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
   659  		go f.broadcastBlock(block, false)
   660  
   661  		// Invoke the testing hook if needed
   662  		if f.importedHook != nil {
   663  			f.importedHook(block)
   664  		}
   665  	}()
   666  }
   667  
   668  // forgetHash removes all traces of a block announcement from the fetcher's
   669  // internal state.
   670  func (f *Fetcher) forgetHash(hash common.Hash) {
   671  	// Remove all pending announces and decrement DOS counters
   672  	for _, announce := range f.announced[hash] {
   673  		f.announces[announce.origin]--
   674  		if f.announces[announce.origin] == 0 {
   675  			delete(f.announces, announce.origin)
   676  		}
   677  	}
   678  	delete(f.announced, hash)
   679  	if f.announceChangeHook != nil {
   680  		f.announceChangeHook(hash, false)
   681  	}
   682  	// Remove any pending fetches and decrement the DOS counters
   683  	if announce := f.fetching[hash]; announce != nil {
   684  		f.announces[announce.origin]--
   685  		if f.announces[announce.origin] == 0 {
   686  			delete(f.announces, announce.origin)
   687  		}
   688  		delete(f.fetching, hash)
   689  	}
   690  
   691  	// Remove any pending completion requests and decrement the DOS counters
   692  	for _, announce := range f.fetched[hash] {
   693  		f.announces[announce.origin]--
   694  		if f.announces[announce.origin] == 0 {
   695  			delete(f.announces, announce.origin)
   696  		}
   697  	}
   698  	delete(f.fetched, hash)
   699  
   700  	// Remove any pending completions and decrement the DOS counters
   701  	if announce := f.completing[hash]; announce != nil {
   702  		f.announces[announce.origin]--
   703  		if f.announces[announce.origin] == 0 {
   704  			delete(f.announces, announce.origin)
   705  		}
   706  		delete(f.completing, hash)
   707  	}
   708  }
   709  
   710  // forgetBlock removes all traces of a queued block from the fetcher's internal
   711  // state.
   712  func (f *Fetcher) forgetBlock(hash common.Hash) {
   713  	if insert := f.queued[hash]; insert != nil {
   714  		f.queues[insert.origin]--
   715  		if f.queues[insert.origin] == 0 {
   716  			delete(f.queues, insert.origin)
   717  		}
   718  		delete(f.queued, hash)
   719  	}
   720  }