github.com/beyonderyue/gochain@v2.2.26+incompatible/eth/fetcher/fetcher.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package fetcher contains the block announcement based synchronisation.
package fetcher

import (
	"context"
	"errors"
	"math/rand"
	"time"

	"go.opencensus.io/trace"

	"github.com/gochain-io/gochain/common"
	"github.com/gochain-io/gochain/common/prque"
	"github.com/gochain-io/gochain/consensus"
	"github.com/gochain-io/gochain/core/types"
	"github.com/gochain-io/gochain/log"
)

const (
	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block
	maxUncleDist  = 7                      // Maximum allowed backward distance from the chain head
	maxQueueDist  = 64                     // Maximum allowed distance from the chain head to queue
	hashLimit     = 256                    // Maximum number of unique blocks a peer may have announced
	blockLimit    = 64                     // Maximum number of unique blocks a peer may have delivered
)
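
// acceptableNumberRange is an illustrative sketch, not part of the upstream
// file: given the local chain height, it computes the window of block numbers
// the fetcher is willing to act on, mirroring the -maxUncleDist/+maxQueueDist
// distance checks applied when announcements and propagated blocks arrive.
func acceptableNumberRange(height uint64) (lowest, highest uint64) {
	if height > maxUncleDist {
		lowest = height - maxUncleDist
	}
	return lowest, height + maxQueueDist
}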

var (
	errTerminated = errors.New("terminated")
)

// blockRetrievalFn is a callback type for retrieving a block from the local chain.
type blockRetrievalFn func(context.Context, common.Hash) *types.Block

// headerRequesterFn is a callback type for sending a header retrieval request.
type headerRequesterFn func(context.Context, common.Hash) error

// bodyRequesterFn is a callback type for sending a body retrieval request.
type bodyRequesterFn func(context.Context, []common.Hash) error

// headerVerifierFn is a callback type to verify a block's header for fast propagation.
type headerVerifierFn func(ctx context.Context, header *types.Header) error

// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
type blockBroadcasterFn func(ctx context.Context, block *types.Block, propagate bool)

// chainHeightFn is a callback type to retrieve the current chain height.
type chainHeightFn func() uint64

// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
type chainInsertFn func(context.Context, types.Blocks) (int, error)

// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)

// announce is the hash notification of the availability of a new block in the
// network.
type announce struct {
	hash   common.Hash   // Hash of the block being announced
	number uint64        // Number of the block being announced (0 = unknown | old protocol)
	header *types.Header // Header of the block partially reassembled (new protocol)
	time   time.Time     // Timestamp of the announcement

	origin string // Identifier of the peer originating the notification

	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
}

// headerFilterTask represents a batch of headers needing fetcher filtering.
type headerFilterTask struct {
	peer    string          // The source peer of block headers
	headers []*types.Header // Collection of headers to filter
	time    time.Time       // Arrival time of the headers
}

// bodyFilterTask represents a batch of block bodies (transactions) needing
// fetcher filtering.
type bodyFilterTask struct {
	peer         string                 // The source peer of block bodies
	transactions [][]*types.Transaction // Collection of transactions per block body
	time         time.Time              // Arrival time of the blocks' contents
}

// inject represents a scheduled import operation.
type inject struct {
	origin string
	block  *types.Block
}

// Fetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
type Fetcher struct {
	// Various event channels
	notify chan *announce
	inject chan *inject

	blockFilter  chan chan []*types.Block
	headerFilter chan chan *headerFilterTask
	bodyFilter   chan chan *bodyFilterTask

	done chan common.Hash
	quit chan struct{}

	// Announce states
	announces  map[string]int              // Per peer announce counts to prevent memory exhaustion
	announced  map[common.Hash][]*announce // Announced blocks, scheduled for fetching
	fetching   map[common.Hash]*announce   // Announced blocks, currently fetching
	fetched    map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
	completing map[common.Hash]*announce   // Blocks with headers, currently body-completing

	// Block cache
	queue  *prque.Prque            // Queue containing the import operations (block number sorted)
	queues map[string]int          // Per peer block counts to prevent memory exhaustion
	queued map[common.Hash]*inject // Set of already queued blocks (to dedupe imports)

	// Callbacks
	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
	verifyHeader   headerVerifierFn   // Checks if a block's headers have a valid proof of work
	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
	chainHeight    chainHeightFn      // Retrieves the current chain's height
	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
	dropPeer       peerDropFn         // Drops a peer for misbehaving

	// Testing hooks
	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list
	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
	fetchingHook       func([]common.Hash)     // Method to call upon starting a block (eth/61) or header (eth/62) fetch
	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (eth/62)
	importedHook       func(*types.Block)      // Method to call upon successful block import (both eth/61 and eth/62)
}

// New creates a block fetcher to retrieve blocks based on hash announcements.
func New(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
	return &Fetcher{
		notify:         make(chan *announce),
		inject:         make(chan *inject),
		blockFilter:    make(chan chan []*types.Block),
		headerFilter:   make(chan chan *headerFilterTask),
		bodyFilter:     make(chan chan *bodyFilterTask),
		done:           make(chan common.Hash),
		quit:           make(chan struct{}),
		announces:      make(map[string]int),
		announced:      make(map[common.Hash][]*announce),
		fetching:       make(map[common.Hash]*announce),
		fetched:        make(map[common.Hash][]*announce),
		completing:     make(map[common.Hash]*announce),
		queue:          prque.New(nil),
		queues:         make(map[string]int),
		queued:         make(map[common.Hash]*inject),
		getBlock:       getBlock,
		verifyHeader:   verifyHeader,
		broadcastBlock: broadcastBlock,
		chainHeight:    chainHeight,
		insertChain:    insertChain,
		dropPeer:       dropPeer,
	}
}
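
// exampleNew is a hedged usage sketch, not part of the upstream file: it shows
// the shape of the callbacks New expects by wiring a Fetcher with no-op stubs.
// In the eth protocol manager these are typically backed by the blockchain's
// block lookup, the consensus engine's header verifier, the handler's block
// broadcaster, the current-height accessor, InsertChain and peer removal.
func exampleNew() *Fetcher {
	return New(
		func(ctx context.Context, hash common.Hash) *types.Block { return nil }, // getBlock stub
		func(ctx context.Context, header *types.Header) error { return nil }, // verifyHeader stub
		func(ctx context.Context, block *types.Block, propagate bool) {}, // broadcastBlock stub
		func() uint64 { return 0 }, // chainHeight stub
		func(ctx context.Context, blocks types.Blocks) (int, error) { return len(blocks), nil }, // insertChain stub
		func(id string) {}, // dropPeer stub
	)
}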

// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination is requested.
func (f *Fetcher) Start() {
	go f.loop()
}

// Stop terminates the announcement based synchroniser, canceling all pending
// operations.
func (f *Fetcher) Stop() {
	close(f.quit)
}

// Notify informs the fetcher of the potential availability of a new block in
// the network.
func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
	block := &announce{
		hash:        hash,
		number:      number,
		time:        time,
		origin:      peer,
		fetchHeader: headerFetcher,
		fetchBodies: bodyFetcher,
	}
	select {
	case f.notify <- block:
		return nil
	case <-f.quit:
		return errTerminated
	}
}
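
// exampleNotify is an illustrative sketch, not part of the upstream file, of how
// a peer's message handler might forward a block-hash announcement. The request
// callbacks are placeholders standing in for the peer's header and body request
// methods on the wire protocol.
func exampleNotify(f *Fetcher, peerID string, hash common.Hash, number uint64) error {
	requestHeader := func(ctx context.Context, h common.Hash) error {
		return nil // stand-in for sending a header request for h to the peer
	}
	requestBodies := func(ctx context.Context, hashes []common.Hash) error {
		return nil // stand-in for sending a body request for hashes to the peer
	}
	return f.Notify(peerID, hash, number, time.Now(), requestHeader, requestBodies)
}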

// Enqueue tries to fill gaps in the fetcher's future import queue.
func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
	op := &inject{
		origin: peer,
		block:  block,
	}
	select {
	case f.inject <- op:
		return nil
	case <-f.quit:
		return errTerminated
	}
}
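
// exampleEnqueue is an illustrative sketch, not part of the upstream file: when
// a peer delivers a full block via a block broadcast, the handler can hand it
// straight to the fetcher, which deduplicates and orders the import.
func exampleEnqueue(f *Fetcher, peerID string, block *types.Block) {
	if err := f.Enqueue(peerID, block); err != nil {
		log.Debug("Fetcher rejected propagated block", "peer", peerID, "err", err)
	}
}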

// FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
// returning those that should be handled differently.
func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
	log.Trace("Filtering headers", "peer", peer, "headers", len(headers))

	// Send the filter channel to the fetcher
	filter := make(chan *headerFilterTask)

	select {
	case f.headerFilter <- filter:
	case <-f.quit:
		return nil
	}
	// Request the filtering of the header list
	select {
	case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:
	case <-f.quit:
		return nil
	}
	// Retrieve the headers remaining after filtering
	select {
	case task := <-filter:
		return task.headers
	case <-f.quit:
		return nil
	}
}

// FilterBodies extracts all the block bodies that were explicitly requested by
// the fetcher, returning those that should be handled differently.
func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction, time time.Time) [][]*types.Transaction {
	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions))

	// Send the filter channel to the fetcher
	filter := make(chan *bodyFilterTask)

	select {
	case f.bodyFilter <- filter:
	case <-f.quit:
		return nil
	}
	// Request the filtering of the body list
	select {
	case filter <- &bodyFilterTask{peer: peer, transactions: transactions, time: time}:
	case <-f.quit:
		return nil
	}
	// Retrieve the bodies remaining after filtering
	select {
	case task := <-filter:
		return task.transactions
	case <-f.quit:
		return nil
	}
}
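
// exampleFilterDelivery is an illustrative sketch, not part of the upstream
// file, of how a protocol handler might route incoming headers and bodies
// through the fetcher first, passing only the leftovers on to the rest of the
// sync machinery (represented here by placeholder callbacks).
func exampleFilterDelivery(f *Fetcher, peerID string,
	headers []*types.Header, txs [][]*types.Transaction,
	deliverHeaders func([]*types.Header), deliverBodies func([][]*types.Transaction)) {
	now := time.Now()
	if rest := f.FilterHeaders(peerID, headers, now); len(rest) > 0 {
		deliverHeaders(rest) // headers the fetcher did not request
	}
	if rest := f.FilterBodies(peerID, txs, now); len(rest) > 0 {
		deliverBodies(rest) // bodies the fetcher did not request
	}
}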

// loop is the main fetcher loop, checking and processing various notification
// events.
func (f *Fetcher) loop() {
	// Iterate the block fetching until a quit is requested
	fetchTimer := time.NewTimer(0)
	completeTimer := time.NewTimer(0)

	for {
		// Clean up any expired block fetches
		for hash, announce := range f.fetching {
			if time.Since(announce.time) > fetchTimeout {
				f.forgetHash(hash)
			}
		}
		// Import any queued blocks that could potentially fit
		height := f.chainHeight()
		for !f.queue.Empty() {
			op := f.queue.PopItem().(*inject)
			hash := op.block.Hash()
			if f.queueChangeHook != nil {
				f.queueChangeHook(hash, false)
			}
			// If too high up the chain or phase, continue later
			number := op.block.NumberU64()
			if number > height+1 {
				f.queue.Push(op, -int64(number))
				if f.queueChangeHook != nil {
					f.queueChangeHook(hash, true)
				}
				break
			}
			// Otherwise if fresh and still unknown, try and import
			if number+maxUncleDist < height || f.getBlock(context.Background(), hash) != nil {
				f.forgetBlock(hash)
				continue
			}
			go f.insert(op.origin, op.block)
		}
		// Wait for an outside event to occur
		select {
		case <-f.quit:
			// Fetcher terminating, abort all operations
			return

		case notification := <-f.notify:
			// A block was announced, make sure the peer isn't DOSing us
			propAnnounceInMeter.Mark(1)

			count := f.announces[notification.origin] + 1
			if count > hashLimit {
				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
				propAnnounceDOSMeter.Mark(1)
				break
			}
			// If we have a valid block number, check that it's potentially useful
			if notification.number > 0 {
				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
					propAnnounceDropMeter.Mark(1)
					break
				}
			}
			// All is well, schedule the announce if block's not yet downloading
			if _, ok := f.fetching[notification.hash]; ok {
				break
			}
			if _, ok := f.completing[notification.hash]; ok {
				break
			}
			f.announces[notification.origin] = count
			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
				f.announceChangeHook(notification.hash, true)
			}
			if len(f.announced) == 1 {
				f.rescheduleFetch(fetchTimer)
			}

		case op := <-f.inject:
			// A direct block insertion was requested, try and fill any pending gaps
			propBroadcastInMeter.Mark(1)
			f.enqueue(op.origin, op.block)

		case hash := <-f.done:
			// A pending import finished, remove all traces of the notification
			f.forgetHash(hash)
			f.forgetBlock(hash)

		case <-fetchTimer.C:
			ctx, span := trace.StartSpan(context.Background(), "Fetcher.loop-fetchTimer")
			// At least one block's timer ran out, check for needing retrieval
			request := make(map[string][]common.Hash)

			for hash, announces := range f.announced {
				if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
					// Pick a random peer to retrieve from, reset all others
					announce := announces[rand.Intn(len(announces))]
					f.forgetHash(hash)

					// If the block still didn't arrive, queue for fetching
					if f.getBlock(ctx, hash) == nil {
						request[announce.origin] = append(request[announce.origin], hash)
						f.fetching[hash] = announce
					}
				}
			}
			tracing := log.Tracing()
			// Send out all block header requests
			for peer, hashes := range request {
				if tracing {
					log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)
				}

				// Create a closure of the fetch and schedule it on a new thread
				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
				go func() {
					ctx, span := trace.StartSpan(ctx, "fetchHeader")
					defer span.End()

					if f.fetchingHook != nil {
						f.fetchingHook(hashes)
					}
					for _, hash := range hashes {
						headerFetchMeter.Mark(1)
						fetchHeader(ctx, hash) // Suboptimal, but protocol doesn't allow batch header retrievals
					}
				}()
			}
			span.End()
			// Schedule the next fetch if blocks are still pending
			f.rescheduleFetch(fetchTimer)

		case <-completeTimer.C:
			ctx, span := trace.StartSpan(context.Background(), "Fetcher.loop-completeTimer")
			// At least one header's timer ran out, retrieve everything
			request := make(map[string][]common.Hash)

			for hash, announces := range f.fetched {
				// Pick a random peer to retrieve from, reset all others
				announce := announces[rand.Intn(len(announces))]
				f.forgetHash(hash)

				// If the block still didn't arrive, queue for completion
				if f.getBlock(ctx, hash) == nil {
					request[announce.origin] = append(request[announce.origin], hash)
					f.completing[hash] = announce
				}
			}
			tracing := log.Tracing()
			spanCtx := span.SpanContext()
			link := trace.Link{
				Type:    trace.LinkTypeParent,
				TraceID: spanCtx.TraceID,
				SpanID:  spanCtx.SpanID,
			}
			// Send out all block body requests
			for peer, hashes := range request {
				if tracing {
					log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)
				}

				// Create a closure of the fetch and schedule it on a new thread
				if f.completingHook != nil {
					f.completingHook(hashes)
				}
				bodyFetchMeter.Mark(int64(len(hashes)))
				go func(ann *announce) {
					ctx, bs := trace.StartSpan(context.Background(), "Fetcher.loop-fetchBodies")
					defer bs.End()
					bs.AddLink(link)
					ann.fetchBodies(ctx, hashes)
				}(f.completing[hashes[0]])
			}

			span.End()
			// Schedule the next body completion if headers are still pending
			f.rescheduleComplete(completeTimer)

		case filter := <-f.headerFilter:
			// Headers arrived from a remote peer. Extract those that were explicitly
			// requested by the fetcher, and return everything else so it's delivered
			// to other parts of the system.
			var task *headerFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			headerFilterInMeter.Mark(int64(len(task.headers)))

			ctx, span := trace.StartSpan(context.Background(), "Fetcher.loop-headerFilter")
			tracing := log.Tracing()
			// Split the batch of headers into unknown ones (to return to the caller),
			// known incomplete ones (requiring body retrievals) and completed blocks.
			unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
			for _, header := range task.headers {
				hash := header.Hash()

				// Filter fetcher-requested headers from other synchronisation algorithms
				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
					// If the delivered header does not match the promised number, drop the announcer
					if header.Number.Uint64() != announce.number {
						log.Error("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
						f.dropPeer(announce.origin)
						f.forgetHash(hash)
						continue
					}
					// Only keep if not imported by other means
					if f.getBlock(ctx, hash) == nil {
						announce.header = header
						announce.time = task.time

						// If the block is empty (header only), short circuit into the final import queue
						if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
							if tracing {
								log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
							}

							block := types.NewBlockWithHeader(header)
							block.ReceivedAt = task.time

							complete = append(complete, block)
							f.completing[hash] = announce
							continue
						}
						// Otherwise add to the list of blocks needing completion
						incomplete = append(incomplete, announce)
					} else {
						if tracing {
							log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
						}
						f.forgetHash(hash)
					}
				} else {
					// Fetcher doesn't know about it, add to the return list
					unknown = append(unknown, header)
				}
			}
			headerFilterOutMeter.Mark(int64(len(unknown)))
			select {
			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
			case <-f.quit:
				span.End()
				return
			}
			// Schedule the retrieved headers for body completion
			for _, announce := range incomplete {
				hash := announce.header.Hash()
				if _, ok := f.completing[hash]; ok {
					continue
				}
				f.fetched[hash] = append(f.fetched[hash], announce)
				if len(f.fetched) == 1 {
					f.rescheduleComplete(completeTimer)
				}
			}
			// Schedule the header-only blocks for import
			for _, block := range complete {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}
			span.End()

		case filter := <-f.bodyFilter:
			// Block bodies arrived, extract any explicitly requested blocks, return the rest
			var task *bodyFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			bodyFilterInMeter.Mark(int64(len(task.transactions)))

			ctx, span := trace.StartSpan(context.Background(), "Fetcher.loop-bodyFilter")

			var blocks []*types.Block
			for i := 0; i < len(task.transactions); i++ {
				// Match up a body to any possible completion request
				matched := false

				for hash, announce := range f.completing {
					if f.queued[hash] == nil {
						txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
						if txnHash == announce.header.TxHash && announce.origin == task.peer {
							// Mark the body matched, reassemble if still unknown
							matched = true

							if f.getBlock(ctx, hash) == nil {
								block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], nil)
								block.ReceivedAt = task.time

								blocks = append(blocks, block)
							} else {
								f.forgetHash(hash)
							}
						}
					}
				}
				if matched {
					task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
					i--
					continue
				}
			}

			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
			select {
			case filter <- task:
			case <-f.quit:
				span.End()
				return
			}
			// Schedule the retrieved blocks for ordered import
			for _, block := range blocks {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}
			span.End()
		}
	}
}

// rescheduleFetch resets the specified fetch timer to the next announce timeout.
func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
	// Short circuit if no blocks are announced
	if len(f.announced) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.announced {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	fetch.Reset(arriveTimeout - time.Since(earliest))
}

// rescheduleComplete resets the specified completion timer to the next fetch timeout.
func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
	// Short circuit if no headers are fetched
	if len(f.fetched) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.fetched {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	complete.Reset(gatherSlack - time.Since(earliest))
}
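
// exampleNextFetchDelay is an illustrative sketch, not part of the upstream
// file, mirroring rescheduleFetch's arithmetic: the timer is reset to fire
// arriveTimeout after the oldest outstanding announcement, so the remaining
// delay shrinks as announcements age (and may be negative, firing immediately).
func exampleNextFetchDelay(announceTimes []time.Time) time.Duration {
	earliest := time.Now()
	for _, t := range announceTimes {
		if earliest.After(t) {
			earliest = t
		}
	}
	return arriveTimeout - time.Since(earliest)
}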

// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
func (f *Fetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// Ensure the peer isn't DOSing us
	count := f.queues[peer] + 1
	if count > blockLimit {
		log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
		propBroadcastDOSMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Discard any past or too distant blocks
	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist)
		propBroadcastDropMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &inject{
			origin: peer,
			block:  block,
		}
		f.queues[peer] = count
		f.queued[hash] = op
		f.queue.Push(op, -int64(block.NumberU64()))
		if f.queueChangeHook != nil {
			f.queueChangeHook(op.block.Hash(), true)
		}
		log.Debug("Queued propagated block", "peer", peer, "number", block.Number(), "hash", hash, "queued", f.queue.Size())
	}
}
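
// exampleQueueOrder is an illustrative sketch, not part of the upstream file,
// showing why enqueue pushes with priority -int64(number): prque pops the
// highest priority first, so negating the block number yields ascending block
// numbers and keeps imports ordered by height.
func exampleQueueOrder(numbers []uint64) []uint64 {
	q := prque.New(nil)
	for _, n := range numbers {
		q.Push(n, -int64(n))
	}
	ordered := make([]uint64, 0, len(numbers))
	for !q.Empty() {
		ordered = append(ordered, q.PopItem().(uint64))
	}
	return ordered
}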

// insert inserts a block into the chain. It is safe to run in a separate
// goroutine. If the block's number is at the same height as the current import
// phase, it updates the phase states accordingly.
func (f *Fetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()
	defer func() { f.done <- hash }()
	ctx, span := trace.StartSpan(context.Background(), "Fetcher.insert")
	defer span.End()
	span.AddAttributes(trace.Int64Attribute("num", int64(block.NumberU64())))

	log.Info("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)

	// If the parent's unknown, abort insertion
	parent := f.getBlock(ctx, block.ParentHash())
	if parent == nil {
		log.Error("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
		return
	}
	// Quickly validate the header and propagate the block if it passes
	switch err := f.verifyHeader(ctx, block.Header()); err {
	case nil:
		// All ok, quickly propagate to our peers
		propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
		go func() {
			ctx, bs := trace.StartSpan(context.Background(), "Fetcher.insert-broadcast")
			defer bs.End()
			parent := span.SpanContext()
			bs.AddLink(trace.Link{
				Type:    trace.LinkTypeParent,
				TraceID: parent.TraceID,
				SpanID:  parent.SpanID,
			})
			f.broadcastBlock(ctx, block, true)
		}()

	case consensus.ErrFutureBlock:
		// Weird future block, don't fail, but neither propagate

	default:
		// Something went very wrong, drop the peer
		log.Error("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
		f.dropPeer(peer)
		return
	}
	// Run the actual import and log any issues
	if _, err := f.insertChain(ctx, types.Blocks{block}); err != nil {
		log.Error("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
		return
	}
	// If import succeeded, broadcast the block
	propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
	go func() {
		ctx, bs := trace.StartSpan(context.Background(), "Fetcher.insert-announce")
		defer bs.End()
		parent := span.SpanContext()
		bs.AddLink(trace.Link{
			Type:    trace.LinkTypeParent,
			TraceID: parent.TraceID,
			SpanID:  parent.SpanID,
		})
		f.broadcastBlock(ctx, block, false)
	}()

	// Invoke the testing hook if needed
	if f.importedHook != nil {
		f.importedHook(block)
	}
}

// forgetHash removes all traces of a block announcement from the fetcher's
// internal state.
func (f *Fetcher) forgetHash(hash common.Hash) {
	// Remove all pending announces and decrement DOS counters
	for _, announce := range f.announced[hash] {
		f.announces[announce.origin]--
		if f.announces[announce.origin] == 0 {
			delete(f.announces, announce.origin)
		}
	}
	delete(f.announced, hash)
	if f.announceChangeHook != nil {
		f.announceChangeHook(hash, false)
	}
	// Remove any pending fetches and decrement the DOS counters
	if announce := f.fetching[hash]; announce != nil {
		f.announces[announce.origin]--
		if f.announces[announce.origin] == 0 {
			delete(f.announces, announce.origin)
		}
		delete(f.fetching, hash)
	}

	// Remove any pending completion requests and decrement the DOS counters
	for _, announce := range f.fetched[hash] {
		f.announces[announce.origin]--
		if f.announces[announce.origin] == 0 {
			delete(f.announces, announce.origin)
		}
	}
	delete(f.fetched, hash)

	// Remove any pending completions and decrement the DOS counters
	if announce := f.completing[hash]; announce != nil {
		f.announces[announce.origin]--
		if f.announces[announce.origin] == 0 {
			delete(f.announces, announce.origin)
		}
		delete(f.completing, hash)
	}
}

// forgetBlock removes all traces of a queued block from the fetcher's internal
// state.
func (f *Fetcher) forgetBlock(hash common.Hash) {
	if insert := f.queued[hash]; insert != nil {
		f.queues[insert.origin]--
		if f.queues[insert.origin] == 0 {
			delete(f.queues, insert.origin)
		}
		delete(f.queued, hash)
	}
}