github.com/ethereumproject/go-ethereum@v5.5.2+incompatible/eth/fetcher/fetcher.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package fetcher contains the block announcement based synchronisation.
    18  package fetcher
    19  
    20  import (
    21  	"errors"
    22  	"fmt"
    23  	"math/rand"
    24  	"time"
    25  
    26  	"github.com/ethereumproject/go-ethereum/common"
    27  	"github.com/ethereumproject/go-ethereum/core"
    28  	"github.com/ethereumproject/go-ethereum/core/types"
    29  	"github.com/ethereumproject/go-ethereum/event"
    30  	"github.com/ethereumproject/go-ethereum/logger"
    31  	"github.com/ethereumproject/go-ethereum/logger/glog"
    32  	"github.com/ethereumproject/go-ethereum/metrics"
    33  	"gopkg.in/karalabe/cookiejar.v2/collections/prque"
    34  )
    35  
// Tuning parameters for the announcement based synchroniser. They bound both
// the timing of explicit retrievals and the per-peer memory footprint of the
// fetcher's bookkeeping maps.
const (
	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block
	maxUncleDist  = 7                      // Maximum allowed backward distance from the chain head
	maxQueueDist  = 32                     // Maximum allowed distance from the chain head to queue
	hashLimit     = 256                    // Maximum number of unique blocks a peer may have announced
	blockLimit    = 64                     // Maximum number of unique blocks a peer may have delivered
)
    45  
var (
	// errTerminated is returned by the public entry points (Notify, Enqueue)
	// when the fetcher has been shut down via Stop and can accept no more work.
	errTerminated = errors.New("terminated")
)
    49  
// blockRetrievalFn is a callback type for retrieving a block from the local chain.
type blockRetrievalFn func(common.Hash) *types.Block

// headerRequesterFn is a callback type for sending a header retrieval request.
type headerRequesterFn func(common.Hash) error

// bodyRequesterFn is a callback type for sending a body retrieval request.
type bodyRequesterFn func([]common.Hash) error

// headerVerifierFn is a callback type to verify a block's header for fast propagation.
type headerVerifierFn func(header *types.Header) error

// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
// NOTE(review): insert passes propagate=true right after header verification and
// false after a successful import; the exact semantics live in the callback.
type blockBroadcasterFn func(block *types.Block, propagate bool)

// chainHeightFn is a callback type to retrieve the current chain height.
type chainHeightFn func() uint64

// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
type chainInsertFn func(types.Blocks) *core.ChainInsertResult

// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)
    73  
// announce is the hash notification of the availability of a new block in the
// network. A single hash may accumulate several announces (one per announcing
// peer); one of them is later picked at random for the actual retrieval.
type announce struct {
	hash   common.Hash   // Hash of the block being announced
	number uint64        // Number of the block being announced (0 = unknown | old protocol)
	header *types.Header // Header of the block partially reassembled (new protocol)
	time   time.Time     // Timestamp of the announcement

	origin string // Identifier of the peer originating the notification

	fetchHeader headerRequesterFn // [eth/62] Fetcher function to retrieve the header of an announced block
	fetchBodies bodyRequesterFn   // [eth/62] Fetcher function to retrieve the body of an announced block
}
    87  
// headerFilterTask represents a batch of headers needing fetcher filtering.
// It is exchanged in both directions over the filter channel: inbound with the
// full delivery, outbound with only the headers the fetcher did not claim.
type headerFilterTask struct {
	peer    string          // The source peer of block headers
	headers []*types.Header // Collection of headers to filter
	time    time.Time       // Arrival time of the headers
}
    94  
// bodyFilterTask represents a batch of block bodies (transactions and uncles)
// needing fetcher filtering. Transactions and uncles are parallel slices:
// index i of each belongs to the same delivered body.
type bodyFilterTask struct {
	peer         string                 // The source peer of block bodies
	transactions [][]*types.Transaction // Collection of transactions per block bodies
	uncles       [][]*types.Header      // Collection of uncles per block bodies
	time         time.Time              // Arrival time of the blocks' contents
}
   103  
// inject represents a scheduled import operation.
type inject struct {
	origin string       // Peer that delivered (or announced) the block
	block  *types.Block // Block queued for import
}
   109  
// Fetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
//
// All of the mutable state below (the announce maps and the import queue) is
// manipulated only by the loop goroutine; external callers interact solely
// through the event channels, so no locking is required.
type Fetcher struct {
	// Various event channels
	notify chan *announce
	inject chan *inject

	blockFilter  chan chan []*types.Block
	headerFilter chan chan *headerFilterTask
	bodyFilter   chan chan *bodyFilterTask

	done chan common.Hash
	quit chan struct{}

	// Mux
	mux *event.TypeMux

	// Announce states
	announces  map[string]int              // Per peer announce counts to prevent memory exhaustion
	announced  map[common.Hash][]*announce // Announced blocks, scheduled for fetching
	fetching   map[common.Hash]*announce   // Announced blocks, currently fetching
	fetched    map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
	completing map[common.Hash]*announce   // Blocks with headers, currently body-completing

	// Block cache
	queue  *prque.Prque            // Queue containing the import operations (block number sorted)
	queues map[string]int          // Per peer block counts to prevent memory exhaustion
	queued map[common.Hash]*inject // Set of already queued blocks (to dedupe imports)

	// Callbacks
	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
	verifyHeader   headerVerifierFn   // Checks if a block's headers have a valid proof of work
	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
	chainHeight    chainHeightFn      // Retrieves the current chain's height
	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
	dropPeer       peerDropFn         // Drops a peer for misbehaving

	// Testing hooks
	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list
	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
	fetchingHook       func([]common.Hash)     // Method to call upon starting a block (eth/61) or header (eth/62) fetch
	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (eth/62)
	importedHook       func(*types.Block)      // Method to call upon successful block import (both eth/61 and eth/62)
}
   154  
   155  // New creates a block fetcher to retrieve blocks based on hash announcements.
   156  func New(mux *event.TypeMux, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
   157  	return &Fetcher{
   158  		mux:            mux,
   159  		notify:         make(chan *announce),
   160  		inject:         make(chan *inject),
   161  		blockFilter:    make(chan chan []*types.Block),
   162  		headerFilter:   make(chan chan *headerFilterTask),
   163  		bodyFilter:     make(chan chan *bodyFilterTask),
   164  		done:           make(chan common.Hash),
   165  		quit:           make(chan struct{}),
   166  		announces:      make(map[string]int),
   167  		announced:      make(map[common.Hash][]*announce),
   168  		fetching:       make(map[common.Hash]*announce),
   169  		fetched:        make(map[common.Hash][]*announce),
   170  		completing:     make(map[common.Hash]*announce),
   171  		queue:          prque.New(),
   172  		queues:         make(map[string]int),
   173  		queued:         make(map[common.Hash]*inject),
   174  		getBlock:       getBlock,
   175  		verifyHeader:   verifyHeader,
   176  		broadcastBlock: broadcastBlock,
   177  		chainHeight:    chainHeight,
   178  		insertChain:    insertChain,
   179  		dropPeer:       dropPeer,
   180  	}
   181  }
   182  
// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination requested. The event
// loop runs in its own goroutine; Start returns immediately.
func (f *Fetcher) Start() {
	go f.loop()
}
   188  
// Stop terminates the announcement based synchroniser, canceling all pending
// operations. Closing quit unblocks every select waiting on it, including the
// public entry points, which then return errTerminated.
func (f *Fetcher) Stop() {
	close(f.quit)
}
   194  
   195  // Notify announces the fetcher of the potential availability of a new block in
   196  // the network.
   197  func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time,
   198  	headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error {
   199  	block := &announce{
   200  		hash:        hash,
   201  		number:      number,
   202  		time:        time,
   203  		origin:      peer,
   204  		fetchHeader: headerFetcher,
   205  		fetchBodies: bodyFetcher,
   206  	}
   207  	select {
   208  	case f.notify <- block:
   209  		return nil
   210  	case <-f.quit:
   211  		return errTerminated
   212  	}
   213  }
   214  
   215  // Enqueue tries to fill gaps in the fetcher's future import queue.
   216  func (f *Fetcher) Enqueue(peer string, block *types.Block) error {
   217  	op := &inject{
   218  		origin: peer,
   219  		block:  block,
   220  	}
   221  	select {
   222  	case f.inject <- op:
   223  		return nil
   224  	case <-f.quit:
   225  		return errTerminated
   226  	}
   227  }
   228  
   229  // FilterHeaders extracts all the headers that were explicitly requested by the fetcher,
   230  // returning those that should be handled differently.
   231  func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header {
   232  	glog.V(logger.Detail).Infof("[eth/62] filtering %d headers", len(headers))
   233  
   234  	// Send the filter channel to the fetcher
   235  	filter := make(chan *headerFilterTask)
   236  
   237  	select {
   238  	case f.headerFilter <- filter:
   239  	case <-f.quit:
   240  		return nil
   241  	}
   242  	// Request the filtering of the header list
   243  	select {
   244  	case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}:
   245  	case <-f.quit:
   246  		return nil
   247  	}
   248  	// Retrieve the headers remaining after filtering
   249  	select {
   250  	case task := <-filter:
   251  		return task.headers
   252  	case <-f.quit:
   253  		return nil
   254  	}
   255  }
   256  
   257  // FilterBodies extracts all the block bodies that were explicitly requested by
   258  // the fetcher, returning those that should be handled differently.
   259  func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
   260  	glog.V(logger.Detail).Infof("[eth/62] filtering %d:%d bodies", len(transactions), len(uncles))
   261  
   262  	// Send the filter channel to the fetcher
   263  	filter := make(chan *bodyFilterTask)
   264  
   265  	select {
   266  	case f.bodyFilter <- filter:
   267  	case <-f.quit:
   268  		return nil, nil
   269  	}
   270  	// Request the filtering of the body list
   271  	select {
   272  	case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}:
   273  	case <-f.quit:
   274  		return nil, nil
   275  	}
   276  	// Retrieve the bodies remaining after filtering
   277  	select {
   278  	case task := <-filter:
   279  		return task.transactions, task.uncles
   280  	case <-f.quit:
   281  		return nil, nil
   282  	}
   283  }
   284  
// loop is the main fetcher event loop, checking and processing various
// notification events. It owns all of the fetcher's mutable state; every other
// goroutine communicates with it exclusively through channels.
func (f *Fetcher) loop() {
	// Iterate the block fetching until a quit is requested.
	// Both timers fire immediately on the first iteration and are rescheduled
	// by rescheduleFetch/rescheduleComplete as announcements accumulate.
	fetchTimer := time.NewTimer(0)
	completeTimer := time.NewTimer(0)

	for {
		// Clean up any expired block fetches
		for hash, announce := range f.fetching {
			if time.Since(announce.time) > fetchTimeout {
				f.forgetHash(hash)
			}
		}
		// Import any queued blocks that could potentially fit
		height := f.chainHeight()
		for !f.queue.Empty() {
			op := f.queue.PopItem().(*inject)
			hash := op.block.Hash()
			if f.queueChangeHook != nil {
				f.queueChangeHook(hash, false)
			}
			// If too high up the chain or phase, continue later
			number := op.block.NumberU64()
			if number > height+1 {
				// Re-queue under the same priority and stop draining; the queue
				// is number-sorted, so everything behind it is even higher.
				f.queue.Push(op, -float32(number))
				if f.queueChangeHook != nil {
					f.queueChangeHook(hash, true)
				}
				break
			}
			// Otherwise if fresh and still unknown, try and import
			if number+maxUncleDist < height || f.getBlock(hash) != nil {
				f.forgetBlock(hash)
				continue
			}
			f.insert(op.origin, op.block)
		}
		// Wait for an outside event to occur
		select {
		case <-f.quit:
			// Fetcher terminating, abort all operations
			return

		case notification := <-f.notify:
			// A block was announced, make sure the peer isn't DOSing us
			metrics.FetchAnnounces.Mark(1)

			count := f.announces[notification.origin] + 1
			if count > hashLimit {
				glog.V(logger.Debug).Infof("Peer %s: exceeded outstanding announces (%d)", notification.origin, hashLimit)
				metrics.FetchAnnounceDOS.Mark(1)
				break
			}
			// If we have a valid block number, check that it's potentially useful
			if notification.number > 0 {
				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
					if logger.MlogEnabled() {
						mlogFetcherDiscardAnnouncement.AssignDetails(
							notification.origin,
							notification.number,
							notification.hash.Hex(),
							dist,
						).Send(mlogFetcher)
					}
					glog.V(logger.Debug).Infof("[eth/62] Peer %s: discarded announcement #%d [%s], distance %d", notification.origin, notification.number, notification.hash.Hex(), dist)
					metrics.FetchAnnounceDrops.Mark(1)
					break
				}
			}
			// All is well, schedule the announce if block's not yet downloading
			if _, ok := f.fetching[notification.hash]; ok {
				break
			}
			if _, ok := f.completing[notification.hash]; ok {
				break
			}
			f.announces[notification.origin] = count
			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
				f.announceChangeHook(notification.hash, true)
			}
			// First pending announcement: arm the fetch timer.
			if len(f.announced) == 1 {
				f.rescheduleFetch(fetchTimer)
			}

		case op := <-f.inject:
			// A direct block insertion was requested, try and fill any pending gaps
			metrics.FetchBroadcasts.Mark(1)
			f.enqueue(op.origin, op.block)

		case hash := <-f.done:
			// A pending import finished, remove all traces of the notification
			f.forgetHash(hash)
			f.forgetBlock(hash)

		case <-fetchTimer.C:
			// At least one block's timer ran out, check for needing retrieval.
			// gatherSlack lets almost-expired announcements ride along in the
			// same batch instead of waking up again moments later.
			request := make(map[string][]common.Hash)

			for hash, announces := range f.announced {
				if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
					// Pick a random peer to retrieve from, reset all others
					announce := announces[rand.Intn(len(announces))]
					f.forgetHash(hash)

					// If the block still didn't arrive, queue for fetching
					if f.getBlock(hash) == nil {
						request[announce.origin] = append(request[announce.origin], hash)
						f.fetching[hash] = announce
					}
				}
			}
			// Send out all block (eth/61) or header (eth/62) requests
			for peer, hashes := range request {
				if glog.V(logger.Detail) && len(hashes) > 0 {
					list := "["
					for _, hash := range hashes {
						list += fmt.Sprintf("%x…, ", hash[:4])
					}
					list = list[:len(list)-2] + "]"
					glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching headers %s", peer, list)
				}
				// Create a closure of the fetch and schedule in on a new thread.
				// The re-binding of hashes shadows the range variable so the
				// goroutine below captures this iteration's value.
				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
				go func() {
					if f.fetchingHook != nil {
						f.fetchingHook(hashes)
					}
					for _, hash := range hashes {
						metrics.FetchHeaders.Mark(1)
						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
					}
				}()
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleFetch(fetchTimer)

		case <-completeTimer.C:
			// At least one header's timer ran out, retrieve everything
			request := make(map[string][]common.Hash)

			for hash, announces := range f.fetched {
				// Pick a random peer to retrieve from, reset all others
				announce := announces[rand.Intn(len(announces))]
				f.forgetHash(hash)

				// If the block still didn't arrive, queue for completion
				if f.getBlock(hash) == nil {
					request[announce.origin] = append(request[announce.origin], hash)
					f.completing[hash] = announce
				}
			}
			// Send out all block body requests
			for peer, hashes := range request {
				if glog.V(logger.Detail) && len(hashes) > 0 {
					list := "["
					for _, hash := range hashes {
						list += fmt.Sprintf("%x…, ", hash[:4])
					}
					list = list[:len(list)-2] + "]"

					glog.V(logger.Detail).Infof("[eth/62] Peer %s: fetching bodies %s", peer, list)
				}
				// Create a closure of the fetch and schedule in on a new thread
				if f.completingHook != nil {
					f.completingHook(hashes)
				}
				metrics.FetchBodies.Mark(int64(len(hashes)))
				// The map index and method value are evaluated here, before the
				// goroutine starts, so this does not race later map mutations.
				go f.completing[hashes[0]].fetchBodies(hashes)
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleComplete(completeTimer)

		case filter := <-f.headerFilter:
			// Headers arrived from a remote peer. Extract those that were explicitly
			// requested by the fetcher, and return everything else so it's delivered
			// to other parts of the system.
			var task *headerFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			metrics.FetchFilterHeaderIns.Mark(int64(len(task.headers)))

			// Split the batch of headers into unknown ones (to return to the caller),
			// known incomplete ones (requiring body retrievals) and completed blocks.
			unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
			for _, header := range task.headers {
				hash := header.Hash()

				// Filter fetcher-requested headers from other synchronisation algorithms
				announce := f.fetching[hash]
				cond := announce != nil &&
					announce.origin == task.peer &&
					f.fetched[hash] == nil &&
					f.completing[hash] == nil &&
					f.queued[hash] == nil
				if cond {
					// If the delivered header does not match the promised number, drop the announcer
					if header.Number.Uint64() != announce.number {
						glog.V(logger.Detail).Infof("[eth/62] Peer %s: invalid block number for [%s]: announced %d, provided %d", announce.origin, header.Hash().Hex(), announce.number, header.Number.Uint64())
						f.dropPeer(announce.origin)
						f.forgetHash(hash)
						continue
					}
					// Only keep if not imported by other means
					if f.getBlock(hash) == nil {
						announce.header = header
						announce.time = task.time

						// If the block is empty (header only), short circuit into the final import queue
						blockEmpty := header.TxHash == types.DeriveSha(types.Transactions{}) &&
							header.UncleHash == types.CalcUncleHash([]*types.Header{})
						if blockEmpty {
							glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%s] empty, skipping body retrieval", announce.origin, header.Number.Uint64(), header.Hash().Hex())

							block := types.NewBlockWithHeader(header)
							block.ReceivedAt = task.time

							complete = append(complete, block)
							f.completing[hash] = announce
							continue
						}
						// Otherwise add to the list of blocks needing completion
						incomplete = append(incomplete, announce)
					} else {
						glog.V(logger.Detail).Infof("[eth/62] Peer %s: block #%d [%s] already imported, discarding header", announce.origin, header.Number.Uint64(), header.Hash().Hex())
						f.forgetHash(hash)
					}
				} else {
					// Fetcher doesn't know about it, add to the return list
					unknown = append(unknown, header)
				}
			}
			metrics.FetchFilterHeaderOuts.Mark(int64(len(unknown)))
			select {
			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
			case <-f.quit:
				return
			}
			// Schedule the retrieved headers for body completion
			for _, announce := range incomplete {
				hash := announce.header.Hash()
				if _, ok := f.completing[hash]; ok {
					continue
				}
				f.fetched[hash] = append(f.fetched[hash], announce)
				if len(f.fetched) == 1 {
					f.rescheduleComplete(completeTimer)
				}
			}
			// Schedule the header-only blocks for import
			for _, block := range complete {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}

		case filter := <-f.bodyFilter:
			// Block bodies arrived, extract any explicitly requested blocks, return the rest
			var task *bodyFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			metrics.FetchFilterBodyIns.Mark(int64(len(task.transactions)))

			// Match each delivered body against every pending completion;
			// quadratic, but both sets are bounded by the DOS limits above.
			blocks := []*types.Block{}
			for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
				// Match up a body to any possible completion request
				matched := false

				for hash, announce := range f.completing {
					if f.queued[hash] == nil {
						txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
						uncleHash := types.CalcUncleHash(task.uncles[i])

						if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash && announce.origin == task.peer {
							// Mark the body matched, reassemble if still unknown
							matched = true

							if f.getBlock(hash) == nil {
								block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
								block.ReceivedAt = task.time

								blocks = append(blocks, block)
							} else {
								f.forgetHash(hash)
							}
						}
					}
				}
				// Claimed bodies are spliced out of the task; i is stepped back
				// so the element shifted into slot i is examined next.
				if matched {
					task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
					task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
					i--
					continue
				}
			}

			metrics.FetchFilterBodyOuts.Mark(int64(len(task.transactions)))
			select {
			case filter <- task:
			case <-f.quit:
				return
			}
			// Schedule the retrieved blocks for ordered import
			for _, block := range blocks {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}
		}
	}
}
   603  
   604  // rescheduleFetch resets the specified fetch timer to the next announce timeout.
   605  func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
   606  	// Short circuit if no blocks are announced
   607  	if len(f.announced) == 0 {
   608  		return
   609  	}
   610  	// Otherwise find the earliest expiring announcement
   611  	earliest := time.Now()
   612  	for _, announces := range f.announced {
   613  		if earliest.After(announces[0].time) {
   614  			earliest = announces[0].time
   615  		}
   616  	}
   617  	fetch.Reset(arriveTimeout - time.Since(earliest))
   618  }
   619  
   620  // rescheduleComplete resets the specified completion timer to the next fetch timeout.
   621  func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
   622  	// Short circuit if no headers are fetched
   623  	if len(f.fetched) == 0 {
   624  		return
   625  	}
   626  	// Otherwise find the earliest expiring announcement
   627  	earliest := time.Now()
   628  	for _, announces := range f.fetched {
   629  		if earliest.After(announces[0].time) {
   630  			earliest = announces[0].time
   631  		}
   632  	}
   633  	complete.Reset(gatherSlack - time.Since(earliest))
   634  }
   635  
   636  // enqueue schedules a new future import operation, if the block to be imported
   637  // has not yet been seen.
   638  func (f *Fetcher) enqueue(peer string, block *types.Block) {
   639  	hash := block.Hash()
   640  
   641  	// Ensure the peer isn't DOSing us
   642  	count := f.queues[peer] + 1
   643  	if count > blockLimit {
   644  		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%s], exceeded allowance (%d)", peer, block.NumberU64(), hash.Hex(), blockLimit)
   645  		metrics.FetchBroadcastDOS.Mark(1)
   646  		f.forgetHash(hash)
   647  		return
   648  	}
   649  	// Discard any past or too distant blocks
   650  	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
   651  		if logger.MlogEnabled() {
   652  			mlogFetcherDiscardAnnouncement.AssignDetails(
   653  				peer,
   654  				block.NumberU64(),
   655  				hash.Hex(),
   656  				dist,
   657  			).Send(mlogFetcher)
   658  		}
   659  		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%s], distance %d", peer, block.NumberU64(), hash.Hex(), dist)
   660  		metrics.FetchBroadcastDrops.Mark(1)
   661  		f.forgetHash(hash)
   662  		return
   663  	}
   664  	// Don't queue block if we already have it.
   665  	if f.getBlock(hash) != nil {
   666  		glog.V(logger.Debug).Infof("Peer %s: discarded block #%d [%s], already have", peer, block.NumberU64(), hash.Hex())
   667  		metrics.FetchBroadcastDrops.Mark(1)
   668  		f.forgetHash(hash)
   669  		return
   670  	}
   671  	// Schedule the block for future importing
   672  	if _, ok := f.queued[hash]; !ok {
   673  		op := &inject{
   674  			origin: peer,
   675  			block:  block,
   676  		}
   677  		f.queues[peer] = count
   678  		f.queued[hash] = op
   679  		f.queue.Push(op, -float32(block.NumberU64()))
   680  		if f.queueChangeHook != nil {
   681  			f.queueChangeHook(op.block.Hash(), true)
   682  		}
   683  		if glog.V(logger.Debug) {
   684  			glog.Infof("Peer %s: queued block #%d [%s], total %v", peer, block.NumberU64(), hash.Hex(), f.queue.Size())
   685  		}
   686  	}
   687  }
   688  
// insert spawns a new goroutine to run a block insertion into the chain. If the
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly. Completion (success or failure) is always
// signalled to the event loop via f.done so the block's traces get cleaned up.
func (f *Fetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
	glog.V(logger.Debug).Infof("Peer %s: importing block #%d [%s]", peer, block.NumberU64(), hash.Hex())
	go func() {
		// Every exit path must notify the loop, hence the defer.
		defer func() { f.done <- hash }()

		// If the parent's unknown, abort insertion, and don't forget the hash and block;
		// use queue gap fill to get unknown parent.
		parent := f.getBlock(block.ParentHash())
		if parent == nil {
			glog.V(logger.Debug).Infof("Peer %s: parent [%s] of block #%d [%s] unknown", peer, block.ParentHash().Hex(), block.NumberU64(), hash.Hex())
			return
		}
		// Quickly validate the header and propagate the block if it passes
		switch err := f.verifyHeader(block.Header()); err {
		case nil:
			// All ok, quickly propagate to our peers
			metrics.FetchBroadcastTimer.UpdateSince(block.ReceivedAt)
			go f.broadcastBlock(block, true)

		case core.BlockFutureErr:
			// Weird future block, don't fail, but neither propagate

		default:
			// Something went very wrong, drop the peer
			glog.V(logger.Debug).Infof("Peer %s: block #%d [%s] verification failed: %v", peer, block.NumberU64(), hash.Hex(), err)
			f.dropPeer(peer)
			return
		}
		// Run the actual import and log any issues
		if res := f.insertChain(types.Blocks{block}); res.Error != nil {
			glog.V(logger.Warn).Infof("Peer %s: block #%d [%s] import failed: %v", peer, block.NumberU64(), hash.Hex(), res.Error)
			glog.D(logger.Warn).Warnf("Peer %s: block #%d [%s] import failed: %v", peer, block.NumberU64(), hash.Hex(), res.Error)
			return
		} else {
			// Publish the successful import for other subsystems listening on the mux.
			f.mux.Post(FetcherInsertBlockEvent{Peer: peer, Block: block})
		}
		// If import succeeded, broadcast the block
		metrics.FetchAnnounceTimer.UpdateSince(block.ReceivedAt)
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(block)
		}
	}()
}
   741  
   742  // forgetHash removes all traces of a block announcement from the fetcher's
   743  // internal state.
   744  func (f *Fetcher) forgetHash(hash common.Hash) {
   745  	// Remove all pending announces and decrement DOS counters
   746  	for _, announce := range f.announced[hash] {
   747  		f.announces[announce.origin]--
   748  		if f.announces[announce.origin] == 0 {
   749  			delete(f.announces, announce.origin)
   750  		}
   751  	}
   752  	delete(f.announced, hash)
   753  	if f.announceChangeHook != nil {
   754  		f.announceChangeHook(hash, false)
   755  	}
   756  	// Remove any pending fetches and decrement the DOS counters
   757  	if announce := f.fetching[hash]; announce != nil {
   758  		f.announces[announce.origin]--
   759  		if f.announces[announce.origin] == 0 {
   760  			delete(f.announces, announce.origin)
   761  		}
   762  		delete(f.fetching, hash)
   763  	}
   764  
   765  	// Remove any pending completion requests and decrement the DOS counters
   766  	for _, announce := range f.fetched[hash] {
   767  		f.announces[announce.origin]--
   768  		if f.announces[announce.origin] == 0 {
   769  			delete(f.announces, announce.origin)
   770  		}
   771  	}
   772  	delete(f.fetched, hash)
   773  
   774  	// Remove any pending completions and decrement the DOS counters
   775  	if announce := f.completing[hash]; announce != nil {
   776  		f.announces[announce.origin]--
   777  		if f.announces[announce.origin] == 0 {
   778  			delete(f.announces, announce.origin)
   779  		}
   780  		delete(f.completing, hash)
   781  	}
   782  }
   783  
   784  // forgetBlock removes all traces of a queued block from the fetcher's internal
   785  // state.
   786  func (f *Fetcher) forgetBlock(hash common.Hash) {
   787  	if insert := f.queued[hash]; insert != nil {
   788  		f.queues[insert.origin]--
   789  		if f.queues[insert.origin] == 0 {
   790  			delete(f.queues, insert.origin)
   791  		}
   792  		delete(f.queued, hash)
   793  	}
   794  }