github.com/calmw/ethereum@v0.1.1/eth/fetcher/tx_fetcher.go

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package fetcher
    18  
    19  import (
    20  	"bytes"
    21  	"errors"
    22  	"fmt"
    23  	mrand "math/rand"
    24  	"sort"
    25  	"time"
    26  
    27  	"github.com/calmw/ethereum/common"
    28  	"github.com/calmw/ethereum/common/mclock"
    29  	"github.com/calmw/ethereum/core/txpool"
    30  	"github.com/calmw/ethereum/core/types"
    31  	"github.com/calmw/ethereum/log"
    32  	"github.com/calmw/ethereum/metrics"
    33  	mapset "github.com/deckarep/golang-set/v2"
    34  )
    35  
    36  const (
    37  	// maxTxAnnounces is the maximum number of unique transactions a peer
    38  	// can announce in a short time.
    39  	maxTxAnnounces = 4096
    40  
    41  	// maxTxRetrievals is the maximum number of transactions that can be fetched
    42  	// in one request. The rationale for picking 256 is:
    43  	//   - In the eth protocol, the softResponseLimit is 2MB. Nowadays, according
    44  	//     to Etherscan, the average transaction size is around 200B, so in theory
    45  	//     we could include a lot of transactions in a single protocol packet.
    46  	//   - However, a single transaction can be as large as 128KB, so pick a
    47  	//     middle value here to maximize the efficiency of the retrieval while
    48  	//     ensuring that response size overflow won't happen in most cases.
    49  	maxTxRetrievals = 256
    50  
    51  	// maxTxUnderpricedSetSize is the size of the underpriced transaction set that
    52  	// is used to track recent transactions that have been dropped so we don't
    53  	// re-request them.
    54  	maxTxUnderpricedSetSize = 32768
    55  
    56  	// txArriveTimeout is the time allowance before an announced transaction is
    57  	// explicitly requested.
    58  	txArriveTimeout = 500 * time.Millisecond
    59  
    60  	// txGatherSlack is the interval used to collate almost-expired announces
    61  	// with network fetches.
    62  	txGatherSlack = 100 * time.Millisecond
    63  )
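// Back-of-the-envelope arithmetic behind maxTxRetrievals, using only the figures
// quoted in the comment above (not freshly measured values): a typical request of
// 256 transactions at ~200B each comes to roughly 256 * 200B ≈ 50KB, far below the
// 2MB softResponseLimit, whereas in the pathological case of 128KB transactions a
// mere 2MB / 128KB = 16 of them already fill a response. 256 is a middle ground
// between those two extremes.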
    64  
    65  var (
    66  	// txFetchTimeout is the maximum allotted time to return an explicitly
    67  	// requested transaction.
    68  	txFetchTimeout = 5 * time.Second
    69  )
    70  
    71  var (
    72  	txAnnounceInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/in", nil)
    73  	txAnnounceKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/known", nil)
    74  	txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/underpriced", nil)
    75  	txAnnounceDOSMeter         = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/dos", nil)
    76  
    77  	txBroadcastInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/in", nil)
    78  	txBroadcastKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/known", nil)
    79  	txBroadcastUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/underpriced", nil)
    80  	txBroadcastOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/otherreject", nil)
    81  
    82  	txRequestOutMeter     = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/out", nil)
    83  	txRequestFailMeter    = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/fail", nil)
    84  	txRequestDoneMeter    = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/done", nil)
    85  	txRequestTimeoutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/timeout", nil)
    86  
    87  	txReplyInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/in", nil)
    88  	txReplyKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/known", nil)
    89  	txReplyUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/underpriced", nil)
    90  	txReplyOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/otherreject", nil)
    91  
    92  	txFetcherWaitingPeers   = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/peers", nil)
    93  	txFetcherWaitingHashes  = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/hashes", nil)
    94  	txFetcherQueueingPeers  = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/peers", nil)
    95  	txFetcherQueueingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/hashes", nil)
    96  	txFetcherFetchingPeers  = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/peers", nil)
    97  	txFetcherFetchingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/hashes", nil)
    98  )
    99  
   100  // txAnnounce is the notification of the availability of a batch
   101  // of new transactions in the network.
   102  type txAnnounce struct {
   103  	origin string        // Identifier of the peer originating the notification
   104  	hashes []common.Hash // Batch of transaction hashes being announced
   105  }
   106  
   107  // txRequest represents an in-flight transaction retrieval request destined to
   108  // a specific peer.
   109  type txRequest struct {
   110  	hashes []common.Hash            // Transactions having been requested
   111  	stolen map[common.Hash]struct{} // Deliveries by someone else (don't re-request)
   112  	time   mclock.AbsTime           // Timestamp of the request
   113  }
   114  
   115  // txDelivery is the notification that a batch of transactions has been added
   116  // to the pool and should be untracked.
   117  type txDelivery struct {
   118  	origin string        // Identifier of the peer originating the notification
   119  	hashes []common.Hash // Batch of transaction hashes having been delivered
   120  	direct bool          // Whether this is a direct reply or a broadcast
   121  }
   122  
   123  // txDrop is the notification that a peer has disconnected.
   124  type txDrop struct {
   125  	peer string
   126  }
   127  
   128  // TxFetcher is responsible for retrieving new transactions based on announcements.
   129  //
   130  // The fetcher operates in 3 stages:
   131  //   - Transactions that are newly discovered are moved into a wait list.
   132  //   - After ~500ms passes, transactions from the wait list that have not been
   133  //     broadcast to us in whole are moved into a queueing area.
   134  //   - When a connected peer doesn't have in-flight retrieval requests, any
   135  //     transaction queued up (and announced by the peer) are allocated to the
   136  //     peer and moved into a fetching status until it's fulfilled or fails.
   137  //
   138  // The invariants of the fetcher are:
   139  //   - Each tracked transaction (hash) must only be present in one of the
   140  //     three stages. This ensures that the fetcher operates akin to a finite
   141  //     state automaton and there's no data leak.
   142  //   - Each peer that announced transactions may have retrievals scheduled for
   143  //     it, but only ever one concurrently. This ensures we can immediately know
   144  //     what is missing from a reply and reschedule it.
   145  type TxFetcher struct {
   146  	notify  chan *txAnnounce
   147  	cleanup chan *txDelivery
   148  	drop    chan *txDrop
   149  	quit    chan struct{}
   150  
   151  	underpriced mapset.Set[common.Hash] // Transactions discarded as too cheap (don't re-fetch)
   152  
   153  	// Stage 1: Waiting lists for newly discovered transactions that might be
   154  	// broadcast without needing explicit request/reply round trips.
   155  	waitlist  map[common.Hash]map[string]struct{} // Transactions waiting for a potential broadcast
   156  	waittime  map[common.Hash]mclock.AbsTime      // Timestamps when transactions were added to the waitlist
   157  	waitslots map[string]map[common.Hash]struct{} // Waiting announcements grouped by peer (DoS protection)
   158  
   159  	// Stage 2: Queue of transactions that are waiting to be allocated to some peer
   160  	// to be retrieved directly.
   161  	announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer
   162  	announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
   163  
   164  	// Stage 3: Set of transactions currently being retrieved, some of which may
   165  	// be fulfilled and some rescheduled. Note, this step shares 'announces' from
   166  	// the previous stage to avoid duplicating it (it's needed for DoS checks).
   167  	fetching   map[common.Hash]string              // Transaction set currently being retrieved
   168  	requests   map[string]*txRequest               // In-flight transaction retrievals
   169  	alternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails
   170  
   171  	// Callbacks
   172  	hasTx    func(common.Hash) bool             // Retrieves a tx from the local txpool
   173  	addTxs   func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
   174  	fetchTxs func(string, []common.Hash) error  // Retrieves a set of txs from a remote peer
   175  
   176  	step  chan struct{} // Notification channel when the fetcher loop iterates
   177  	clock mclock.Clock  // Time wrapper to simulate in tests
   178  	rand  *mrand.Rand   // Randomizer to use in tests instead of map range loops (soft-random)
   179  }
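// Illustrative trace of a single hash H announced by peer "A" through the three
// stages (a sketch of the data-structure movements implemented by the loop below,
// not additional behaviour):
//
//	Stage 1: waitlist[H] = {"A"}, waittime[H] = now, waitslots["A"] = {H}
//	Stage 2: after ~txArriveTimeout the wait timer fires, H moves to
//	         announced[H] = {"A"} and announces["A"] = {H}; the waitlist,
//	         waittime and waitslots entries are dropped
//	Stage 3: once "A" is idle, scheduleFetches sets fetching[H] = "A" and
//	         alternates[H] = announced[H], requests["A"].hashes includes H,
//	         and announced[H] is deleted
//
// A delivery (Enqueue), a timeout or a peer drop then either erases H from all
// trackers or moves it back to stage 2 under an alternate origin.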
   180  
   181  // NewTxFetcher creates a transaction fetcher to retrieve transactions
   182  // based on hash announcements.
   183  func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
   184  	return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)
   185  }
   186  
   187  // NewTxFetcherForTests is a testing method to mock out the realtime clock with
   188  // a simulated version and the internal randomness with a deterministic one.
   189  func NewTxFetcherForTests(
   190  	hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,
   191  	clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
   192  	return &TxFetcher{
   193  		notify:      make(chan *txAnnounce),
   194  		cleanup:     make(chan *txDelivery),
   195  		drop:        make(chan *txDrop),
   196  		quit:        make(chan struct{}),
   197  		waitlist:    make(map[common.Hash]map[string]struct{}),
   198  		waittime:    make(map[common.Hash]mclock.AbsTime),
   199  		waitslots:   make(map[string]map[common.Hash]struct{}),
   200  		announces:   make(map[string]map[common.Hash]struct{}),
   201  		announced:   make(map[common.Hash]map[string]struct{}),
   202  		fetching:    make(map[common.Hash]string),
   203  		requests:    make(map[string]*txRequest),
   204  		alternates:  make(map[common.Hash]map[string]struct{}),
   205  		underpriced: mapset.NewSet[common.Hash](),
   206  		hasTx:       hasTx,
   207  		addTxs:      addTxs,
   208  		fetchTxs:    fetchTxs,
   209  		clock:       clock,
   210  		rand:        rand,
   211  	}
   212  }
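// A minimal wiring sketch for the three callbacks (the pool and peers objects and
// their methods here are hypothetical placeholders, not this repository's actual
// handler code, which lives in the eth protocol handler):
//
//	f := NewTxFetcher(
//		pool.Has,                                       // hasTx: is the hash already pooled?
//		pool.Add,                                       // addTxs: import delivered transactions
//		func(peer string, hashes []common.Hash) error { // fetchTxs: request the txs from the peer
//			return peers.Peer(peer).RequestTxs(hashes)
//		},
//	)
//	f.Start()
//	defer f.Stop()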
   213  
   214  // Notify informs the fetcher of the potential availability of a new batch of
   215  // transactions in the network.
   216  func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
   217  	// Keep track of all the announced transactions
   218  	txAnnounceInMeter.Mark(int64(len(hashes)))
   219  
   220  	// Skip any transaction announcements that we already know of, or that we've
   221  	// previously marked as cheap and discarded. This check is of course racy,
   222  	// because multiple concurrent notifies will still manage to pass it, but it's
   223  	// still valuable to check here because it runs concurrently to the internal
   224  	// loop, so anything caught here is time saved internally.
   225  	var (
   226  		unknowns               = make([]common.Hash, 0, len(hashes))
   227  		duplicate, underpriced int64
   228  	)
   229  	for _, hash := range hashes {
   230  		switch {
   231  		case f.hasTx(hash):
   232  			duplicate++
   233  
   234  		case f.underpriced.Contains(hash):
   235  			underpriced++
   236  
   237  		default:
   238  			unknowns = append(unknowns, hash)
   239  		}
   240  	}
   241  	txAnnounceKnownMeter.Mark(duplicate)
   242  	txAnnounceUnderpricedMeter.Mark(underpriced)
   243  
   244  	// If anything's left to announce, push it into the internal loop
   245  	if len(unknowns) == 0 {
   246  		return nil
   247  	}
   248  	announce := &txAnnounce{
   249  		origin: peer,
   250  		hashes: unknowns,
   251  	}
   252  	select {
   253  	case f.notify <- announce:
   254  		return nil
   255  	case <-f.quit:
   256  		return errTerminated
   257  	}
   258  }
   259  
   260  // Enqueue imports a batch of received transactions into the transaction pool
   261  // and the fetcher. This method may be called by both transaction broadcasts and
   262  // direct request replies. The differentiation is important so the fetcher can
   263  // re-schedule missing transactions as soon as possible.
   264  func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error {
   265  	var (
   266  		inMeter          = txReplyInMeter
   267  		knownMeter       = txReplyKnownMeter
   268  		underpricedMeter = txReplyUnderpricedMeter
   269  		otherRejectMeter = txReplyOtherRejectMeter
   270  	)
   271  	if !direct {
   272  		inMeter = txBroadcastInMeter
   273  		knownMeter = txBroadcastKnownMeter
   274  		underpricedMeter = txBroadcastUnderpricedMeter
   275  		otherRejectMeter = txBroadcastOtherRejectMeter
   276  	}
   277  	// Keep track of all the propagated transactions
   278  	inMeter.Mark(int64(len(txs)))
   279  
   280  	// Push all the transactions into the pool, tracking underpriced ones to avoid
   281  	// re-requesting them and dropping the peer in case of malicious transfers.
   282  	var (
   283  		added = make([]common.Hash, 0, len(txs))
   284  	)
   285  	// Proceed in batches of 128 transactions
   286  	for i := 0; i < len(txs); i += 128 {
   287  		end := i + 128
   288  		if end > len(txs) {
   289  			end = len(txs)
   290  		}
   291  		var (
   292  			duplicate   int64
   293  			underpriced int64
   294  			otherreject int64
   295  		)
   296  		batch := txs[i:end]
   297  		for j, err := range f.addTxs(batch) {
   298  			// Track the transaction hash if the price is too low for us.
   299  			// Avoid re-requesting this transaction when we receive another
   300  			// announcement.
   301  			if errors.Is(err, txpool.ErrUnderpriced) || errors.Is(err, txpool.ErrReplaceUnderpriced) {
   302  				for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
   303  					f.underpriced.Pop()
   304  				}
   305  				f.underpriced.Add(batch[j].Hash())
   306  			}
   307  			// Track a few interesting failure types
   308  			switch {
   309  			case err == nil: // Noop, but need to handle to not count these
   310  
   311  			case errors.Is(err, txpool.ErrAlreadyKnown):
   312  				duplicate++
   313  
   314  			case errors.Is(err, txpool.ErrUnderpriced) || errors.Is(err, txpool.ErrReplaceUnderpriced):
   315  				underpriced++
   316  
   317  			default:
   318  				otherreject++
   319  			}
   320  			added = append(added, batch[j].Hash())
   321  		}
   322  		knownMeter.Mark(duplicate)
   323  		underpricedMeter.Mark(underpriced)
   324  		otherRejectMeter.Mark(otherreject)
   325  
   326  		// If 'other reject' is >25% of the deliveries in any batch, sleep a bit.
   327  		if otherreject > 128/4 {
   328  			time.Sleep(200 * time.Millisecond)
   329  			log.Warn("Peer delivering stale transactions", "peer", peer, "rejected", otherreject)
   330  		}
   331  	}
   332  	select {
   333  	case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:
   334  		return nil
   335  	case <-f.quit:
   336  		return errTerminated
   337  	}
   338  }
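// Sketch of how a peer's message handler would typically drive the two entry
// points above (the message names are illustrative of the eth protocol flow, not
// references to concrete handler code in this repository):
//
//	// Hash announcement received: just track the hashes, the fetcher decides
//	// whether and from whom to request the bodies.
//	f.Notify(peerID, announcedHashes)
//
//	// Unsolicited transaction broadcast: import with direct=false, so missing
//	// entries are not interpreted as a partially failed request.
//	f.Enqueue(peerID, broadcastTxs, false)
//
//	// Reply to our own pooled-transaction request: import with direct=true, so
//	// anything missing from the original request gets rescheduled elsewhere.
//	f.Enqueue(peerID, repliedTxs, true)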
   339  
   340  // Drop should be called when a peer disconnects. It cleans up all the internal
   341  // data structures of the given peer.
   342  func (f *TxFetcher) Drop(peer string) error {
   343  	select {
   344  	case f.drop <- &txDrop{peer: peer}:
   345  		return nil
   346  	case <-f.quit:
   347  		return errTerminated
   348  	}
   349  }
   350  
   351  // Start boots up the announcement based synchroniser, accepting and processing
   352  // hash notifications and transaction fetches until termination is requested.
   353  func (f *TxFetcher) Start() {
   354  	go f.loop()
   355  }
   356  
   357  // Stop terminates the announcement based synchroniser, canceling all pending
   358  // operations.
   359  func (f *TxFetcher) Stop() {
   360  	close(f.quit)
   361  }
   362  
   363  func (f *TxFetcher) loop() {
   364  	var (
   365  		waitTimer    = new(mclock.Timer)
   366  		timeoutTimer = new(mclock.Timer)
   367  
   368  		waitTrigger    = make(chan struct{}, 1)
   369  		timeoutTrigger = make(chan struct{}, 1)
   370  	)
   371  	for {
   372  		select {
   373  		case ann := <-f.notify:
   374  			// Drop part of the new announcements if there are too many accumulated.
   375  			// Note, we could but do not filter already known transactions here as
   376  			// the probability of something arriving between this call and the pre-
   377  			// filter outside is essentially zero.
   378  			used := len(f.waitslots[ann.origin]) + len(f.announces[ann.origin])
   379  			if used >= maxTxAnnounces {
   380  				// This can happen if a set of transactions are requested but not
   381  				// all fulfilled, so the remainder are rescheduled without the cap
   382  				// check. Should be fine as the limit is in the thousands and the
   383  				// request size in the hundreds.
   384  				txAnnounceDOSMeter.Mark(int64(len(ann.hashes)))
   385  				break
   386  			}
   387  			want := used + len(ann.hashes)
   388  			if want > maxTxAnnounces {
   389  				txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
   390  				ann.hashes = ann.hashes[:want-maxTxAnnounces]
   391  			}
   392  			// All is well, schedule the remainder of the transactions
   393  			idleWait := len(f.waittime) == 0
   394  			_, oldPeer := f.announces[ann.origin]
   395  
   396  			for _, hash := range ann.hashes {
   397  				// If the transaction is already downloading, add it to the list
   398  				// of possible alternates (in case the current retrieval fails) and
   399  				// also account it for the peer.
   400  				if f.alternates[hash] != nil {
   401  					f.alternates[hash][ann.origin] = struct{}{}
   402  
   403  					// Stage 2 and 3 share the set of origins per tx
   404  					if announces := f.announces[ann.origin]; announces != nil {
   405  						announces[hash] = struct{}{}
   406  					} else {
   407  						f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
   408  					}
   409  					continue
   410  				}
   411  				// If the transaction is not downloading, but is already queued
   412  				// from a different peer, track it for the new peer too.
   413  				if f.announced[hash] != nil {
   414  					f.announced[hash][ann.origin] = struct{}{}
   415  
   416  					// Stage 2 and 3 share the set of origins per tx
   417  					if announces := f.announces[ann.origin]; announces != nil {
   418  						announces[hash] = struct{}{}
   419  					} else {
   420  						f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
   421  					}
   422  					continue
   423  				}
   424  				// If the transaction is already known to the fetcher, but not
   425  				// yet downloading, add the peer as an alternate origin in the
   426  				// waiting list.
   427  				if f.waitlist[hash] != nil {
   428  					f.waitlist[hash][ann.origin] = struct{}{}
   429  
   430  					if waitslots := f.waitslots[ann.origin]; waitslots != nil {
   431  						waitslots[hash] = struct{}{}
   432  					} else {
   433  						f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
   434  					}
   435  					continue
   436  				}
   437  				// Transaction unknown to the fetcher, insert it into the waiting list
   438  				f.waitlist[hash] = map[string]struct{}{ann.origin: {}}
   439  				f.waittime[hash] = f.clock.Now()
   440  
   441  				if waitslots := f.waitslots[ann.origin]; waitslots != nil {
   442  					waitslots[hash] = struct{}{}
   443  				} else {
   444  					f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
   445  				}
   446  			}
   447  			// If a new item was added to the waitlist, schedule it into the fetcher
   448  			if idleWait && len(f.waittime) > 0 {
   449  				f.rescheduleWait(waitTimer, waitTrigger)
   450  			}
   451  			// If this peer is new and announced something already queued, maybe
   452  			// request transactions from them
   453  			if !oldPeer && len(f.announces[ann.origin]) > 0 {
   454  				f.scheduleFetches(timeoutTimer, timeoutTrigger, map[string]struct{}{ann.origin: {}})
   455  			}
   456  
   457  		case <-waitTrigger:
   458  			// At least one transaction's waiting time ran out, push all expired
   459  			// ones into the retrieval queues
   460  			actives := make(map[string]struct{})
   461  			for hash, instance := range f.waittime {
   462  				if time.Duration(f.clock.Now()-instance)+txGatherSlack > txArriveTimeout {
   463  					// Transaction expired without propagation, schedule for retrieval
   464  					if f.announced[hash] != nil {
   465  						panic("announce tracker already contains waitlist item")
   466  					}
   467  					f.announced[hash] = f.waitlist[hash]
   468  					for peer := range f.waitlist[hash] {
   469  						if announces := f.announces[peer]; announces != nil {
   470  							announces[hash] = struct{}{}
   471  						} else {
   472  							f.announces[peer] = map[common.Hash]struct{}{hash: {}}
   473  						}
   474  						delete(f.waitslots[peer], hash)
   475  						if len(f.waitslots[peer]) == 0 {
   476  							delete(f.waitslots, peer)
   477  						}
   478  						actives[peer] = struct{}{}
   479  					}
   480  					delete(f.waittime, hash)
   481  					delete(f.waitlist, hash)
   482  				}
   483  			}
   484  			// If transactions are still waiting for propagation, reschedule the wait timer
   485  			if len(f.waittime) > 0 {
   486  				f.rescheduleWait(waitTimer, waitTrigger)
   487  			}
   488  			// If any peers became active and are idle, request transactions from them
   489  			if len(actives) > 0 {
   490  				f.scheduleFetches(timeoutTimer, timeoutTrigger, actives)
   491  			}
   492  
   493  		case <-timeoutTrigger:
   494  			// Clean up any expired retrievals and avoid re-requesting them from the
   495  			// same peer (either overloaded or malicious, useless in both cases). We
   496  			// could also penalize (Drop), but there's nothing to gain, and it could
   497  			// possibly further increase the load on it.
   498  			for peer, req := range f.requests {
   499  				if time.Duration(f.clock.Now()-req.time)+txGatherSlack > txFetchTimeout {
   500  					txRequestTimeoutMeter.Mark(int64(len(req.hashes)))
   501  
   502  					// Reschedule all the not-yet-delivered fetches to alternate peers
   503  					for _, hash := range req.hashes {
   504  						// Skip rescheduling hashes already delivered by someone else
   505  						if req.stolen != nil {
   506  							if _, ok := req.stolen[hash]; ok {
   507  								continue
   508  							}
   509  						}
   510  						// Move the delivery back from fetching to queued
   511  						if _, ok := f.announced[hash]; ok {
   512  							panic("announced tracker already contains alternate item")
   513  						}
   514  						if f.alternates[hash] != nil { // nil if tx was broadcast during fetch
   515  							f.announced[hash] = f.alternates[hash]
   516  						}
   517  						delete(f.announced[hash], peer)
   518  						if len(f.announced[hash]) == 0 {
   519  							delete(f.announced, hash)
   520  						}
   521  						delete(f.announces[peer], hash)
   522  						delete(f.alternates, hash)
   523  						delete(f.fetching, hash)
   524  					}
   525  					if len(f.announces[peer]) == 0 {
   526  						delete(f.announces, peer)
   527  					}
   528  					// Keep track of the request as dangling, but never expire
   529  					f.requests[peer].hashes = nil
   530  				}
   531  			}
   532  			// Schedule a new transaction retrieval
   533  			f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
   534  
   535  			// No idea if we scheduled something or not, trigger the timer if needed
   536  			// TODO(karalabe): this is kind of lame, can't we dump it into scheduleFetches somehow?
   537  			f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
   538  
   539  		case delivery := <-f.cleanup:
   540  			// Independent of whether the delivery was direct or broadcast, remove all
   541  			// traces of the hash from internal trackers
   542  			for _, hash := range delivery.hashes {
   543  				if _, ok := f.waitlist[hash]; ok {
   544  					for peer, txset := range f.waitslots {
   545  						delete(txset, hash)
   546  						if len(txset) == 0 {
   547  							delete(f.waitslots, peer)
   548  						}
   549  					}
   550  					delete(f.waitlist, hash)
   551  					delete(f.waittime, hash)
   552  				} else {
   553  					for peer, txset := range f.announces {
   554  						delete(txset, hash)
   555  						if len(txset) == 0 {
   556  							delete(f.announces, peer)
   557  						}
   558  					}
   559  					delete(f.announced, hash)
   560  					delete(f.alternates, hash)
   561  
   562  					// If a transaction currently being fetched from a different
   563  					// origin was delivered (delivery stolen), mark it so the
   564  					// actual delivery won't double schedule it.
   565  					if origin, ok := f.fetching[hash]; ok && (origin != delivery.origin || !delivery.direct) {
   566  						stolen := f.requests[origin].stolen
   567  						if stolen == nil {
   568  							f.requests[origin].stolen = make(map[common.Hash]struct{})
   569  							stolen = f.requests[origin].stolen
   570  						}
   571  						stolen[hash] = struct{}{}
   572  					}
   573  					delete(f.fetching, hash)
   574  				}
   575  			}
   576  			// In case of a direct delivery, also reschedule anything missing
   577  			// from the original query
   578  			if delivery.direct {
   579  				// Mark the request as successful (independent of individual status)
   580  				txRequestDoneMeter.Mark(int64(len(delivery.hashes)))
   581  
   582  				// Make sure something was pending, nuke it
   583  				req := f.requests[delivery.origin]
   584  				if req == nil {
   585  					log.Warn("Unexpected transaction delivery", "peer", delivery.origin)
   586  					break
   587  				}
   588  				delete(f.requests, delivery.origin)
   589  
   590  				// Anything not delivered should be re-scheduled (with or without
   591  				// this peer, depending on the response cutoff)
   592  				delivered := make(map[common.Hash]struct{})
   593  				for _, hash := range delivery.hashes {
   594  					delivered[hash] = struct{}{}
   595  				}
   596  				cutoff := len(req.hashes) // If nothing is delivered, assume everything is missing, don't retry!!!
   597  				for i, hash := range req.hashes {
   598  					if _, ok := delivered[hash]; ok {
   599  						cutoff = i
   600  					}
   601  				}
   602  				// Reschedule missing hashes from alternates, not-fulfilled from alt+self
   603  				for i, hash := range req.hashes {
   604  					// Skip rescheduling hashes already delivered by someone else
   605  					if req.stolen != nil {
   606  						if _, ok := req.stolen[hash]; ok {
   607  							continue
   608  						}
   609  					}
   610  					if _, ok := delivered[hash]; !ok {
   611  						if i < cutoff {
   612  							delete(f.alternates[hash], delivery.origin)
   613  							delete(f.announces[delivery.origin], hash)
   614  							if len(f.announces[delivery.origin]) == 0 {
   615  								delete(f.announces, delivery.origin)
   616  							}
   617  						}
   618  						if len(f.alternates[hash]) > 0 {
   619  							if _, ok := f.announced[hash]; ok {
   620  								panic(fmt.Sprintf("announced tracker already contains alternate item: %v", f.announced[hash]))
   621  							}
   622  							f.announced[hash] = f.alternates[hash]
   623  						}
   624  					}
   625  					delete(f.alternates, hash)
   626  					delete(f.fetching, hash)
   627  				}
   628  				// Something was delivered, try to reschedule requests
   629  				f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) // Partial delivery may enable others to deliver too
   630  			}
   631  
   632  		case drop := <-f.drop:
   633  			// A peer was dropped, remove all traces of it
   634  			if _, ok := f.waitslots[drop.peer]; ok {
   635  				for hash := range f.waitslots[drop.peer] {
   636  					delete(f.waitlist[hash], drop.peer)
   637  					if len(f.waitlist[hash]) == 0 {
   638  						delete(f.waitlist, hash)
   639  						delete(f.waittime, hash)
   640  					}
   641  				}
   642  				delete(f.waitslots, drop.peer)
   643  				if len(f.waitlist) > 0 {
   644  					f.rescheduleWait(waitTimer, waitTrigger)
   645  				}
   646  			}
   647  			// Clean up any active requests
   648  			var request *txRequest
   649  			if request = f.requests[drop.peer]; request != nil {
   650  				for _, hash := range request.hashes {
   651  					// Skip rescheduling hashes already delivered by someone else
   652  					if request.stolen != nil {
   653  						if _, ok := request.stolen[hash]; ok {
   654  							continue
   655  						}
   656  					}
   657  					// Undelivered hash, reschedule if there's an alternative origin available
   658  					delete(f.alternates[hash], drop.peer)
   659  					if len(f.alternates[hash]) == 0 {
   660  						delete(f.alternates, hash)
   661  					} else {
   662  						f.announced[hash] = f.alternates[hash]
   663  						delete(f.alternates, hash)
   664  					}
   665  					delete(f.fetching, hash)
   666  				}
   667  				delete(f.requests, drop.peer)
   668  			}
   669  			// Clean up general announcement tracking
   670  			if _, ok := f.announces[drop.peer]; ok {
   671  				for hash := range f.announces[drop.peer] {
   672  					delete(f.announced[hash], drop.peer)
   673  					if len(f.announced[hash]) == 0 {
   674  						delete(f.announced, hash)
   675  					}
   676  				}
   677  				delete(f.announces, drop.peer)
   678  			}
   679  			// If a request was cancelled, check if anything needs to be rescheduled
   680  			if request != nil {
   681  				f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
   682  				f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
   683  			}
   684  
   685  		case <-f.quit:
   686  			return
   687  		}
   688  		// No idea what happened, but bump some sanity metrics
   689  		txFetcherWaitingPeers.Update(int64(len(f.waitslots)))
   690  		txFetcherWaitingHashes.Update(int64(len(f.waitlist)))
   691  		txFetcherQueueingPeers.Update(int64(len(f.announces) - len(f.requests)))
   692  		txFetcherQueueingHashes.Update(int64(len(f.announced)))
   693  		txFetcherFetchingPeers.Update(int64(len(f.requests)))
   694  		txFetcherFetchingHashes.Update(int64(len(f.fetching)))
   695  
   696  		// Loop did something, ping the step notifier if needed (tests)
   697  		if f.step != nil {
   698  			f.step <- struct{}{}
   699  		}
   700  	}
   701  }
   702  
   703  // rescheduleWait iterates over all the transactions currently in the waitlist
   704  // and schedules the movement into the fetcher for the earliest.
   705  //
   706  // The method has a granularity of 'gatherSlack', since there's not much point in
   707  // spinning over all the transactions just to maybe find one that should trigger
   708  // a few ms earlier.
   709  func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) {
   710  	if *timer != nil {
   711  		(*timer).Stop()
   712  	}
   713  	now := f.clock.Now()
   714  
   715  	earliest := now
   716  	for _, instance := range f.waittime {
   717  		if earliest > instance {
   718  			earliest = instance
   719  			if txArriveTimeout-time.Duration(now-earliest) < gatherSlack {
   720  				break
   721  			}
   722  		}
   723  	}
   724  	*timer = f.clock.AfterFunc(txArriveTimeout-time.Duration(now-earliest), func() {
   725  		trigger <- struct{}{}
   726  	})
   727  }
   728  
   729  // rescheduleTimeout iterates over all the transactions currently in flight and
   730  // schedules a cleanup run when the first would trigger.
   731  //
   732  // The method has a granularity of 'gatherSlack', since there's not much point in
   733  // spinning over all the transactions just to maybe find one that should trigger
   734  // a few ms earlier.
   735  //
   736  // This method is a bit "flaky" "by design". In theory the timeout timer only ever
   737  // should be rescheduled if some request is pending. In practice, a timeout will
   738  // cause the timer to be rescheduled every 5 secs (until the peer comes through or
   739  // disconnects). This is a limitation of the fetcher code because we don't track
   740  // pending requests and timed out requests separately. Without double tracking, if
   741  // we simply didn't reschedule the timer on all-timeout then the timer would never
   742  // be set again since len(requests) > 0 => something's running.
   743  func (f *TxFetcher) rescheduleTimeout(timer *mclock.Timer, trigger chan struct{}) {
   744  	if *timer != nil {
   745  		(*timer).Stop()
   746  	}
   747  	now := f.clock.Now()
   748  
   749  	earliest := now
   750  	for _, req := range f.requests {
   751  		// If this request already timed out, skip it altogether
   752  		if req.hashes == nil {
   753  			continue
   754  		}
   755  		if earliest > req.time {
   756  			earliest = req.time
   757  			if txFetchTimeout-time.Duration(now-earliest) < gatherSlack {
   758  				break
   759  			}
   760  		}
   761  	}
   762  	*timer = f.clock.AfterFunc(txFetchTimeout-time.Duration(now-earliest), func() {
   763  		trigger <- struct{}{}
   764  	})
   765  }
   766  
   767  // scheduleFetches starts a batch of retrievals for all available idle peers.
   768  func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{}, whitelist map[string]struct{}) {
   769  	// Gather the set of peers we want to retrieve from (default to all)
   770  	actives := whitelist
   771  	if actives == nil {
   772  		actives = make(map[string]struct{})
   773  		for peer := range f.announces {
   774  			actives[peer] = struct{}{}
   775  		}
   776  	}
   777  	if len(actives) == 0 {
   778  		return
   779  	}
   780  	// For each active peer, try to schedule some transaction fetches
   781  	idle := len(f.requests) == 0
   782  
   783  	f.forEachPeer(actives, func(peer string) {
   784  		if f.requests[peer] != nil {
   785  			return // continue in the for-each
   786  		}
   787  		if len(f.announces[peer]) == 0 {
   788  			return // continue in the for-each
   789  		}
   790  		hashes := make([]common.Hash, 0, maxTxRetrievals)
   791  		f.forEachHash(f.announces[peer], func(hash common.Hash) bool {
   792  			if _, ok := f.fetching[hash]; !ok {
   793  				// Mark the hash as fetching and stash away possible alternates
   794  				f.fetching[hash] = peer
   795  
   796  				if _, ok := f.alternates[hash]; ok {
   797  					panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash]))
   798  				}
   799  				f.alternates[hash] = f.announced[hash]
   800  				delete(f.announced, hash)
   801  
   802  				// Accumulate the hash and stop if the limit was reached
   803  				hashes = append(hashes, hash)
   804  				if len(hashes) >= maxTxRetrievals {
   805  					return false // break in the for-each
   806  				}
   807  			}
   808  			return true // continue in the for-each
   809  		})
   810  		// If any hashes were allocated, request them from the peer
   811  		if len(hashes) > 0 {
   812  			f.requests[peer] = &txRequest{hashes: hashes, time: f.clock.Now()}
   813  			txRequestOutMeter.Mark(int64(len(hashes)))
   814  
   815  			go func(peer string, hashes []common.Hash) {
   816  				// Try to fetch the transactions, but in case of a request
   817  				// failure (e.g. peer disconnected), reschedule the hashes.
   818  				if err := f.fetchTxs(peer, hashes); err != nil {
   819  					txRequestFailMeter.Mark(int64(len(hashes)))
   820  					f.Drop(peer)
   821  				}
   822  			}(peer, hashes)
   823  		}
   824  	})
   825  	// If a new request was fired, schedule a timeout timer
   826  	if idle && len(f.requests) > 0 {
   827  		f.rescheduleTimeout(timer, timeout)
   828  	}
   829  }
   830  
   831  // forEachPeer does a range loop over a map of peers in production, but during
   832  // testing it iterates in a deterministic (sorted, rotated) order to allow reproducing issues.
   833  func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string)) {
   834  	// If we're running production, use whatever Go's map gives us
   835  	if f.rand == nil {
   836  		for peer := range peers {
   837  			do(peer)
   838  		}
   839  		return
   840  	}
   841  	// We're running the test suite, make iteration deterministic
   842  	list := make([]string, 0, len(peers))
   843  	for peer := range peers {
   844  		list = append(list, peer)
   845  	}
   846  	sort.Strings(list)
   847  	rotateStrings(list, f.rand.Intn(len(list)))
   848  	for _, peer := range list {
   849  		do(peer)
   850  	}
   851  }
   852  
   853  // forEachHash does a range loop over a map of hashes in production, but during
   854  // testing it iterates in a deterministic (sorted, rotated) order to allow reproducing issues.
   855  func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) {
   856  	// If we're running production, use whatever Go's map gives us
   857  	if f.rand == nil {
   858  		for hash := range hashes {
   859  			if !do(hash) {
   860  				return
   861  			}
   862  		}
   863  		return
   864  	}
   865  	// We're running the test suite, make iteration deterministic
   866  	list := make([]common.Hash, 0, len(hashes))
   867  	for hash := range hashes {
   868  		list = append(list, hash)
   869  	}
   870  	sortHashes(list)
   871  	rotateHashes(list, f.rand.Intn(len(list)))
   872  	for _, hash := range list {
   873  		if !do(hash) {
   874  			return
   875  		}
   876  	}
   877  }
   878  
   879  // rotateStrings rotates the contents of a slice by n steps. This method is only
   880  // used in tests to simulate random map iteration but keep it deterministic.
   881  func rotateStrings(slice []string, n int) {
   882  	orig := make([]string, len(slice))
   883  	copy(orig, slice)
   884  
   885  	for i := 0; i < len(orig); i++ {
   886  		slice[i] = orig[(i+n)%len(orig)]
   887  	}
   888  }
   889  
   890  // sortHashes sorts a slice of hashes. This method is only used in tests in order
   891  // to simulate random map iteration but keep it deterministic.
   892  func sortHashes(slice []common.Hash) {
   893  	for i := 0; i < len(slice); i++ {
   894  		for j := i + 1; j < len(slice); j++ {
   895  			if bytes.Compare(slice[i][:], slice[j][:]) > 0 {
   896  				slice[i], slice[j] = slice[j], slice[i]
   897  			}
   898  		}
   899  	}
   900  }
   901  
   902  // rotateHashes rotates the contents of a slice by n steps. This method is only
   903  // used in tests to simulate random map iteration but keep it deterministic.
   904  func rotateHashes(slice []common.Hash, n int) {
   905  	orig := make([]common.Hash, len(slice))
   906  	copy(orig, slice)
   907  
   908  	for i := 0; i < len(orig); i++ {
   909  		slice[i] = orig[(i+n)%len(orig)]
   910  	}
   911  }