github.com/theQRL/go-zond@v0.1.1/zond/fetcher/tx_fetcher.go

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package fetcher
    18  
    19  import (
    20  	"bytes"
    21  	"errors"
    22  	"fmt"
    23  	mrand "math/rand"
    24  	"sort"
    25  	"time"
    26  
    27  	mapset "github.com/deckarep/golang-set/v2"
    28  	"github.com/theQRL/go-zond/common"
    29  	"github.com/theQRL/go-zond/common/mclock"
    30  	"github.com/theQRL/go-zond/core/txpool"
    31  	"github.com/theQRL/go-zond/core/types"
    32  	"github.com/theQRL/go-zond/log"
    33  	"github.com/theQRL/go-zond/metrics"
    34  )
    35  
    36  const (
    37  	// maxTxAnnounces is the maximum number of unique transactions a peer
    38  	// can announce in a short time.
    39  	maxTxAnnounces = 4096
    40  
    41  	// maxTxRetrievals is the maximum number of transactions that can be fetched
    42  	// in one request. The rationale for picking 256 is:
    43  	//   - In the zond protocol, the softResponseLimit is 2MB. Nowadays, according
    44  	//     to Etherscan, the average transaction size is around 200B, so in theory
    45  	//     we could include a lot of transactions in a single protocol packet.
    46  	//   - However, the maximum size of a single transaction is raised to 128KB,
    47  	//     so we pick a middle value here to maximize the efficiency of the
    48  	//     retrieval while ensuring that response size overflow rarely happens.
    49  	maxTxRetrievals = 256
    50  
    51  	// maxTxUnderpricedSetSize is the size of the underpriced transaction set that
    52  	// is used to track recent transactions that have been dropped so we don't
    53  	// re-request them.
    54  	maxTxUnderpricedSetSize = 32768
    55  
    56  	// txArriveTimeout is the time allowance before an announced transaction is
    57  	// explicitly requested.
    58  	txArriveTimeout = 500 * time.Millisecond
    59  
    60  	// txGatherSlack is the interval used to collate almost-expired announces
    61  	// with network fetches.
    62  	txGatherSlack = 100 * time.Millisecond
    63  )
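
        // Rough arithmetic behind maxTxRetrievals (illustrative, using the figures
        // quoted above): at ~200B per average transaction, a 2MB soft response limit
        // fits roughly 10,000 transactions, whereas 256 worst-case 128KB transactions
        // would total 32MB. The 256 cap therefore keeps typical responses well under
        // the soft limit while accepting rare overflows for unusually large batches.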
    64  
    65  var (
    66  	// txFetchTimeout is the maximum allotted time to return an explicitly
    67  	// requested transaction.
    68  	txFetchTimeout = 5 * time.Second
    69  )
    70  
    71  var (
    72  	txAnnounceInMeter          = metrics.NewRegisteredMeter("zond/fetcher/transaction/announces/in", nil)
    73  	txAnnounceKnownMeter       = metrics.NewRegisteredMeter("zond/fetcher/transaction/announces/known", nil)
    74  	txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("zond/fetcher/transaction/announces/underpriced", nil)
    75  	txAnnounceDOSMeter         = metrics.NewRegisteredMeter("zond/fetcher/transaction/announces/dos", nil)
    76  
    77  	txBroadcastInMeter          = metrics.NewRegisteredMeter("zond/fetcher/transaction/broadcasts/in", nil)
    78  	txBroadcastKnownMeter       = metrics.NewRegisteredMeter("zond/fetcher/transaction/broadcasts/known", nil)
    79  	txBroadcastUnderpricedMeter = metrics.NewRegisteredMeter("zond/fetcher/transaction/broadcasts/underpriced", nil)
    80  	txBroadcastOtherRejectMeter = metrics.NewRegisteredMeter("zond/fetcher/transaction/broadcasts/otherreject", nil)
    81  
    82  	txRequestOutMeter     = metrics.NewRegisteredMeter("zond/fetcher/transaction/request/out", nil)
    83  	txRequestFailMeter    = metrics.NewRegisteredMeter("zond/fetcher/transaction/request/fail", nil)
    84  	txRequestDoneMeter    = metrics.NewRegisteredMeter("zond/fetcher/transaction/request/done", nil)
    85  	txRequestTimeoutMeter = metrics.NewRegisteredMeter("zond/fetcher/transaction/request/timeout", nil)
    86  
    87  	txReplyInMeter          = metrics.NewRegisteredMeter("zond/fetcher/transaction/replies/in", nil)
    88  	txReplyKnownMeter       = metrics.NewRegisteredMeter("zond/fetcher/transaction/replies/known", nil)
    89  	txReplyUnderpricedMeter = metrics.NewRegisteredMeter("zond/fetcher/transaction/replies/underpriced", nil)
    90  	txReplyOtherRejectMeter = metrics.NewRegisteredMeter("zond/fetcher/transaction/replies/otherreject", nil)
    91  
    92  	txFetcherWaitingPeers   = metrics.NewRegisteredGauge("zond/fetcher/transaction/waiting/peers", nil)
    93  	txFetcherWaitingHashes  = metrics.NewRegisteredGauge("zond/fetcher/transaction/waiting/hashes", nil)
    94  	txFetcherQueueingPeers  = metrics.NewRegisteredGauge("zond/fetcher/transaction/queueing/peers", nil)
    95  	txFetcherQueueingHashes = metrics.NewRegisteredGauge("zond/fetcher/transaction/queueing/hashes", nil)
    96  	txFetcherFetchingPeers  = metrics.NewRegisteredGauge("zond/fetcher/transaction/fetching/peers", nil)
    97  	txFetcherFetchingHashes = metrics.NewRegisteredGauge("zond/fetcher/transaction/fetching/hashes", nil)
    98  )
    99  
   100  // txAnnounce is the notification of the availability of a batch
   101  // of new transactions in the network.
   102  type txAnnounce struct {
   103  	origin string        // Identifier of the peer originating the notification
   104  	hashes []common.Hash // Batch of transaction hashes being announced
   105  }
   106  
   107  // txRequest represents an in-flight transaction retrieval request destined to
   108  // a specific peer.
   109  type txRequest struct {
   110  	hashes []common.Hash            // Transactions having been requested
   111  	stolen map[common.Hash]struct{} // Deliveries by someone else (don't re-request)
   112  	time   mclock.AbsTime           // Timestamp of the request
   113  }
   114  
   115  // txDelivery is the notification that a batch of transactions have been added
   116  // to the pool and should be untracked.
   117  type txDelivery struct {
   118  	origin string        // Identifier of the peer originating the notification
   119  	hashes []common.Hash // Batch of transaction hashes having been delivered
   120  	direct bool          // Whether this is a direct reply or a broadcast
   121  }
   122  
   123  // txDrop is the notification that a peer has disconnected.
   124  type txDrop struct {
   125  	peer string
   126  }
   127  
   128  // TxFetcher is responsible for retrieving new transactions based on announcements.
   129  //
   130  // The fetcher operates in 3 stages:
   131  //   - Transactions that are newly discovered are moved into a wait list.
   132  //   - After ~500ms passes, transactions from the wait list that have not been
   133  //     broadcast to us in whole are moved into a queueing area.
   134  //   - When a connected peer doesn't have in-flight retrieval requests, any
   135  //     transactions queued up (and announced by the peer) are allocated to the
   136  //     peer and moved into a fetching status until it's fulfilled or fails.
   137  //
   138  // The invariants of the fetcher are:
   139  //   - Each tracked transaction (hash) must only be present in one of the
   140  //     three stages. This ensures that the fetcher operates akin to a finite
   141  //     state automaton and there's no data leak.
   142  //   - Each peer that announced transactions may have retrievals scheduled, but
   143  //     only ever one concurrently. This ensures we can immediately know what is
   144  //     missing from a reply and reschedule it.
   145  type TxFetcher struct {
   146  	notify  chan *txAnnounce
   147  	cleanup chan *txDelivery
   148  	drop    chan *txDrop
   149  	quit    chan struct{}
   150  
   151  	underpriced mapset.Set[common.Hash] // Transactions discarded as too cheap (don't re-fetch)
   152  
   153  	// Stage 1: Waiting lists for newly discovered transactions that might be
   154  	// broadcast without needing explicit request/reply round trips.
   155  	waitlist  map[common.Hash]map[string]struct{} // Transactions waiting for a potential broadcast
   156  	waittime  map[common.Hash]mclock.AbsTime      // Timestamps when transactions were added to the waitlist
   157  	waitslots map[string]map[common.Hash]struct{} // Waiting announcements grouped by peer (DoS protection)
   158  
   159  	// Stage 2: Queue of transactions that are waiting to be allocated to some peer
   160  	// to be retrieved directly.
   161  	announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer
   162  	announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
   163  
   164  	// Stage 3: Set of transactions currently being retrieved, some of which may be
   165  	// fulfilled and some rescheduled. Note, this step shares 'announces' from the
   166  	// previous stage to avoid having to duplicate (need it for DoS checks).
   167  	fetching   map[common.Hash]string              // Transaction set currently being retrieved
   168  	requests   map[string]*txRequest               // In-flight transaction retrievals
   169  	alternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails
   170  
   171  	// Callbacks
   172  	hasTx    func(common.Hash) bool             // Reports whether a tx exists in the local txpool
   173  	addTxs   func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
   174  	fetchTxs func(string, []common.Hash) error  // Retrieves a set of txs from a remote peer
   175  
   176  	step  chan struct{} // Notification channel when the fetcher loop iterates
   177  	clock mclock.Clock  // Time wrapper to simulate in tests
   178  	rand  *mrand.Rand   // Randomizer to use in tests instead of map range loops (soft-random)
   179  }
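
        // To make the staging concrete, a hash H announced by peer "A" (names purely
        // illustrative) moves through the trackers roughly as follows:
        //
        //	waitlist[H]["A"], waittime[H], waitslots["A"][H]  // stage 1, on Notify
        //	announced[H]["A"], announces["A"][H]              // stage 2, ~500ms later
        //	fetching[H] = "A", alternates[H], requests["A"]   // stage 3, once "A" is idle
        //
        // Moving into stage 3 removes announced[H], while announces["A"] is kept for
        // the DoS accounting mentioned above.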
   180  
   181  // NewTxFetcher creates a transaction fetcher to retrieve transactions
   182  // based on hash announcements.
   183  func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
   184  	return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)
   185  }
   186  
   187  // NewTxFetcherForTests is a testing method to mock out the realtime clock with
   188  // a simulated version and the internal randomness with a deterministic one.
   189  func NewTxFetcherForTests(
   190  	hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,
   191  	clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
   192  	return &TxFetcher{
   193  		notify:      make(chan *txAnnounce),
   194  		cleanup:     make(chan *txDelivery),
   195  		drop:        make(chan *txDrop),
   196  		quit:        make(chan struct{}),
   197  		waitlist:    make(map[common.Hash]map[string]struct{}),
   198  		waittime:    make(map[common.Hash]mclock.AbsTime),
   199  		waitslots:   make(map[string]map[common.Hash]struct{}),
   200  		announces:   make(map[string]map[common.Hash]struct{}),
   201  		announced:   make(map[common.Hash]map[string]struct{}),
   202  		fetching:    make(map[common.Hash]string),
   203  		requests:    make(map[string]*txRequest),
   204  		alternates:  make(map[common.Hash]map[string]struct{}),
   205  		underpriced: mapset.NewSet[common.Hash](),
   206  		hasTx:       hasTx,
   207  		addTxs:      addTxs,
   208  		fetchTxs:    fetchTxs,
   209  		clock:       clock,
   210  		rand:        rand,
   211  	}
   212  }
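
        // A minimal wiring sketch (illustrative only; sendGetPooledTransactions stands
        // in for whatever request function the surrounding p2p layer provides):
        //
        //	known := make(map[common.Hash]*types.Transaction)
        //	f := NewTxFetcher(
        //		func(hash common.Hash) bool { return known[hash] != nil },
        //		func(txs []*types.Transaction) []error {
        //			for _, tx := range txs {
        //				known[tx.Hash()] = tx // no validation in this sketch
        //			}
        //			return make([]error, len(txs)) // one (nil) error per transaction
        //		},
        //		func(peer string, hashes []common.Hash) error {
        //			return sendGetPooledTransactions(peer, hashes)
        //		},
        //	)
        //	f.Start()
        //	defer f.Stop()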
   213  
   214  // Notify announces to the fetcher the potential availability of a new batch of
   215  // transactions in the network.
   216  func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
   217  	// Keep track of all the announced transactions
   218  	txAnnounceInMeter.Mark(int64(len(hashes)))
   219  
   220  	// Skip any transaction announcements that we already know of, or that we've
   221  	// previously marked as cheap and discarded. This check is of course racy,
   222  	// because multiple concurrent notifies will still manage to pass it, but it's
   223  // still valuable to check here because it runs concurrently to the internal
   224  	// loop, so anything caught here is time saved internally.
   225  	var (
   226  		unknowns               = make([]common.Hash, 0, len(hashes))
   227  		duplicate, underpriced int64
   228  	)
   229  	for _, hash := range hashes {
   230  		switch {
   231  		case f.hasTx(hash):
   232  			duplicate++
   233  
   234  		case f.underpriced.Contains(hash):
   235  			underpriced++
   236  
   237  		default:
   238  			unknowns = append(unknowns, hash)
   239  		}
   240  	}
   241  	txAnnounceKnownMeter.Mark(duplicate)
   242  	txAnnounceUnderpricedMeter.Mark(underpriced)
   243  
   244  	// If anything's left to announce, push it into the internal loop
   245  	if len(unknowns) == 0 {
   246  		return nil
   247  	}
   248  	announce := &txAnnounce{
   249  		origin: peer,
   250  		hashes: unknowns,
   251  	}
   252  	select {
   253  	case f.notify <- announce:
   254  		return nil
   255  	case <-f.quit:
   256  		return errTerminated
   257  	}
   258  }
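
        // A peer handler that has just decoded an announcement message would typically
        // forward it as (peerID and announced are illustrative):
        //
        //	if err := f.Notify(peerID, announced); err != nil {
        //		return err // the fetcher is shutting down
        //	}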
   259  
   260  // Enqueue imports a batch of received transactions into the transaction pool
   261  // and the fetcher. This method may be called by both transaction broadcasts and
   262  // direct request replies. The differentiation is important so the fetcher can
   263  // re-schedule missing transactions as soon as possible.
   264  func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error {
   265  	var (
   266  		inMeter          = txReplyInMeter
   267  		knownMeter       = txReplyKnownMeter
   268  		underpricedMeter = txReplyUnderpricedMeter
   269  		otherRejectMeter = txReplyOtherRejectMeter
   270  	)
   271  	if !direct {
   272  		inMeter = txBroadcastInMeter
   273  		knownMeter = txBroadcastKnownMeter
   274  		underpricedMeter = txBroadcastUnderpricedMeter
   275  		otherRejectMeter = txBroadcastOtherRejectMeter
   276  	}
   277  	// Keep track of all the propagated transactions
   278  	inMeter.Mark(int64(len(txs)))
   279  
   280  	// Push all the transactions into the pool, tracking underpriced ones to avoid
   281  	// re-requesting them and dropping the peer in case of malicious transfers.
   282  	var (
   283  		added = make([]common.Hash, 0, len(txs))
   284  	)
   285  	// Proceed in batches of 128 transactions
   286  	for i := 0; i < len(txs); i += 128 {
   287  		end := i + 128
   288  		if end > len(txs) {
   289  			end = len(txs)
   290  		}
   291  		var (
   292  			duplicate   int64
   293  			underpriced int64
   294  			otherreject int64
   295  		)
   296  		batch := txs[i:end]
   297  
   298  		for j, err := range f.addTxs(batch) {
   299  			// Track the transaction hash if the price is too low for us.
   300  			// Avoid re-requesting this transaction when we receive another
   301  			// announcement.
   302  			if errors.Is(err, txpool.ErrUnderpriced) || errors.Is(err, txpool.ErrReplaceUnderpriced) {
   303  				for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
   304  					f.underpriced.Pop()
   305  				}
   306  				f.underpriced.Add(batch[j].Hash())
   307  			}
   308  			// Track a few interesting failure types
   309  			switch {
   310  			case err == nil: // Noop, but need to handle to not count these
   311  
   312  			case errors.Is(err, txpool.ErrAlreadyKnown):
   313  				duplicate++
   314  
   315  			case errors.Is(err, txpool.ErrUnderpriced) || errors.Is(err, txpool.ErrReplaceUnderpriced):
   316  				underpriced++
   317  
   318  			default:
   319  				otherreject++
   320  			}
   321  			added = append(added, batch[j].Hash())
   322  		}
   323  		knownMeter.Mark(duplicate)
   324  		underpricedMeter.Mark(underpriced)
   325  		otherRejectMeter.Mark(otherreject)
   326  
   327  		// If 'other reject' is >25% of the deliveries in any batch, sleep a bit.
   328  		if otherreject > 128/4 {
   329  			time.Sleep(200 * time.Millisecond)
   330  			log.Warn("Peer delivering stale transactions", "peer", peer, "rejected", otherreject)
   331  		}
   332  	}
   333  	select {
   334  	case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:
   335  		return nil
   336  	case <-f.quit:
   337  		return errTerminated
   338  	}
   339  }
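
        // The direct flag simply mirrors how the batch arrived (illustrative calls):
        //
        //	f.Enqueue(peerID, txs, false) // unsolicited broadcast of full transactions
        //	f.Enqueue(peerID, txs, true)  // reply to a request issued by this fetcher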
   340  
   341  // Drop should be called when a peer disconnects. It cleans up all the internal
   342  // data structures for the given peer.
   343  func (f *TxFetcher) Drop(peer string) error {
   344  	select {
   345  	case f.drop <- &txDrop{peer: peer}:
   346  		return nil
   347  	case <-f.quit:
   348  		return errTerminated
   349  	}
   350  }
   351  
   352  // Start boots up the announcement based synchroniser, accepting and processing
   353  // hash notifications and transaction fetches until termination is requested.
   354  func (f *TxFetcher) Start() {
   355  	go f.loop()
   356  }
   357  
   358  // Stop terminates the announcement based synchroniser, canceling all pending
   359  // operations.
   360  func (f *TxFetcher) Stop() {
   361  	close(f.quit)
   362  }
   363  
   364  func (f *TxFetcher) loop() {
   365  	var (
   366  		waitTimer    = new(mclock.Timer)
   367  		timeoutTimer = new(mclock.Timer)
   368  
   369  		waitTrigger    = make(chan struct{}, 1)
   370  		timeoutTrigger = make(chan struct{}, 1)
   371  	)
   372  	for {
   373  		select {
   374  		case ann := <-f.notify:
   375  			// Drop part of the new announcements if there are too many accumulated.
   376  			// Note, we could but do not filter already known transactions here as
   377  			// the probability of something arriving between this call and the pre-
   378  			// filter outside is essentially zero.
   379  			used := len(f.waitslots[ann.origin]) + len(f.announces[ann.origin])
   380  			if used >= maxTxAnnounces {
   381  				// This can happen if a set of transactions are requested but not
   382  				// all fulfilled, so the remainder are rescheduled without the cap
   383  				// check. Should be fine as the limit is in the thousands and the
   384  				// request size in the hundreds.
   385  				txAnnounceDOSMeter.Mark(int64(len(ann.hashes)))
   386  				break
   387  			}
   388  			want := used + len(ann.hashes)
   389  			if want > maxTxAnnounces {
   390  				txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
   391  				ann.hashes = ann.hashes[:want-maxTxAnnounces]
   392  			}
   393  			// All is well, schedule the remainder of the transactions
   394  			idleWait := len(f.waittime) == 0
   395  			_, oldPeer := f.announces[ann.origin]
   396  
   397  			for _, hash := range ann.hashes {
   398  				// If the transaction is already downloading, add it to the list
   399  				// of possible alternates (in case the current retrieval fails) and
   400  				// also account it for the peer.
   401  				if f.alternates[hash] != nil {
   402  					f.alternates[hash][ann.origin] = struct{}{}
   403  
   404  					// Stage 2 and 3 share the set of origins per tx
   405  					if announces := f.announces[ann.origin]; announces != nil {
   406  						announces[hash] = struct{}{}
   407  					} else {
   408  						f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
   409  					}
   410  					continue
   411  				}
   412  				// If the transaction is not downloading, but is already queued
   413  				// from a different peer, track it for the new peer too.
   414  				if f.announced[hash] != nil {
   415  					f.announced[hash][ann.origin] = struct{}{}
   416  
   417  					// Stage 2 and 3 share the set of origins per tx
   418  					if announces := f.announces[ann.origin]; announces != nil {
   419  						announces[hash] = struct{}{}
   420  					} else {
   421  						f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
   422  					}
   423  					continue
   424  				}
   425  				// If the transaction is already known to the fetcher, but not
   426  				// yet downloading, add the peer as an alternate origin in the
   427  				// waiting list.
   428  				if f.waitlist[hash] != nil {
   429  					f.waitlist[hash][ann.origin] = struct{}{}
   430  
   431  					if waitslots := f.waitslots[ann.origin]; waitslots != nil {
   432  						waitslots[hash] = struct{}{}
   433  					} else {
   434  						f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
   435  					}
   436  					continue
   437  				}
   438  				// Transaction unknown to the fetcher, insert it into the waiting list
   439  				f.waitlist[hash] = map[string]struct{}{ann.origin: {}}
   440  				f.waittime[hash] = f.clock.Now()
   441  
   442  				if waitslots := f.waitslots[ann.origin]; waitslots != nil {
   443  					waitslots[hash] = struct{}{}
   444  				} else {
   445  					f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
   446  				}
   447  			}
   448  			// If a new item was added to the waitlist, schedule it into the fetcher
   449  			if idleWait && len(f.waittime) > 0 {
   450  				f.rescheduleWait(waitTimer, waitTrigger)
   451  			}
   452  			// If this peer is new and announced something already queued, maybe
   453  			// request transactions from them
   454  			if !oldPeer && len(f.announces[ann.origin]) > 0 {
   455  				f.scheduleFetches(timeoutTimer, timeoutTrigger, map[string]struct{}{ann.origin: {}})
   456  			}
   457  
   458  		case <-waitTrigger:
   459  			// At least one transaction's waiting time ran out, push all expired
   460  			// ones into the retrieval queues
   461  			actives := make(map[string]struct{})
   462  			for hash, instance := range f.waittime {
   463  				if time.Duration(f.clock.Now()-instance)+txGatherSlack > txArriveTimeout {
   464  					// Transaction expired without propagation, schedule for retrieval
   465  					if f.announced[hash] != nil {
   466  						panic("announce tracker already contains waitlist item")
   467  					}
   468  					f.announced[hash] = f.waitlist[hash]
   469  					for peer := range f.waitlist[hash] {
   470  						if announces := f.announces[peer]; announces != nil {
   471  							announces[hash] = struct{}{}
   472  						} else {
   473  							f.announces[peer] = map[common.Hash]struct{}{hash: {}}
   474  						}
   475  						delete(f.waitslots[peer], hash)
   476  						if len(f.waitslots[peer]) == 0 {
   477  							delete(f.waitslots, peer)
   478  						}
   479  						actives[peer] = struct{}{}
   480  					}
   481  					delete(f.waittime, hash)
   482  					delete(f.waitlist, hash)
   483  				}
   484  			}
   485  			// If transactions are still waiting for propagation, reschedule the wait timer
   486  			if len(f.waittime) > 0 {
   487  				f.rescheduleWait(waitTimer, waitTrigger)
   488  			}
   489  			// If any peers became active and are idle, request transactions from them
   490  			if len(actives) > 0 {
   491  				f.scheduleFetches(timeoutTimer, timeoutTrigger, actives)
   492  			}
   493  
   494  		case <-timeoutTrigger:
   495  			// Clean up any expired retrievals and avoid re-requesting them from the
   496  			// same peer (either overloaded or malicious, useless in both cases). We
   497  			// could also penalize (Drop), but there's nothing to gain, and it could
   498  			// possibly further increase the load on it.
   499  			for peer, req := range f.requests {
   500  				if time.Duration(f.clock.Now()-req.time)+txGatherSlack > txFetchTimeout {
   501  					txRequestTimeoutMeter.Mark(int64(len(req.hashes)))
   502  
   503  					// Reschedule all the not-yet-delivered fetches to alternate peers
   504  					for _, hash := range req.hashes {
   505  						// Skip rescheduling hashes already delivered by someone else
   506  						if req.stolen != nil {
   507  							if _, ok := req.stolen[hash]; ok {
   508  								continue
   509  							}
   510  						}
   511  						// Move the delivery back from fetching to queued
   512  						if _, ok := f.announced[hash]; ok {
   513  							panic("announced tracker already contains alternate item")
   514  						}
   515  						if f.alternates[hash] != nil { // nil if tx was broadcast during fetch
   516  							f.announced[hash] = f.alternates[hash]
   517  						}
   518  						delete(f.announced[hash], peer)
   519  						if len(f.announced[hash]) == 0 {
   520  							delete(f.announced, hash)
   521  						}
   522  						delete(f.announces[peer], hash)
   523  						delete(f.alternates, hash)
   524  						delete(f.fetching, hash)
   525  					}
   526  					if len(f.announces[peer]) == 0 {
   527  						delete(f.announces, peer)
   528  					}
   529  					// Keep track of the request as dangling, but never expire
   530  					f.requests[peer].hashes = nil
   531  				}
   532  			}
   533  			// Schedule a new transaction retrieval
   534  			f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
   535  
   536  			// No idea if we scheduled something or not, trigger the timer if needed
   537  			// TODO(karalabe): this is kind of lame, can't we dump it into scheduleFetches somehow?
   538  			f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
   539  
   540  		case delivery := <-f.cleanup:
   541  			// Regardless of whether the delivery was direct or broadcast, remove all
   542  			// traces of the hash from internal trackers
   543  			for _, hash := range delivery.hashes {
   544  				if _, ok := f.waitlist[hash]; ok {
   545  					for peer, txset := range f.waitslots {
   546  						delete(txset, hash)
   547  						if len(txset) == 0 {
   548  							delete(f.waitslots, peer)
   549  						}
   550  					}
   551  					delete(f.waitlist, hash)
   552  					delete(f.waittime, hash)
   553  				} else {
   554  					for peer, txset := range f.announces {
   555  						delete(txset, hash)
   556  						if len(txset) == 0 {
   557  							delete(f.announces, peer)
   558  						}
   559  					}
   560  					delete(f.announced, hash)
   561  					delete(f.alternates, hash)
   562  
   563  					// If a transaction currently being fetched from a different
   564  					// origin was delivered (delivery stolen), mark it so the
   565  					// actual delivery won't double schedule it.
   566  					if origin, ok := f.fetching[hash]; ok && (origin != delivery.origin || !delivery.direct) {
   567  						stolen := f.requests[origin].stolen
   568  						if stolen == nil {
   569  							f.requests[origin].stolen = make(map[common.Hash]struct{})
   570  							stolen = f.requests[origin].stolen
   571  						}
   572  						stolen[hash] = struct{}{}
   573  					}
   574  					delete(f.fetching, hash)
   575  				}
   576  			}
   577  			// In case of a direct delivery, also reschedule anything missing
   578  			// from the original query
   579  			if delivery.direct {
   580  				// Mark the request as successful (independent of individual status)
   581  				txRequestDoneMeter.Mark(int64(len(delivery.hashes)))
   582  
   583  				// Make sure something was pending, nuke it
   584  				req := f.requests[delivery.origin]
   585  				if req == nil {
   586  					log.Warn("Unexpected transaction delivery", "peer", delivery.origin)
   587  					break
   588  				}
   589  				delete(f.requests, delivery.origin)
   590  
   591  				// Anything not delivered should be re-scheduled (with or without
   592  				// this peer, depending on the response cutoff)
   593  				delivered := make(map[common.Hash]struct{})
   594  				for _, hash := range delivery.hashes {
   595  					delivered[hash] = struct{}{}
   596  				}
   597  				cutoff := len(req.hashes) // If nothing is delivered, assume everything is missing, don't retry!!!
   598  				for i, hash := range req.hashes {
   599  					if _, ok := delivered[hash]; ok {
   600  						cutoff = i
   601  					}
   602  				}
   603  				// Reschedule missing hashes from alternates, not-fulfilled from alt+self
   604  				for i, hash := range req.hashes {
   605  					// Skip rescheduling hashes already delivered by someone else
   606  					if req.stolen != nil {
   607  						if _, ok := req.stolen[hash]; ok {
   608  							continue
   609  						}
   610  					}
   611  					if _, ok := delivered[hash]; !ok {
   612  						if i < cutoff {
   613  							delete(f.alternates[hash], delivery.origin)
   614  							delete(f.announces[delivery.origin], hash)
   615  							if len(f.announces[delivery.origin]) == 0 {
   616  								delete(f.announces, delivery.origin)
   617  							}
   618  						}
   619  						if len(f.alternates[hash]) > 0 {
   620  							if _, ok := f.announced[hash]; ok {
   621  								panic(fmt.Sprintf("announced tracker already contains alternate item: %v", f.announced[hash]))
   622  							}
   623  							f.announced[hash] = f.alternates[hash]
   624  						}
   625  					}
   626  					delete(f.alternates, hash)
   627  					delete(f.fetching, hash)
   628  				}
   629  				// Something was delivered, try to reschedule requests
   630  				f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) // Partial delivery may enable others to deliver too
   631  			}
   632  
   633  		case drop := <-f.drop:
   634  			// A peer was dropped, remove all traces of it
   635  			if _, ok := f.waitslots[drop.peer]; ok {
   636  				for hash := range f.waitslots[drop.peer] {
   637  					delete(f.waitlist[hash], drop.peer)
   638  					if len(f.waitlist[hash]) == 0 {
   639  						delete(f.waitlist, hash)
   640  						delete(f.waittime, hash)
   641  					}
   642  				}
   643  				delete(f.waitslots, drop.peer)
   644  				if len(f.waitlist) > 0 {
   645  					f.rescheduleWait(waitTimer, waitTrigger)
   646  				}
   647  			}
   648  			// Clean up any active requests
   649  			var request *txRequest
   650  			if request = f.requests[drop.peer]; request != nil {
   651  				for _, hash := range request.hashes {
   652  					// Skip rescheduling hashes already delivered by someone else
   653  					if request.stolen != nil {
   654  						if _, ok := request.stolen[hash]; ok {
   655  							continue
   656  						}
   657  					}
   658  					// Undelivered hash, reschedule if there's an alternative origin available
   659  					delete(f.alternates[hash], drop.peer)
   660  					if len(f.alternates[hash]) == 0 {
   661  						delete(f.alternates, hash)
   662  					} else {
   663  						f.announced[hash] = f.alternates[hash]
   664  						delete(f.alternates, hash)
   665  					}
   666  					delete(f.fetching, hash)
   667  				}
   668  				delete(f.requests, drop.peer)
   669  			}
   670  			// Clean up general announcement tracking
   671  			if _, ok := f.announces[drop.peer]; ok {
   672  				for hash := range f.announces[drop.peer] {
   673  					delete(f.announced[hash], drop.peer)
   674  					if len(f.announced[hash]) == 0 {
   675  						delete(f.announced, hash)
   676  					}
   677  				}
   678  				delete(f.announces, drop.peer)
   679  			}
   680  			// If a request was cancelled, check if anything needs to be rescheduled
   681  			if request != nil {
   682  				f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
   683  				f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
   684  			}
   685  
   686  		case <-f.quit:
   687  			return
   688  		}
   689  		// No idea what happened, but bump some sanity metrics
   690  		txFetcherWaitingPeers.Update(int64(len(f.waitslots)))
   691  		txFetcherWaitingHashes.Update(int64(len(f.waitlist)))
   692  		txFetcherQueueingPeers.Update(int64(len(f.announces) - len(f.requests)))
   693  		txFetcherQueueingHashes.Update(int64(len(f.announced)))
   694  		txFetcherFetchingPeers.Update(int64(len(f.requests)))
   695  		txFetcherFetchingHashes.Update(int64(len(f.fetching)))
   696  
   697  		// Loop did something, ping the step notifier if needed (tests)
   698  		if f.step != nil {
   699  			f.step <- struct{}{}
   700  		}
   701  	}
   702  }
   703  
   704  // rescheduleWait iterates over all the transactions currently in the waitlist
   705  // and schedules the movement into the fetcher for the earliest.
   706  //
   707  // The method has a granularity of 'gatherSlack', since there's not much point in
   708  // spinning over all the transactions just to maybe find one that should trigger
   709  // a few ms earlier.
   710  func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) {
   711  	if *timer != nil {
   712  		(*timer).Stop()
   713  	}
   714  	now := f.clock.Now()
   715  
   716  	earliest := now
   717  	for _, instance := range f.waittime {
   718  		if earliest > instance {
   719  			earliest = instance
   720  			if txArriveTimeout-time.Duration(now-earliest) < gatherSlack {
   721  				break
   722  			}
   723  		}
   724  	}
   725  	*timer = f.clock.AfterFunc(txArriveTimeout-time.Duration(now-earliest), func() {
   726  		trigger <- struct{}{}
   727  	})
   728  }
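
        // Timing example (illustrative): if the oldest waitlist entry is 450ms old and
        // txArriveTimeout is 500ms, the timer is armed for roughly 50ms from now; the
        // waitTrigger handler then flushes every entry older than txArriveTimeout minus
        // txGatherSlack (400ms), so nearly-expired announcements are batched into one pass.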
   729  
   730  // rescheduleTimeout iterates over all the transactions currently in flight and
   731  // schedules a cleanup run when the first would trigger.
   732  //
   733  // The method has a granularity of 'gatherSlack', since there's not much point in
   734  // spinning over all the transactions just to maybe find one that should trigger
   735  // a few ms earlier.
   736  //
   737  // This method is a bit "flaky" "by design". In theory the timeout timer only ever
   738  // should be rescheduled if some request is pending. In practice, a timeout will
   739  // cause the timer to be rescheduled every 5 secs (until the peer comes through or
   740  // disconnects). This is a limitation of the fetcher code because we don't track
   741  // pending requests and timed out requests separately. Without double tracking, if
   742  // we simply didn't reschedule the timer on all-timeout then the timer would never
   743  // be set again since len(requests) > 0 => something's running.
   744  func (f *TxFetcher) rescheduleTimeout(timer *mclock.Timer, trigger chan struct{}) {
   745  	if *timer != nil {
   746  		(*timer).Stop()
   747  	}
   748  	now := f.clock.Now()
   749  
   750  	earliest := now
   751  	for _, req := range f.requests {
   752  		// If this request already timed out, skip it altogether
   753  		if req.hashes == nil {
   754  			continue
   755  		}
   756  		if earliest > req.time {
   757  			earliest = req.time
   758  			if txFetchTimeout-time.Duration(now-earliest) < gatherSlack {
   759  				break
   760  			}
   761  		}
   762  	}
   763  	*timer = f.clock.AfterFunc(txFetchTimeout-time.Duration(now-earliest), func() {
   764  		trigger <- struct{}{}
   765  	})
   766  }
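
        // Worked example of the "flaky" behavior described above (illustrative): if
        // every tracked request has already timed out (req.hashes == nil), all of them
        // are skipped, earliest stays equal to now, and the timer is re-armed for a
        // full txFetchTimeout (5s). That is why the trigger keeps firing roughly every
        // 5 seconds until the peer delivers or disconnects.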
   767  
   768  // scheduleFetches starts a batch of retrievals for all available idle peers.
   769  func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{}, whitelist map[string]struct{}) {
   770  	// Gather the set of peers we want to retrieve from (default to all)
   771  	actives := whitelist
   772  	if actives == nil {
   773  		actives = make(map[string]struct{})
   774  		for peer := range f.announces {
   775  			actives[peer] = struct{}{}
   776  		}
   777  	}
   778  	if len(actives) == 0 {
   779  		return
   780  	}
   781  	// For each active peer, try to schedule some transaction fetches
   782  	idle := len(f.requests) == 0
   783  
   784  	f.forEachPeer(actives, func(peer string) {
   785  		if f.requests[peer] != nil {
   786  			return // continue in the for-each
   787  		}
   788  		if len(f.announces[peer]) == 0 {
   789  			return // continue in the for-each
   790  		}
   791  		hashes := make([]common.Hash, 0, maxTxRetrievals)
   792  		f.forEachHash(f.announces[peer], func(hash common.Hash) bool {
   793  			if _, ok := f.fetching[hash]; !ok {
   794  				// Mark the hash as fetching and stash away possible alternates
   795  				f.fetching[hash] = peer
   796  
   797  				if _, ok := f.alternates[hash]; ok {
   798  					panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash]))
   799  				}
   800  				f.alternates[hash] = f.announced[hash]
   801  				delete(f.announced, hash)
   802  
   803  				// Accumulate the hash and stop if the limit was reached
   804  				hashes = append(hashes, hash)
   805  				if len(hashes) >= maxTxRetrievals {
   806  					return false // break in the for-each
   807  				}
   808  			}
   809  			return true // continue in the for-each
   810  		})
   811  		// If any hashes were allocated, request them from the peer
   812  		if len(hashes) > 0 {
   813  			f.requests[peer] = &txRequest{hashes: hashes, time: f.clock.Now()}
   814  			txRequestOutMeter.Mark(int64(len(hashes)))
   815  
   816  			go func(peer string, hashes []common.Hash) {
   817  				// Try to fetch the transactions, but in case of a request
   818  				// failure (e.g. peer disconnected), reschedule the hashes.
   819  				if err := f.fetchTxs(peer, hashes); err != nil {
   820  					txRequestFailMeter.Mark(int64(len(hashes)))
   821  					f.Drop(peer)
   822  				}
   823  			}(peer, hashes)
   824  		}
   825  	})
   826  	// If a new request was fired, schedule a timeout timer
   827  	if idle && len(f.requests) > 0 {
   828  		f.rescheduleTimeout(timer, timeout)
   829  	}
   830  }
   831  
   832  // forEachPeer does a range loop over a map of peers in production, but during
   833  // testing it iterates a sorted, deterministically rotated list so issues can be reproduced.
   834  func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string)) {
   835  	// If we're running production, use whatever Go's map gives us
   836  	if f.rand == nil {
   837  		for peer := range peers {
   838  			do(peer)
   839  		}
   840  		return
   841  	}
   842  	// We're running the test suite, make iteration deterministic
   843  	list := make([]string, 0, len(peers))
   844  	for peer := range peers {
   845  		list = append(list, peer)
   846  	}
   847  	sort.Strings(list)
   848  	rotateStrings(list, f.rand.Intn(len(list)))
   849  	for _, peer := range list {
   850  		do(peer)
   851  	}
   852  }
   853  
   854  // forEachHash does a range loop over a map of hashes in production, but during
   855  // testing it iterates a sorted, deterministically rotated list so issues can be reproduced.
   856  func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) {
   857  	// If we're running production, use whatever Go's map gives us
   858  	if f.rand == nil {
   859  		for hash := range hashes {
   860  			if !do(hash) {
   861  				return
   862  			}
   863  		}
   864  		return
   865  	}
   866  	// We're running the test suite, make iteration deterministic
   867  	list := make([]common.Hash, 0, len(hashes))
   868  	for hash := range hashes {
   869  		list = append(list, hash)
   870  	}
   871  	sortHashes(list)
   872  	rotateHashes(list, f.rand.Intn(len(list)))
   873  	for _, hash := range list {
   874  		if !do(hash) {
   875  			return
   876  		}
   877  	}
   878  }
   879  
   880  // rotateStrings rotates the contents of a slice by n steps. This method is only
   881  // used in tests to simulate random map iteration but keep it deterministic.
   882  func rotateStrings(slice []string, n int) {
   883  	orig := make([]string, len(slice))
   884  	copy(orig, slice)
   885  
   886  	for i := 0; i < len(orig); i++ {
   887  		slice[i] = orig[(i+n)%len(orig)]
   888  	}
   889  }
   890  
   891  // sortHashes sorts a slice of hashes. This method is only used in tests in order
   892  // to simulate random map iteration but keep it deterministic.
   893  func sortHashes(slice []common.Hash) {
   894  	for i := 0; i < len(slice); i++ {
   895  		for j := i + 1; j < len(slice); j++ {
   896  			if bytes.Compare(slice[i][:], slice[j][:]) > 0 {
   897  				slice[i], slice[j] = slice[j], slice[i]
   898  			}
   899  		}
   900  	}
   901  }
   902  
   903  // rotateHashes rotates the contents of a slice by n steps. This method is only
   904  // used in tests to simulate random map iteration but keep it deterministic.
   905  func rotateHashes(slice []common.Hash, n int) {
   906  	orig := make([]common.Hash, len(slice))
   907  	copy(orig, slice)
   908  
   909  	for i := 0; i < len(orig); i++ {
   910  		slice[i] = orig[(i+n)%len(orig)]
   911  	}
   912  }