github.com/core-coin/go-core/v2@v2.1.9/xcb/fetcher/tx_fetcher.go

     1  // Copyright 2019 by the Authors
     2  // This file is part of the go-core library.
     3  //
     4  // The go-core library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-core library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-core library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package fetcher
    18  
    19  import (
    20  	"bytes"
    21  	"fmt"
    22  	mrand "math/rand"
    23  	"sort"
    24  	"time"
    25  
    26  	mapset "github.com/deckarep/golang-set"
    27  
    28  	"github.com/core-coin/go-core/v2/common"
    29  	"github.com/core-coin/go-core/v2/common/mclock"
    30  	"github.com/core-coin/go-core/v2/core"
    31  	"github.com/core-coin/go-core/v2/core/types"
    32  	"github.com/core-coin/go-core/v2/log"
    33  	"github.com/core-coin/go-core/v2/metrics"
    34  )
    35  
    36  const (
     37  	// maxTxAnnounces is the maximum number of unique transactions a peer
    38  	// can announce in a short time.
    39  	maxTxAnnounces = 4096
    40  
     41  	// maxTxRetrievals is the maximum number of transactions that can be fetched
     42  	// in one request. The rationale for picking 256 is:
     43  	//   - In the xcb protocol, the softResponseLimit is 2MB. Nowadays, according
     44  	//     to Corescan, the average transaction size is around 200B, so in theory
     45  	//     we could include a lot of transactions in a single protocol packet.
     46  	//   - However, the maximum size of a single transaction is raised to 128KB, so
     47  	//     pick a middle value here to maximize retrieval efficiency while ensuring
     48  	//     that response size overflow won't happen in most cases (see the sanity check below).
    49  	maxTxRetrievals = 256
    50  
    51  	// maxTxUnderpricedSetSize is the size of the underpriced transaction set that
    52  	// is used to track recent transactions that have been dropped so we don't
    53  	// re-request them.
    54  	maxTxUnderpricedSetSize = 32768
    55  
    56  	// txArriveTimeout is the time allowance before an announced transaction is
    57  	// explicitly requested.
    58  	txArriveTimeout = 500 * time.Millisecond
    59  
    60  	// txGatherSlack is the interval used to collate almost-expired announces
    61  	// with network fetches.
    62  	txGatherSlack = 100 * time.Millisecond
    63  )
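
         // A quick, illustrative sanity check of the maxTxRetrievals figure above,
         // using the rough numbers from its rationale (not measured values):
         //
         //	256 * 200B ≈ 50KB // typical full request, far below the 2MB soft limit
         //	16 * 128KB = 2MB  // a handful of worst-case txs can already fill a reply
         //
         // So a full batch of average transactions is cheap, while pathological
         // batches are bounded by the serving peer's softResponseLimit cutoff.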
    64  
    65  var (
    66  	// txFetchTimeout is the maximum allotted time to return an explicitly
    67  	// requested transaction.
    68  	txFetchTimeout = 5 * time.Second
    69  )
    70  
    71  var (
    72  	txAnnounceInMeter          = metrics.NewRegisteredMeter("xcb/fetcher/transaction/announces/in", nil)
    73  	txAnnounceKnownMeter       = metrics.NewRegisteredMeter("xcb/fetcher/transaction/announces/known", nil)
    74  	txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("xcb/fetcher/transaction/announces/underpriced", nil)
    75  	txAnnounceDOSMeter         = metrics.NewRegisteredMeter("xcb/fetcher/transaction/announces/dos", nil)
    76  
    77  	txBroadcastInMeter          = metrics.NewRegisteredMeter("xcb/fetcher/transaction/broadcasts/in", nil)
    78  	txBroadcastKnownMeter       = metrics.NewRegisteredMeter("xcb/fetcher/transaction/broadcasts/known", nil)
    79  	txBroadcastUnderpricedMeter = metrics.NewRegisteredMeter("xcb/fetcher/transaction/broadcasts/underpriced", nil)
    80  	txBroadcastOtherRejectMeter = metrics.NewRegisteredMeter("xcb/fetcher/transaction/broadcasts/otherreject", nil)
    81  
    82  	txRequestOutMeter     = metrics.NewRegisteredMeter("xcb/fetcher/transaction/request/out", nil)
    83  	txRequestFailMeter    = metrics.NewRegisteredMeter("xcb/fetcher/transaction/request/fail", nil)
    84  	txRequestDoneMeter    = metrics.NewRegisteredMeter("xcb/fetcher/transaction/request/done", nil)
    85  	txRequestTimeoutMeter = metrics.NewRegisteredMeter("xcb/fetcher/transaction/request/timeout", nil)
    86  
    87  	txReplyInMeter          = metrics.NewRegisteredMeter("xcb/fetcher/transaction/replies/in", nil)
    88  	txReplyKnownMeter       = metrics.NewRegisteredMeter("xcb/fetcher/transaction/replies/known", nil)
    89  	txReplyUnderpricedMeter = metrics.NewRegisteredMeter("xcb/fetcher/transaction/replies/underpriced", nil)
    90  	txReplyOtherRejectMeter = metrics.NewRegisteredMeter("xcb/fetcher/transaction/replies/otherreject", nil)
    91  
    92  	txFetcherWaitingPeers   = metrics.NewRegisteredGauge("xcb/fetcher/transaction/waiting/peers", nil)
    93  	txFetcherWaitingHashes  = metrics.NewRegisteredGauge("xcb/fetcher/transaction/waiting/hashes", nil)
    94  	txFetcherQueueingPeers  = metrics.NewRegisteredGauge("xcb/fetcher/transaction/queueing/peers", nil)
    95  	txFetcherQueueingHashes = metrics.NewRegisteredGauge("xcb/fetcher/transaction/queueing/hashes", nil)
    96  	txFetcherFetchingPeers  = metrics.NewRegisteredGauge("xcb/fetcher/transaction/fetching/peers", nil)
    97  	txFetcherFetchingHashes = metrics.NewRegisteredGauge("xcb/fetcher/transaction/fetching/hashes", nil)
    98  )
    99  
   100  // txAnnounce is the notification of the availability of a batch
   101  // of new transactions in the network.
   102  type txAnnounce struct {
   103  	origin string        // Identifier of the peer originating the notification
   104  	hashes []common.Hash // Batch of transaction hashes being announced
   105  }
   106  
   107  // txRequest represents an in-flight transaction retrieval request destined to
    108  // a specific peer.
   109  type txRequest struct {
   110  	hashes []common.Hash            // Transactions having been requested
   111  	stolen map[common.Hash]struct{} // Deliveries by someone else (don't re-request)
   112  	time   mclock.AbsTime           // Timestamp of the request
   113  }
   114  
    115  // txDelivery is the notification that a batch of transactions has been added
   116  // to the pool and should be untracked.
   117  type txDelivery struct {
   118  	origin string        // Identifier of the peer originating the notification
   119  	hashes []common.Hash // Batch of transaction hashes having been delivered
   120  	direct bool          // Whether this is a direct reply or a broadcast
   121  }
   122  
    123  // txDrop is the notification that a peer has disconnected.
   124  type txDrop struct {
   125  	peer string
   126  }
   127  
    128  // TxFetcher is responsible for retrieving new transactions based on announcements.
   129  //
   130  // The fetcher operates in 3 stages:
   131  //   - Transactions that are newly discovered are moved into a wait list.
   132  //   - After ~500ms passes, transactions from the wait list that have not been
   133  //     broadcast to us in whole are moved into a queueing area.
   134  //   - When a connected peer doesn't have in-flight retrieval requests, any
   135  //     transaction queued up (and announced by the peer) are allocated to the
   136  //     peer and moved into a fetching status until it's fulfilled or fails.
   137  //
   138  // The invariants of the fetcher are:
   139  //   - Each tracked transaction (hash) must only be present in one of the
    140  //     three stages. This ensures that the fetcher operates akin to a finite
    141  //     state automaton and there's no data leak.
    142  //   - Each peer that announced transactions may have retrievals scheduled, but
   143  //     only ever one concurrently. This ensures we can immediately know what is
   144  //     missing from a reply and reschedule it.
   145  type TxFetcher struct {
   146  	notify  chan *txAnnounce
   147  	cleanup chan *txDelivery
   148  	drop    chan *txDrop
   149  	quit    chan struct{}
   150  
   151  	underpriced mapset.Set // Transactions discarded as too cheap (don't re-fetch)
   152  
   153  	// Stage 1: Waiting lists for newly discovered transactions that might be
   154  	// broadcast without needing explicit request/reply round trips.
    155  	waitlist  map[common.Hash]map[string]struct{} // Transactions waiting for a potential broadcast
   156  	waittime  map[common.Hash]mclock.AbsTime      // Timestamps when transactions were added to the waitlist
    157  	waitslots map[string]map[common.Hash]struct{} // Waiting announcements grouped by peer (DoS protection)
   158  
    159  	// Stage 2: Queue of transactions that are waiting to be allocated to some peer
   160  	// to be retrieved directly.
   161  	announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer
   162  	announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash
   163  
    164  	// Stage 3: Set of transactions currently being retrieved, some of which may
    165  	// be fulfilled and some rescheduled. Note, this stage shares 'announces' with
    166  	// the previous one to avoid duplication (it's also needed for DoS checks).
   167  	fetching   map[common.Hash]string              // Transaction set currently being retrieved
   168  	requests   map[string]*txRequest               // In-flight transaction retrievals
   169  	alternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails
   170  
   171  	// Callbacks
   172  	hasTx    func(common.Hash) bool             // Retrieves a tx from the local txpool
   173  	addTxs   func([]*types.Transaction) []error // Insert a batch of transactions into local txpool
   174  	fetchTxs func(string, []common.Hash) error  // Retrieves a set of txs from a remote peer
   175  
   176  	step  chan struct{} // Notification channel when the fetcher loop iterates
   177  	clock mclock.Clock  // Time wrapper to simulate in tests
   178  	rand  *mrand.Rand   // Randomizer to use in tests instead of map range loops (soft-random)
   179  }
   180  
    181  // NewTxFetcher creates a transaction fetcher to retrieve transactions
   182  // based on hash announcements.
   183  func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
   184  	return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)
   185  }
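
         // A minimal wiring sketch from a caller's perspective (illustrative only;
         // "pool" and "p" are hypothetical txpool and peer handles, not APIs of this
         // package):
         //
         //	f := NewTxFetcher(
         //		pool.Has,        // hasTx: is the hash already known locally?
         //		pool.AddRemotes, // addTxs: import a batch, one error per tx
         //		func(peer string, hashes []common.Hash) error {
         //			return p.RequestTxs(hashes) // fetchTxs: fire the network request
         //		},
         //	)
         //	f.Start()
         //	defer f.Stop()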
   186  
   187  // NewTxFetcherForTests is a testing method to mock out the realtime clock with
   188  // a simulated version and the internal randomness with a deterministic one.
   189  func NewTxFetcherForTests(
   190  	hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,
   191  	clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
   192  	return &TxFetcher{
   193  		notify:      make(chan *txAnnounce),
   194  		cleanup:     make(chan *txDelivery),
   195  		drop:        make(chan *txDrop),
   196  		quit:        make(chan struct{}),
   197  		waitlist:    make(map[common.Hash]map[string]struct{}),
   198  		waittime:    make(map[common.Hash]mclock.AbsTime),
   199  		waitslots:   make(map[string]map[common.Hash]struct{}),
   200  		announces:   make(map[string]map[common.Hash]struct{}),
   201  		announced:   make(map[common.Hash]map[string]struct{}),
   202  		fetching:    make(map[common.Hash]string),
   203  		requests:    make(map[string]*txRequest),
   204  		alternates:  make(map[common.Hash]map[string]struct{}),
   205  		underpriced: mapset.NewSet(),
   206  		hasTx:       hasTx,
   207  		addTxs:      addTxs,
   208  		fetchTxs:    fetchTxs,
   209  		clock:       clock,
   210  		rand:        rand,
   211  	}
   212  }
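
         // A deterministic test setup might look like the sketch below, assuming this
         // fork retains upstream's mclock.Simulated clock and that hasTx, addTxs and
         // fetchTxs are stubs defined by the test:
         //
         //	clock := new(mclock.Simulated)
         //	f := NewTxFetcherForTests(hasTx, addTxs, fetchTxs,
         //		clock, mrand.New(mrand.NewSource(0x3a29)))
         //	f.Start()
         //	defer f.Stop()
         //	clock.Run(txArriveTimeout) // fast-forward past the waitlist delay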
   213  
    214  // Notify informs the fetcher of the potential availability of a new batch of
   215  // transactions in the network.
   216  func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
   217  	// Keep track of all the announced transactions
   218  	txAnnounceInMeter.Mark(int64(len(hashes)))
   219  
   220  	// Skip any transaction announcements that we already know of, or that we've
    221  	// previously marked as cheap and discarded. This check is of course racy,
    222  	// because multiple concurrent notifies will still manage to pass it, but it's
    223  	// still valuable to check here because it runs concurrently with the internal
   224  	// loop, so anything caught here is time saved internally.
   225  	var (
   226  		unknowns               = make([]common.Hash, 0, len(hashes))
   227  		duplicate, underpriced int64
   228  	)
   229  	for _, hash := range hashes {
   230  		switch {
   231  		case f.hasTx(hash):
   232  			duplicate++
   233  
   234  		case f.underpriced.Contains(hash):
   235  			underpriced++
   236  
   237  		default:
   238  			unknowns = append(unknowns, hash)
   239  		}
   240  	}
   241  	txAnnounceKnownMeter.Mark(duplicate)
   242  	txAnnounceUnderpricedMeter.Mark(underpriced)
   243  
   244  	// If anything's left to announce, push it into the internal loop
   245  	if len(unknowns) == 0 {
   246  		return nil
   247  	}
   248  	announce := &txAnnounce{
   249  		origin: peer,
   250  		hashes: unknowns,
   251  	}
   252  	select {
   253  	case f.notify <- announce:
   254  		return nil
   255  	case <-f.quit:
   256  		return errTerminated
   257  	}
   258  }
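
         // In a protocol handler, an announcement message would typically be decoded
         // and fed straight into Notify (sketch; "msg" and "peerID" are illustrative):
         //
         //	var hashes []common.Hash
         //	if err := msg.Decode(&hashes); err != nil {
         //		return err
         //	}
         //	return f.Notify(peerID, hashes)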
   259  
    260  // Enqueue imports a batch of received transactions into the transaction pool
   261  // and the fetcher. This method may be called by both transaction broadcasts and
   262  // direct request replies. The differentiation is important so the fetcher can
    263  // re-schedule missing transactions as soon as possible.
   264  func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error {
   265  	// Keep track of all the propagated transactions
   266  	if direct {
   267  		txReplyInMeter.Mark(int64(len(txs)))
   268  	} else {
   269  		txBroadcastInMeter.Mark(int64(len(txs)))
   270  	}
   271  	// Push all the transactions into the pool, tracking underpriced ones to avoid
   272  	// re-requesting them and dropping the peer in case of malicious transfers.
   273  	var (
   274  		added       = make([]common.Hash, 0, len(txs))
   275  		duplicate   int64
   276  		underpriced int64
   277  		otherreject int64
   278  	)
   279  	errs := f.addTxs(txs)
   280  	for i, err := range errs {
   281  		if err != nil {
   282  			// Track the transaction hash if the price is too low for us.
    283  			// Avoid re-requesting this transaction when we receive another
   284  			// announcement.
   285  			if err == core.ErrUnderpriced || err == core.ErrReplaceUnderpriced {
   286  				for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
   287  					f.underpriced.Pop()
   288  				}
   289  				f.underpriced.Add(txs[i].Hash())
   290  			}
   291  			// Track a few interesting failure types
   292  			switch err {
    293  			case nil: // Unreachable inside the err != nil branch; kept so the switch stays exhaustive
   294  
   295  			case core.ErrAlreadyKnown:
   296  				duplicate++
   297  
   298  			case core.ErrUnderpriced, core.ErrReplaceUnderpriced:
   299  				underpriced++
   300  
   301  			default:
   302  				otherreject++
   303  			}
   304  		}
   305  		added = append(added, txs[i].Hash())
   306  	}
   307  	if direct {
   308  		txReplyKnownMeter.Mark(duplicate)
   309  		txReplyUnderpricedMeter.Mark(underpriced)
   310  		txReplyOtherRejectMeter.Mark(otherreject)
   311  	} else {
   312  		txBroadcastKnownMeter.Mark(duplicate)
   313  		txBroadcastUnderpricedMeter.Mark(underpriced)
   314  		txBroadcastOtherRejectMeter.Mark(otherreject)
   315  	}
   316  	select {
   317  	case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:
   318  		return nil
   319  	case <-f.quit:
   320  		return errTerminated
   321  	}
   322  }
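
         // Callers distinguish the two delivery paths via the direct flag (sketch):
         //
         //	f.Enqueue(peerID, txs, false) // unsolicited broadcast of full transactions
         //	f.Enqueue(peerID, txs, true)  // reply to a previously sent fetchTxs request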
   323  
   324  // Drop should be called when a peer disconnects. It cleans up all the internal
   325  // data structures of the given node.
   326  func (f *TxFetcher) Drop(peer string) error {
   327  	select {
   328  	case f.drop <- &txDrop{peer: peer}:
   329  		return nil
   330  	case <-f.quit:
   331  		return errTerminated
   332  	}
   333  }
   334  
    335  // Start boots up the announcement based synchroniser, accepting and processing
    336  // hash notifications and transaction fetches until termination is requested.
   337  func (f *TxFetcher) Start() {
   338  	go f.loop()
   339  }
   340  
   341  // Stop terminates the announcement based synchroniser, canceling all pending
   342  // operations.
   343  func (f *TxFetcher) Stop() {
   344  	close(f.quit)
   345  }
   346  
   347  func (f *TxFetcher) loop() {
   348  	var (
   349  		waitTimer    = new(mclock.Timer)
   350  		timeoutTimer = new(mclock.Timer)
   351  
   352  		waitTrigger    = make(chan struct{}, 1)
   353  		timeoutTrigger = make(chan struct{}, 1)
   354  	)
   355  	for {
   356  		select {
   357  		case ann := <-f.notify:
   358  			// Drop part of the new announcements if there are too many accumulated.
   359  			// Note, we could but do not filter already known transactions here as
   360  			// the probability of something arriving between this call and the pre-
   361  			// filter outside is essentially zero.
   362  			used := len(f.waitslots[ann.origin]) + len(f.announces[ann.origin])
   363  			if used >= maxTxAnnounces {
   364  				// This can happen if a set of transactions are requested but not
   365  				// all fulfilled, so the remainder are rescheduled without the cap
   366  				// check. Should be fine as the limit is in the thousands and the
   367  				// request size in the hundreds.
   368  				txAnnounceDOSMeter.Mark(int64(len(ann.hashes)))
   369  				break
   370  			}
   371  			want := used + len(ann.hashes)
   372  			if want > maxTxAnnounces {
   373  				txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
    374  				ann.hashes = ann.hashes[:maxTxAnnounces-used] // keep only as many as fit under the cap
   375  			}
   376  			// All is well, schedule the remainder of the transactions
   377  			idleWait := len(f.waittime) == 0
   378  			_, oldPeer := f.announces[ann.origin]
   379  
   380  			for _, hash := range ann.hashes {
   381  				// If the transaction is already downloading, add it to the list
   382  				// of possible alternates (in case the current retrieval fails) and
   383  				// also account it for the peer.
   384  				if f.alternates[hash] != nil {
   385  					f.alternates[hash][ann.origin] = struct{}{}
   386  
   387  					// Stage 2 and 3 share the set of origins per tx
   388  					if announces := f.announces[ann.origin]; announces != nil {
   389  						announces[hash] = struct{}{}
   390  					} else {
   391  						f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
   392  					}
   393  					continue
   394  				}
   395  				// If the transaction is not downloading, but is already queued
   396  				// from a different peer, track it for the new peer too.
   397  				if f.announced[hash] != nil {
   398  					f.announced[hash][ann.origin] = struct{}{}
   399  
   400  					// Stage 2 and 3 share the set of origins per tx
   401  					if announces := f.announces[ann.origin]; announces != nil {
   402  						announces[hash] = struct{}{}
   403  					} else {
   404  						f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
   405  					}
   406  					continue
   407  				}
   408  				// If the transaction is already known to the fetcher, but not
   409  				// yet downloading, add the peer as an alternate origin in the
   410  				// waiting list.
   411  				if f.waitlist[hash] != nil {
   412  					f.waitlist[hash][ann.origin] = struct{}{}
   413  
   414  					if waitslots := f.waitslots[ann.origin]; waitslots != nil {
   415  						waitslots[hash] = struct{}{}
   416  					} else {
   417  						f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
   418  					}
   419  					continue
   420  				}
   421  				// Transaction unknown to the fetcher, insert it into the waiting list
   422  				f.waitlist[hash] = map[string]struct{}{ann.origin: {}}
   423  				f.waittime[hash] = f.clock.Now()
   424  
   425  				if waitslots := f.waitslots[ann.origin]; waitslots != nil {
   426  					waitslots[hash] = struct{}{}
   427  				} else {
   428  					f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
   429  				}
   430  			}
   431  			// If a new item was added to the waitlist, schedule it into the fetcher
   432  			if idleWait && len(f.waittime) > 0 {
   433  				f.rescheduleWait(waitTimer, waitTrigger)
   434  			}
   435  			// If this peer is new and announced something already queued, maybe
   436  			// request transactions from them
   437  			if !oldPeer && len(f.announces[ann.origin]) > 0 {
   438  				f.scheduleFetches(timeoutTimer, timeoutTrigger, map[string]struct{}{ann.origin: {}})
   439  			}
   440  
   441  		case <-waitTrigger:
   442  			// At least one transaction's waiting time ran out, push all expired
   443  			// ones into the retrieval queues
   444  			actives := make(map[string]struct{})
   445  			for hash, instance := range f.waittime {
   446  				if time.Duration(f.clock.Now()-instance)+txGatherSlack > txArriveTimeout {
   447  					// Transaction expired without propagation, schedule for retrieval
   448  					if f.announced[hash] != nil {
   449  						panic("announce tracker already contains waitlist item")
   450  					}
   451  					f.announced[hash] = f.waitlist[hash]
   452  					for peer := range f.waitlist[hash] {
   453  						if announces := f.announces[peer]; announces != nil {
   454  							announces[hash] = struct{}{}
   455  						} else {
   456  							f.announces[peer] = map[common.Hash]struct{}{hash: {}}
   457  						}
   458  						delete(f.waitslots[peer], hash)
   459  						if len(f.waitslots[peer]) == 0 {
   460  							delete(f.waitslots, peer)
   461  						}
   462  						actives[peer] = struct{}{}
   463  					}
   464  					delete(f.waittime, hash)
   465  					delete(f.waitlist, hash)
   466  				}
   467  			}
   468  			// If transactions are still waiting for propagation, reschedule the wait timer
   469  			if len(f.waittime) > 0 {
   470  				f.rescheduleWait(waitTimer, waitTrigger)
   471  			}
   472  			// If any peers became active and are idle, request transactions from them
   473  			if len(actives) > 0 {
   474  				f.scheduleFetches(timeoutTimer, timeoutTrigger, actives)
   475  			}
   476  
   477  		case <-timeoutTrigger:
   478  			// Clean up any expired retrievals and avoid re-requesting them from the
   479  			// same peer (either overloaded or malicious, useless in both cases). We
    480  			// could also penalize (Drop), but there's nothing to gain, and it could
   481  			// possibly further increase the load on it.
   482  			for peer, req := range f.requests {
   483  				if time.Duration(f.clock.Now()-req.time)+txGatherSlack > txFetchTimeout {
   484  					txRequestTimeoutMeter.Mark(int64(len(req.hashes)))
   485  
   486  					// Reschedule all the not-yet-delivered fetches to alternate peers
   487  					for _, hash := range req.hashes {
   488  						// Skip rescheduling hashes already delivered by someone else
   489  						if req.stolen != nil {
   490  							if _, ok := req.stolen[hash]; ok {
   491  								continue
   492  							}
   493  						}
   494  						// Move the delivery back from fetching to queued
   495  						if _, ok := f.announced[hash]; ok {
   496  							panic("announced tracker already contains alternate item")
   497  						}
   498  						if f.alternates[hash] != nil { // nil if tx was broadcast during fetch
   499  							f.announced[hash] = f.alternates[hash]
   500  						}
   501  						delete(f.announced[hash], peer)
   502  						if len(f.announced[hash]) == 0 {
   503  							delete(f.announced, hash)
   504  						}
   505  						delete(f.announces[peer], hash)
   506  						delete(f.alternates, hash)
   507  						delete(f.fetching, hash)
   508  					}
   509  					if len(f.announces[peer]) == 0 {
   510  						delete(f.announces, peer)
   511  					}
   512  					// Keep track of the request as dangling, but never expire
   513  					f.requests[peer].hashes = nil
   514  				}
   515  			}
   516  			// Schedule a new transaction retrieval
   517  			f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
   518  
   519  			// No idea if we scheduled something or not, trigger the timer if needed
   520  			// TODO(raisty): this is kind of lame, can't we dump it into scheduleFetches somehow?
   521  			f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
   522  
   523  		case delivery := <-f.cleanup:
    524  			// Independent of whether the delivery was direct or broadcast, remove all
   525  			// traces of the hash from internal trackers
   526  			for _, hash := range delivery.hashes {
   527  				if _, ok := f.waitlist[hash]; ok {
   528  					for peer, txset := range f.waitslots {
   529  						delete(txset, hash)
   530  						if len(txset) == 0 {
   531  							delete(f.waitslots, peer)
   532  						}
   533  					}
   534  					delete(f.waitlist, hash)
   535  					delete(f.waittime, hash)
   536  				} else {
   537  					for peer, txset := range f.announces {
   538  						delete(txset, hash)
   539  						if len(txset) == 0 {
   540  							delete(f.announces, peer)
   541  						}
   542  					}
   543  					delete(f.announced, hash)
   544  					delete(f.alternates, hash)
   545  
   546  					// If a transaction currently being fetched from a different
   547  					// origin was delivered (delivery stolen), mark it so the
   548  					// actual delivery won't double schedule it.
   549  					if origin, ok := f.fetching[hash]; ok && (origin != delivery.origin || !delivery.direct) {
   550  						stolen := f.requests[origin].stolen
   551  						if stolen == nil {
   552  							f.requests[origin].stolen = make(map[common.Hash]struct{})
   553  							stolen = f.requests[origin].stolen
   554  						}
   555  						stolen[hash] = struct{}{}
   556  					}
   557  					delete(f.fetching, hash)
   558  				}
   559  			}
   560  			// In case of a direct delivery, also reschedule anything missing
   561  			// from the original query
   562  			if delivery.direct {
    563  				// Mark the request successful (independent of individual status)
   564  				txRequestDoneMeter.Mark(int64(len(delivery.hashes)))
   565  
   566  				// Make sure something was pending, nuke it
   567  				req := f.requests[delivery.origin]
   568  				if req == nil {
   569  					log.Warn("Unexpected transaction delivery", "peer", delivery.origin)
   570  					break
   571  				}
   572  				delete(f.requests, delivery.origin)
   573  
   574  				// Anything not delivered should be re-scheduled (with or without
   575  				// this peer, depending on the response cutoff)
   576  				delivered := make(map[common.Hash]struct{})
   577  				for _, hash := range delivery.hashes {
   578  					delivered[hash] = struct{}{}
   579  				}
   580  				cutoff := len(req.hashes) // If nothing is delivered, assume everything is missing, don't retry!!!
   581  				for i, hash := range req.hashes {
   582  					if _, ok := delivered[hash]; ok {
   583  						cutoff = i
   584  					}
   585  				}
   586  				// Reschedule missing hashes from alternates, not-fulfilled from alt+self
   587  				for i, hash := range req.hashes {
   588  					// Skip rescheduling hashes already delivered by someone else
   589  					if req.stolen != nil {
   590  						if _, ok := req.stolen[hash]; ok {
   591  							continue
   592  						}
   593  					}
   594  					if _, ok := delivered[hash]; !ok {
   595  						if i < cutoff {
   596  							delete(f.alternates[hash], delivery.origin)
   597  							delete(f.announces[delivery.origin], hash)
   598  							if len(f.announces[delivery.origin]) == 0 {
   599  								delete(f.announces, delivery.origin)
   600  							}
   601  						}
   602  						if len(f.alternates[hash]) > 0 {
   603  							if _, ok := f.announced[hash]; ok {
   604  								panic(fmt.Sprintf("announced tracker already contains alternate item: %v", f.announced[hash]))
   605  							}
   606  							f.announced[hash] = f.alternates[hash]
   607  						}
   608  					}
   609  					delete(f.alternates, hash)
   610  					delete(f.fetching, hash)
   611  				}
    612  				// Something was delivered, try to reschedule requests
   613  				f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) // Partial delivery may enable others to deliver too
   614  			}
   615  
   616  		case drop := <-f.drop:
   617  			// A peer was dropped, remove all traces of it
   618  			if _, ok := f.waitslots[drop.peer]; ok {
   619  				for hash := range f.waitslots[drop.peer] {
   620  					delete(f.waitlist[hash], drop.peer)
   621  					if len(f.waitlist[hash]) == 0 {
   622  						delete(f.waitlist, hash)
   623  						delete(f.waittime, hash)
   624  					}
   625  				}
   626  				delete(f.waitslots, drop.peer)
   627  				if len(f.waitlist) > 0 {
   628  					f.rescheduleWait(waitTimer, waitTrigger)
   629  				}
   630  			}
   631  			// Clean up any active requests
   632  			var request *txRequest
   633  			if request = f.requests[drop.peer]; request != nil {
   634  				for _, hash := range request.hashes {
   635  					// Skip rescheduling hashes already delivered by someone else
   636  					if request.stolen != nil {
   637  						if _, ok := request.stolen[hash]; ok {
   638  							continue
   639  						}
   640  					}
   641  					// Undelivered hash, reschedule if there's an alternative origin available
   642  					delete(f.alternates[hash], drop.peer)
   643  					if len(f.alternates[hash]) == 0 {
   644  						delete(f.alternates, hash)
   645  					} else {
   646  						f.announced[hash] = f.alternates[hash]
   647  						delete(f.alternates, hash)
   648  					}
   649  					delete(f.fetching, hash)
   650  				}
   651  				delete(f.requests, drop.peer)
   652  			}
   653  			// Clean up general announcement tracking
   654  			if _, ok := f.announces[drop.peer]; ok {
   655  				for hash := range f.announces[drop.peer] {
   656  					delete(f.announced[hash], drop.peer)
   657  					if len(f.announced[hash]) == 0 {
   658  						delete(f.announced, hash)
   659  					}
   660  				}
   661  				delete(f.announces, drop.peer)
   662  			}
   663  			// If a request was cancelled, check if anything needs to be rescheduled
   664  			if request != nil {
   665  				f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
   666  				f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
   667  			}
   668  
   669  		case <-f.quit:
   670  			return
   671  		}
   672  		// No idea what happened, but bump some sanity metrics
   673  		txFetcherWaitingPeers.Update(int64(len(f.waitslots)))
   674  		txFetcherWaitingHashes.Update(int64(len(f.waitlist)))
   675  		txFetcherQueueingPeers.Update(int64(len(f.announces) - len(f.requests)))
   676  		txFetcherQueueingHashes.Update(int64(len(f.announced)))
   677  		txFetcherFetchingPeers.Update(int64(len(f.requests)))
   678  		txFetcherFetchingHashes.Update(int64(len(f.fetching)))
   679  
   680  		// Loop did something, ping the step notifier if needed (tests)
   681  		if f.step != nil {
   682  			f.step <- struct{}{}
   683  		}
   684  	}
   685  }
   686  
   687  // rescheduleWait iterates over all the transactions currently in the waitlist
   688  // and schedules the movement into the fetcher for the earliest.
   689  //
   690  // The method has a granularity of 'gatherSlack', since there's not much point in
   691  // spinning over all the transactions just to maybe find one that should trigger
   692  // a few ms earlier.
   693  func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) {
   694  	if *timer != nil {
   695  		(*timer).Stop()
   696  	}
   697  	now := f.clock.Now()
   698  
   699  	earliest := now
   700  	for _, instance := range f.waittime {
   701  		if earliest > instance {
   702  			earliest = instance
   703  			if txArriveTimeout-time.Duration(now-earliest) < gatherSlack {
   704  				break
   705  			}
   706  		}
   707  	}
   708  	*timer = f.clock.AfterFunc(txArriveTimeout-time.Duration(now-earliest), func() {
   709  		trigger <- struct{}{}
   710  	})
   711  }
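
         // As a worked example: if the oldest waitlist entry is 450ms old, the timer is
         // armed for txArriveTimeout - 450ms = 50ms. Because the scan above bails out
         // once the remaining delay drops below gatherSlack (100ms), entries expiring
         // within that window share a single wakeup instead of each arming their own.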
   712  
   713  // rescheduleTimeout iterates over all the transactions currently in flight and
   714  // schedules a cleanup run when the first would trigger.
   715  //
   716  // The method has a granularity of 'gatherSlack', since there's not much point in
   717  // spinning over all the transactions just to maybe find one that should trigger
   718  // a few ms earlier.
   719  //
   720  // This method is a bit "flaky" "by design". In theory the timeout timer only ever
   721  // should be rescheduled if some request is pending. In practice, a timeout will
   722  // cause the timer to be rescheduled every 5 secs (until the peer comes through or
    723  // disconnects). This is a limitation of the fetcher code because we don't track
    724  // pending requests and timed out requests separately. Without double tracking, if
   725  // we simply didn't reschedule the timer on all-timeout then the timer would never
   726  // be set again since len(request) > 0 => something's running.
   727  func (f *TxFetcher) rescheduleTimeout(timer *mclock.Timer, trigger chan struct{}) {
   728  	if *timer != nil {
   729  		(*timer).Stop()
   730  	}
   731  	now := f.clock.Now()
   732  
   733  	earliest := now
   734  	for _, req := range f.requests {
   735  		// If this request already timed out, skip it altogether
   736  		if req.hashes == nil {
   737  			continue
   738  		}
   739  		if earliest > req.time {
   740  			earliest = req.time
   741  			if txFetchTimeout-time.Duration(now-earliest) < gatherSlack {
   742  				break
   743  			}
   744  		}
   745  	}
   746  	*timer = f.clock.AfterFunc(txFetchTimeout-time.Duration(now-earliest), func() {
   747  		trigger <- struct{}{}
   748  	})
   749  }
   750  
   751  // scheduleFetches starts a batch of retrievals for all available idle peers.
   752  func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{}, whitelist map[string]struct{}) {
   753  	// Gather the set of peers we want to retrieve from (default to all)
   754  	actives := whitelist
   755  	if actives == nil {
   756  		actives = make(map[string]struct{})
   757  		for peer := range f.announces {
   758  			actives[peer] = struct{}{}
   759  		}
   760  	}
   761  	if len(actives) == 0 {
   762  		return
   763  	}
   764  	// For each active peer, try to schedule some transaction fetches
   765  	idle := len(f.requests) == 0
   766  
   767  	f.forEachPeer(actives, func(peer string) {
   768  		if f.requests[peer] != nil {
   769  			return // continue in the for-each
   770  		}
   771  		if len(f.announces[peer]) == 0 {
   772  			return // continue in the for-each
   773  		}
   774  		hashes := make([]common.Hash, 0, maxTxRetrievals)
   775  		f.forEachHash(f.announces[peer], func(hash common.Hash) bool {
   776  			if _, ok := f.fetching[hash]; !ok {
   777  				// Mark the hash as fetching and stash away possible alternates
   778  				f.fetching[hash] = peer
   779  
   780  				if _, ok := f.alternates[hash]; ok {
   781  					panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash]))
   782  				}
   783  				f.alternates[hash] = f.announced[hash]
   784  				delete(f.announced, hash)
   785  
   786  				// Accumulate the hash and stop if the limit was reached
   787  				hashes = append(hashes, hash)
   788  				if len(hashes) >= maxTxRetrievals {
   789  					return false // break in the for-each
   790  				}
   791  			}
   792  			return true // continue in the for-each
   793  		})
   794  		// If any hashes were allocated, request them from the peer
   795  		if len(hashes) > 0 {
   796  			f.requests[peer] = &txRequest{hashes: hashes, time: f.clock.Now()}
   797  			txRequestOutMeter.Mark(int64(len(hashes)))
   798  
   799  			go func(peer string, hashes []common.Hash) {
   800  				// Try to fetch the transactions, but in case of a request
   801  				// failure (e.g. peer disconnected), reschedule the hashes.
   802  				if err := f.fetchTxs(peer, hashes); err != nil {
   803  					txRequestFailMeter.Mark(int64(len(hashes)))
   804  					f.Drop(peer)
   805  				}
   806  			}(peer, hashes)
   807  		}
   808  	})
   809  	// If a new request was fired, schedule a timeout timer
   810  	if idle && len(f.requests) > 0 {
   811  		f.rescheduleTimeout(timer, timeout)
   812  	}
   813  }
   814  
   815  // forEachPeer does a range loop over a map of peers in production, but during
    816  // testing it iterates in a deterministic sorted-then-rotated order to allow reproducing issues.
   817  func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string)) {
    818  	// If we're running in production, use whatever Go's map gives us
   819  	if f.rand == nil {
   820  		for peer := range peers {
   821  			do(peer)
   822  		}
   823  		return
   824  	}
   825  	// We're running the test suite, make iteration deterministic
   826  	list := make([]string, 0, len(peers))
   827  	for peer := range peers {
   828  		list = append(list, peer)
   829  	}
   830  	sort.Strings(list)
   831  	rotateStrings(list, f.rand.Intn(len(list)))
   832  	for _, peer := range list {
   833  		do(peer)
   834  	}
   835  }
   836  
   837  // forEachHash does a range loop over a map of hashes in production, but during
    838  // testing it iterates in a deterministic sorted-then-rotated order to allow reproducing issues.
   839  func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) {
    840  	// If we're running in production, use whatever Go's map gives us
   841  	if f.rand == nil {
   842  		for hash := range hashes {
   843  			if !do(hash) {
   844  				return
   845  			}
   846  		}
   847  		return
   848  	}
   849  	// We're running the test suite, make iteration deterministic
   850  	list := make([]common.Hash, 0, len(hashes))
   851  	for hash := range hashes {
   852  		list = append(list, hash)
   853  	}
   854  	sortHashes(list)
   855  	rotateHashes(list, f.rand.Intn(len(list)))
   856  	for _, hash := range list {
   857  		if !do(hash) {
   858  			return
   859  		}
   860  	}
   861  }
   862  
   863  // rotateStrings rotates the contents of a slice by n steps. This method is only
   864  // used in tests to simulate random map iteration but keep it deterministic.
   865  func rotateStrings(slice []string, n int) {
   866  	orig := make([]string, len(slice))
   867  	copy(orig, slice)
   868  
   869  	for i := 0; i < len(orig); i++ {
   870  		slice[i] = orig[(i+n)%len(orig)]
   871  	}
   872  }
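
         // For instance (illustrative):
         //
         //	s := []string{"a", "b", "c", "d"}
         //	rotateStrings(s, 2) // s is now ["c", "d", "a", "b"]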
   873  
   874  // sortHashes sorts a slice of hashes. This method is only used in tests in order
   875  // to simulate random map iteration but keep it deterministic.
   876  func sortHashes(slice []common.Hash) {
   877  	for i := 0; i < len(slice); i++ {
   878  		for j := i + 1; j < len(slice); j++ {
   879  			if bytes.Compare(slice[i][:], slice[j][:]) > 0 {
   880  				slice[i], slice[j] = slice[j], slice[i]
   881  			}
   882  		}
   883  	}
   884  }
   885  
   886  // rotateHashes rotates the contents of a slice by n steps. This method is only
   887  // used in tests to simulate random map iteration but keep it deterministic.
   888  func rotateHashes(slice []common.Hash, n int) {
   889  	orig := make([]common.Hash, len(slice))
   890  	copy(orig, slice)
   891  
   892  	for i := 0; i < len(orig); i++ {
   893  		slice[i] = orig[(i+n)%len(orig)]
   894  	}
   895  }