github.com/aidoskuneen/adk-node@v0.0.0-20220315131952-2e32567cb7f4/eth/fetcher/tx_fetcher_test.go

     1  // Copyright 2021 The adkgo Authors
     2  // This file is part of the adkgo library (adapted for adkgo from go-ethereum v1.10.8).
     3  //
     4  // the adkgo library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // the adkgo library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the adkgo library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package fetcher
    18  
    19  import (
    20  	"errors"
    21  	"math/big"
    22  	"math/rand"
    23  	"testing"
    24  	"time"
    25  
    26  	"github.com/aidoskuneen/adk-node/common"
    27  	"github.com/aidoskuneen/adk-node/common/mclock"
    28  	"github.com/aidoskuneen/adk-node/core"
    29  	"github.com/aidoskuneen/adk-node/core/types"
    30  )
    31  
    32  var (
    33  	// testTxs is a set of transactions to use during testing that have meaningful hashes.
    34  	testTxs = []*types.Transaction{
    35  		types.NewTransaction(5577006791947779410, common.Address{0x0f}, new(big.Int), 0, new(big.Int), nil),
    36  		types.NewTransaction(15352856648520921629, common.Address{0xbb}, new(big.Int), 0, new(big.Int), nil),
    37  		types.NewTransaction(3916589616287113937, common.Address{0x86}, new(big.Int), 0, new(big.Int), nil),
    38  		types.NewTransaction(9828766684487745566, common.Address{0xac}, new(big.Int), 0, new(big.Int), nil),
    39  	}
    40  	// testTxsHashes contains the hashes of the test transactions above.
    41  	testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
    42  )
    43  
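// The do* types below are the actions a scenario may issue against the fetcher:
// doTxNotify announces a set of hashes from a peer, doTxEnqueue delivers a set
// of transactions (as a reply to a direct request or as a broadcast), doWait
// advances the simulated clock (optionally waiting for the fetcher to take a
// step), doDrop disconnects a peer and doFunc runs an arbitrary callback.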
    44  type doTxNotify struct {
    45  	peer   string
    46  	hashes []common.Hash
    47  }
    48  type doTxEnqueue struct {
    49  	peer   string
    50  	txs    []*types.Transaction
    51  	direct bool
    52  }
    53  type doWait struct {
    54  	time time.Duration
    55  	step bool
    56  }
    57  type doDrop string
    58  type doFunc func()
    59  
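// The is* types below are assertions checked against the fetcher's internal
// state after a step: isWaiting compares the per-peer stage-one wait list,
// isScheduled compares the stage-two announcement tracking, the in-flight
// fetching sets and any dangling (already timed out or answered) requests,
// and isUnderpriced compares the size of the underpriced transaction cache.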
    60  type isWaiting map[string][]common.Hash
    61  type isScheduled struct {
    62  	tracking map[string][]common.Hash
    63  	fetching map[string][]common.Hash
    64  	dangling map[string][]common.Hash
    65  }
    66  type isUnderpriced int
    67  
    68  // txFetcherTest represents a test scenario that can be executed by the test
    69  // runner.
    70  type txFetcherTest struct {
    71  	init  func() *TxFetcher // constructs the fetcher instance under test
    72  	steps []interface{}     // ordered sequence of do* actions and is* assertions
    73  }
    74  
    75  // Tests that transaction announcements are added to a waitlist, and none
    76  // of them are scheduled for retrieval until the wait expires.
    77  func TestTransactionFetcherWaiting(t *testing.T) {
    78  	testTransactionFetcherParallel(t, txFetcherTest{
    79  		init: func() *TxFetcher {
    80  			return NewTxFetcher(
    81  				func(common.Hash) bool { return false },
    82  				nil,
    83  				func(string, []common.Hash) error { return nil },
    84  			)
    85  		},
    86  		steps: []interface{}{
    87  			// Initial announcement to get something into the waitlist
    88  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
    89  			isWaiting(map[string][]common.Hash{
    90  				"A": {{0x01}, {0x02}},
    91  			}),
    92  			// Announce from a new peer to check that no overwrite happens
    93  			doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}},
    94  			isWaiting(map[string][]common.Hash{
    95  				"A": {{0x01}, {0x02}},
    96  				"B": {{0x03}, {0x04}},
    97  			}),
    98  			// Announce clashing hashes but unique new peer
    99  			doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}},
   100  			isWaiting(map[string][]common.Hash{
   101  				"A": {{0x01}, {0x02}},
   102  				"B": {{0x03}, {0x04}},
   103  				"C": {{0x01}, {0x04}},
   104  			}),
   105  			// Announce existing and clashing hashes from existing peer
   106  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}},
   107  			isWaiting(map[string][]common.Hash{
   108  				"A": {{0x01}, {0x02}, {0x03}, {0x05}},
   109  				"B": {{0x03}, {0x04}},
   110  				"C": {{0x01}, {0x04}},
   111  			}),
   112  			isScheduled{tracking: nil, fetching: nil},
   113  
   114  			// Wait for the arrival timeout which should move all expired items
   115  			// from the wait list to the scheduler
   116  			doWait{time: txArriveTimeout, step: true},
   117  			isWaiting(nil),
   118  			isScheduled{
   119  				tracking: map[string][]common.Hash{
   120  					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
   121  					"B": {{0x03}, {0x04}},
   122  					"C": {{0x01}, {0x04}},
   123  				},
   124  				fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
   125  					"A": {{0x02}, {0x03}, {0x05}},
   126  					"C": {{0x01}, {0x04}},
   127  				},
   128  			},
   129  			// Queue up a non-fetchable transaction and then trigger it with a new
   130  			// peer (weird case to test 1 line in the fetcher)
   131  			doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}},
   132  			isWaiting(map[string][]common.Hash{
   133  				"C": {{0x06}, {0x07}},
   134  			}),
   135  			doWait{time: txArriveTimeout, step: true},
   136  			isScheduled{
   137  				tracking: map[string][]common.Hash{
   138  					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
   139  					"B": {{0x03}, {0x04}},
   140  					"C": {{0x01}, {0x04}, {0x06}, {0x07}},
   141  				},
   142  				fetching: map[string][]common.Hash{
   143  					"A": {{0x02}, {0x03}, {0x05}},
   144  					"C": {{0x01}, {0x04}},
   145  				},
   146  			},
   147  			doTxNotify{peer: "D", hashes: []common.Hash{{0x06}, {0x07}}},
   148  			isScheduled{
   149  				tracking: map[string][]common.Hash{
   150  					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
   151  					"B": {{0x03}, {0x04}},
   152  					"C": {{0x01}, {0x04}, {0x06}, {0x07}},
   153  					"D": {{0x06}, {0x07}},
   154  				},
   155  				fetching: map[string][]common.Hash{
   156  					"A": {{0x02}, {0x03}, {0x05}},
   157  					"C": {{0x01}, {0x04}},
   158  					"D": {{0x06}, {0x07}},
   159  				},
   160  			},
   161  		},
   162  	})
   163  }
   164  
   165  // Tests that transaction announcements skip the waiting list if they are
   166  // already scheduled.
   167  func TestTransactionFetcherSkipWaiting(t *testing.T) {
   168  	testTransactionFetcherParallel(t, txFetcherTest{
   169  		init: func() *TxFetcher {
   170  			return NewTxFetcher(
   171  				func(common.Hash) bool { return false },
   172  				nil,
   173  				func(string, []common.Hash) error { return nil },
   174  			)
   175  		},
   176  		steps: []interface{}{
   177  			// Push an initial announcement through to the scheduled stage
   178  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
   179  			isWaiting(map[string][]common.Hash{
   180  				"A": {{0x01}, {0x02}},
   181  			}),
   182  			isScheduled{tracking: nil, fetching: nil},
   183  
   184  			doWait{time: txArriveTimeout, step: true},
   185  			isWaiting(nil),
   186  			isScheduled{
   187  				tracking: map[string][]common.Hash{
   188  					"A": {{0x01}, {0x02}},
   189  				},
   190  				fetching: map[string][]common.Hash{
   191  					"A": {{0x01}, {0x02}},
   192  				},
   193  			},
   194  			// Announce overlaps from the same peer, ensure the new ones end up
   195  			// in stage one, and clashing ones don't get double tracked
   196  			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}, {0x03}}},
   197  			isWaiting(map[string][]common.Hash{
   198  				"A": {{0x03}},
   199  			}),
   200  			isScheduled{
   201  				tracking: map[string][]common.Hash{
   202  					"A": {{0x01}, {0x02}},
   203  				},
   204  				fetching: map[string][]common.Hash{
   205  					"A": {{0x01}, {0x02}},
   206  				},
   207  			},
   208  			// Announce overlaps from a new peer, ensure new transactions end up
   209  			// in stage one and clashing ones get tracked for the new peer
   210  			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x04}}},
   211  			isWaiting(map[string][]common.Hash{
   212  				"A": {{0x03}},
   213  				"B": {{0x03}, {0x04}},
   214  			}),
   215  			isScheduled{
   216  				tracking: map[string][]common.Hash{
   217  					"A": {{0x01}, {0x02}},
   218  					"B": {{0x02}},
   219  				},
   220  				fetching: map[string][]common.Hash{
   221  					"A": {{0x01}, {0x02}},
   222  				},
   223  			},
   224  		},
   225  	})
   226  }
   227  
   228  // Tests that only a single transaction request gets scheduled to a peer
   229  // and subsequent announces block or get allotted to someone else.
   230  func TestTransactionFetcherSingletonRequesting(t *testing.T) {
   231  	testTransactionFetcherParallel(t, txFetcherTest{
   232  		init: func() *TxFetcher {
   233  			return NewTxFetcher(
   234  				func(common.Hash) bool { return false },
   235  				nil,
   236  				func(string, []common.Hash) error { return nil },
   237  			)
   238  		},
   239  		steps: []interface{}{
   240  			// Push an initial announcement through to the scheduled stage
   241  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
   242  			isWaiting(map[string][]common.Hash{
   243  				"A": {{0x01}, {0x02}},
   244  			}),
   245  			isScheduled{tracking: nil, fetching: nil},
   246  
   247  			doWait{time: txArriveTimeout, step: true},
   248  			isWaiting(nil),
   249  			isScheduled{
   250  				tracking: map[string][]common.Hash{
   251  					"A": {{0x01}, {0x02}},
   252  				},
   253  				fetching: map[string][]common.Hash{
   254  					"A": {{0x01}, {0x02}},
   255  				},
   256  			},
   257  			// Announce a new set of transactions from the same peer and ensure
   258  			// they do not start fetching since the peer is already busy
   259  			doTxNotify{peer: "A", hashes: []common.Hash{{0x03}, {0x04}}},
   260  			isWaiting(map[string][]common.Hash{
   261  				"A": {{0x03}, {0x04}},
   262  			}),
   263  			isScheduled{
   264  				tracking: map[string][]common.Hash{
   265  					"A": {{0x01}, {0x02}},
   266  				},
   267  				fetching: map[string][]common.Hash{
   268  					"A": {{0x01}, {0x02}},
   269  				},
   270  			},
   271  			doWait{time: txArriveTimeout, step: true},
   272  			isWaiting(nil),
   273  			isScheduled{
   274  				tracking: map[string][]common.Hash{
   275  					"A": {{0x01}, {0x02}, {0x03}, {0x04}},
   276  				},
   277  				fetching: map[string][]common.Hash{
   278  					"A": {{0x01}, {0x02}},
   279  				},
   280  			},
   281  			// Announce a duplicate set of transactions from a new peer and ensure
   282  			// uniquely new ones start downloading, even if clashing.
   283  			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x05}, {0x06}}},
   284  			isWaiting(map[string][]common.Hash{
   285  				"B": {{0x05}, {0x06}},
   286  			}),
   287  			isScheduled{
   288  				tracking: map[string][]common.Hash{
   289  					"A": {{0x01}, {0x02}, {0x03}, {0x04}},
   290  					"B": {{0x02}, {0x03}},
   291  				},
   292  				fetching: map[string][]common.Hash{
   293  					"A": {{0x01}, {0x02}},
   294  					"B": {{0x03}},
   295  				},
   296  			},
   297  		},
   298  	})
   299  }
   300  
   301  // Tests that if a transaction retrieval fails, all the transactions get
    302  // instantly scheduled back to someone else or the announcements are dropped
   303  // if no alternate source is available.
   304  func TestTransactionFetcherFailedRescheduling(t *testing.T) {
   305  	// Create a channel to control when tx requests can fail
   306  	proceed := make(chan struct{})
   307  
   308  	testTransactionFetcherParallel(t, txFetcherTest{
   309  		init: func() *TxFetcher {
   310  			return NewTxFetcher(
   311  				func(common.Hash) bool { return false },
   312  				nil,
   313  				func(origin string, hashes []common.Hash) error {
   314  					<-proceed
   315  					return errors.New("peer disconnected")
   316  				},
   317  			)
   318  		},
   319  		steps: []interface{}{
   320  			// Push an initial announcement through to the scheduled stage
   321  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
   322  			isWaiting(map[string][]common.Hash{
   323  				"A": {{0x01}, {0x02}},
   324  			}),
   325  			isScheduled{tracking: nil, fetching: nil},
   326  
   327  			doWait{time: txArriveTimeout, step: true},
   328  			isWaiting(nil),
   329  			isScheduled{
   330  				tracking: map[string][]common.Hash{
   331  					"A": {{0x01}, {0x02}},
   332  				},
   333  				fetching: map[string][]common.Hash{
   334  					"A": {{0x01}, {0x02}},
   335  				},
   336  			},
    337  			// While the original peer is stuck in the request, push in a second
   338  			// data source.
   339  			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
   340  			isWaiting(nil),
   341  			isScheduled{
   342  				tracking: map[string][]common.Hash{
   343  					"A": {{0x01}, {0x02}},
   344  					"B": {{0x02}},
   345  				},
   346  				fetching: map[string][]common.Hash{
   347  					"A": {{0x01}, {0x02}},
   348  				},
   349  			},
   350  			// Wait until the original request fails and check that transactions
   351  			// are either rescheduled or dropped
   352  			doFunc(func() {
   353  				proceed <- struct{}{} // Allow peer A to return the failure
   354  			}),
   355  			doWait{time: 0, step: true},
   356  			isWaiting(nil),
   357  			isScheduled{
   358  				tracking: map[string][]common.Hash{
   359  					"B": {{0x02}},
   360  				},
   361  				fetching: map[string][]common.Hash{
   362  					"B": {{0x02}},
   363  				},
   364  			},
   365  			doFunc(func() {
   366  				proceed <- struct{}{} // Allow peer B to return the failure
   367  			}),
   368  			doWait{time: 0, step: true},
   369  			isWaiting(nil),
   370  			isScheduled{nil, nil, nil},
   371  		},
   372  	})
   373  }
   374  
   375  // Tests that if a transaction retrieval succeeds, all alternate origins
   376  // are cleaned up.
   377  func TestTransactionFetcherCleanup(t *testing.T) {
   378  	testTransactionFetcherParallel(t, txFetcherTest{
   379  		init: func() *TxFetcher {
   380  			return NewTxFetcher(
   381  				func(common.Hash) bool { return false },
   382  				func(txs []*types.Transaction) []error {
   383  					return make([]error, len(txs))
   384  				},
   385  				func(string, []common.Hash) error { return nil },
   386  			)
   387  		},
   388  		steps: []interface{}{
   389  			// Push an initial announcement through to the scheduled stage
   390  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
   391  			isWaiting(map[string][]common.Hash{
   392  				"A": {testTxsHashes[0]},
   393  			}),
   394  			isScheduled{tracking: nil, fetching: nil},
   395  
   396  			doWait{time: txArriveTimeout, step: true},
   397  			isWaiting(nil),
   398  			isScheduled{
   399  				tracking: map[string][]common.Hash{
   400  					"A": {testTxsHashes[0]},
   401  				},
   402  				fetching: map[string][]common.Hash{
   403  					"A": {testTxsHashes[0]},
   404  				},
   405  			},
   406  			// Request should be delivered
   407  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
   408  			isScheduled{nil, nil, nil},
   409  		},
   410  	})
   411  }
   412  
    413  // Tests that if a transaction retrieval succeeds, but the response is empty
    414  // (no transactions available), then all announcements are nuked instead of
    415  // being rescheduled (yes, this was a bug).
   416  func TestTransactionFetcherCleanupEmpty(t *testing.T) {
   417  	testTransactionFetcherParallel(t, txFetcherTest{
   418  		init: func() *TxFetcher {
   419  			return NewTxFetcher(
   420  				func(common.Hash) bool { return false },
   421  				func(txs []*types.Transaction) []error {
   422  					return make([]error, len(txs))
   423  				},
   424  				func(string, []common.Hash) error { return nil },
   425  			)
   426  		},
   427  		steps: []interface{}{
   428  			// Push an initial announcement through to the scheduled stage
   429  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
   430  			isWaiting(map[string][]common.Hash{
   431  				"A": {testTxsHashes[0]},
   432  			}),
   433  			isScheduled{tracking: nil, fetching: nil},
   434  
   435  			doWait{time: txArriveTimeout, step: true},
   436  			isWaiting(nil),
   437  			isScheduled{
   438  				tracking: map[string][]common.Hash{
   439  					"A": {testTxsHashes[0]},
   440  				},
   441  				fetching: map[string][]common.Hash{
   442  					"A": {testTxsHashes[0]},
   443  				},
   444  			},
   445  			// Deliver an empty response and ensure the transaction is cleared, not rescheduled
   446  			doTxEnqueue{peer: "A", txs: []*types.Transaction{}, direct: true},
   447  			isScheduled{nil, nil, nil},
   448  		},
   449  	})
   450  }
   451  
   452  // Tests that non-returned transactions are either re-scheduled from a
    453  // different peer, or from the same peer if they fall after the cutoff point.
   454  func TestTransactionFetcherMissingRescheduling(t *testing.T) {
   455  	testTransactionFetcherParallel(t, txFetcherTest{
   456  		init: func() *TxFetcher {
   457  			return NewTxFetcher(
   458  				func(common.Hash) bool { return false },
   459  				func(txs []*types.Transaction) []error {
   460  					return make([]error, len(txs))
   461  				},
   462  				func(string, []common.Hash) error { return nil },
   463  			)
   464  		},
   465  		steps: []interface{}{
   466  			// Push an initial announcement through to the scheduled stage
   467  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}},
   468  			isWaiting(map[string][]common.Hash{
   469  				"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
   470  			}),
   471  			isScheduled{tracking: nil, fetching: nil},
   472  
   473  			doWait{time: txArriveTimeout, step: true},
   474  			isWaiting(nil),
   475  			isScheduled{
   476  				tracking: map[string][]common.Hash{
   477  					"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
   478  				},
   479  				fetching: map[string][]common.Hash{
   480  					"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
   481  				},
   482  			},
    483  			// Deliver only the middle of the requested transactions; the one before
    484  			// it should be dropped and the one after it re-requested.
   485  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true}, // This depends on the deterministic random
   486  			isScheduled{
   487  				tracking: map[string][]common.Hash{
   488  					"A": {testTxsHashes[2]},
   489  				},
   490  				fetching: map[string][]common.Hash{
   491  					"A": {testTxsHashes[2]},
   492  				},
   493  			},
   494  		},
   495  	})
   496  }
   497  
   498  // Tests that out of two transactions, if one is missing and the last is
   499  // delivered, the peer gets properly cleaned out from the internal state.
   500  func TestTransactionFetcherMissingCleanup(t *testing.T) {
   501  	testTransactionFetcherParallel(t, txFetcherTest{
   502  		init: func() *TxFetcher {
   503  			return NewTxFetcher(
   504  				func(common.Hash) bool { return false },
   505  				func(txs []*types.Transaction) []error {
   506  					return make([]error, len(txs))
   507  				},
   508  				func(string, []common.Hash) error { return nil },
   509  			)
   510  		},
   511  		steps: []interface{}{
   512  			// Push an initial announcement through to the scheduled stage
   513  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
   514  			isWaiting(map[string][]common.Hash{
   515  				"A": {testTxsHashes[0], testTxsHashes[1]},
   516  			}),
   517  			isScheduled{tracking: nil, fetching: nil},
   518  
   519  			doWait{time: txArriveTimeout, step: true},
   520  			isWaiting(nil),
   521  			isScheduled{
   522  				tracking: map[string][]common.Hash{
   523  					"A": {testTxsHashes[0], testTxsHashes[1]},
   524  				},
   525  				fetching: map[string][]common.Hash{
   526  					"A": {testTxsHashes[0], testTxsHashes[1]},
   527  				},
   528  			},
    529  			// Deliver only one of the two requested transactions; the missing one
    530  			// should be dropped and the peer cleaned out entirely.
   531  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true}, // This depends on the deterministic random
   532  			isScheduled{nil, nil, nil},
   533  		},
   534  	})
   535  }
   536  
   537  // Tests that transaction broadcasts properly clean up announcements.
   538  func TestTransactionFetcherBroadcasts(t *testing.T) {
   539  	testTransactionFetcherParallel(t, txFetcherTest{
   540  		init: func() *TxFetcher {
   541  			return NewTxFetcher(
   542  				func(common.Hash) bool { return false },
   543  				func(txs []*types.Transaction) []error {
   544  					return make([]error, len(txs))
   545  				},
   546  				func(string, []common.Hash) error { return nil },
   547  			)
   548  		},
   549  		steps: []interface{}{
    550  			// Set up three transactions to be in different stages: waiting, queued and fetching
   551  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
   552  			doWait{time: txArriveTimeout, step: true},
   553  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
   554  			doWait{time: txArriveTimeout, step: true},
   555  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},
   556  
   557  			isWaiting(map[string][]common.Hash{
   558  				"A": {testTxsHashes[2]},
   559  			}),
   560  			isScheduled{
   561  				tracking: map[string][]common.Hash{
   562  					"A": {testTxsHashes[0], testTxsHashes[1]},
   563  				},
   564  				fetching: map[string][]common.Hash{
   565  					"A": {testTxsHashes[0]},
   566  				},
   567  			},
   568  			// Broadcast all the transactions and ensure everything gets cleaned
   569  			// up, but the dangling request is left alone to avoid doing multiple
   570  			// concurrent requests.
   571  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: false},
   572  			isWaiting(nil),
   573  			isScheduled{
   574  				tracking: nil,
   575  				fetching: nil,
   576  				dangling: map[string][]common.Hash{
   577  					"A": {testTxsHashes[0]},
   578  				},
   579  			},
   580  			// Deliver the requested hashes
   581  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: true},
   582  			isScheduled{nil, nil, nil},
   583  		},
   584  	})
   585  }
   586  
   587  // Tests that the waiting list timers properly reset and reschedule.
   588  func TestTransactionFetcherWaitTimerResets(t *testing.T) {
   589  	testTransactionFetcherParallel(t, txFetcherTest{
   590  		init: func() *TxFetcher {
   591  			return NewTxFetcher(
   592  				func(common.Hash) bool { return false },
   593  				nil,
   594  				func(string, []common.Hash) error { return nil },
   595  			)
   596  		},
   597  		steps: []interface{}{
   598  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
   599  			isWaiting(map[string][]common.Hash{
   600  				"A": {{0x01}},
   601  			}),
   602  			isScheduled{nil, nil, nil},
   603  			doWait{time: txArriveTimeout / 2, step: false},
   604  			isWaiting(map[string][]common.Hash{
   605  				"A": {{0x01}},
   606  			}),
   607  			isScheduled{nil, nil, nil},
   608  
   609  			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
   610  			isWaiting(map[string][]common.Hash{
   611  				"A": {{0x01}, {0x02}},
   612  			}),
   613  			isScheduled{nil, nil, nil},
   614  			doWait{time: txArriveTimeout / 2, step: true},
   615  			isWaiting(map[string][]common.Hash{
   616  				"A": {{0x02}},
   617  			}),
   618  			isScheduled{
   619  				tracking: map[string][]common.Hash{
   620  					"A": {{0x01}},
   621  				},
   622  				fetching: map[string][]common.Hash{
   623  					"A": {{0x01}},
   624  				},
   625  			},
   626  
   627  			doWait{time: txArriveTimeout / 2, step: true},
   628  			isWaiting(nil),
   629  			isScheduled{
   630  				tracking: map[string][]common.Hash{
   631  					"A": {{0x01}, {0x02}},
   632  				},
   633  				fetching: map[string][]common.Hash{
   634  					"A": {{0x01}},
   635  				},
   636  			},
   637  		},
   638  	})
   639  }
   640  
   641  // Tests that if a transaction request is not replied to, it will time
   642  // out and be re-scheduled for someone else.
   643  func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
   644  	testTransactionFetcherParallel(t, txFetcherTest{
   645  		init: func() *TxFetcher {
   646  			return NewTxFetcher(
   647  				func(common.Hash) bool { return false },
   648  				func(txs []*types.Transaction) []error {
   649  					return make([]error, len(txs))
   650  				},
   651  				func(string, []common.Hash) error { return nil },
   652  			)
   653  		},
   654  		steps: []interface{}{
   655  			// Push an initial announcement through to the scheduled stage
   656  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
   657  			isWaiting(map[string][]common.Hash{
   658  				"A": {testTxsHashes[0]},
   659  			}),
   660  			isScheduled{tracking: nil, fetching: nil},
   661  
   662  			doWait{time: txArriveTimeout, step: true},
   663  			isWaiting(nil),
   664  			isScheduled{
   665  				tracking: map[string][]common.Hash{
   666  					"A": {testTxsHashes[0]},
   667  				},
   668  				fetching: map[string][]common.Hash{
   669  					"A": {testTxsHashes[0]},
   670  				},
   671  			},
   672  			// Wait until the delivery times out, everything should be cleaned up
   673  			doWait{time: txFetchTimeout, step: true},
   674  			isWaiting(nil),
   675  			isScheduled{
   676  				tracking: nil,
   677  				fetching: nil,
   678  				dangling: map[string][]common.Hash{
   679  					"A": {},
   680  				},
   681  			},
   682  			// Ensure that followup announcements don't get scheduled
   683  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
   684  			doWait{time: txArriveTimeout, step: true},
   685  			isScheduled{
   686  				tracking: map[string][]common.Hash{
   687  					"A": {testTxsHashes[1]},
   688  				},
   689  				fetching: nil,
   690  				dangling: map[string][]common.Hash{
   691  					"A": {},
   692  				},
   693  			},
   694  			// If the dangling request arrives a bit later, do not choke
   695  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
   696  			isWaiting(nil),
   697  			isScheduled{
   698  				tracking: map[string][]common.Hash{
   699  					"A": {testTxsHashes[1]},
   700  				},
   701  				fetching: map[string][]common.Hash{
   702  					"A": {testTxsHashes[1]},
   703  				},
   704  			},
   705  		},
   706  	})
   707  }
   708  
   709  // Tests that the fetching timeout timers properly reset and reschedule.
   710  func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
   711  	testTransactionFetcherParallel(t, txFetcherTest{
   712  		init: func() *TxFetcher {
   713  			return NewTxFetcher(
   714  				func(common.Hash) bool { return false },
   715  				nil,
   716  				func(string, []common.Hash) error { return nil },
   717  			)
   718  		},
   719  		steps: []interface{}{
   720  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
   721  			doWait{time: txArriveTimeout, step: true},
   722  			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
   723  			doWait{time: txArriveTimeout, step: true},
   724  
   725  			isWaiting(nil),
   726  			isScheduled{
   727  				tracking: map[string][]common.Hash{
   728  					"A": {{0x01}},
   729  					"B": {{0x02}},
   730  				},
   731  				fetching: map[string][]common.Hash{
   732  					"A": {{0x01}},
   733  					"B": {{0x02}},
   734  				},
   735  			},
   736  			doWait{time: txFetchTimeout - txArriveTimeout, step: true},
   737  			isScheduled{
   738  				tracking: map[string][]common.Hash{
   739  					"B": {{0x02}},
   740  				},
   741  				fetching: map[string][]common.Hash{
   742  					"B": {{0x02}},
   743  				},
   744  				dangling: map[string][]common.Hash{
   745  					"A": {},
   746  				},
   747  			},
   748  			doWait{time: txArriveTimeout, step: true},
   749  			isScheduled{
   750  				tracking: nil,
   751  				fetching: nil,
   752  				dangling: map[string][]common.Hash{
   753  					"A": {},
   754  					"B": {},
   755  				},
   756  			},
   757  		},
   758  	})
   759  }
   760  
    761  // Tests that if thousands of transactions are announced, only a small
   762  // number of them will be requested at a time.
   763  func TestTransactionFetcherRateLimiting(t *testing.T) {
    764  	// Create a slew of transactions to announce
   765  	var hashes []common.Hash
   766  	for i := 0; i < maxTxAnnounces; i++ {
   767  		hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)})
   768  	}
   769  
   770  	testTransactionFetcherParallel(t, txFetcherTest{
   771  		init: func() *TxFetcher {
   772  			return NewTxFetcher(
   773  				func(common.Hash) bool { return false },
   774  				nil,
   775  				func(string, []common.Hash) error { return nil },
   776  			)
   777  		},
   778  		steps: []interface{}{
   779  			// Announce all the transactions, wait a bit and ensure only a small
   780  			// percentage gets requested
   781  			doTxNotify{peer: "A", hashes: hashes},
   782  			doWait{time: txArriveTimeout, step: true},
   783  			isWaiting(nil),
   784  			isScheduled{
   785  				tracking: map[string][]common.Hash{
   786  					"A": hashes,
   787  				},
   788  				fetching: map[string][]common.Hash{
   789  					"A": hashes[1643 : 1643+maxTxRetrievals],
   790  				},
   791  			},
   792  		},
   793  	})
   794  }
   795  
    796  // Tests that the number of transactions a peer is allowed to announce and/or
   797  // request at the same time is hard capped.
   798  func TestTransactionFetcherDoSProtection(t *testing.T) {
    799  	// Create a slew of transactions to announce
   800  	var hashesA []common.Hash
   801  	for i := 0; i < maxTxAnnounces+1; i++ {
   802  		hashesA = append(hashesA, common.Hash{0x01, byte(i / 256), byte(i % 256)})
   803  	}
   804  	var hashesB []common.Hash
   805  	for i := 0; i < maxTxAnnounces+1; i++ {
   806  		hashesB = append(hashesB, common.Hash{0x02, byte(i / 256), byte(i % 256)})
   807  	}
   808  	testTransactionFetcherParallel(t, txFetcherTest{
   809  		init: func() *TxFetcher {
   810  			return NewTxFetcher(
   811  				func(common.Hash) bool { return false },
   812  				nil,
   813  				func(string, []common.Hash) error { return nil },
   814  			)
   815  		},
   816  		steps: []interface{}{
    817  			// Announce half of the transactions and wait for them to be scheduled
   818  			doTxNotify{peer: "A", hashes: hashesA[:maxTxAnnounces/2]},
   819  			doTxNotify{peer: "B", hashes: hashesB[:maxTxAnnounces/2-1]},
   820  			doWait{time: txArriveTimeout, step: true},
   821  
   822  			// Announce the second half and keep them in the wait list
   823  			doTxNotify{peer: "A", hashes: hashesA[maxTxAnnounces/2 : maxTxAnnounces]},
   824  			doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1]},
   825  
   826  			// Ensure the hashes are split half and half
   827  			isWaiting(map[string][]common.Hash{
   828  				"A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
   829  				"B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1],
   830  			}),
   831  			isScheduled{
   832  				tracking: map[string][]common.Hash{
   833  					"A": hashesA[:maxTxAnnounces/2],
   834  					"B": hashesB[:maxTxAnnounces/2-1],
   835  				},
   836  				fetching: map[string][]common.Hash{
   837  					"A": hashesA[1643 : 1643+maxTxRetrievals],
   838  					"B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
   839  				},
   840  			},
    841  			// Ensure that adding even one more hash over the cap results in that hash being dropped
   842  			doTxNotify{peer: "A", hashes: []common.Hash{hashesA[maxTxAnnounces]}},
   843  			doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces-1 : maxTxAnnounces+1]},
   844  
   845  			isWaiting(map[string][]common.Hash{
   846  				"A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
   847  				"B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces],
   848  			}),
   849  			isScheduled{
   850  				tracking: map[string][]common.Hash{
   851  					"A": hashesA[:maxTxAnnounces/2],
   852  					"B": hashesB[:maxTxAnnounces/2-1],
   853  				},
   854  				fetching: map[string][]common.Hash{
   855  					"A": hashesA[1643 : 1643+maxTxRetrievals],
   856  					"B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
   857  				},
   858  			},
   859  		},
   860  	})
   861  }
   862  
   863  // Tests that underpriced transactions don't get rescheduled after being rejected.
   864  func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
   865  	testTransactionFetcherParallel(t, txFetcherTest{
   866  		init: func() *TxFetcher {
   867  			return NewTxFetcher(
   868  				func(common.Hash) bool { return false },
   869  				func(txs []*types.Transaction) []error {
   870  					errs := make([]error, len(txs))
   871  					for i := 0; i < len(errs); i++ {
   872  						if i%2 == 0 {
   873  							errs[i] = core.ErrUnderpriced
   874  						} else {
   875  							errs[i] = core.ErrReplaceUnderpriced
   876  						}
   877  					}
   878  					return errs
   879  				},
   880  				func(string, []common.Hash) error { return nil },
   881  			)
   882  		},
   883  		steps: []interface{}{
   884  			// Deliver a transaction through the fetcher, but reject as underpriced
   885  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
   886  			doWait{time: txArriveTimeout, step: true},
   887  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}, direct: true},
   888  			isScheduled{nil, nil, nil},
   889  
   890  			// Try to announce the transaction again, ensure it's not scheduled back
   891  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}}, // [2] is needed to force a step in the fetcher
   892  			isWaiting(map[string][]common.Hash{
   893  				"A": {testTxsHashes[2]},
   894  			}),
   895  			isScheduled{nil, nil, nil},
   896  		},
   897  	})
   898  }
   899  
   900  // Tests that underpriced transactions don't get rescheduled after being rejected,
   901  // but at the same time there's a hard cap on the number of transactions that are
   902  // tracked.
   903  func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
   904  	// Temporarily disable fetch timeouts as they massively mess up the simulated clock
   905  	defer func(timeout time.Duration) { txFetchTimeout = timeout }(txFetchTimeout)
   906  	txFetchTimeout = 24 * time.Hour
   907  
   908  	// Create a slew of transactions to max out the underpriced set
   909  	var txs []*types.Transaction
   910  	for i := 0; i < maxTxUnderpricedSetSize+1; i++ {
   911  		txs = append(txs, types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil))
   912  	}
   913  	hashes := make([]common.Hash, len(txs))
   914  	for i, tx := range txs {
   915  		hashes[i] = tx.Hash()
   916  	}
   917  	// Generate a set of steps to announce and deliver the entire set of transactions
   918  	var steps []interface{}
   919  	for i := 0; i < maxTxUnderpricedSetSize/maxTxRetrievals; i++ {
   920  		steps = append(steps, doTxNotify{peer: "A", hashes: hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals]})
   921  		steps = append(steps, isWaiting(map[string][]common.Hash{
   922  			"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
   923  		}))
   924  		steps = append(steps, doWait{time: txArriveTimeout, step: true})
   925  		steps = append(steps, isScheduled{
   926  			tracking: map[string][]common.Hash{
   927  				"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
   928  			},
   929  			fetching: map[string][]common.Hash{
   930  				"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
   931  			},
   932  		})
   933  		steps = append(steps, doTxEnqueue{peer: "A", txs: txs[i*maxTxRetrievals : (i+1)*maxTxRetrievals], direct: true})
   934  		steps = append(steps, isWaiting(nil))
   935  		steps = append(steps, isScheduled{nil, nil, nil})
   936  		steps = append(steps, isUnderpriced((i+1)*maxTxRetrievals))
   937  	}
   938  	testTransactionFetcher(t, txFetcherTest{
   939  		init: func() *TxFetcher {
   940  			return NewTxFetcher(
   941  				func(common.Hash) bool { return false },
   942  				func(txs []*types.Transaction) []error {
   943  					errs := make([]error, len(txs))
   944  					for i := 0; i < len(errs); i++ {
   945  						errs[i] = core.ErrUnderpriced
   946  					}
   947  					return errs
   948  				},
   949  				func(string, []common.Hash) error { return nil },
   950  			)
   951  		},
   952  		steps: append(steps, []interface{}{
    953  			// The preparation of the test has already been done in `steps`; add the final check
   954  			doTxNotify{peer: "A", hashes: []common.Hash{hashes[maxTxUnderpricedSetSize]}},
   955  			doWait{time: txArriveTimeout, step: true},
   956  			doTxEnqueue{peer: "A", txs: []*types.Transaction{txs[maxTxUnderpricedSetSize]}, direct: true},
   957  			isUnderpriced(maxTxUnderpricedSetSize),
   958  		}...),
   959  	})
   960  }
   961  
   962  // Tests that unexpected deliveries don't corrupt the internal state.
   963  func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
   964  	testTransactionFetcherParallel(t, txFetcherTest{
   965  		init: func() *TxFetcher {
   966  			return NewTxFetcher(
   967  				func(common.Hash) bool { return false },
   968  				func(txs []*types.Transaction) []error {
   969  					return make([]error, len(txs))
   970  				},
   971  				func(string, []common.Hash) error { return nil },
   972  			)
   973  		},
   974  		steps: []interface{}{
   975  			// Deliver something out of the blue
   976  			isWaiting(nil),
   977  			isScheduled{nil, nil, nil},
   978  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: false},
   979  			isWaiting(nil),
   980  			isScheduled{nil, nil, nil},
   981  
   982  			// Set up a few hashes into various stages
   983  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
   984  			doWait{time: txArriveTimeout, step: true},
   985  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
   986  			doWait{time: txArriveTimeout, step: true},
   987  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},
   988  
   989  			isWaiting(map[string][]common.Hash{
   990  				"A": {testTxsHashes[2]},
   991  			}),
   992  			isScheduled{
   993  				tracking: map[string][]common.Hash{
   994  					"A": {testTxsHashes[0], testTxsHashes[1]},
   995  				},
   996  				fetching: map[string][]common.Hash{
   997  					"A": {testTxsHashes[0]},
   998  				},
   999  			},
  1000  			// Deliver everything and more out of the blue
  1001  			doTxEnqueue{peer: "B", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2], testTxs[3]}, direct: true},
  1002  			isWaiting(nil),
  1003  			isScheduled{
  1004  				tracking: nil,
  1005  				fetching: nil,
  1006  				dangling: map[string][]common.Hash{
  1007  					"A": {testTxsHashes[0]},
  1008  				},
  1009  			},
  1010  		},
  1011  	})
  1012  }
  1013  
  1014  // Tests that dropping a peer cleans out all internal data structures in all the
  1015  // live or dangling stages.
  1016  func TestTransactionFetcherDrop(t *testing.T) {
  1017  	testTransactionFetcherParallel(t, txFetcherTest{
  1018  		init: func() *TxFetcher {
  1019  			return NewTxFetcher(
  1020  				func(common.Hash) bool { return false },
  1021  				func(txs []*types.Transaction) []error {
  1022  					return make([]error, len(txs))
  1023  				},
  1024  				func(string, []common.Hash) error { return nil },
  1025  			)
  1026  		},
  1027  		steps: []interface{}{
  1028  			// Set up a few hashes into various stages
  1029  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
  1030  			doWait{time: txArriveTimeout, step: true},
  1031  			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
  1032  			doWait{time: txArriveTimeout, step: true},
  1033  			doTxNotify{peer: "A", hashes: []common.Hash{{0x03}}},
  1034  
  1035  			isWaiting(map[string][]common.Hash{
  1036  				"A": {{0x03}},
  1037  			}),
  1038  			isScheduled{
  1039  				tracking: map[string][]common.Hash{
  1040  					"A": {{0x01}, {0x02}},
  1041  				},
  1042  				fetching: map[string][]common.Hash{
  1043  					"A": {{0x01}},
  1044  				},
  1045  			},
  1046  			// Drop the peer and ensure everything's cleaned out
  1047  			doDrop("A"),
  1048  			isWaiting(nil),
  1049  			isScheduled{nil, nil, nil},
  1050  
  1051  			// Push the node into a dangling (timeout) state
  1052  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
  1053  			doWait{time: txArriveTimeout, step: true},
  1054  			isWaiting(nil),
  1055  			isScheduled{
  1056  				tracking: map[string][]common.Hash{
  1057  					"A": {testTxsHashes[0]},
  1058  				},
  1059  				fetching: map[string][]common.Hash{
  1060  					"A": {testTxsHashes[0]},
  1061  				},
  1062  			},
  1063  			doWait{time: txFetchTimeout, step: true},
  1064  			isWaiting(nil),
  1065  			isScheduled{
  1066  				tracking: nil,
  1067  				fetching: nil,
  1068  				dangling: map[string][]common.Hash{
  1069  					"A": {},
  1070  				},
  1071  			},
  1072  			// Drop the peer and ensure everything's cleaned out
  1073  			doDrop("A"),
  1074  			isWaiting(nil),
  1075  			isScheduled{nil, nil, nil},
  1076  		},
  1077  	})
  1078  }
  1079  
  1080  // Tests that dropping a peer instantly reschedules failed announcements to any
  1081  // available peer.
  1082  func TestTransactionFetcherDropRescheduling(t *testing.T) {
  1083  	testTransactionFetcherParallel(t, txFetcherTest{
  1084  		init: func() *TxFetcher {
  1085  			return NewTxFetcher(
  1086  				func(common.Hash) bool { return false },
  1087  				func(txs []*types.Transaction) []error {
  1088  					return make([]error, len(txs))
  1089  				},
  1090  				func(string, []common.Hash) error { return nil },
  1091  			)
  1092  		},
  1093  		steps: []interface{}{
  1094  			// Set up a few hashes into various stages
  1095  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
  1096  			doWait{time: txArriveTimeout, step: true},
  1097  			doTxNotify{peer: "B", hashes: []common.Hash{{0x01}}},
  1098  
  1099  			isWaiting(nil),
  1100  			isScheduled{
  1101  				tracking: map[string][]common.Hash{
  1102  					"A": {{0x01}},
  1103  					"B": {{0x01}},
  1104  				},
  1105  				fetching: map[string][]common.Hash{
  1106  					"A": {{0x01}},
  1107  				},
  1108  			},
  1109  			// Drop the peer and ensure everything's cleaned out
  1110  			doDrop("A"),
  1111  			isWaiting(nil),
  1112  			isScheduled{
  1113  				tracking: map[string][]common.Hash{
  1114  					"B": {{0x01}},
  1115  				},
  1116  				fetching: map[string][]common.Hash{
  1117  					"B": {{0x01}},
  1118  				},
  1119  			},
  1120  		},
  1121  	})
  1122  }
  1123  
  1124  // This test reproduces a crash caught by the fuzzer. The root cause was a
  1125  // dangling transaction timing out and clashing on re-add with a concurrently
  1126  // announced one.
  1127  func TestTransactionFetcherFuzzCrash01(t *testing.T) {
  1128  	testTransactionFetcherParallel(t, txFetcherTest{
  1129  		init: func() *TxFetcher {
  1130  			return NewTxFetcher(
  1131  				func(common.Hash) bool { return false },
  1132  				func(txs []*types.Transaction) []error {
  1133  					return make([]error, len(txs))
  1134  				},
  1135  				func(string, []common.Hash) error { return nil },
  1136  			)
  1137  		},
  1138  		steps: []interface{}{
  1139  			// Get a transaction into fetching mode and make it dangling with a broadcast
  1140  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
  1141  			doWait{time: txArriveTimeout, step: true},
  1142  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
  1143  
  1144  			// Notify the dangling transaction once more and crash via a timeout
  1145  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
  1146  			doWait{time: txFetchTimeout, step: true},
  1147  		},
  1148  	})
  1149  }
  1150  
  1151  // This test reproduces a crash caught by the fuzzer. The root cause was a
  1152  // dangling transaction getting peer-dropped and clashing on re-add with a
  1153  // concurrently announced one.
  1154  func TestTransactionFetcherFuzzCrash02(t *testing.T) {
  1155  	testTransactionFetcherParallel(t, txFetcherTest{
  1156  		init: func() *TxFetcher {
  1157  			return NewTxFetcher(
  1158  				func(common.Hash) bool { return false },
  1159  				func(txs []*types.Transaction) []error {
  1160  					return make([]error, len(txs))
  1161  				},
  1162  				func(string, []common.Hash) error { return nil },
  1163  			)
  1164  		},
  1165  		steps: []interface{}{
  1166  			// Get a transaction into fetching mode and make it dangling with a broadcast
  1167  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
  1168  			doWait{time: txArriveTimeout, step: true},
  1169  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
  1170  
  1171  			// Notify the dangling transaction once more, re-fetch, and crash via a drop and timeout
  1172  			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
  1173  			doWait{time: txArriveTimeout, step: true},
  1174  			doDrop("A"),
  1175  			doWait{time: txFetchTimeout, step: true},
  1176  		},
  1177  	})
  1178  }
  1179  
  1180  // This test reproduces a crash caught by the fuzzer. The root cause was a
  1181  // dangling transaction getting rescheduled via a partial delivery, clashing
  1182  // with a concurrent notify.
  1183  func TestTransactionFetcherFuzzCrash03(t *testing.T) {
  1184  	testTransactionFetcherParallel(t, txFetcherTest{
  1185  		init: func() *TxFetcher {
  1186  			return NewTxFetcher(
  1187  				func(common.Hash) bool { return false },
  1188  				func(txs []*types.Transaction) []error {
  1189  					return make([]error, len(txs))
  1190  				},
  1191  				func(string, []common.Hash) error { return nil },
  1192  			)
  1193  		},
  1194  		steps: []interface{}{
  1195  			// Get a transaction into fetching mode and make it dangling with a broadcast
  1196  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
  1197  			doWait{time: txFetchTimeout, step: true},
  1198  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}},
  1199  
  1200  			// Notify the dangling transaction once more, partially deliver, then clash and crash with a timeout
  1201  			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
  1202  			doWait{time: txArriveTimeout, step: true},
  1203  
  1204  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true},
  1205  			doWait{time: txFetchTimeout, step: true},
  1206  		},
  1207  	})
  1208  }
  1209  
  1210  // This test reproduces a crash caught by the fuzzer. The root cause was a
  1211  // dangling transaction getting rescheduled via a disconnect, clashing with
  1212  // a concurrent notify.
  1213  func TestTransactionFetcherFuzzCrash04(t *testing.T) {
  1214  	// Create a channel to control when tx requests can fail
  1215  	proceed := make(chan struct{})
  1216  
  1217  	testTransactionFetcherParallel(t, txFetcherTest{
  1218  		init: func() *TxFetcher {
  1219  			return NewTxFetcher(
  1220  				func(common.Hash) bool { return false },
  1221  				func(txs []*types.Transaction) []error {
  1222  					return make([]error, len(txs))
  1223  				},
  1224  				func(string, []common.Hash) error {
  1225  					<-proceed
  1226  					return errors.New("peer disconnected")
  1227  				},
  1228  			)
  1229  		},
  1230  		steps: []interface{}{
  1231  			// Get a transaction into fetching mode and make it dangling with a broadcast
  1232  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
  1233  			doWait{time: txArriveTimeout, step: true},
  1234  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
  1235  
  1236  			// Notify the dangling transaction once more, re-fetch, and crash via an in-flight disconnect
  1237  			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
  1238  			doWait{time: txArriveTimeout, step: true},
  1239  			doFunc(func() {
  1240  				proceed <- struct{}{} // Allow peer A to return the failure
  1241  			}),
  1242  			doWait{time: 0, step: true},
  1243  			doWait{time: txFetchTimeout, step: true},
  1244  		},
  1245  	})
  1246  }
  1247  
  1248  func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) {
  1249  	t.Parallel()
  1250  	testTransactionFetcher(t, tt)
  1251  }
  1252  
  1253  func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
  1254  	// Create a fetcher and hook into its simulated fields
  1255  	clock := new(mclock.Simulated)
  1256  	wait := make(chan struct{})
  1257  
  1258  	fetcher := tt.init()
  1259  	fetcher.clock = clock
  1260  	fetcher.step = wait
  1261  	fetcher.rand = rand.New(rand.NewSource(0x3a29))
  1262  
  1263  	fetcher.Start()
  1264  	defer fetcher.Stop()
  1265  
  1266  	// Crunch through all the test steps and execute them
  1267  	for i, step := range tt.steps {
  1268  		switch step := step.(type) {
  1269  		case doTxNotify:
  1270  			if err := fetcher.Notify(step.peer, step.hashes); err != nil {
  1271  				t.Errorf("step %d: %v", i, err)
  1272  			}
  1273  			<-wait // Fetcher needs to process this, wait until it's done
  1274  			select {
  1275  			case <-wait:
  1276  				panic("wtf")
  1277  			case <-time.After(time.Millisecond):
  1278  			}
  1279  
  1280  		case doTxEnqueue:
  1281  			if err := fetcher.Enqueue(step.peer, step.txs, step.direct); err != nil {
  1282  				t.Errorf("step %d: %v", i, err)
  1283  			}
  1284  			<-wait // Fetcher needs to process this, wait until it's done
  1285  
  1286  		case doWait:
  1287  			clock.Run(step.time)
  1288  			if step.step {
  1289  				<-wait // Fetcher supposed to do something, wait until it's done
  1290  			}
  1291  
  1292  		case doDrop:
  1293  			if err := fetcher.Drop(string(step)); err != nil {
  1294  				t.Errorf("step %d: %v", i, err)
  1295  			}
  1296  			<-wait // Fetcher needs to process this, wait until it's done
  1297  
  1298  		case doFunc:
  1299  			step()
  1300  
  1301  		case isWaiting:
  1302  			// We need to check that the waiting list (stage 1) internals
  1303  			// match the expected set. Check the peer->hash mappings
  1304  			// first.
  1305  			for peer, hashes := range step {
  1306  				waiting := fetcher.waitslots[peer]
  1307  				if waiting == nil {
  1308  					t.Errorf("step %d: peer %s missing from waitslots", i, peer)
  1309  					continue
  1310  				}
  1311  				for _, hash := range hashes {
  1312  					if _, ok := waiting[hash]; !ok {
  1313  						t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, hash)
  1314  					}
  1315  				}
  1316  				for hash := range waiting {
  1317  					if !containsHash(hashes, hash) {
  1318  						t.Errorf("step %d, peer %s: hash %x extra in waitslots", i, peer, hash)
  1319  					}
  1320  				}
  1321  			}
  1322  			for peer := range fetcher.waitslots {
  1323  				if _, ok := step[peer]; !ok {
  1324  					t.Errorf("step %d: peer %s extra in waitslots", i, peer)
  1325  				}
  1326  			}
  1327  			// Peer->hash sets correct, check the hash->peer and timeout sets
  1328  			for peer, hashes := range step {
  1329  				for _, hash := range hashes {
  1330  					if _, ok := fetcher.waitlist[hash][peer]; !ok {
  1331  						t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, hash, peer)
  1332  					}
  1333  					if _, ok := fetcher.waittime[hash]; !ok {
  1334  						t.Errorf("step %d: hash %x missing from waittime", i, hash)
  1335  					}
  1336  				}
  1337  			}
  1338  			for hash, peers := range fetcher.waitlist {
  1339  				if len(peers) == 0 {
  1340  					t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash)
  1341  				}
  1342  				for peer := range peers {
  1343  					if !containsHash(step[peer], hash) {
  1344  						t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer)
  1345  					}
  1346  				}
  1347  			}
  1348  			for hash := range fetcher.waittime {
  1349  				var found bool
  1350  				for _, hashes := range step {
  1351  					if containsHash(hashes, hash) {
  1352  						found = true
  1353  						break
  1354  					}
  1355  				}
  1356  				if !found {
  1357  					t.Errorf("step %d: hash %x extra in waittime", i, hash)
  1358  				}
  1359  			}
  1360  
  1361  		case isScheduled:
  1362  			// Check that all scheduled announces are accounted for and no
  1363  			// extra ones are present.
  1364  			for peer, hashes := range step.tracking {
  1365  				scheduled := fetcher.announces[peer]
  1366  				if scheduled == nil {
  1367  					t.Errorf("step %d: peer %s missing from announces", i, peer)
  1368  					continue
  1369  				}
  1370  				for _, hash := range hashes {
  1371  					if _, ok := scheduled[hash]; !ok {
  1372  						t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, hash)
  1373  					}
  1374  				}
  1375  				for hash := range scheduled {
  1376  					if !containsHash(hashes, hash) {
  1377  						t.Errorf("step %d, peer %s: hash %x extra in announces", i, peer, hash)
  1378  					}
  1379  				}
  1380  			}
  1381  			for peer := range fetcher.announces {
  1382  				if _, ok := step.tracking[peer]; !ok {
  1383  					t.Errorf("step %d: peer %s extra in announces", i, peer)
  1384  				}
  1385  			}
  1386  			// Check that all announces required to be fetching are in the
  1387  			// appropriate sets
  1388  			for peer, hashes := range step.fetching {
  1389  				request := fetcher.requests[peer]
  1390  				if request == nil {
  1391  					t.Errorf("step %d: peer %s missing from requests", i, peer)
  1392  					continue
  1393  				}
  1394  				for _, hash := range hashes {
  1395  					if !containsHash(request.hashes, hash) {
  1396  						t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash)
  1397  					}
  1398  				}
  1399  				for _, hash := range request.hashes {
  1400  					if !containsHash(hashes, hash) {
  1401  						t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash)
  1402  					}
  1403  				}
  1404  			}
  1405  			for peer := range fetcher.requests {
  1406  				if _, ok := step.fetching[peer]; !ok {
  1407  					if _, ok := step.dangling[peer]; !ok {
  1408  						t.Errorf("step %d: peer %s extra in requests", i, peer)
  1409  					}
  1410  				}
  1411  			}
  1412  			for peer, hashes := range step.fetching {
  1413  				for _, hash := range hashes {
  1414  					if _, ok := fetcher.fetching[hash]; !ok {
  1415  						t.Errorf("step %d, peer %s: hash %x missing from fetching", i, peer, hash)
  1416  					}
  1417  				}
  1418  			}
  1419  			for hash := range fetcher.fetching {
  1420  				var found bool
  1421  				for _, req := range fetcher.requests {
  1422  					if containsHash(req.hashes, hash) {
  1423  						found = true
  1424  						break
  1425  					}
  1426  				}
  1427  				if !found {
  1428  					t.Errorf("step %d: hash %x extra in fetching", i, hash)
  1429  				}
  1430  			}
  1431  			for _, hashes := range step.fetching {
  1432  				for _, hash := range hashes {
  1433  					alternates := fetcher.alternates[hash]
  1434  					if alternates == nil {
  1435  						t.Errorf("step %d: hash %x missing from alternates", i, hash)
  1436  						continue
  1437  					}
  1438  					for peer := range alternates {
  1439  						if _, ok := fetcher.announces[peer]; !ok {
  1440  							t.Errorf("step %d: peer %s extra in alternates", i, peer)
  1441  							continue
  1442  						}
  1443  						if _, ok := fetcher.announces[peer][hash]; !ok {
  1444  							t.Errorf("step %d, peer %s: hash %x extra in alternates", i, peer, hash)
  1445  							continue
  1446  						}
  1447  					}
  1448  					for p := range fetcher.announced[hash] {
  1449  						if _, ok := alternates[p]; !ok {
  1450  							t.Errorf("step %d, hash %x: peer %s missing from alternates", i, hash, p)
  1451  							continue
  1452  						}
  1453  					}
  1454  				}
  1455  			}
  1456  			for peer, hashes := range step.dangling {
  1457  				request := fetcher.requests[peer]
  1458  				if request == nil {
  1459  					t.Errorf("step %d: peer %s missing from requests", i, peer)
  1460  					continue
  1461  				}
  1462  				for _, hash := range hashes {
  1463  					if !containsHash(request.hashes, hash) {
  1464  						t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash)
  1465  					}
  1466  				}
  1467  				for _, hash := range request.hashes {
  1468  					if !containsHash(hashes, hash) {
  1469  						t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash)
  1470  					}
  1471  				}
  1472  			}
  1473  			// Check that all transaction announces that are scheduled for
  1474  			// retrieval but not actively being downloaded are tracked only
  1475  			// in the stage 2 `announced` map.
  1476  			var queued []common.Hash
  1477  			for _, hashes := range step.tracking {
  1478  				for _, hash := range hashes {
  1479  					var found bool
  1480  					for _, hs := range step.fetching {
  1481  						if containsHash(hs, hash) {
  1482  							found = true
  1483  							break
  1484  						}
  1485  					}
  1486  					if !found {
  1487  						queued = append(queued, hash)
  1488  					}
  1489  				}
  1490  			}
  1491  			for _, hash := range queued {
  1492  				if _, ok := fetcher.announced[hash]; !ok {
  1493  					t.Errorf("step %d: hash %x missing from announced", i, hash)
  1494  				}
  1495  			}
  1496  			for hash := range fetcher.announced {
  1497  				if !containsHash(queued, hash) {
  1498  					t.Errorf("step %d: hash %x extra in announced", i, hash)
  1499  				}
  1500  			}
  1501  
  1502  		case isUnderpriced:
  1503  			if fetcher.underpriced.Cardinality() != int(step) {
  1504  				t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Cardinality(), step)
  1505  			}
  1506  
  1507  		default:
  1508  			t.Fatalf("step %d: unknown step type %T", i, step)
  1509  		}
  1510  		// After every step, cross validate the internal uniqueness invariants
  1511  		// between stage one and stage two.
  1512  		for hash := range fetcher.waittime {
  1513  			if _, ok := fetcher.announced[hash]; ok {
  1514  				t.Errorf("step %d: hash %s present in both stage 1 and 2", i, hash)
  1515  			}
  1516  		}
  1517  	}
  1518  }
  1519  
  1520  // containsHash returns whether a hash is contained within a hash slice.
  1521  func containsHash(slice []common.Hash, hash common.Hash) bool {
  1522  	for _, have := range slice {
  1523  		if have == hash {
  1524  			return true
  1525  		}
  1526  	}
  1527  	return false
  1528  }
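
// hashesOf collects the hashes of a slice of transactions. It is not used by
// the scenarios above; it is only an illustrative sketch of the pattern used
// to derive testTxsHashes and the hash lists in the underpriced DoS test.
func hashesOf(txs []*types.Transaction) []common.Hash {
	hashes := make([]common.Hash, len(txs))
	for i, tx := range txs {
		hashes[i] = tx.Hash()
	}
	return hashes
}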