github.com/palisadeinc/bor@v0.0.0-20230615125219-ab7196213d15/eth/fetcher/tx_fetcher_test.go

     1  // Copyright 2020 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package fetcher
    18  
    19  import (
    20  	"errors"
    21  	"math/big"
    22  	"math/rand"
    23  	"testing"
    24  	"time"
    25  
    26  	"github.com/ethereum/go-ethereum/common"
    27  	"github.com/ethereum/go-ethereum/common/mclock"
    28  	"github.com/ethereum/go-ethereum/core"
    29  	"github.com/ethereum/go-ethereum/core/types"
    30  )
    31  
    32  var (
    33  	// testTxs is a set of transactions to use during testing that have meaningful hashes.
    34  	testTxs = []*types.Transaction{
    35  		types.NewTransaction(5577006791947779410, common.Address{0x0f}, new(big.Int), 0, new(big.Int), nil),
    36  		types.NewTransaction(15352856648520921629, common.Address{0xbb}, new(big.Int), 0, new(big.Int), nil),
    37  		types.NewTransaction(3916589616287113937, common.Address{0x86}, new(big.Int), 0, new(big.Int), nil),
    38  		types.NewTransaction(9828766684487745566, common.Address{0xac}, new(big.Int), 0, new(big.Int), nil),
    39  	}
    40  	// testTxsHashes are the hashes of the test transactions above.
    41  	testTxsHashes     = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
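        	// testTxArrivalWait is the arrival timeout passed to every fetcher
        	// constructed in these tests; doWait steps advance the simulated clock
        	// by this amount to expire the wait list.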
    42  	testTxArrivalWait = 500 * time.Millisecond
    43  )
    44  
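        // The do* types below are the actions a test scenario can inject into the
        // fetcher under test: announcing hashes, enqueueing (delivering) transactions,
        // advancing the simulated clock, dropping a peer, or running an arbitrary
        // callback.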
    45  type doTxNotify struct {
    46  	peer   string
    47  	hashes []common.Hash
    48  }
    49  type doTxEnqueue struct {
    50  	peer   string
    51  	txs    []*types.Transaction
    52  	direct bool
    53  }
    54  type doWait struct {
    55  	time time.Duration
    56  	step bool
    57  }
    58  type doDrop string
    59  type doFunc func()
    60  
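        // The is* types are assertions checked against the fetcher's internal state:
        // isWaiting is the expected per-peer stage-1 wait list, isScheduled the
        // stage-2 tracking/fetching sets plus any dangling (still open but no longer
        // actively fetched) requests, and isUnderpriced the expected size of the
        // underpriced set.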
    61  type isWaiting map[string][]common.Hash
    62  type isScheduled struct {
    63  	tracking map[string][]common.Hash
    64  	fetching map[string][]common.Hash
    65  	dangling map[string][]common.Hash
    66  }
    67  type isUnderpriced int
    68  
    69  // txFetcherTest represents a test scenario that can be executed by the test
    70  // runner.
    71  type txFetcherTest struct {
    72  	init  func() *TxFetcher
    73  	steps []interface{}
    74  }
    75  
    76  // Tests that transaction announcements are added to a waitlist, and none
    77  // of them are scheduled for retrieval until the wait expires.
    78  func TestTransactionFetcherWaiting(t *testing.T) {
    79  	testTransactionFetcherParallel(t, txFetcherTest{
    80  		init: func() *TxFetcher {
    81  			return NewTxFetcher(
    82  				func(common.Hash) bool { return false },
    83  				nil,
    84  				func(string, []common.Hash) error { return nil },
    85  				testTxArrivalWait,
    86  			)
    87  		},
    88  		steps: []interface{}{
    89  			// Initial announcement to get something into the waitlist
    90  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
    91  			isWaiting(map[string][]common.Hash{
    92  				"A": {{0x01}, {0x02}},
    93  			}),
    94  			// Announce from a new peer to check that no overwrite happens
    95  			doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}},
    96  			isWaiting(map[string][]common.Hash{
    97  				"A": {{0x01}, {0x02}},
    98  				"B": {{0x03}, {0x04}},
    99  			}),
   100  			// Announce clashing hashes but unique new peer
   101  			doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}},
   102  			isWaiting(map[string][]common.Hash{
   103  				"A": {{0x01}, {0x02}},
   104  				"B": {{0x03}, {0x04}},
   105  				"C": {{0x01}, {0x04}},
   106  			}),
   107  			// Announce existing and clashing hashes from existing peer
   108  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}},
   109  			isWaiting(map[string][]common.Hash{
   110  				"A": {{0x01}, {0x02}, {0x03}, {0x05}},
   111  				"B": {{0x03}, {0x04}},
   112  				"C": {{0x01}, {0x04}},
   113  			}),
   114  			isScheduled{tracking: nil, fetching: nil},
   115  
   116  			// Wait for the arrival timeout which should move all expired items
   117  			// from the wait list to the scheduler
   118  			doWait{time: testTxArrivalWait, step: true},
   119  			isWaiting(nil),
   120  			isScheduled{
   121  				tracking: map[string][]common.Hash{
   122  					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
   123  					"B": {{0x03}, {0x04}},
   124  					"C": {{0x01}, {0x04}},
   125  				},
   126  				fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
   127  					"A": {{0x02}, {0x03}, {0x05}},
   128  					"C": {{0x01}, {0x04}},
   129  				},
   130  			},
   131  			// Queue up a non-fetchable transaction and then trigger it with a new
   132  			// peer (weird case to test 1 line in the fetcher)
   133  			doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}},
   134  			isWaiting(map[string][]common.Hash{
   135  				"C": {{0x06}, {0x07}},
   136  			}),
   137  			doWait{time: testTxArrivalWait, step: true},
   138  			isScheduled{
   139  				tracking: map[string][]common.Hash{
   140  					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
   141  					"B": {{0x03}, {0x04}},
   142  					"C": {{0x01}, {0x04}, {0x06}, {0x07}},
   143  				},
   144  				fetching: map[string][]common.Hash{
   145  					"A": {{0x02}, {0x03}, {0x05}},
   146  					"C": {{0x01}, {0x04}},
   147  				},
   148  			},
   149  			doTxNotify{peer: "D", hashes: []common.Hash{{0x06}, {0x07}}},
   150  			isScheduled{
   151  				tracking: map[string][]common.Hash{
   152  					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
   153  					"B": {{0x03}, {0x04}},
   154  					"C": {{0x01}, {0x04}, {0x06}, {0x07}},
   155  					"D": {{0x06}, {0x07}},
   156  				},
   157  				fetching: map[string][]common.Hash{
   158  					"A": {{0x02}, {0x03}, {0x05}},
   159  					"C": {{0x01}, {0x04}},
   160  					"D": {{0x06}, {0x07}},
   161  				},
   162  			},
   163  		},
   164  	})
   165  }
   166  
   167  // Tests that transaction announcements skip the waiting list if they are
   168  // already scheduled.
   169  func TestTransactionFetcherSkipWaiting(t *testing.T) {
   170  	testTransactionFetcherParallel(t, txFetcherTest{
   171  		init: func() *TxFetcher {
   172  			return NewTxFetcher(
   173  				func(common.Hash) bool { return false },
   174  				nil,
   175  				func(string, []common.Hash) error { return nil },
   176  				testTxArrivalWait,
   177  			)
   178  		},
   179  		steps: []interface{}{
   180  			// Push an initial announcement through to the scheduled stage
   181  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
   182  			isWaiting(map[string][]common.Hash{
   183  				"A": {{0x01}, {0x02}},
   184  			}),
   185  			isScheduled{tracking: nil, fetching: nil},
   186  
   187  			doWait{time: testTxArrivalWait, step: true},
   188  			isWaiting(nil),
   189  			isScheduled{
   190  				tracking: map[string][]common.Hash{
   191  					"A": {{0x01}, {0x02}},
   192  				},
   193  				fetching: map[string][]common.Hash{
   194  					"A": {{0x01}, {0x02}},
   195  				},
   196  			},
   197  			// Announce overlaps from the same peer, ensure the new ones end up
   198  			// in stage one, and clashing ones don't get double tracked
   199  			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}, {0x03}}},
   200  			isWaiting(map[string][]common.Hash{
   201  				"A": {{0x03}},
   202  			}),
   203  			isScheduled{
   204  				tracking: map[string][]common.Hash{
   205  					"A": {{0x01}, {0x02}},
   206  				},
   207  				fetching: map[string][]common.Hash{
   208  					"A": {{0x01}, {0x02}},
   209  				},
   210  			},
   211  			// Announce overlaps from a new peer, ensure new transactions end up
   212  			// in stage one and clashing ones get tracked for the new peer
   213  			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x04}}},
   214  			isWaiting(map[string][]common.Hash{
   215  				"A": {{0x03}},
   216  				"B": {{0x03}, {0x04}},
   217  			}),
   218  			isScheduled{
   219  				tracking: map[string][]common.Hash{
   220  					"A": {{0x01}, {0x02}},
   221  					"B": {{0x02}},
   222  				},
   223  				fetching: map[string][]common.Hash{
   224  					"A": {{0x01}, {0x02}},
   225  				},
   226  			},
   227  		},
   228  	})
   229  }
   230  
   231  // Tests that only a single transaction request gets scheduled to a peer
   232  // and subsequent announces block or get allotted to someone else.
   233  func TestTransactionFetcherSingletonRequesting(t *testing.T) {
   234  	testTransactionFetcherParallel(t, txFetcherTest{
   235  		init: func() *TxFetcher {
   236  			return NewTxFetcher(
   237  				func(common.Hash) bool { return false },
   238  				nil,
   239  				func(string, []common.Hash) error { return nil },
   240  				testTxArrivalWait,
   241  			)
   242  		},
   243  		steps: []interface{}{
   244  			// Push an initial announcement through to the scheduled stage
   245  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
   246  			isWaiting(map[string][]common.Hash{
   247  				"A": {{0x01}, {0x02}},
   248  			}),
   249  			isScheduled{tracking: nil, fetching: nil},
   250  
   251  			doWait{time: testTxArrivalWait, step: true},
   252  			isWaiting(nil),
   253  			isScheduled{
   254  				tracking: map[string][]common.Hash{
   255  					"A": {{0x01}, {0x02}},
   256  				},
   257  				fetching: map[string][]common.Hash{
   258  					"A": {{0x01}, {0x02}},
   259  				},
   260  			},
   261  			// Announce a new set of transactions from the same peer and ensure
   262  			// they do not start fetching since the peer is already busy
   263  			doTxNotify{peer: "A", hashes: []common.Hash{{0x03}, {0x04}}},
   264  			isWaiting(map[string][]common.Hash{
   265  				"A": {{0x03}, {0x04}},
   266  			}),
   267  			isScheduled{
   268  				tracking: map[string][]common.Hash{
   269  					"A": {{0x01}, {0x02}},
   270  				},
   271  				fetching: map[string][]common.Hash{
   272  					"A": {{0x01}, {0x02}},
   273  				},
   274  			},
   275  			doWait{time: testTxArrivalWait, step: true},
   276  			isWaiting(nil),
   277  			isScheduled{
   278  				tracking: map[string][]common.Hash{
   279  					"A": {{0x01}, {0x02}, {0x03}, {0x04}},
   280  				},
   281  				fetching: map[string][]common.Hash{
   282  					"A": {{0x01}, {0x02}},
   283  				},
   284  			},
   285  			// Announce a duplicate set of transactions from a new peer and ensure
   286  			// uniquely new ones start downloading, even if clashing.
   287  			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x05}, {0x06}}},
   288  			isWaiting(map[string][]common.Hash{
   289  				"B": {{0x05}, {0x06}},
   290  			}),
   291  			isScheduled{
   292  				tracking: map[string][]common.Hash{
   293  					"A": {{0x01}, {0x02}, {0x03}, {0x04}},
   294  					"B": {{0x02}, {0x03}},
   295  				},
   296  				fetching: map[string][]common.Hash{
   297  					"A": {{0x01}, {0x02}},
   298  					"B": {{0x03}},
   299  				},
   300  			},
   301  		},
   302  	})
   303  }
   304  
   305  // Tests that if a transaction retrieval fails, all the transactions get
    306  // instantly scheduled back to someone else or the announcements are dropped
   307  // if no alternate source is available.
   308  func TestTransactionFetcherFailedRescheduling(t *testing.T) {
   309  	// Create a channel to control when tx requests can fail
   310  	proceed := make(chan struct{})
   311  
   312  	testTransactionFetcherParallel(t, txFetcherTest{
   313  		init: func() *TxFetcher {
   314  			return NewTxFetcher(
   315  				func(common.Hash) bool { return false },
   316  				nil,
   317  				func(origin string, hashes []common.Hash) error {
   318  					<-proceed
   319  					return errors.New("peer disconnected")
   320  				},
   321  				testTxArrivalWait,
   322  			)
   323  		},
   324  		steps: []interface{}{
   325  			// Push an initial announcement through to the scheduled stage
   326  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
   327  			isWaiting(map[string][]common.Hash{
   328  				"A": {{0x01}, {0x02}},
   329  			}),
   330  			isScheduled{tracking: nil, fetching: nil},
   331  
   332  			doWait{time: testTxArrivalWait, step: true},
   333  			isWaiting(nil),
   334  			isScheduled{
   335  				tracking: map[string][]common.Hash{
   336  					"A": {{0x01}, {0x02}},
   337  				},
   338  				fetching: map[string][]common.Hash{
   339  					"A": {{0x01}, {0x02}},
   340  				},
   341  			},
    342  			// While the original peer is stuck in the request, push in a second
   343  			// data source.
   344  			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
   345  			isWaiting(nil),
   346  			isScheduled{
   347  				tracking: map[string][]common.Hash{
   348  					"A": {{0x01}, {0x02}},
   349  					"B": {{0x02}},
   350  				},
   351  				fetching: map[string][]common.Hash{
   352  					"A": {{0x01}, {0x02}},
   353  				},
   354  			},
   355  			// Wait until the original request fails and check that transactions
   356  			// are either rescheduled or dropped
   357  			doFunc(func() {
   358  				proceed <- struct{}{} // Allow peer A to return the failure
   359  			}),
   360  			doWait{time: 0, step: true},
   361  			isWaiting(nil),
   362  			isScheduled{
   363  				tracking: map[string][]common.Hash{
   364  					"B": {{0x02}},
   365  				},
   366  				fetching: map[string][]common.Hash{
   367  					"B": {{0x02}},
   368  				},
   369  			},
   370  			doFunc(func() {
   371  				proceed <- struct{}{} // Allow peer B to return the failure
   372  			}),
   373  			doWait{time: 0, step: true},
   374  			isWaiting(nil),
   375  			isScheduled{nil, nil, nil},
   376  		},
   377  	})
   378  }
   379  
   380  // Tests that if a transaction retrieval succeeds, all alternate origins
   381  // are cleaned up.
   382  func TestTransactionFetcherCleanup(t *testing.T) {
   383  	testTransactionFetcherParallel(t, txFetcherTest{
   384  		init: func() *TxFetcher {
   385  			return NewTxFetcher(
   386  				func(common.Hash) bool { return false },
   387  				func(txs []*types.Transaction) []error {
   388  					return make([]error, len(txs))
   389  				},
   390  				func(string, []common.Hash) error { return nil },
   391  				testTxArrivalWait,
   392  			)
   393  		},
   394  		steps: []interface{}{
   395  			// Push an initial announcement through to the scheduled stage
   396  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
   397  			isWaiting(map[string][]common.Hash{
   398  				"A": {testTxsHashes[0]},
   399  			}),
   400  			isScheduled{tracking: nil, fetching: nil},
   401  
   402  			doWait{time: testTxArrivalWait, step: true},
   403  			isWaiting(nil),
   404  			isScheduled{
   405  				tracking: map[string][]common.Hash{
   406  					"A": {testTxsHashes[0]},
   407  				},
   408  				fetching: map[string][]common.Hash{
   409  					"A": {testTxsHashes[0]},
   410  				},
   411  			},
   412  			// Request should be delivered
   413  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
   414  			isScheduled{nil, nil, nil},
   415  		},
   416  	})
   417  }
   418  
   419  // Tests that if a transaction retrieval succeeds, but the response is empty (no
    420  // transactions available), then all are nuked instead of being rescheduled (yes,
    421  // this was a bug).
   422  func TestTransactionFetcherCleanupEmpty(t *testing.T) {
   423  	testTransactionFetcherParallel(t, txFetcherTest{
   424  		init: func() *TxFetcher {
   425  			return NewTxFetcher(
   426  				func(common.Hash) bool { return false },
   427  				func(txs []*types.Transaction) []error {
   428  					return make([]error, len(txs))
   429  				},
   430  				func(string, []common.Hash) error { return nil },
   431  				testTxArrivalWait,
   432  			)
   433  		},
   434  		steps: []interface{}{
   435  			// Push an initial announcement through to the scheduled stage
   436  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
   437  			isWaiting(map[string][]common.Hash{
   438  				"A": {testTxsHashes[0]},
   439  			}),
   440  			isScheduled{tracking: nil, fetching: nil},
   441  
   442  			doWait{time: testTxArrivalWait, step: true},
   443  			isWaiting(nil),
   444  			isScheduled{
   445  				tracking: map[string][]common.Hash{
   446  					"A": {testTxsHashes[0]},
   447  				},
   448  				fetching: map[string][]common.Hash{
   449  					"A": {testTxsHashes[0]},
   450  				},
   451  			},
   452  			// Deliver an empty response and ensure the transaction is cleared, not rescheduled
   453  			doTxEnqueue{peer: "A", txs: []*types.Transaction{}, direct: true},
   454  			isScheduled{nil, nil, nil},
   455  		},
   456  	})
   457  }
   458  
   459  // Tests that non-returned transactions are either re-scheduled from a
    460  // different peer, or re-requested from the same peer if past the cutoff point.
   461  func TestTransactionFetcherMissingRescheduling(t *testing.T) {
   462  	testTransactionFetcherParallel(t, txFetcherTest{
   463  		init: func() *TxFetcher {
   464  			return NewTxFetcher(
   465  				func(common.Hash) bool { return false },
   466  				func(txs []*types.Transaction) []error {
   467  					return make([]error, len(txs))
   468  				},
   469  				func(string, []common.Hash) error { return nil },
   470  				testTxArrivalWait,
   471  			)
   472  		},
   473  		steps: []interface{}{
   474  			// Push an initial announcement through to the scheduled stage
   475  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}},
   476  			isWaiting(map[string][]common.Hash{
   477  				"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
   478  			}),
   479  			isScheduled{tracking: nil, fetching: nil},
   480  
   481  			doWait{time: testTxArrivalWait, step: true},
   482  			isWaiting(nil),
   483  			isScheduled{
   484  				tracking: map[string][]common.Hash{
   485  					"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
   486  				},
   487  				fetching: map[string][]common.Hash{
   488  					"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
   489  				},
   490  			},
   491  			// Deliver the middle transaction requested, the one before which
   492  			// should be dropped and the one after re-requested.
   493  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true}, // This depends on the deterministic random
   494  			isScheduled{
   495  				tracking: map[string][]common.Hash{
   496  					"A": {testTxsHashes[2]},
   497  				},
   498  				fetching: map[string][]common.Hash{
   499  					"A": {testTxsHashes[2]},
   500  				},
   501  			},
   502  		},
   503  	})
   504  }
   505  
   506  // Tests that out of two transactions, if one is missing and the last is
   507  // delivered, the peer gets properly cleaned out from the internal state.
   508  func TestTransactionFetcherMissingCleanup(t *testing.T) {
   509  	testTransactionFetcherParallel(t, txFetcherTest{
   510  		init: func() *TxFetcher {
   511  			return NewTxFetcher(
   512  				func(common.Hash) bool { return false },
   513  				func(txs []*types.Transaction) []error {
   514  					return make([]error, len(txs))
   515  				},
   516  				func(string, []common.Hash) error { return nil },
   517  				testTxArrivalWait,
   518  			)
   519  		},
   520  		steps: []interface{}{
   521  			// Push an initial announcement through to the scheduled stage
   522  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
   523  			isWaiting(map[string][]common.Hash{
   524  				"A": {testTxsHashes[0], testTxsHashes[1]},
   525  			}),
   526  			isScheduled{tracking: nil, fetching: nil},
   527  
   528  			doWait{time: testTxArrivalWait, step: true},
   529  			isWaiting(nil),
   530  			isScheduled{
   531  				tracking: map[string][]common.Hash{
   532  					"A": {testTxsHashes[0], testTxsHashes[1]},
   533  				},
   534  				fetching: map[string][]common.Hash{
   535  					"A": {testTxsHashes[0], testTxsHashes[1]},
   536  				},
   537  			},
    538  			// Deliver only the second of the two requested transactions; the missing
    539  			// one has no alternate source, so the peer entry gets cleaned out entirely.
   540  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true}, // This depends on the deterministic random
   541  			isScheduled{nil, nil, nil},
   542  		},
   543  	})
   544  }
   545  
   546  // Tests that transaction broadcasts properly clean up announcements.
   547  func TestTransactionFetcherBroadcasts(t *testing.T) {
   548  	testTransactionFetcherParallel(t, txFetcherTest{
   549  		init: func() *TxFetcher {
   550  			return NewTxFetcher(
   551  				func(common.Hash) bool { return false },
   552  				func(txs []*types.Transaction) []error {
   553  					return make([]error, len(txs))
   554  				},
   555  				func(string, []common.Hash) error { return nil },
   556  				testTxArrivalWait,
   557  			)
   558  		},
   559  		steps: []interface{}{
    560  			// Set up three transactions to be in different states: waiting, queued and fetching
   561  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
   562  			doWait{time: testTxArrivalWait, step: true},
   563  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
   564  			doWait{time: testTxArrivalWait, step: true},
   565  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},
   566  
   567  			isWaiting(map[string][]common.Hash{
   568  				"A": {testTxsHashes[2]},
   569  			}),
   570  			isScheduled{
   571  				tracking: map[string][]common.Hash{
   572  					"A": {testTxsHashes[0], testTxsHashes[1]},
   573  				},
   574  				fetching: map[string][]common.Hash{
   575  					"A": {testTxsHashes[0]},
   576  				},
   577  			},
   578  			// Broadcast all the transactions and ensure everything gets cleaned
   579  			// up, but the dangling request is left alone to avoid doing multiple
   580  			// concurrent requests.
   581  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: false},
   582  			isWaiting(nil),
   583  			isScheduled{
   584  				tracking: nil,
   585  				fetching: nil,
   586  				dangling: map[string][]common.Hash{
   587  					"A": {testTxsHashes[0]},
   588  				},
   589  			},
   590  			// Deliver the requested hashes
   591  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: true},
   592  			isScheduled{nil, nil, nil},
   593  		},
   594  	})
   595  }
   596  
   597  // Tests that the waiting list timers properly reset and reschedule.
   598  func TestTransactionFetcherWaitTimerResets(t *testing.T) {
   599  	testTransactionFetcherParallel(t, txFetcherTest{
   600  		init: func() *TxFetcher {
   601  			return NewTxFetcher(
   602  				func(common.Hash) bool { return false },
   603  				nil,
   604  				func(string, []common.Hash) error { return nil },
   605  				testTxArrivalWait,
   606  			)
   607  		},
   608  		steps: []interface{}{
   609  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
   610  			isWaiting(map[string][]common.Hash{
   611  				"A": {{0x01}},
   612  			}),
   613  			isScheduled{nil, nil, nil},
   614  			doWait{time: testTxArrivalWait / 2, step: false},
   615  			isWaiting(map[string][]common.Hash{
   616  				"A": {{0x01}},
   617  			}),
   618  			isScheduled{nil, nil, nil},
   619  
   620  			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
   621  			isWaiting(map[string][]common.Hash{
   622  				"A": {{0x01}, {0x02}},
   623  			}),
   624  			isScheduled{nil, nil, nil},
   625  			doWait{time: testTxArrivalWait / 2, step: true},
   626  			isWaiting(map[string][]common.Hash{
   627  				"A": {{0x02}},
   628  			}),
   629  			isScheduled{
   630  				tracking: map[string][]common.Hash{
   631  					"A": {{0x01}},
   632  				},
   633  				fetching: map[string][]common.Hash{
   634  					"A": {{0x01}},
   635  				},
   636  			},
   637  
   638  			doWait{time: testTxArrivalWait / 2, step: true},
   639  			isWaiting(nil),
   640  			isScheduled{
   641  				tracking: map[string][]common.Hash{
   642  					"A": {{0x01}, {0x02}},
   643  				},
   644  				fetching: map[string][]common.Hash{
   645  					"A": {{0x01}},
   646  				},
   647  			},
   648  		},
   649  	})
   650  }
   651  
   652  // Tests that if a transaction request is not replied to, it will time
   653  // out and be re-scheduled for someone else.
   654  func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
   655  	testTransactionFetcherParallel(t, txFetcherTest{
   656  		init: func() *TxFetcher {
   657  			return NewTxFetcher(
   658  				func(common.Hash) bool { return false },
   659  				func(txs []*types.Transaction) []error {
   660  					return make([]error, len(txs))
   661  				},
   662  				func(string, []common.Hash) error { return nil },
   663  				testTxArrivalWait,
   664  			)
   665  		},
   666  		steps: []interface{}{
   667  			// Push an initial announcement through to the scheduled stage
   668  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
   669  			isWaiting(map[string][]common.Hash{
   670  				"A": {testTxsHashes[0]},
   671  			}),
   672  			isScheduled{tracking: nil, fetching: nil},
   673  
   674  			doWait{time: testTxArrivalWait, step: true},
   675  			isWaiting(nil),
   676  			isScheduled{
   677  				tracking: map[string][]common.Hash{
   678  					"A": {testTxsHashes[0]},
   679  				},
   680  				fetching: map[string][]common.Hash{
   681  					"A": {testTxsHashes[0]},
   682  				},
   683  			},
   684  			// Wait until the delivery times out, everything should be cleaned up
   685  			doWait{time: txFetchTimeout, step: true},
   686  			isWaiting(nil),
   687  			isScheduled{
   688  				tracking: nil,
   689  				fetching: nil,
   690  				dangling: map[string][]common.Hash{
   691  					"A": {},
   692  				},
   693  			},
   694  			// Ensure that followup announcements don't get scheduled
   695  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
   696  			doWait{time: testTxArrivalWait, step: true},
   697  			isScheduled{
   698  				tracking: map[string][]common.Hash{
   699  					"A": {testTxsHashes[1]},
   700  				},
   701  				fetching: nil,
   702  				dangling: map[string][]common.Hash{
   703  					"A": {},
   704  				},
   705  			},
   706  			// If the dangling request arrives a bit later, do not choke
   707  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
   708  			isWaiting(nil),
   709  			isScheduled{
   710  				tracking: map[string][]common.Hash{
   711  					"A": {testTxsHashes[1]},
   712  				},
   713  				fetching: map[string][]common.Hash{
   714  					"A": {testTxsHashes[1]},
   715  				},
   716  			},
   717  		},
   718  	})
   719  }
   720  
   721  // Tests that the fetching timeout timers properly reset and reschedule.
   722  func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
   723  	testTransactionFetcherParallel(t, txFetcherTest{
   724  		init: func() *TxFetcher {
   725  			return NewTxFetcher(
   726  				func(common.Hash) bool { return false },
   727  				nil,
   728  				func(string, []common.Hash) error { return nil },
   729  				testTxArrivalWait,
   730  			)
   731  		},
   732  		steps: []interface{}{
   733  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
   734  			doWait{time: testTxArrivalWait, step: true},
   735  			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
   736  			doWait{time: testTxArrivalWait, step: true},
   737  
   738  			isWaiting(nil),
   739  			isScheduled{
   740  				tracking: map[string][]common.Hash{
   741  					"A": {{0x01}},
   742  					"B": {{0x02}},
   743  				},
   744  				fetching: map[string][]common.Hash{
   745  					"A": {{0x01}},
   746  					"B": {{0x02}},
   747  				},
   748  			},
   749  			doWait{time: txFetchTimeout - testTxArrivalWait, step: true},
   750  			isScheduled{
   751  				tracking: map[string][]common.Hash{
   752  					"B": {{0x02}},
   753  				},
   754  				fetching: map[string][]common.Hash{
   755  					"B": {{0x02}},
   756  				},
   757  				dangling: map[string][]common.Hash{
   758  					"A": {},
   759  				},
   760  			},
   761  			doWait{time: testTxArrivalWait, step: true},
   762  			isScheduled{
   763  				tracking: nil,
   764  				fetching: nil,
   765  				dangling: map[string][]common.Hash{
   766  					"A": {},
   767  					"B": {},
   768  				},
   769  			},
   770  		},
   771  	})
   772  }
   773  
    774  // Tests that if thousands of transactions are announced, only a small
   775  // number of them will be requested at a time.
   776  func TestTransactionFetcherRateLimiting(t *testing.T) {
    777  	// Create a slew of transactions to announce
   778  	var hashes []common.Hash
   779  	for i := 0; i < maxTxAnnounces; i++ {
   780  		hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)})
   781  	}
   782  
   783  	testTransactionFetcherParallel(t, txFetcherTest{
   784  		init: func() *TxFetcher {
   785  			return NewTxFetcher(
   786  				func(common.Hash) bool { return false },
   787  				nil,
   788  				func(string, []common.Hash) error { return nil },
   789  				testTxArrivalWait,
   790  			)
   791  		},
   792  		steps: []interface{}{
   793  			// Announce all the transactions, wait a bit and ensure only a small
   794  			// percentage gets requested
   795  			doTxNotify{peer: "A", hashes: hashes},
   796  			doWait{time: testTxArrivalWait, step: true},
   797  			isWaiting(nil),
   798  			isScheduled{
   799  				tracking: map[string][]common.Hash{
   800  					"A": hashes,
   801  				},
   802  				fetching: map[string][]common.Hash{
   803  					"A": hashes[1643 : 1643+maxTxRetrievals],
   804  				},
   805  			},
   806  		},
   807  	})
   808  }
   809  
    810  // Tests that the number of transactions a peer is allowed to announce and/or
   811  // request at the same time is hard capped.
   812  func TestTransactionFetcherDoSProtection(t *testing.T) {
    813  	// Create a slew of transactions to announce
   814  	var hashesA []common.Hash
   815  	for i := 0; i < maxTxAnnounces+1; i++ {
   816  		hashesA = append(hashesA, common.Hash{0x01, byte(i / 256), byte(i % 256)})
   817  	}
   818  	var hashesB []common.Hash
   819  	for i := 0; i < maxTxAnnounces+1; i++ {
   820  		hashesB = append(hashesB, common.Hash{0x02, byte(i / 256), byte(i % 256)})
   821  	}
   822  	testTransactionFetcherParallel(t, txFetcherTest{
   823  		init: func() *TxFetcher {
   824  			return NewTxFetcher(
   825  				func(common.Hash) bool { return false },
   826  				nil,
   827  				func(string, []common.Hash) error { return nil },
   828  				testTxArrivalWait,
   829  			)
   830  		},
   831  		steps: []interface{}{
    832  			// Announce half of the transactions and wait for them to be scheduled
   833  			doTxNotify{peer: "A", hashes: hashesA[:maxTxAnnounces/2]},
   834  			doTxNotify{peer: "B", hashes: hashesB[:maxTxAnnounces/2-1]},
   835  			doWait{time: testTxArrivalWait, step: true},
   836  
   837  			// Announce the second half and keep them in the wait list
   838  			doTxNotify{peer: "A", hashes: hashesA[maxTxAnnounces/2 : maxTxAnnounces]},
   839  			doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1]},
   840  
   841  			// Ensure the hashes are split half and half
   842  			isWaiting(map[string][]common.Hash{
   843  				"A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
   844  				"B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1],
   845  			}),
   846  			isScheduled{
   847  				tracking: map[string][]common.Hash{
   848  					"A": hashesA[:maxTxAnnounces/2],
   849  					"B": hashesB[:maxTxAnnounces/2-1],
   850  				},
   851  				fetching: map[string][]common.Hash{
   852  					"A": hashesA[1643 : 1643+maxTxRetrievals],
   853  					"B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
   854  				},
   855  			},
   856  			// Ensure that adding even one more hash results in dropping the hash
   857  			doTxNotify{peer: "A", hashes: []common.Hash{hashesA[maxTxAnnounces]}},
   858  			doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces-1 : maxTxAnnounces+1]},
   859  
   860  			isWaiting(map[string][]common.Hash{
   861  				"A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
   862  				"B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces],
   863  			}),
   864  			isScheduled{
   865  				tracking: map[string][]common.Hash{
   866  					"A": hashesA[:maxTxAnnounces/2],
   867  					"B": hashesB[:maxTxAnnounces/2-1],
   868  				},
   869  				fetching: map[string][]common.Hash{
   870  					"A": hashesA[1643 : 1643+maxTxRetrievals],
   871  					"B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
   872  				},
   873  			},
   874  		},
   875  	})
   876  }
   877  
   878  // Tests that underpriced transactions don't get rescheduled after being rejected.
   879  func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
   880  	testTransactionFetcherParallel(t, txFetcherTest{
   881  		init: func() *TxFetcher {
   882  			return NewTxFetcher(
   883  				func(common.Hash) bool { return false },
   884  				func(txs []*types.Transaction) []error {
   885  					errs := make([]error, len(txs))
   886  					for i := 0; i < len(errs); i++ {
   887  						if i%2 == 0 {
   888  							errs[i] = core.ErrUnderpriced
   889  						} else {
   890  							errs[i] = core.ErrReplaceUnderpriced
   891  						}
   892  					}
   893  					return errs
   894  				},
   895  				func(string, []common.Hash) error { return nil },
   896  				testTxArrivalWait,
   897  			)
   898  		},
   899  		steps: []interface{}{
   900  			// Deliver a transaction through the fetcher, but reject as underpriced
   901  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
   902  			doWait{time: testTxArrivalWait, step: true},
   903  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}, direct: true},
   904  			isScheduled{nil, nil, nil},
   905  
   906  			// Try to announce the transaction again, ensure it's not scheduled back
   907  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}}, // [2] is needed to force a step in the fetcher
   908  			isWaiting(map[string][]common.Hash{
   909  				"A": {testTxsHashes[2]},
   910  			}),
   911  			isScheduled{nil, nil, nil},
   912  		},
   913  	})
   914  }
   915  
   916  // Tests that underpriced transactions don't get rescheduled after being rejected,
   917  // but at the same time there's a hard cap on the number of transactions that are
   918  // tracked.
   919  func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
   920  	// Temporarily disable fetch timeouts as they massively mess up the simulated clock
   921  	defer func(timeout time.Duration) { txFetchTimeout = timeout }(txFetchTimeout)
   922  	txFetchTimeout = 24 * time.Hour
   923  
   924  	// Create a slew of transactions to max out the underpriced set
   925  	var txs []*types.Transaction
   926  	for i := 0; i < maxTxUnderpricedSetSize+1; i++ {
   927  		txs = append(txs, types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil))
   928  	}
   929  	hashes := make([]common.Hash, len(txs))
   930  	for i, tx := range txs {
   931  		hashes[i] = tx.Hash()
   932  	}
   933  	// Generate a set of steps to announce and deliver the entire set of transactions
   934  	var steps []interface{}
   935  	for i := 0; i < maxTxUnderpricedSetSize/maxTxRetrievals; i++ {
   936  		steps = append(steps, doTxNotify{peer: "A", hashes: hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals]})
   937  		steps = append(steps, isWaiting(map[string][]common.Hash{
   938  			"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
   939  		}))
   940  		steps = append(steps, doWait{time: testTxArrivalWait, step: true})
   941  		steps = append(steps, isScheduled{
   942  			tracking: map[string][]common.Hash{
   943  				"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
   944  			},
   945  			fetching: map[string][]common.Hash{
   946  				"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
   947  			},
   948  		})
   949  		steps = append(steps, doTxEnqueue{peer: "A", txs: txs[i*maxTxRetrievals : (i+1)*maxTxRetrievals], direct: true})
   950  		steps = append(steps, isWaiting(nil))
   951  		steps = append(steps, isScheduled{nil, nil, nil})
   952  		steps = append(steps, isUnderpriced((i+1)*maxTxRetrievals))
   953  	}
   954  	testTransactionFetcher(t, txFetcherTest{
   955  		init: func() *TxFetcher {
   956  			return NewTxFetcher(
   957  				func(common.Hash) bool { return false },
   958  				func(txs []*types.Transaction) []error {
   959  					errs := make([]error, len(txs))
   960  					for i := 0; i < len(errs); i++ {
   961  						errs[i] = core.ErrUnderpriced
   962  					}
   963  					return errs
   964  				},
   965  				func(string, []common.Hash) error { return nil },
   966  				testTxArrivalWait,
   967  			)
   968  		},
   969  		steps: append(steps, []interface{}{
   970  			// The preparation of the test has already been done in `steps`, add the last check
   971  			doTxNotify{peer: "A", hashes: []common.Hash{hashes[maxTxUnderpricedSetSize]}},
   972  			doWait{time: testTxArrivalWait, step: true},
   973  			doTxEnqueue{peer: "A", txs: []*types.Transaction{txs[maxTxUnderpricedSetSize]}, direct: true},
   974  			isUnderpriced(maxTxUnderpricedSetSize),
   975  		}...),
   976  	})
   977  }
   978  
   979  // Tests that unexpected deliveries don't corrupt the internal state.
   980  func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
   981  	testTransactionFetcherParallel(t, txFetcherTest{
   982  		init: func() *TxFetcher {
   983  			return NewTxFetcher(
   984  				func(common.Hash) bool { return false },
   985  				func(txs []*types.Transaction) []error {
   986  					return make([]error, len(txs))
   987  				},
   988  				func(string, []common.Hash) error { return nil },
   989  				testTxArrivalWait,
   990  			)
   991  		},
   992  		steps: []interface{}{
   993  			// Deliver something out of the blue
   994  			isWaiting(nil),
   995  			isScheduled{nil, nil, nil},
   996  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: false},
   997  			isWaiting(nil),
   998  			isScheduled{nil, nil, nil},
   999  
  1000  			// Set up a few hashes into various stages
  1001  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
  1002  			doWait{time: testTxArrivalWait, step: true},
  1003  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
  1004  			doWait{time: testTxArrivalWait, step: true},
  1005  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},
  1006  
  1007  			isWaiting(map[string][]common.Hash{
  1008  				"A": {testTxsHashes[2]},
  1009  			}),
  1010  			isScheduled{
  1011  				tracking: map[string][]common.Hash{
  1012  					"A": {testTxsHashes[0], testTxsHashes[1]},
  1013  				},
  1014  				fetching: map[string][]common.Hash{
  1015  					"A": {testTxsHashes[0]},
  1016  				},
  1017  			},
  1018  			// Deliver everything and more out of the blue
  1019  			doTxEnqueue{peer: "B", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2], testTxs[3]}, direct: true},
  1020  			isWaiting(nil),
  1021  			isScheduled{
  1022  				tracking: nil,
  1023  				fetching: nil,
  1024  				dangling: map[string][]common.Hash{
  1025  					"A": {testTxsHashes[0]},
  1026  				},
  1027  			},
  1028  		},
  1029  	})
  1030  }
  1031  
  1032  // Tests that dropping a peer cleans out all internal data structures in all the
  1033  // live or dangling stages.
  1034  func TestTransactionFetcherDrop(t *testing.T) {
  1035  	testTransactionFetcherParallel(t, txFetcherTest{
  1036  		init: func() *TxFetcher {
  1037  			return NewTxFetcher(
  1038  				func(common.Hash) bool { return false },
  1039  				func(txs []*types.Transaction) []error {
  1040  					return make([]error, len(txs))
  1041  				},
  1042  				func(string, []common.Hash) error { return nil },
  1043  				testTxArrivalWait,
  1044  			)
  1045  		},
  1046  		steps: []interface{}{
  1047  			// Set up a few hashes into various stages
  1048  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
  1049  			doWait{time: testTxArrivalWait, step: true},
  1050  			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
  1051  			doWait{time: testTxArrivalWait, step: true},
  1052  			doTxNotify{peer: "A", hashes: []common.Hash{{0x03}}},
  1053  
  1054  			isWaiting(map[string][]common.Hash{
  1055  				"A": {{0x03}},
  1056  			}),
  1057  			isScheduled{
  1058  				tracking: map[string][]common.Hash{
  1059  					"A": {{0x01}, {0x02}},
  1060  				},
  1061  				fetching: map[string][]common.Hash{
  1062  					"A": {{0x01}},
  1063  				},
  1064  			},
  1065  			// Drop the peer and ensure everything's cleaned out
  1066  			doDrop("A"),
  1067  			isWaiting(nil),
  1068  			isScheduled{nil, nil, nil},
  1069  
  1070  			// Push the node into a dangling (timeout) state
  1071  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
  1072  			doWait{time: testTxArrivalWait, step: true},
  1073  			isWaiting(nil),
  1074  			isScheduled{
  1075  				tracking: map[string][]common.Hash{
  1076  					"A": {testTxsHashes[0]},
  1077  				},
  1078  				fetching: map[string][]common.Hash{
  1079  					"A": {testTxsHashes[0]},
  1080  				},
  1081  			},
  1082  			doWait{time: txFetchTimeout, step: true},
  1083  			isWaiting(nil),
  1084  			isScheduled{
  1085  				tracking: nil,
  1086  				fetching: nil,
  1087  				dangling: map[string][]common.Hash{
  1088  					"A": {},
  1089  				},
  1090  			},
  1091  			// Drop the peer and ensure everything's cleaned out
  1092  			doDrop("A"),
  1093  			isWaiting(nil),
  1094  			isScheduled{nil, nil, nil},
  1095  		},
  1096  	})
  1097  }
  1098  
  1099  // Tests that dropping a peer instantly reschedules failed announcements to any
  1100  // available peer.
  1101  func TestTransactionFetcherDropRescheduling(t *testing.T) {
  1102  	testTransactionFetcherParallel(t, txFetcherTest{
  1103  		init: func() *TxFetcher {
  1104  			return NewTxFetcher(
  1105  				func(common.Hash) bool { return false },
  1106  				func(txs []*types.Transaction) []error {
  1107  					return make([]error, len(txs))
  1108  				},
  1109  				func(string, []common.Hash) error { return nil },
  1110  				testTxArrivalWait,
  1111  			)
  1112  		},
  1113  		steps: []interface{}{
  1114  			// Set up a few hashes into various stages
  1115  			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
  1116  			doWait{time: testTxArrivalWait, step: true},
  1117  			doTxNotify{peer: "B", hashes: []common.Hash{{0x01}}},
  1118  
  1119  			isWaiting(nil),
  1120  			isScheduled{
  1121  				tracking: map[string][]common.Hash{
  1122  					"A": {{0x01}},
  1123  					"B": {{0x01}},
  1124  				},
  1125  				fetching: map[string][]common.Hash{
  1126  					"A": {{0x01}},
  1127  				},
  1128  			},
  1129  			// Drop the peer and ensure everything's cleaned out
  1130  			doDrop("A"),
  1131  			isWaiting(nil),
  1132  			isScheduled{
  1133  				tracking: map[string][]common.Hash{
  1134  					"B": {{0x01}},
  1135  				},
  1136  				fetching: map[string][]common.Hash{
  1137  					"B": {{0x01}},
  1138  				},
  1139  			},
  1140  		},
  1141  	})
  1142  }
  1143  
  1144  // This test reproduces a crash caught by the fuzzer. The root cause was a
  1145  // dangling transaction timing out and clashing on re-add with a concurrently
  1146  // announced one.
  1147  func TestTransactionFetcherFuzzCrash01(t *testing.T) {
  1148  	testTransactionFetcherParallel(t, txFetcherTest{
  1149  		init: func() *TxFetcher {
  1150  			return NewTxFetcher(
  1151  				func(common.Hash) bool { return false },
  1152  				func(txs []*types.Transaction) []error {
  1153  					return make([]error, len(txs))
  1154  				},
  1155  				func(string, []common.Hash) error { return nil },
  1156  				testTxArrivalWait,
  1157  			)
  1158  		},
  1159  		steps: []interface{}{
  1160  			// Get a transaction into fetching mode and make it dangling with a broadcast
  1161  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
  1162  			doWait{time: testTxArrivalWait, step: true},
  1163  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
  1164  
  1165  			// Notify the dangling transaction once more and crash via a timeout
  1166  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
  1167  			doWait{time: txFetchTimeout, step: true},
  1168  		},
  1169  	})
  1170  }
  1171  
  1172  // This test reproduces a crash caught by the fuzzer. The root cause was a
  1173  // dangling transaction getting peer-dropped and clashing on re-add with a
  1174  // concurrently announced one.
  1175  func TestTransactionFetcherFuzzCrash02(t *testing.T) {
  1176  	testTransactionFetcherParallel(t, txFetcherTest{
  1177  		init: func() *TxFetcher {
  1178  			return NewTxFetcher(
  1179  				func(common.Hash) bool { return false },
  1180  				func(txs []*types.Transaction) []error {
  1181  					return make([]error, len(txs))
  1182  				},
  1183  				func(string, []common.Hash) error { return nil },
  1184  				testTxArrivalWait,
  1185  			)
  1186  		},
  1187  		steps: []interface{}{
  1188  			// Get a transaction into fetching mode and make it dangling with a broadcast
  1189  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
  1190  			doWait{time: testTxArrivalWait, step: true},
  1191  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
  1192  
  1193  			// Notify the dangling transaction once more, re-fetch, and crash via a drop and timeout
  1194  			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
  1195  			doWait{time: testTxArrivalWait, step: true},
  1196  			doDrop("A"),
  1197  			doWait{time: txFetchTimeout, step: true},
  1198  		},
  1199  	})
  1200  }
  1201  
  1202  // This test reproduces a crash caught by the fuzzer. The root cause was a
  1203  // dangling transaction getting rescheduled via a partial delivery, clashing
  1204  // with a concurrent notify.
  1205  func TestTransactionFetcherFuzzCrash03(t *testing.T) {
  1206  	testTransactionFetcherParallel(t, txFetcherTest{
  1207  		init: func() *TxFetcher {
  1208  			return NewTxFetcher(
  1209  				func(common.Hash) bool { return false },
  1210  				func(txs []*types.Transaction) []error {
  1211  					return make([]error, len(txs))
  1212  				},
  1213  				func(string, []common.Hash) error { return nil },
  1214  				testTxArrivalWait,
  1215  			)
  1216  		},
  1217  		steps: []interface{}{
  1218  			// Get a transaction into fetching mode and make it dangling with a broadcast
  1219  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
  1220  			doWait{time: txFetchTimeout, step: true},
  1221  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}},
  1222  
  1223  			// Notify the dangling transaction once more, partially deliver, clash&crash with a timeout
  1224  			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
  1225  			doWait{time: testTxArrivalWait, step: true},
  1226  
  1227  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true},
  1228  			doWait{time: txFetchTimeout, step: true},
  1229  		},
  1230  	})
  1231  }
  1232  
  1233  // This test reproduces a crash caught by the fuzzer. The root cause was a
  1234  // dangling transaction getting rescheduled via a disconnect, clashing with
  1235  // a concurrent notify.
  1236  func TestTransactionFetcherFuzzCrash04(t *testing.T) {
  1237  	// Create a channel to control when tx requests can fail
  1238  	proceed := make(chan struct{})
  1239  
  1240  	testTransactionFetcherParallel(t, txFetcherTest{
  1241  		init: func() *TxFetcher {
  1242  			return NewTxFetcher(
  1243  				func(common.Hash) bool { return false },
  1244  				func(txs []*types.Transaction) []error {
  1245  					return make([]error, len(txs))
  1246  				},
  1247  				func(string, []common.Hash) error {
  1248  					<-proceed
  1249  					return errors.New("peer disconnected")
  1250  				},
  1251  				testTxArrivalWait,
  1252  			)
  1253  		},
  1254  		steps: []interface{}{
  1255  			// Get a transaction into fetching mode and make it dangling with a broadcast
  1256  			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
  1257  			doWait{time: testTxArrivalWait, step: true},
  1258  			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},
  1259  
  1260  			// Notify the dangling transaction once more, re-fetch, and crash via an in-flight disconnect
  1261  			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
  1262  			doWait{time: testTxArrivalWait, step: true},
  1263  			doFunc(func() {
  1264  				proceed <- struct{}{} // Allow peer A to return the failure
  1265  			}),
  1266  			doWait{time: 0, step: true},
  1267  			doWait{time: txFetchTimeout, step: true},
  1268  		},
  1269  	})
  1270  }
  1271  
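        // testTransactionFetcherParallel runs a fetcher test scenario with the
        // test marked as parallel.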
  1272  func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) {
  1273  	t.Parallel()
  1274  	testTransactionFetcher(t, tt)
  1275  }
  1276  
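        // testTransactionFetcher executes a scripted scenario against a fetcher
        // wired to a simulated clock and a step-notification channel, validating
        // the fetcher's internal state after every step.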
  1277  func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
  1278  	// Create a fetcher and hook into its simulated fields
  1279  	clock := new(mclock.Simulated)
  1280  	wait := make(chan struct{})
  1281  
  1282  	fetcher := tt.init()
  1283  	fetcher.clock = clock
  1284  	fetcher.step = wait
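        	// A fixed seed keeps peer selection deterministic; several scenarios
        	// above hard-code the resulting fetching sets.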
  1285  	fetcher.rand = rand.New(rand.NewSource(0x3a29))
  1286  
  1287  	fetcher.Start()
  1288  	defer fetcher.Stop()
  1289  
  1290  	// Crunch through all the test steps and execute them
  1291  	for i, step := range tt.steps {
  1292  		switch step := step.(type) {
  1293  		case doTxNotify:
  1294  			if err := fetcher.Notify(step.peer, step.hashes); err != nil {
  1295  				t.Errorf("step %d: %v", i, err)
  1296  			}
  1297  			<-wait // Fetcher needs to process this, wait until it's done
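        			// The fetcher must not signal a second step for a single notify;
        			// panic if one arrives within a millisecond.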
  1298  			select {
  1299  			case <-wait:
  1300  				panic("wtf")
  1301  			case <-time.After(time.Millisecond):
  1302  			}
  1303  
  1304  		case doTxEnqueue:
  1305  			if err := fetcher.Enqueue(step.peer, step.txs, step.direct); err != nil {
  1306  				t.Errorf("step %d: %v", i, err)
  1307  			}
  1308  			<-wait // Fetcher needs to process this, wait until it's done
  1309  
  1310  		case doWait:
  1311  			clock.Run(step.time)
  1312  			if step.step {
  1313  				<-wait // Fetcher supposed to do something, wait until it's done
  1314  			}
  1315  
  1316  		case doDrop:
  1317  			if err := fetcher.Drop(string(step)); err != nil {
  1318  				t.Errorf("step %d: %v", i, err)
  1319  			}
  1320  			<-wait // Fetcher needs to process this, wait until it's done
  1321  
  1322  		case doFunc:
  1323  			step()
  1324  
  1325  		case isWaiting:
  1326  			// We need to check that the waiting list (stage 1) internals
  1327  			// match with the expected set. Check the peer->hash mappings
  1328  			// first.
  1329  			for peer, hashes := range step {
  1330  				waiting := fetcher.waitslots[peer]
  1331  				if waiting == nil {
  1332  					t.Errorf("step %d: peer %s missing from waitslots", i, peer)
  1333  					continue
  1334  				}
  1335  				for _, hash := range hashes {
  1336  					if _, ok := waiting[hash]; !ok {
  1337  						t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, hash)
  1338  					}
  1339  				}
  1340  				for hash := range waiting {
  1341  					if !containsHash(hashes, hash) {
  1342  						t.Errorf("step %d, peer %s: hash %x extra in waitslots", i, peer, hash)
  1343  					}
  1344  				}
  1345  			}
  1346  			for peer := range fetcher.waitslots {
  1347  				if _, ok := step[peer]; !ok {
  1348  					t.Errorf("step %d: peer %s extra in waitslots", i, peer)
  1349  				}
  1350  			}
  1351  			// Peer->hash sets correct, check the hash->peer and timeout sets
  1352  			for peer, hashes := range step {
  1353  				for _, hash := range hashes {
  1354  					if _, ok := fetcher.waitlist[hash][peer]; !ok {
  1355  						t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, hash, peer)
  1356  					}
  1357  					if _, ok := fetcher.waittime[hash]; !ok {
  1358  						t.Errorf("step %d: hash %x missing from waittime", i, hash)
  1359  					}
  1360  				}
  1361  			}
  1362  			for hash, peers := range fetcher.waitlist {
  1363  				if len(peers) == 0 {
  1364  					t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash)
  1365  				}
  1366  				for peer := range peers {
  1367  					if !containsHash(step[peer], hash) {
  1368  						t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer)
  1369  					}
  1370  				}
  1371  			}
  1372  			for hash := range fetcher.waittime {
  1373  				var found bool
  1374  				for _, hashes := range step {
  1375  					if containsHash(hashes, hash) {
  1376  						found = true
  1377  						break
  1378  					}
  1379  				}
  1380  				if !found {
  1381  					t.Errorf("step %d: hash %x extra in waittime", i, hash)
  1382  				}
  1383  			}
  1384  
  1385  		case isScheduled:
  1386  			// Check that all scheduled announces are accounted for and no
  1387  			// extra ones are present.
  1388  			for peer, hashes := range step.tracking {
  1389  				scheduled := fetcher.announces[peer]
  1390  				if scheduled == nil {
  1391  					t.Errorf("step %d: peer %s missing from announces", i, peer)
  1392  					continue
  1393  				}
  1394  				for _, hash := range hashes {
  1395  					if _, ok := scheduled[hash]; !ok {
  1396  						t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, hash)
  1397  					}
  1398  				}
  1399  				for hash := range scheduled {
  1400  					if !containsHash(hashes, hash) {
  1401  						t.Errorf("step %d, peer %s: hash %x extra in announces", i, peer, hash)
  1402  					}
  1403  				}
  1404  			}
  1405  			for peer := range fetcher.announces {
  1406  				if _, ok := step.tracking[peer]; !ok {
  1407  					t.Errorf("step %d: peer %s extra in announces", i, peer)
  1408  				}
  1409  			}
  1410  			// Check that all announces required to be fetching are in the
  1411  			// appropriate sets
  1412  			for peer, hashes := range step.fetching {
  1413  				request := fetcher.requests[peer]
  1414  				if request == nil {
  1415  					t.Errorf("step %d: peer %s missing from requests", i, peer)
  1416  					continue
  1417  				}
  1418  				for _, hash := range hashes {
  1419  					if !containsHash(request.hashes, hash) {
  1420  						t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash)
  1421  					}
  1422  				}
  1423  				for _, hash := range request.hashes {
  1424  					if !containsHash(hashes, hash) {
  1425  						t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash)
  1426  					}
  1427  				}
  1428  			}
  1429  			for peer := range fetcher.requests {
  1430  				if _, ok := step.fetching[peer]; !ok {
  1431  					if _, ok := step.dangling[peer]; !ok {
  1432  						t.Errorf("step %d: peer %s extra in requests", i, peer)
  1433  					}
  1434  				}
  1435  			}
  1436  			for peer, hashes := range step.fetching {
  1437  				for _, hash := range hashes {
  1438  					if _, ok := fetcher.fetching[hash]; !ok {
  1439  						t.Errorf("step %d, peer %s: hash %x missing from fetching", i, peer, hash)
  1440  					}
  1441  				}
  1442  			}
  1443  			for hash := range fetcher.fetching {
  1444  				var found bool
  1445  				for _, req := range fetcher.requests {
  1446  					if containsHash(req.hashes, hash) {
  1447  						found = true
  1448  						break
  1449  					}
  1450  				}
  1451  				if !found {
  1452  					t.Errorf("step %d: hash %x extra in fetching", i, hash)
  1453  				}
  1454  			}
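        			// Check that the alternate-source sets of every hash being fetched
        			// are consistent with the per-peer announcement sets.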
  1455  			for _, hashes := range step.fetching {
  1456  				for _, hash := range hashes {
  1457  					alternates := fetcher.alternates[hash]
  1458  					if alternates == nil {
  1459  						t.Errorf("step %d: hash %x missing from alternates", i, hash)
  1460  						continue
  1461  					}
  1462  					for peer := range alternates {
  1463  						if _, ok := fetcher.announces[peer]; !ok {
  1464  							t.Errorf("step %d: peer %s extra in alternates", i, peer)
  1465  							continue
  1466  						}
  1467  						if _, ok := fetcher.announces[peer][hash]; !ok {
  1468  							t.Errorf("step %d, peer %s: hash %x extra in alternates", i, peer, hash)
  1469  							continue
  1470  						}
  1471  					}
  1472  					for p := range fetcher.announced[hash] {
  1473  						if _, ok := alternates[p]; !ok {
  1474  							t.Errorf("step %d, hash %x: peer %s missing from alternates", i, hash, p)
  1475  							continue
  1476  						}
  1477  					}
  1478  				}
  1479  			}
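        			// Check that peers expected to have dangling requests still have
        			// them open with exactly the expected hashes.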
  1480  			for peer, hashes := range step.dangling {
  1481  				request := fetcher.requests[peer]
  1482  				if request == nil {
  1483  					t.Errorf("step %d: peer %s missing from requests", i, peer)
  1484  					continue
  1485  				}
  1486  				for _, hash := range hashes {
  1487  					if !containsHash(request.hashes, hash) {
  1488  						t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash)
  1489  					}
  1490  				}
  1491  				for _, hash := range request.hashes {
  1492  					if !containsHash(hashes, hash) {
  1493  						t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash)
  1494  					}
  1495  				}
  1496  			}
  1497  			// Check that all transaction announces that are scheduled for
  1498  			// retrieval but not actively being downloaded are tracked only
  1499  			// in the stage 2 `announced` map.
  1500  			var queued []common.Hash
  1501  			for _, hashes := range step.tracking {
  1502  				for _, hash := range hashes {
  1503  					var found bool
  1504  					for _, hs := range step.fetching {
  1505  						if containsHash(hs, hash) {
  1506  							found = true
  1507  							break
  1508  						}
  1509  					}
  1510  					if !found {
  1511  						queued = append(queued, hash)
  1512  					}
  1513  				}
  1514  			}
  1515  			for _, hash := range queued {
  1516  				if _, ok := fetcher.announced[hash]; !ok {
  1517  					t.Errorf("step %d: hash %x missing from announced", i, hash)
  1518  				}
  1519  			}
  1520  			for hash := range fetcher.announced {
  1521  				if !containsHash(queued, hash) {
  1522  					t.Errorf("step %d: hash %x extra in announced", i, hash)
  1523  				}
  1524  			}
  1525  
  1526  		case isUnderpriced:
  1527  			if fetcher.underpriced.Cardinality() != int(step) {
  1528  				t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Cardinality(), step)
  1529  			}
  1530  
  1531  		default:
  1532  			t.Fatalf("step %d: unknown step type %T", i, step)
  1533  		}
  1534  		// After every step, cross validate the internal uniqueness invariants
  1535  		// between stage one and stage two.
  1536  		for hash := range fetcher.waittime {
  1537  			if _, ok := fetcher.announced[hash]; ok {
  1538  				t.Errorf("step %d: hash %s present in both stage 1 and 2", i, hash)
  1539  			}
  1540  		}
  1541  	}
  1542  }
  1543  
  1544  // containsHash returns whether a hash is contained within a hash slice.
  1545  func containsHash(slice []common.Hash, hash common.Hash) bool {
  1546  	for _, have := range slice {
  1547  		if have == hash {
  1548  			return true
  1549  		}
  1550  	}
  1551  	return false
  1552  }