// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package fetcher

import (
	"errors"
	"math/big"
	"math/rand"
	"testing"
	"time"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/common/mclock"
	"github.com/theQRL/go-zond/core/txpool"
	"github.com/theQRL/go-zond/core/types"
)

var (
	// testTxs is a set of transactions to use during testing that have meaningful hashes.
	testTxs = []*types.Transaction{
		types.NewTransaction(5577006791947779410, common.Address{0x0f}, new(big.Int), 0, new(big.Int), nil),
		types.NewTransaction(15352856648520921629, common.Address{0xbb}, new(big.Int), 0, new(big.Int), nil),
		types.NewTransaction(3916589616287113937, common.Address{0x86}, new(big.Int), 0, new(big.Int), nil),
		types.NewTransaction(9828766684487745566, common.Address{0xac}, new(big.Int), 0, new(big.Int), nil),
	}
	// testTxsHashes are the hashes of the test transactions above.
	testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
)

// The do* types below are action steps consumed by the scenario runner
// (testTransactionFetcher, defined elsewhere in this file); the is* types
// are assertion steps checked against the fetcher's internal state.

// doTxNotify announces a set of transaction hashes on behalf of a peer.
type doTxNotify struct {
	peer   string
	hashes []common.Hash
}

// doTxEnqueue delivers a batch of transactions from a peer, either as a
// direct reply to a request (direct == true) or as an async broadcast.
type doTxEnqueue struct {
	peer   string
	txs    []*types.Transaction
	direct bool
}

// doWait advances the simulated clock; step controls whether the runner
// also waits for the fetcher to process the resulting timer event.
type doWait struct {
	time time.Duration
	step bool
}

// doDrop disconnects the named peer from the fetcher.
type doDrop string

// doFunc runs an arbitrary callback in between steps.
type doFunc func()

// isWaiting asserts the exact per-peer contents of the wait list.
type isWaiting map[string][]common.Hash

// isScheduled asserts the per-peer contents of the scheduler: announcements
// being tracked, hashes actively being fetched and dangling (timed-out but
// not yet answered) requests.
type isScheduled struct {
	tracking map[string][]common.Hash
	fetching map[string][]common.Hash
	dangling map[string][]common.Hash
}

// isUnderpriced asserts the size of the underpriced transaction set.
type isUnderpriced int

// txFetcherTest represents a test scenario that can be executed by the test
// runner.
type txFetcherTest struct {
	init  func() *TxFetcher
	steps []interface{}
}

// Tests that transaction announcements are added to a waitlist, and none
// of them are scheduled for retrieval until the wait expires.
func TestTransactionFetcherWaiting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Initial announcement to get something into the waitlist
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			// Announce from a new peer to check that no overwrite happens
			doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
				"B": {{0x03}, {0x04}},
			}),
			// Announce clashing hashes but unique new peer
			doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
				"B": {{0x03}, {0x04}},
				"C": {{0x01}, {0x04}},
			}),
			// Announce existing and clashing hashes from existing peer
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}, {0x03}, {0x05}},
				"B": {{0x03}, {0x04}},
				"C": {{0x01}, {0x04}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			// Wait for the arrival timeout which should move all expired items
			// from the wait list to the scheduler
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
					"B": {{0x03}, {0x04}},
					"C": {{0x01}, {0x04}},
				},
				fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
					"A": {{0x02}, {0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
				},
			},
			// Queue up a non-fetchable transaction and then trigger it with a new
			// peer (weird case to test 1 line in the fetcher)
			doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}},
			isWaiting(map[string][]common.Hash{
				"C": {{0x06}, {0x07}},
			}),
			doWait{time: txArriveTimeout, step: true},
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
					"B": {{0x03}, {0x04}},
					"C": {{0x01}, {0x04}, {0x06}, {0x07}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x02}, {0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
				},
			},
			doTxNotify{peer: "D", hashes: []common.Hash{{0x06}, {0x07}}},
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
					"B": {{0x03}, {0x04}},
					"C": {{0x01}, {0x04}, {0x06}, {0x07}},
					"D": {{0x06}, {0x07}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x02}, {0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
					"D": {{0x06}, {0x07}},
				},
			},
		},
	})
}

// Tests that transaction announcements skip the waiting list if they are
// already scheduled.
func TestTransactionFetcherSkipWaiting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce overlaps from the same peer, ensure the new ones end up
			// in stage one, and clashing ones don't get double tracked
			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}, {0x03}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x03}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce overlaps from a new peer, ensure new transactions end up
			// in stage one and clashing ones get tracked for the new peer
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x03}},
				"B": {{0x03}, {0x04}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
		},
	})
}

// Tests that only a single transaction request gets scheduled to a peer
// and subsequent announces block or get allotted to someone else.
func TestTransactionFetcherSingletonRequesting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce a new set of transactions from the same peer and ensure
			// they do not start fetching since the peer is already busy
			doTxNotify{peer: "A", hashes: []common.Hash{{0x03}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x03}, {0x04}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x04}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce a duplicate set of transactions from a new peer and ensure
			// uniquely new ones start downloading, even if clashing.
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x05}, {0x06}}},
			isWaiting(map[string][]common.Hash{
				"B": {{0x05}, {0x06}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x04}},
					"B": {{0x02}, {0x03}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
					"B": {{0x03}},
				},
			},
		},
	})
}

// Tests that if a transaction retrieval fails, all the transactions get
// instantly scheduled back to someone else or the announcements dropped
// if no alternate source is available.
func TestTransactionFetcherFailedRescheduling(t *testing.T) {
	// Create a channel to control when tx requests can fail
	proceed := make(chan struct{})
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(origin string, hashes []common.Hash) error {
					<-proceed
					return errors.New("peer disconnected")
				},
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// While the original peer is stuck in the request, push in a second
			// data source.
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Wait until the original request fails and check that transactions
			// are either rescheduled or dropped
			doFunc(func() {
				proceed <- struct{}{} // Allow peer A to return the failure
			}),
			doWait{time: 0, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"B": {{0x02}},
				},
			},
			doFunc(func() {
				proceed <- struct{}{} // Allow peer B to return the failure
			}),
			doWait{time: 0, step: true},
			isWaiting(nil),
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that if a transaction retrieval succeeds, all alternate origins
// are cleaned up.
func TestTransactionFetcherCleanup(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Request should be delivered
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that if a transaction retrieval succeeds, but the response is empty
// (no transactions available), then all are nuked instead of being rescheduled
// (yes, this was a bug).
func TestTransactionFetcherCleanupEmpty(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver an empty response and ensure the transaction is cleared, not rescheduled
			doTxEnqueue{peer: "A", txs: []*types.Transaction{}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that non-returned transactions are either re-scheduled from a
// different peer, or self if they are after the cutoff point.
func TestTransactionFetcherMissingRescheduling(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
				},
			},
			// Deliver the middle transaction requested, the one before which
			// should be dropped and the one after re-requested.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true}, // This depends on the deterministic random
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[2]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[2]},
				},
			},
		},
	})
}

// Tests that out of two transactions, if one is missing and the last is
// delivered, the peer gets properly cleaned out from the internal state.
func TestTransactionFetcherMissingCleanup(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0], testTxsHashes[1]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
			},
			// Deliver the second transaction requested; the missing one should
			// be dropped, cleaning the peer out entirely.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true}, // This depends on the deterministic random
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that transaction broadcasts properly clean up announcements.
func TestTransactionFetcherBroadcasts(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Set up three transactions to be in different states: waiting, queued and fetching
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},

			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[2]},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Broadcast all the transactions and ensure everything gets cleaned
			// up, but the dangling request is left alone to avoid doing multiple
			// concurrent requests.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: false},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver the requested hashes
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that the waiting list timers properly reset and reschedule.
func TestTransactionFetcherWaitTimerResets(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Announce one hash and let half the arrival window elapse: the
			// hash must still be waiting and nothing scheduled.
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}},
			}),
			isScheduled{nil, nil, nil},
			doWait{time: txArriveTimeout / 2, step: false},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}},
			}),
			isScheduled{nil, nil, nil},

			// Announce a second hash mid-window; after another half window the
			// first expires into the scheduler while the second keeps waiting.
			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{nil, nil, nil},
			doWait{time: txArriveTimeout / 2, step: true},
			isWaiting(map[string][]common.Hash{
				"A": {{0x02}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},

			// After the final half window the second hash expires too.
			doWait{time: txArriveTimeout / 2, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},
		},
	})
}

// Tests that if a transaction request is not replied to, it will time
// out and be re-scheduled for someone else.
func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Wait until the delivery times out, everything should be cleaned up
			doWait{time: txFetchTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {},
				},
			},
			// Ensure that followup announcements don't get scheduled
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
			doWait{time: txArriveTimeout, step: true},
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[1]},
				},
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {},
				},
			},
			// If the dangling request arrives a bit later, do not choke
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[1]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[1]},
				},
			},
		},
	})
}

// Tests that the fetching timeout timers properly reset and reschedule.
func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Start two fetches one arrival window apart from each other
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
			doWait{time: txArriveTimeout, step: true},

			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}},
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
					"B": {{0x02}},
				},
			},
			// The first request must time out while the second is still live
			doWait{time: txFetchTimeout - txArriveTimeout, step: true},
			isScheduled{
				tracking: map[string][]common.Hash{
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"B": {{0x02}},
				},
				dangling: map[string][]common.Hash{
					"A": {},
				},
			},
			// One arrival window later the second request times out too
			doWait{time: txArriveTimeout, step: true},
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {},
					"B": {},
				},
			},
		},
	})
}

// Tests that if thousands of transactions are announced, only a small
// number of them will be requested at a time.
func TestTransactionFetcherRateLimiting(t *testing.T) {
	// Create a slew of transactions to announce
	var hashes []common.Hash
	for i := 0; i < maxTxAnnounces; i++ {
		hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)})
	}

	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Announce all the transactions, wait a bit and ensure only a small
			// percentage gets requested
			doTxNotify{peer: "A", hashes: hashes},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": hashes,
				},
				fetching: map[string][]common.Hash{
					// Offset 1643 depends on the deterministic test randomizer
					"A": hashes[1643 : 1643+maxTxRetrievals],
				},
			},
		},
	})
}

// Tests that the number of transactions a peer is allowed to announce and/or
// request at the same time is hard capped.
func TestTransactionFetcherDoSProtection(t *testing.T) {
	// Create a slew of transactions to announce
	var hashesA []common.Hash
	for i := 0; i < maxTxAnnounces+1; i++ {
		hashesA = append(hashesA, common.Hash{0x01, byte(i / 256), byte(i % 256)})
	}
	var hashesB []common.Hash
	for i := 0; i < maxTxAnnounces+1; i++ {
		hashesB = append(hashesB, common.Hash{0x02, byte(i / 256), byte(i % 256)})
	}
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Announce half of the transactions and wait for them to be scheduled
			doTxNotify{peer: "A", hashes: hashesA[:maxTxAnnounces/2]},
			doTxNotify{peer: "B", hashes: hashesB[:maxTxAnnounces/2-1]},
			doWait{time: txArriveTimeout, step: true},

			// Announce the second half and keep them in the wait list
			doTxNotify{peer: "A", hashes: hashesA[maxTxAnnounces/2 : maxTxAnnounces]},
			doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1]},

			// Ensure the hashes are split half and half
			isWaiting(map[string][]common.Hash{
				"A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
				"B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1],
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": hashesA[:maxTxAnnounces/2],
					"B": hashesB[:maxTxAnnounces/2-1],
				},
				fetching: map[string][]common.Hash{ // Offsets depend on the deterministic test randomizer
					"A": hashesA[1643 : 1643+maxTxRetrievals],
					"B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
				},
			},
			// Ensure that adding even one more hash results in dropping the hash
			doTxNotify{peer: "A", hashes: []common.Hash{hashesA[maxTxAnnounces]}},
			doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces-1 : maxTxAnnounces+1]},

			isWaiting(map[string][]common.Hash{
				"A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
				"B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces],
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": hashesA[:maxTxAnnounces/2],
					"B": hashesB[:maxTxAnnounces/2-1],
				},
				fetching: map[string][]common.Hash{
					"A": hashesA[1643 : 1643+maxTxRetrievals],
					"B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
				},
			},
		},
	})
}

// Tests that underpriced transactions don't get rescheduled after being rejected.
func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					// Alternate between the two underpriced rejection reasons
					errs := make([]error, len(txs))
					for i := 0; i < len(errs); i++ {
						if i%2 == 0 {
							errs[i] = txpool.ErrUnderpriced
						} else {
							errs[i] = txpool.ErrReplaceUnderpriced
						}
					}
					return errs
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Deliver a transaction through the fetcher, but reject as underpriced
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}, direct: true},
			isScheduled{nil, nil, nil},

			// Try to announce the transaction again, ensure it's not scheduled back
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}}, // [2] is needed to force a step in the fetcher
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[2]},
			}),
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that underpriced transactions don't get rescheduled after being rejected,
// but at the same time there's a hard cap on the number of transactions that are
// tracked.
func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
	// Temporarily disable fetch timeouts as they massively mess up the simulated clock
	defer func(timeout time.Duration) { txFetchTimeout = timeout }(txFetchTimeout)
	txFetchTimeout = 24 * time.Hour

	// Create a slew of transactions to max out the underpriced set
	var txs []*types.Transaction
	for i := 0; i < maxTxUnderpricedSetSize+1; i++ {
		txs = append(txs, types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil))
	}
	hashes := make([]common.Hash, len(txs))
	for i, tx := range txs {
		hashes[i] = tx.Hash()
	}
	// Generate a set of steps to announce and deliver the entire set of transactions
	var steps []interface{}
	for i := 0; i < maxTxUnderpricedSetSize/maxTxRetrievals; i++ {
		steps = append(steps, doTxNotify{peer: "A", hashes: hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals]})
		steps = append(steps, isWaiting(map[string][]common.Hash{
			"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
		}))
		steps = append(steps, doWait{time: txArriveTimeout, step: true})
		steps = append(steps, isScheduled{
			tracking: map[string][]common.Hash{
				"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
			},
			fetching: map[string][]common.Hash{
				"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
			},
		})
		steps = append(steps, doTxEnqueue{peer: "A", txs: txs[i*maxTxRetrievals : (i+1)*maxTxRetrievals], direct: true})
		steps = append(steps, isWaiting(nil))
		steps = append(steps, isScheduled{nil, nil, nil})
		steps = append(steps, isUnderpriced((i+1)*maxTxRetrievals))
	}
	testTransactionFetcher(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					// Reject everything as underpriced to fill the tracking set
					errs := make([]error, len(txs))
					for i := 0; i < len(errs); i++ {
						errs[i] = txpool.ErrUnderpriced
					}
					return errs
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: append(steps, []interface{}{
			// The preparation of the test has already been done in `steps`, add the last check
			doTxNotify{peer: "A", hashes: []common.Hash{hashes[maxTxUnderpricedSetSize]}},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{txs[maxTxUnderpricedSetSize]}, direct: true},
			isUnderpriced(maxTxUnderpricedSetSize),
		}...),
	})
}

// Tests that unexpected deliveries don't corrupt the internal state.
func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Deliver something out of the blue
			isWaiting(nil),
			isScheduled{nil, nil, nil},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: false},
			isWaiting(nil),
			isScheduled{nil, nil, nil},

			// Set up a few hashes into various stages
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},

			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[2]},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver everything and more out of the blue
			doTxEnqueue{peer: "B", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2], testTxs[3]}, direct: true},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
		},
	})
}

// Tests that dropping a peer cleans out all internal data structures in all the
// live or dangling stages.
func TestTransactionFetcherDrop(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Set up a few hashes into various stages
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{{0x03}}},

			isWaiting(map[string][]common.Hash{
				"A": {{0x03}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},
			// Drop the peer and ensure everything's cleaned out
			doDrop("A"),
			isWaiting(nil),
			isScheduled{nil, nil, nil},

			// Push the node into a dangling (timeout) state
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			doWait{time: txFetchTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {},
				},
			},
			// Drop the peer and ensure everything's cleaned out
			doDrop("A"),
			isWaiting(nil),
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that dropping a peer instantly reschedules failed announcements to any
// available peer.
func TestTransactionFetcherDropRescheduling(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Set up a few hashes into various stages
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "B", hashes: []common.Hash{{0x01}}},

			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}},
					"B": {{0x01}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},
			// Drop the peer and ensure everything's cleaned out
			doDrop("A"),
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"B": {{0x01}},
				},
				fetching: map[string][]common.Hash{
					"B": {{0x01}},
				},
			},
		},
	})
}

// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction timing out and clashing on re-add with a concurrently
// announced one.
func TestTransactionFetcherFuzzCrash01(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Get a transaction into fetching mode and make it dangling with a broadcast
			// (direct is left false, so the delivery is treated as a broadcast)
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},

			// Notify the dangling transaction once more and crash via a timeout
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txFetchTimeout, step: true},
		},
	})
}

// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction getting peer-dropped and clashing on re-add with a
// concurrently announced one.
func TestTransactionFetcherFuzzCrash02(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Get a transaction into fetching mode and make it dangling with a broadcast
			// (direct is left false, so the delivery is treated as a broadcast)
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},

			// Notify the dangling transaction once more, re-fetch, and crash via a drop and timeout
			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doDrop("A"),
			doWait{time: txFetchTimeout, step: true},
		},
	})
}

// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction getting rescheduled via a partial delivery, clashing
// with a concurrent notify.
func TestTransactionFetcherFuzzCrash03(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
			)
		},
		steps: []interface{}{
			// Get a transaction into fetching mode and make it dangling with a broadcast
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
			doWait{time: txFetchTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}},

			// Notify the dangling transaction once more, partially deliver, clash&crash with a timeout
			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},

			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true},
			doWait{time: txFetchTimeout, step: true},
		},
	})
}

// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction getting rescheduled via a disconnect, clashing with
// a concurrent notify.
1212 func TestTransactionFetcherFuzzCrash04(t *testing.T) { 1213 // Create a channel to control when tx requests can fail 1214 proceed := make(chan struct{}) 1215 1216 testTransactionFetcherParallel(t, txFetcherTest{ 1217 init: func() *TxFetcher { 1218 return NewTxFetcher( 1219 func(common.Hash) bool { return false }, 1220 func(txs []*types.Transaction) []error { 1221 return make([]error, len(txs)) 1222 }, 1223 func(string, []common.Hash) error { 1224 <-proceed 1225 return errors.New("peer disconnected") 1226 }, 1227 ) 1228 }, 1229 steps: []interface{}{ 1230 // Get a transaction into fetching mode and make it dangling with a broadcast 1231 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}}, 1232 doWait{time: txArriveTimeout, step: true}, 1233 doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}}, 1234 1235 // Notify the dangling transaction once more, re-fetch, and crash via an in-flight disconnect 1236 doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}}, 1237 doWait{time: txArriveTimeout, step: true}, 1238 doFunc(func() { 1239 proceed <- struct{}{} // Allow peer A to return the failure 1240 }), 1241 doWait{time: 0, step: true}, 1242 doWait{time: txFetchTimeout, step: true}, 1243 }, 1244 }) 1245 } 1246 1247 func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) { 1248 t.Parallel() 1249 testTransactionFetcher(t, tt) 1250 } 1251 1252 func testTransactionFetcher(t *testing.T, tt txFetcherTest) { 1253 // Create a fetcher and hook into it's simulated fields 1254 clock := new(mclock.Simulated) 1255 wait := make(chan struct{}) 1256 1257 fetcher := tt.init() 1258 fetcher.clock = clock 1259 fetcher.step = wait 1260 fetcher.rand = rand.New(rand.NewSource(0x3a29)) 1261 1262 fetcher.Start() 1263 defer fetcher.Stop() 1264 1265 defer func() { // drain the wait chan on exit 1266 for { 1267 select { 1268 case <-wait: 1269 default: 1270 return 1271 } 1272 } 1273 }() 1274 1275 // Crunch through all the test steps and execute them 1276 for 
i, step := range tt.steps { 1277 switch step := step.(type) { 1278 case doTxNotify: 1279 if err := fetcher.Notify(step.peer, step.hashes); err != nil { 1280 t.Errorf("step %d: %v", i, err) 1281 } 1282 <-wait // Fetcher needs to process this, wait until it's done 1283 select { 1284 case <-wait: 1285 panic("wtf") 1286 case <-time.After(time.Millisecond): 1287 } 1288 1289 case doTxEnqueue: 1290 if err := fetcher.Enqueue(step.peer, step.txs, step.direct); err != nil { 1291 t.Errorf("step %d: %v", i, err) 1292 } 1293 <-wait // Fetcher needs to process this, wait until it's done 1294 1295 case doWait: 1296 clock.Run(step.time) 1297 if step.step { 1298 <-wait // Fetcher supposed to do something, wait until it's done 1299 } 1300 1301 case doDrop: 1302 if err := fetcher.Drop(string(step)); err != nil { 1303 t.Errorf("step %d: %v", i, err) 1304 } 1305 <-wait // Fetcher needs to process this, wait until it's done 1306 1307 case doFunc: 1308 step() 1309 1310 case isWaiting: 1311 // We need to check that the waiting list (stage 1) internals 1312 // match with the expected set. Check the peer->hash mappings 1313 // first. 
1314 for peer, hashes := range step { 1315 waiting := fetcher.waitslots[peer] 1316 if waiting == nil { 1317 t.Errorf("step %d: peer %s missing from waitslots", i, peer) 1318 continue 1319 } 1320 for _, hash := range hashes { 1321 if _, ok := waiting[hash]; !ok { 1322 t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, hash) 1323 } 1324 } 1325 for hash := range waiting { 1326 if !containsHash(hashes, hash) { 1327 t.Errorf("step %d, peer %s: hash %x extra in waitslots", i, peer, hash) 1328 } 1329 } 1330 } 1331 for peer := range fetcher.waitslots { 1332 if _, ok := step[peer]; !ok { 1333 t.Errorf("step %d: peer %s extra in waitslots", i, peer) 1334 } 1335 } 1336 // Peer->hash sets correct, check the hash->peer and timeout sets 1337 for peer, hashes := range step { 1338 for _, hash := range hashes { 1339 if _, ok := fetcher.waitlist[hash][peer]; !ok { 1340 t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, hash, peer) 1341 } 1342 if _, ok := fetcher.waittime[hash]; !ok { 1343 t.Errorf("step %d: hash %x missing from waittime", i, hash) 1344 } 1345 } 1346 } 1347 for hash, peers := range fetcher.waitlist { 1348 if len(peers) == 0 { 1349 t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash) 1350 } 1351 for peer := range peers { 1352 if !containsHash(step[peer], hash) { 1353 t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer) 1354 } 1355 } 1356 } 1357 for hash := range fetcher.waittime { 1358 var found bool 1359 for _, hashes := range step { 1360 if containsHash(hashes, hash) { 1361 found = true 1362 break 1363 } 1364 } 1365 if !found { 1366 t.Errorf("step %d,: hash %x extra in waittime", i, hash) 1367 } 1368 } 1369 1370 case isScheduled: 1371 // Check that all scheduled announces are accounted for and no 1372 // extra ones are present. 
1373 for peer, hashes := range step.tracking { 1374 scheduled := fetcher.announces[peer] 1375 if scheduled == nil { 1376 t.Errorf("step %d: peer %s missing from announces", i, peer) 1377 continue 1378 } 1379 for _, hash := range hashes { 1380 if _, ok := scheduled[hash]; !ok { 1381 t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, hash) 1382 } 1383 } 1384 for hash := range scheduled { 1385 if !containsHash(hashes, hash) { 1386 t.Errorf("step %d, peer %s: hash %x extra in announces", i, peer, hash) 1387 } 1388 } 1389 } 1390 for peer := range fetcher.announces { 1391 if _, ok := step.tracking[peer]; !ok { 1392 t.Errorf("step %d: peer %s extra in announces", i, peer) 1393 } 1394 } 1395 // Check that all announces required to be fetching are in the 1396 // appropriate sets 1397 for peer, hashes := range step.fetching { 1398 request := fetcher.requests[peer] 1399 if request == nil { 1400 t.Errorf("step %d: peer %s missing from requests", i, peer) 1401 continue 1402 } 1403 for _, hash := range hashes { 1404 if !containsHash(request.hashes, hash) { 1405 t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash) 1406 } 1407 } 1408 for _, hash := range request.hashes { 1409 if !containsHash(hashes, hash) { 1410 t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash) 1411 } 1412 } 1413 } 1414 for peer := range fetcher.requests { 1415 if _, ok := step.fetching[peer]; !ok { 1416 if _, ok := step.dangling[peer]; !ok { 1417 t.Errorf("step %d: peer %s extra in requests", i, peer) 1418 } 1419 } 1420 } 1421 for peer, hashes := range step.fetching { 1422 for _, hash := range hashes { 1423 if _, ok := fetcher.fetching[hash]; !ok { 1424 t.Errorf("step %d, peer %s: hash %x missing from fetching", i, peer, hash) 1425 } 1426 } 1427 } 1428 for hash := range fetcher.fetching { 1429 var found bool 1430 for _, req := range fetcher.requests { 1431 if containsHash(req.hashes, hash) { 1432 found = true 1433 break 1434 } 1435 } 1436 if !found { 
1437 t.Errorf("step %d: hash %x extra in fetching", i, hash) 1438 } 1439 } 1440 for _, hashes := range step.fetching { 1441 for _, hash := range hashes { 1442 alternates := fetcher.alternates[hash] 1443 if alternates == nil { 1444 t.Errorf("step %d: hash %x missing from alternates", i, hash) 1445 continue 1446 } 1447 for peer := range alternates { 1448 if _, ok := fetcher.announces[peer]; !ok { 1449 t.Errorf("step %d: peer %s extra in alternates", i, peer) 1450 continue 1451 } 1452 if _, ok := fetcher.announces[peer][hash]; !ok { 1453 t.Errorf("step %d, peer %s: hash %x extra in alternates", i, hash, peer) 1454 continue 1455 } 1456 } 1457 for p := range fetcher.announced[hash] { 1458 if _, ok := alternates[p]; !ok { 1459 t.Errorf("step %d, hash %x: peer %s missing from alternates", i, hash, p) 1460 continue 1461 } 1462 } 1463 } 1464 } 1465 for peer, hashes := range step.dangling { 1466 request := fetcher.requests[peer] 1467 if request == nil { 1468 t.Errorf("step %d: peer %s missing from requests", i, peer) 1469 continue 1470 } 1471 for _, hash := range hashes { 1472 if !containsHash(request.hashes, hash) { 1473 t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash) 1474 } 1475 } 1476 for _, hash := range request.hashes { 1477 if !containsHash(hashes, hash) { 1478 t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash) 1479 } 1480 } 1481 } 1482 // Check that all transaction announces that are scheduled for 1483 // retrieval but not actively being downloaded are tracked only 1484 // in the stage 2 `announced` map. 
1485 var queued []common.Hash 1486 for _, hashes := range step.tracking { 1487 for _, hash := range hashes { 1488 var found bool 1489 for _, hs := range step.fetching { 1490 if containsHash(hs, hash) { 1491 found = true 1492 break 1493 } 1494 } 1495 if !found { 1496 queued = append(queued, hash) 1497 } 1498 } 1499 } 1500 for _, hash := range queued { 1501 if _, ok := fetcher.announced[hash]; !ok { 1502 t.Errorf("step %d: hash %x missing from announced", i, hash) 1503 } 1504 } 1505 for hash := range fetcher.announced { 1506 if !containsHash(queued, hash) { 1507 t.Errorf("step %d: hash %x extra in announced", i, hash) 1508 } 1509 } 1510 1511 case isUnderpriced: 1512 if fetcher.underpriced.Cardinality() != int(step) { 1513 t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Cardinality(), step) 1514 } 1515 1516 default: 1517 t.Fatalf("step %d: unknown step type %T", i, step) 1518 } 1519 // After every step, cross validate the internal uniqueness invariants 1520 // between stage one and stage two. 1521 for hash := range fetcher.waittime { 1522 if _, ok := fetcher.announced[hash]; ok { 1523 t.Errorf("step %d: hash %s present in both stage 1 and 2", i, hash) 1524 } 1525 } 1526 } 1527 } 1528 1529 // containsHash returns whether a hash is contained within a hash slice. 1530 func containsHash(slice []common.Hash, hash common.Hash) bool { 1531 for _, have := range slice { 1532 if have == hash { 1533 return true 1534 } 1535 } 1536 return false 1537 }