// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package fetcher

import (
	"errors"
	"math/big"
	"math/rand"
	"slices"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/core/txpool"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

var (
	// testTxs is a set of transactions to use during testing that have meaningful hashes.
	testTxs = []*types.Transaction{
		types.NewTransaction(5577006791947779410, common.Address{0x0f}, new(big.Int), 0, new(big.Int), nil),
		types.NewTransaction(15352856648520921629, common.Address{0xbb}, new(big.Int), 0, new(big.Int), nil),
		types.NewTransaction(3916589616287113937, common.Address{0x86}, new(big.Int), 0, new(big.Int), nil),
		types.NewTransaction(9828766684487745566, common.Address{0xac}, new(big.Int), 0, new(big.Int), nil),
	}
	// testTxsHashes is the hashes of the test transactions above
	testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
)

// announce is the expectation of a single tracked announcement: the announced
// transaction hash plus the metadata (type and size) the announcing peer sent.
type announce struct {
	hash common.Hash // announced transaction hash
	kind byte        // announced transaction type
	size uint32      // announced transaction size
}

// doTxNotify is a scenario step injecting a batch of hash announcements from
// one peer into the fetcher (mirrors a NewPooledTransactionHashes message).
type doTxNotify struct {
	peer   string
	hashes []common.Hash
	types  []byte
	sizes  []uint32
}

// doTxEnqueue is a scenario step delivering transactions from a peer, either
// as a reply to an explicit request (direct) or as a broadcast.
type doTxEnqueue struct {
	peer   string
	txs    []*types.Transaction
	direct bool
}

// doWait is a scenario step advancing the simulated clock; if step is set, the
// runner also waits for the fetcher to process the resulting timer event.
type doWait struct {
	time time.Duration
	step bool
}

// doDrop is a scenario step disconnecting the named peer.
type doDrop string

// doFunc is a scenario step running an arbitrary callback (e.g. to unblock a
// channel the fetcher is waiting on).
type doFunc func()

// isWaiting is a scenario assertion of the exact per-peer contents of the
// waitlist (stage one, before the arrival timeout fires).
type isWaiting map[string][]announce

// isScheduled is a scenario assertion of the exact per-peer contents of the
// scheduler: announcements being tracked, hashes actively being fetched, and
// dangling (timed-out but not yet delivered) requests.
type isScheduled struct {
	tracking map[string][]announce
	fetching map[string][]common.Hash
	dangling map[string][]common.Hash
}

// isUnderpriced is a scenario assertion of the underpriced-set size.
type isUnderpriced int

// txFetcherTest represents a test scenario that can be executed by the test
// runner. The scenario is declarative: init constructs the fetcher under test
// and each step either mutates its state (do*) or asserts it (is*).
type txFetcherTest struct {
	init  func() *TxFetcher
	steps []interface{}
}

// Tests that transaction announcements with associated metadata are added to a
// waitlist, and none of them are scheduled for retrieval until the wait expires.
//
// This test is an extended version of the plain waiting test. It's mostly
// to cover the metadata checks without bloating up the basic behavioral tests
// with all the useless extra fields.
func TestTransactionFetcherWaiting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Initial announcement to get something into the waitlist
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x02}, types.LegacyTxType, 222},
				},
			}),
			// Announce from a new peer to check that no overwrite happens
			doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{333, 444}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x02}, types.LegacyTxType, 222},
				},
				"B": {
					{common.Hash{0x03}, types.LegacyTxType, 333},
					{common.Hash{0x04}, types.LegacyTxType, 444},
				},
			}),
			// Announce clashing hashes but unique new peer
			doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 444}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x02}, types.LegacyTxType, 222},
				},
				"B": {
					{common.Hash{0x03}, types.LegacyTxType, 333},
					{common.Hash{0x04}, types.LegacyTxType, 444},
				},
				"C": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x04}, types.LegacyTxType, 444},
				},
			}),
			// Announce existing and clashing hashes from existing peer. Clashes
			// should not overwrite previous announcements.
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}, types: []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{999, 333, 555}},
			isWaiting(map[string][]announce{
				"A": {
					// Note: 0x01 keeps its original size 111, the clashing 999 is ignored
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x02}, types.LegacyTxType, 222},
					{common.Hash{0x03}, types.LegacyTxType, 333},
					{common.Hash{0x05}, types.LegacyTxType, 555},
				},
				"B": {
					{common.Hash{0x03}, types.LegacyTxType, 333},
					{common.Hash{0x04}, types.LegacyTxType, 444},
				},
				"C": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x04}, types.LegacyTxType, 444},
				},
			}),
			// Announce clashing hashes with conflicting metadata. Somebody will
			// be in the wrong, but we don't know yet who.
			doTxNotify{peer: "D", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.BlobTxType}, sizes: []uint32{999, 222}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x02}, types.LegacyTxType, 222},
					{common.Hash{0x03}, types.LegacyTxType, 333},
					{common.Hash{0x05}, types.LegacyTxType, 555},
				},
				"B": {
					{common.Hash{0x03}, types.LegacyTxType, 333},
					{common.Hash{0x04}, types.LegacyTxType, 444},
				},
				"C": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x04}, types.LegacyTxType, 444},
				},
				"D": {
					// Conflicting metadata is tracked per announcing peer
					{common.Hash{0x01}, types.LegacyTxType, 999},
					{common.Hash{0x02}, types.BlobTxType, 222},
				},
			}),
			isScheduled{tracking: nil, fetching: nil},

			// Wait for the arrival timeout which should move all expired items
			// from the wait list to the scheduler
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
						{common.Hash{0x03}, types.LegacyTxType, 333},
						{common.Hash{0x05}, types.LegacyTxType, 555},
					},
					"B": {
						{common.Hash{0x03}, types.LegacyTxType, 333},
						{common.Hash{0x04}, types.LegacyTxType, 444},
					},
					"C": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x04}, types.LegacyTxType, 444},
					},
					"D": {
						{common.Hash{0x01}, types.LegacyTxType, 999},
						{common.Hash{0x02}, types.BlobTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
					"A": {{0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
					"D": {{0x02}},
				},
			},
			// Queue up a non-fetchable transaction and then trigger it with a new
			// peer (weird case to test 1 line in the fetcher)
			doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{666, 777}},
			isWaiting(map[string][]announce{
				"C": {
					{common.Hash{0x06}, types.LegacyTxType, 666},
					{common.Hash{0x07}, types.LegacyTxType, 777},
				},
			}),
			doWait{time: txArriveTimeout, step: true},
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
						{common.Hash{0x03}, types.LegacyTxType, 333},
						{common.Hash{0x05}, types.LegacyTxType, 555},
					},
					"B": {
						{common.Hash{0x03}, types.LegacyTxType, 333},
						{common.Hash{0x04}, types.LegacyTxType, 444},
					},
					"C": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x04}, types.LegacyTxType, 444},
						{common.Hash{0x06}, types.LegacyTxType, 666},
						{common.Hash{0x07}, types.LegacyTxType, 777},
					},
					"D": {
						{common.Hash{0x01}, types.LegacyTxType, 999},
						{common.Hash{0x02}, types.BlobTxType, 222},
					},
				},
				// C is already busy fetching, so 0x06/0x07 stay tracked only
				fetching: map[string][]common.Hash{
					"A": {{0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
					"D": {{0x02}},
				},
			},
			// A fresh idle peer announcing the queued hashes triggers their retrieval
			doTxNotify{peer: "E", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{666, 777}},
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
						{common.Hash{0x03}, types.LegacyTxType, 333},
						{common.Hash{0x05}, types.LegacyTxType, 555},
					},
					"B": {
						{common.Hash{0x03}, types.LegacyTxType, 333},
						{common.Hash{0x04}, types.LegacyTxType, 444},
					},
					"C": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x04}, types.LegacyTxType, 444},
						{common.Hash{0x06}, types.LegacyTxType, 666},
						{common.Hash{0x07}, types.LegacyTxType, 777},
					},
					"D": {
						{common.Hash{0x01}, types.LegacyTxType, 999},
						{common.Hash{0x02}, types.BlobTxType, 222},
					},
					"E": {
						{common.Hash{0x06}, types.LegacyTxType, 666},
						{common.Hash{0x07}, types.LegacyTxType, 777},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
					"D": {{0x02}},
					"E": {{0x06}, {0x07}},
				},
			},
		},
	})
}

// Tests that transaction announcements skip the waiting list if they are
// already scheduled.
func TestTransactionFetcherSkipWaiting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{
				peer:   "A",
				hashes: []common.Hash{{0x01}, {0x02}},
				types:  []byte{types.LegacyTxType, types.LegacyTxType},
				sizes:  []uint32{111, 222},
			},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x02}, types.LegacyTxType, 222},
				},
			}),
			isScheduled{tracking: nil, fetching: nil},

			// Let the arrival timeout fire to promote the waitlist into the scheduler
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce overlaps from the same peer, ensure the new ones end up
			// in stage one, and clashing ones don't get double tracked
			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}, {0x03}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{222, 333}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x03}, types.LegacyTxType, 333},
				},
			}),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce overlaps from a new peer, ensure new transactions end up
			// in stage one and clashing ones get tracked for the new peer
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{222, 333, 444}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x03}, types.LegacyTxType, 333},
				},
				"B": {
					{common.Hash{0x03}, types.LegacyTxType, 333},
					{common.Hash{0x04}, types.LegacyTxType, 444},
				},
			}),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
					"B": {
						// 0x02 was already scheduled, so B's announcement skips the waitlist
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
		},
	})
}

// Tests that only a single transaction request gets scheduled to a peer
// and subsequent announces block or get allotted to someone else.
func TestTransactionFetcherSingletonRequesting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x02}, types.LegacyTxType, 222},
				},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce a new set of transactions from the same peer and ensure
			// they do not start fetching since the peer is already busy
			doTxNotify{peer: "A", hashes: []common.Hash{{0x03}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{333, 444}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x03}, types.LegacyTxType, 333},
					{common.Hash{0x04}, types.LegacyTxType, 444},
				},
			}),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// The new announcements expire into the scheduler, but A's single
			// outstanding request is untouched
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
						{common.Hash{0x03}, types.LegacyTxType, 333},
						{common.Hash{0x04}, types.LegacyTxType, 444},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce a duplicate set of transactions from a new peer and ensure
			// uniquely new ones start downloading, even if clashing.
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x05}, {0x06}}, types: []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{222, 333, 555, 666}},
			isWaiting(map[string][]announce{
				"B": {
					{common.Hash{0x05}, types.LegacyTxType, 555},
					{common.Hash{0x06}, types.LegacyTxType, 666},
				},
			}),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
						{common.Hash{0x03}, types.LegacyTxType, 333},
						{common.Hash{0x04}, types.LegacyTxType, 444},
					},
					"B": {
						{common.Hash{0x02}, types.LegacyTxType, 222},
						{common.Hash{0x03}, types.LegacyTxType, 333},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
					"B": {{0x03}},
				},
			},
		},
	})
}

// Tests that if a transaction retrieval fails, all the transactions get
// instantly schedule back to someone else or the announcements dropped
// if no alternate source is available.
func TestTransactionFetcherFailedRescheduling(t *testing.T) {
	// Create a channel to control when tx requests can fail
	proceed := make(chan struct{})
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				// Block every retrieval until the test says so, then fail it
				func(origin string, hashes []common.Hash) error {
					<-proceed
					return errors.New("peer disconnected")
				},
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x02}, types.LegacyTxType, 222},
				},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// While the original peer is stuck in the request, push in an second
			// data source.
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}, types: []byte{types.LegacyTxType}, sizes: []uint32{222}},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
					"B": {
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Wait until the original request fails and check that transactions
			// are either rescheduled or dropped
			doFunc(func() {
				proceed <- struct{}{} // Allow peer A to return the failure
			}),
			doWait{time: 0, step: true},
			isWaiting(nil),
			isScheduled{
				// 0x01 had no alternate source so it is dropped; 0x02 moves to B
				tracking: map[string][]announce{
					"B": {
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{
					"B": {{0x02}},
				},
			},
			doFunc(func() {
				proceed <- struct{}{} // Allow peer B to return the failure
			}),
			doWait{time: 0, step: true},
			isWaiting(nil),
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that if a transaction retrieval succeeds, all alternate origins
// are cleaned up.
func TestTransactionFetcherCleanup(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				// Accept every delivered transaction into the "pool"
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
			isWaiting(map[string][]announce{
				"A": {
					{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
				},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Request should be delivered
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that if a transaction retrieval succeeds, but the response is empty (no
// transactions available, then all are nuked instead of being rescheduled (yes,
// this was a bug)).
func TestTransactionFetcherCleanupEmpty(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				// Accept every delivered transaction into the "pool"
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
			isWaiting(map[string][]announce{
				"A": {
					{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
				},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver an empty response and ensure the transaction is cleared, not rescheduled
			doTxEnqueue{peer: "A", txs: []*types.Transaction{}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that non-returned transactions are either re-scheduled from a
// different peer, or self if they are after the cutoff point.
func TestTransactionFetcherMissingRescheduling(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				// Accept every delivered transaction into the "pool"
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A",
				hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
				types:  []byte{testTxs[0].Type(), testTxs[1].Type(), testTxs[2].Type()},
				sizes:  []uint32{uint32(testTxs[0].Size()), uint32(testTxs[1].Size()), uint32(testTxs[2].Size())},
			},
			isWaiting(map[string][]announce{
				"A": {
					{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
					{testTxsHashes[1], testTxs[1].Type(), uint32(testTxs[1].Size())},
					{testTxsHashes[2], testTxs[2].Type(), uint32(testTxs[2].Size())},
				},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
						{testTxsHashes[1], testTxs[1].Type(), uint32(testTxs[1].Size())},
						{testTxsHashes[2], testTxs[2].Type(), uint32(testTxs[2].Size())},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
				},
			},
			// Deliver the middle transaction requested, the one before which
			// should be dropped and the one after re-requested.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true},
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{testTxsHashes[2], testTxs[2].Type(), uint32(testTxs[2].Size())},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[2]},
				},
			},
		},
	})
}

// Tests that out of two transactions, if one is missing and the last is
// delivered, the peer gets properly cleaned out from the internal state.
func TestTransactionFetcherMissingCleanup(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				// Accept every delivered transaction into the "pool"
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A",
				hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]},
				types:  []byte{testTxs[0].Type(), testTxs[1].Type()},
				sizes:  []uint32{uint32(testTxs[0].Size()), uint32(testTxs[1].Size())},
			},
			isWaiting(map[string][]announce{
				"A": {
					{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
					{testTxsHashes[1], testTxs[1].Type(), uint32(testTxs[1].Size())},
				},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
						{testTxsHashes[1], testTxs[1].Type(), uint32(testTxs[1].Size())},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
			},
			// Deliver the middle transaction requested, the one before which
			// should be dropped and the one after re-requested.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true}, // This depends on the deterministic random
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that transaction broadcasts properly clean up announcements.
func TestTransactionFetcherBroadcasts(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				// Accept every delivered transaction into the "pool"
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Set up three transactions to be in different stats, waiting, queued and fetching
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}, types: []byte{testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[1].Size())}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}, types: []byte{testTxs[2].Type()}, sizes: []uint32{uint32(testTxs[2].Size())}},

			isWaiting(map[string][]announce{
				"A": {
					{testTxsHashes[2], testTxs[2].Type(), uint32(testTxs[2].Size())},
				},
			}),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
						{testTxsHashes[1], testTxs[1].Type(), uint32(testTxs[1].Size())},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Broadcast all the transactions and ensure everything gets cleaned
			// up, but the dangling request is left alone to avoid doing multiple
			// concurrent requests.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: false},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver the requested hashes
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that the waiting list timers properly reset and reschedule.
func TestTransactionFetcherWaitTimerResets(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}, types: []byte{types.LegacyTxType}, sizes: []uint32{111}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
				},
			}),
			isScheduled{nil, nil, nil},
			// Half the arrival timeout: 0x01 must still be waiting
			doWait{time: txArriveTimeout / 2, step: false},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
				},
			}),
			isScheduled{nil, nil, nil},

			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}, types: []byte{types.LegacyTxType}, sizes: []uint32{222}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x02}, types.LegacyTxType, 222},
				},
			}),
			isScheduled{nil, nil, nil},
			// Another half: only 0x01's timeout expired, 0x02 keeps waiting
			doWait{time: txArriveTimeout / 2, step: true},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x02}, types.LegacyTxType, 222},
				},
			}),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},

			// Final half: 0x02 expires too, but A is already fetching 0x01
			doWait{time: txArriveTimeout / 2, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},
		},
	})
}

// Tests that if a transaction request is not replied to, it will time
// out and be re-scheduled for someone else.
func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				// Accept every delivered transaction into the "pool"
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{
				peer:   "A",
				hashes: []common.Hash{testTxsHashes[0]},
				types:  []byte{testTxs[0].Type()},
				sizes:  []uint32{uint32(testTxs[0].Size())},
			},
			isWaiting(map[string][]announce{
				"A": {{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())}},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Wait until the delivery times out, everything should be cleaned up
			doWait{time: txFetchTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {},
				},
			},
			// Ensure that followup announcements don't get scheduled
			doTxNotify{
				peer:   "A",
				hashes: []common.Hash{testTxsHashes[1]},
				types:  []byte{testTxs[1].Type()},
				sizes:  []uint32{uint32(testTxs[1].Size())},
			},
			doWait{time: txArriveTimeout, step: true},
			isScheduled{
				tracking: map[string][]announce{
					"A": {{testTxsHashes[1], testTxs[1].Type(), uint32(testTxs[1].Size())}},
				},
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {},
				},
			},
			// If the dangling request arrives a bit later, do not choke
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {{testTxsHashes[1], testTxs[1].Type(), uint32(testTxs[1].Size())}},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[1]},
				},
			},
		},
	})
}

// Tests that the fetching timeout timers properly reset and reschedule.
func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Schedule two requests one arrival timeout apart
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}, types: []byte{types.LegacyTxType}, sizes: []uint32{111}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}, types: []byte{types.LegacyTxType}, sizes: []uint32{222}},
			doWait{time: txArriveTimeout, step: true},

			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
					},
					"B": {
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
					"B": {{0x02}},
				},
			},
			// A's request times out first, B's is still live
			doWait{time: txFetchTimeout - txArriveTimeout, step: true},
			isScheduled{
				tracking: map[string][]announce{
					"B": {
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{
					"B": {{0x02}},
				},
				dangling: map[string][]common.Hash{
					"A": {},
				},
			},
			// One more arrival timeout and B's request times out as well
			doWait{time: txArriveTimeout, step: true},
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {},
					"B": {},
				},
			},
		},
	})
}

// Tests that if thousands of transactions are announced, only a small
// number of them will be requested at a time.
func TestTransactionFetcherRateLimiting(t *testing.T) {
	// Create a slew of transactions and announce them
	var (
		hashes    []common.Hash
		ts        []byte
		sizes     []uint32
		announces []announce
	)
	for i := 0; i < maxTxAnnounces; i++ {
		hash := common.Hash{byte(i / 256), byte(i % 256)}
		hashes = append(hashes, hash)
		ts = append(ts, types.LegacyTxType)
		sizes = append(sizes, 111)
		announces = append(announces, announce{
			hash: hash,
			kind: types.LegacyTxType,
			size: 111,
		})
	}
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Announce all the transactions, wait a bit and ensure only a small
			// percentage gets requested
			doTxNotify{peer: "A", hashes: hashes, types: ts, sizes: sizes},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": announces,
				},
				fetching: map[string][]common.Hash{
					"A": hashes[:maxTxRetrievals],
				},
			},
		},
	})
}

// Tests that if huge transactions are announced, only a small number of them will
// be requested at a time, to keep the responses below a reasonable level.
func TestTransactionFetcherBandwidthLimiting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Announce mid size transactions from A to verify that multiple
			// ones can be piled into a single request.
			doTxNotify{peer: "A",
				hashes: []common.Hash{{0x01}, {0x02}, {0x03}, {0x04}},
				types:  []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType, types.LegacyTxType},
				sizes:  []uint32{48 * 1024, 48 * 1024, 48 * 1024, 48 * 1024},
			},
			// Announce exactly on the limit transactions to see that only one
			// gets requested
			doTxNotify{peer: "B",
				hashes: []common.Hash{{0x05}, {0x06}},
				types:  []byte{types.LegacyTxType, types.LegacyTxType},
				sizes:  []uint32{maxTxRetrievalSize, maxTxRetrievalSize},
			},
			// Announce oversized blob transactions to see that overflows are ok
			doTxNotify{peer: "C",
				hashes: []common.Hash{{0x07}, {0x08}},
				types:  []byte{types.BlobTxType, types.BlobTxType},
				sizes:  []uint32{params.BlobTxBlobGasPerBlob * 10, params.BlobTxBlobGasPerBlob * 10},
			},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 48 * 1024},
						{common.Hash{0x02}, types.LegacyTxType, 48 * 1024},
						{common.Hash{0x03}, types.LegacyTxType, 48 * 1024},
						{common.Hash{0x04}, types.LegacyTxType, 48 * 1024},
					},
					"B": {
						{common.Hash{0x05}, types.LegacyTxType, maxTxRetrievalSize},
						{common.Hash{0x06}, types.LegacyTxType, maxTxRetrievalSize},
					},
					"C": {
						{common.Hash{0x07}, types.BlobTxType, params.BlobTxBlobGasPerBlob * 10},
						{common.Hash{0x08}, types.BlobTxType, params.BlobTxBlobGasPerBlob * 10},
					},
				},
				// Only a size-bounded subset of each peer's announcements gets requested
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}},
					"B": {{0x05}},
					"C": {{0x07}},
				},
			},
		},
	})
}

// Tests that the number of transactions a peer is allowed to announce and/or
// request at the same time is hard capped.
func TestTransactionFetcherDoSProtection(t *testing.T) {
	// Create a slew of transactions to announce them
	var (
		hashesA   []common.Hash
		typesA    []byte
		sizesA    []uint32
		announceA []announce
	)
	for i := 0; i < maxTxAnnounces+1; i++ {
		hash := common.Hash{0x01, byte(i / 256), byte(i % 256)}
		hashesA = append(hashesA, hash)
		typesA = append(typesA, types.LegacyTxType)
		sizesA = append(sizesA, 111)

		announceA = append(announceA, announce{
			hash: hash,
			kind: types.LegacyTxType,
			size: 111,
		})
	}
	var (
		hashesB   []common.Hash
		typesB    []byte
		sizesB    []uint32
		announceB []announce
	)
	for i := 0; i < maxTxAnnounces+1; i++ {
		hash := common.Hash{0x02, byte(i / 256), byte(i % 256)}
		hashesB = append(hashesB, hash)
		typesB = append(typesB, types.LegacyTxType)
		sizesB = append(sizesB, 111)

		announceB = append(announceB, announce{
			hash: hash,
			kind: types.LegacyTxType,
			size: 111,
		})
	}
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Announce half of the transaction and wait for them to be scheduled
			doTxNotify{peer: "A", hashes: hashesA[:maxTxAnnounces/2], types: typesA[:maxTxAnnounces/2], sizes: sizesA[:maxTxAnnounces/2]},
			doTxNotify{peer: "B", hashes: hashesB[:maxTxAnnounces/2-1], types: typesB[:maxTxAnnounces/2-1], sizes: sizesB[:maxTxAnnounces/2-1]},
			doWait{time: txArriveTimeout, step: true},

			// Announce the second half and keep them in the wait list
			doTxNotify{peer: "A", hashes: hashesA[maxTxAnnounces/2 : maxTxAnnounces], types: typesA[maxTxAnnounces/2 : maxTxAnnounces], sizes: sizesA[maxTxAnnounces/2 : maxTxAnnounces]},
			doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1], types: typesB[maxTxAnnounces/2-1 : maxTxAnnounces-1], sizes: sizesB[maxTxAnnounces/2-1 : maxTxAnnounces-1]},

			// Ensure the hashes are split half and half
			isWaiting(map[string][]announce{
				"A": announceA[maxTxAnnounces/2 : maxTxAnnounces],
				"B": announceB[maxTxAnnounces/2-1 : maxTxAnnounces-1],
			}),
			isScheduled{
				tracking: map[string][]announce{
					"A": announceA[:maxTxAnnounces/2],
					"B": announceB[:maxTxAnnounces/2-1],
				},
				fetching: map[string][]common.Hash{
					"A": hashesA[:maxTxRetrievals],
					"B": hashesB[:maxTxRetrievals],
				},
			},
			// Ensure that adding even one more hash results in dropping the hash
			doTxNotify{peer: "A", hashes: []common.Hash{hashesA[maxTxAnnounces]}, types: []byte{typesA[maxTxAnnounces]}, sizes: []uint32{sizesA[maxTxAnnounces]}},
			doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces-1 : maxTxAnnounces+1], types: typesB[maxTxAnnounces-1 : maxTxAnnounces+1], sizes: sizesB[maxTxAnnounces-1 : maxTxAnnounces+1]},

			isWaiting(map[string][]announce{
				"A": announceA[maxTxAnnounces/2 : maxTxAnnounces],
				"B": announceB[maxTxAnnounces/2-1 : maxTxAnnounces],
			}),
			isScheduled{
				tracking: map[string][]announce{
					"A": announceA[:maxTxAnnounces/2],
					"B": announceB[:maxTxAnnounces/2-1],
				},
				fetching: map[string][]common.Hash{
					"A": hashesA[:maxTxRetrievals],
					"B": hashesB[:maxTxRetrievals],
				},
			},
		},
	})
}

// Tests that underpriced transactions don't get rescheduled after being rejected.
func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				// Reject every delivered transaction, rotating through the
				// three underpriced-flavored errors.
				func(txs []*types.Transaction) []error {
					errs := make([]error, len(txs))
					for i := 0; i < len(errs); i++ {
						if i%3 == 0 {
							errs[i] = txpool.ErrUnderpriced
						} else if i%3 == 1 {
							errs[i] = txpool.ErrReplaceUnderpriced
						} else {
							errs[i] = txpool.ErrTxGasPriceTooLow
						}
					}
					return errs
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Deliver a transaction through the fetcher, but reject as underpriced
			doTxNotify{peer: "A",
				hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]},
				types:  []byte{testTxs[0].Type(), testTxs[1].Type()},
				sizes:  []uint32{uint32(testTxs[0].Size()), uint32(testTxs[1].Size())},
			},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}, direct: true},
			isScheduled{nil, nil, nil},

			// Try to announce the transaction again, ensure it's not scheduled back
			doTxNotify{peer: "A",
				hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
				types:  []byte{testTxs[0].Type(), testTxs[1].Type(), testTxs[2].Type()},
				sizes:  []uint32{uint32(testTxs[0].Size()), uint32(testTxs[1].Size()), uint32(testTxs[2].Size())},
			}, // [2] is needed to force a step in the fetcher
			isWaiting(map[string][]announce{
				"A": {{testTxsHashes[2], testTxs[2].Type(), uint32(testTxs[2].Size())}},
			}),
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that underpriced transactions don't get rescheduled after being rejected,
// but at the same time there's a hard cap on the number of transactions that are
// tracked.
func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
	// Temporarily disable fetch timeouts as they massively mess up the simulated clock
	defer func(timeout time.Duration) { txFetchTimeout = timeout }(txFetchTimeout)
	txFetchTimeout = 24 * time.Hour

	// Create a slew of transactions to max out the underpriced set
	var txs []*types.Transaction
	for i := 0; i < maxTxUnderpricedSetSize+1; i++ {
		txs = append(txs, types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil))
	}
	var (
		hashes []common.Hash
		ts     []byte
		sizes  []uint32
		annos  []announce
	)
	for _, tx := range txs {
		hashes = append(hashes, tx.Hash())
		ts = append(ts, tx.Type())
		sizes = append(sizes, uint32(tx.Size()))
		annos = append(annos, announce{
			hash: tx.Hash(),
			kind: tx.Type(),
			size: uint32(tx.Size()),
		})
	}
	// Generate a set of steps to announce and deliver the entire set of transactions
	var steps []interface{}
	for i := 0; i < maxTxUnderpricedSetSize/maxTxRetrievals; i++ {
		steps = append(steps, doTxNotify{
			peer:   "A",
			hashes: hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
			types:  ts[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
			sizes:  sizes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
		})
		steps = append(steps, isWaiting(map[string][]announce{
			"A": annos[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
		}))
		steps = append(steps, doWait{time: txArriveTimeout, step: true})
		steps = append(steps, isScheduled{
			tracking: map[string][]announce{
				"A": annos[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
			},
			fetching: map[string][]common.Hash{
				"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
			},
		})
		steps = append(steps, doTxEnqueue{peer: "A", txs: txs[i*maxTxRetrievals : (i+1)*maxTxRetrievals], direct: true})
		steps = append(steps, isWaiting(nil))
		steps = append(steps, isScheduled{nil, nil, nil})
		steps = append(steps, isUnderpriced((i+1)*maxTxRetrievals))
	}
	testTransactionFetcher(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				// Reject every delivered transaction as underpriced so they
				// all land in the underpriced tracking set.
				func(txs []*types.Transaction) []error {
					errs := make([]error, len(txs))
					for i := 0; i < len(errs); i++ {
						errs[i] = txpool.ErrUnderpriced
					}
					return errs
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: append(steps, []interface{}{
			// The preparation of the test has already been done in `steps`, add the last check
			doTxNotify{
				peer:   "A",
				hashes: []common.Hash{hashes[maxTxUnderpricedSetSize]},
				types:  []byte{ts[maxTxUnderpricedSetSize]},
				sizes:  []uint32{sizes[maxTxUnderpricedSetSize]},
			},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{txs[maxTxUnderpricedSetSize]}, direct: true},
			// The set must stay capped at its maximum size
			isUnderpriced(maxTxUnderpricedSetSize),
		}...),
	})
}

// Tests that unexpected deliveries don't corrupt the internal state.
func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Deliver something out of the blue
			isWaiting(nil),
			isScheduled{nil, nil, nil},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: false},
			isWaiting(nil),
			isScheduled{nil, nil, nil},

			// Set up a few hashes into various stages
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}, types: []byte{testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[1].Size())}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}, types: []byte{testTxs[2].Type()}, sizes: []uint32{uint32(testTxs[2].Size())}},

			isWaiting(map[string][]announce{
				"A": {
					{testTxsHashes[2], testTxs[2].Type(), uint32(testTxs[2].Size())},
				},
			}),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
						{testTxsHashes[1], testTxs[1].Type(), uint32(testTxs[1].Size())},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver everything and more out of the blue
			doTxEnqueue{peer: "B", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2], testTxs[3]}, direct: true},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
		},
	})
}

// Tests that dropping a peer cleans out all internal data structures in all the
// live or dangling stages.
func TestTransactionFetcherDrop(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Set up a few hashes into various stages
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}, types: []byte{types.LegacyTxType}, sizes: []uint32{111}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}, types: []byte{types.LegacyTxType}, sizes: []uint32{222}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{{0x03}}, types: []byte{types.LegacyTxType}, sizes: []uint32{333}},

			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x03}, types.LegacyTxType, 333},
				},
			}),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},
			// Drop the peer and ensure everything's cleaned out
			doDrop("A"),
			isWaiting(nil),
			isScheduled{nil, nil, nil},

			// Push the node into a dangling (timeout) state
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			doWait{time: txFetchTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {},
				},
			},
			// Drop the peer and ensure everything's cleaned out
			doDrop("A"),
			isWaiting(nil),
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that dropping a peer instantly reschedules failed announcements to any
// available peer.
func TestTransactionFetcherDropRescheduling(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Set up a few hashes into various stages
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}, types: []byte{types.LegacyTxType}, sizes: []uint32{111}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "B", hashes: []common.Hash{{0x01}}, types: []byte{types.LegacyTxType}, sizes: []uint32{111}},

			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {{common.Hash{0x01}, types.LegacyTxType, 111}},
					"B": {{common.Hash{0x01}, types.LegacyTxType, 111}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},
			// Drop the peer and ensure the announcement is rescheduled to B
			doDrop("A"),
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"B": {{common.Hash{0x01}, types.LegacyTxType, 111}},
				},
				fetching: map[string][]common.Hash{
					"B": {{0x01}},
				},
			},
		},
	})
}

// Tests that announced transactions with the wrong transaction type or size will
// result in a dropped peer.
func TestInvalidAnnounceMetadata(t *testing.T) {
	drop := make(chan string, 2)
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				// Record dropped peers so the test can wait on them
				func(peer string) { drop <- peer },
			)
		},
		steps: []interface{}{
			// Initial announcement to get something into the waitlist
			doTxNotify{
				peer:   "A",
				hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]},
				types:  []byte{testTxs[0].Type(), testTxs[1].Type()},
				sizes:  []uint32{uint32(testTxs[0].Size()), uint32(testTxs[1].Size())},
			},
			isWaiting(map[string][]announce{
				"A": {
					{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
					{testTxsHashes[1], testTxs[1].Type(), uint32(testTxs[1].Size())},
				},
			}),
			// Announce from new peers conflicting transactions: B lies about
			// the size, C lies about the type.
			doTxNotify{
				peer:   "B",
				hashes: []common.Hash{testTxsHashes[0]},
				types:  []byte{testTxs[0].Type()},
				sizes:  []uint32{1024 + uint32(testTxs[0].Size())},
			},
			doTxNotify{
				peer:   "C",
				hashes: []common.Hash{testTxsHashes[1]},
				types:  []byte{1 + testTxs[1].Type()},
				sizes:  []uint32{uint32(testTxs[1].Size())},
			},
			isWaiting(map[string][]announce{
				"A": {
					{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
					{testTxsHashes[1], testTxs[1].Type(), uint32(testTxs[1].Size())},
				},
				"B": {
					{testTxsHashes[0], testTxs[0].Type(), 1024 + uint32(testTxs[0].Size())},
				},
				"C": {
					{testTxsHashes[1], 1 + testTxs[1].Type(), uint32(testTxs[1].Size())},
				},
			}),
			// Schedule all the transactions for retrieval
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{testTxsHashes[0], testTxs[0].Type(), uint32(testTxs[0].Size())},
						{testTxsHashes[1], testTxs[1].Type(), uint32(testTxs[1].Size())},
					},
					"B": {
						{testTxsHashes[0], testTxs[0].Type(), 1024 + uint32(testTxs[0].Size())},
					},
					"C": {
						{testTxsHashes[1], 1 + testTxs[1].Type(), uint32(testTxs[1].Size())},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
					"C": {testTxsHashes[1]},
				},
			},
			// Deliver the transactions and wait for B to be dropped
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}},
			doFunc(func() { <-drop }),
			doFunc(func() { <-drop }),
		},
	})
}

// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction timing out and clashing on re-add with a concurrently
// announced one.
func TestTransactionFetcherFuzzCrash01(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Get a transaction into fetching mode and make it dangling with a broadcast
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},

			// Notify the dangling transaction once more and crash via a timeout
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
			doWait{time: txFetchTimeout, step: true},
		},
	})
}

// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction getting peer-dropped and clashing on re-add with a
// concurrently announced one.
func TestTransactionFetcherFuzzCrash02(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Get a transaction into fetching mode and make it dangling with a broadcast
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},

			// Notify the dangling transaction once more, re-fetch, and crash via a drop and timeout
			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
			doWait{time: txArriveTimeout, step: true},
			doDrop("A"),
			doWait{time: txFetchTimeout, step: true},
		},
	})
}

// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction getting rescheduled via a partial delivery, clashing
// with a concurrent notify.
func TestTransactionFetcherFuzzCrash03(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Get a transaction into fetching mode and make it dangling with a broadcast
			doTxNotify{
				peer:   "A",
				hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]},
				types:  []byte{testTxs[0].Type(), testTxs[1].Type()},
				sizes:  []uint32{uint32(testTxs[0].Size()), uint32(testTxs[1].Size())},
			},
			doWait{time: txFetchTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}},

			// Notify the dangling transaction once more, partially deliver, clash&crash with a timeout
			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
			doWait{time: txArriveTimeout, step: true},

			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true},
			doWait{time: txFetchTimeout, step: true},
		},
	})
}

// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction getting rescheduled via a disconnect, clashing with
// a concurrent notify.
func TestTransactionFetcherFuzzCrash04(t *testing.T) {
	// Create a channel to control when tx requests can fail
	proceed := make(chan struct{})

	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				// Block every retrieval request until the test unblocks it,
				// then fail it with a disconnect error.
				func(string, []common.Hash) error {
					<-proceed
					return errors.New("peer disconnected")
				},
				nil,
			)
		},
		steps: []interface{}{
			// Get a transaction into fetching mode and make it dangling with a broadcast
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},

			// Notify the dangling transaction once more, re-fetch, and crash via an in-flight disconnect
			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{uint32(testTxs[0].Size())}},
			doWait{time: txArriveTimeout, step: true},
			doFunc(func() {
				proceed <- struct{}{} // Allow peer A to return the failure
			}),
			doWait{time: 0, step: true},
			doWait{time: txFetchTimeout, step: true},
		},
	})
}

// This test ensures the blob transactions will be scheduled for fetching
// once they are announced in the network.
func TestBlobTransactionAnnounce(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Initial announcement to get something into the waitlist
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x02}, types.LegacyTxType, 222},
				},
			}),
			// Announce a blob transaction
			doTxNotify{peer: "B", hashes: []common.Hash{{0x03}}, types: []byte{types.BlobTxType}, sizes: []uint32{333}},
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x02}, types.LegacyTxType, 222},
				},
				"B": {
					{common.Hash{0x03}, types.BlobTxType, 333},
				},
			}),
			doWait{time: 0, step: true}, // zero time, but the blob fetching should be scheduled
			isWaiting(map[string][]announce{
				"A": {
					{common.Hash{0x01}, types.LegacyTxType, 111},
					{common.Hash{0x02}, types.LegacyTxType, 222},
				},
			}),
			isScheduled{
				tracking: map[string][]announce{
					"B": {
						{common.Hash{0x03}, types.BlobTxType, 333},
					},
				},
				fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
					"B": {{0x03}},
				},
			},
			doWait{time: txArriveTimeout, step: true}, // full wait elapsed, the legacy announcements should be scheduled too
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, types.LegacyTxType, 111},
						{common.Hash{0x02}, types.LegacyTxType, 222},
					},
					"B": {
						{common.Hash{0x03}, types.BlobTxType, 333},
					},
				},
				fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
					"A": {{0x01}, {0x02}},
					"B": {{0x03}},
				},
			},
		},
	})
}

// testTransactionFetcherParallel runs the given scenario in parallel with the
// other tests of the package.
func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) {
	t.Parallel()
	testTransactionFetcher(t, tt)
}

// testTransactionFetcher executes a txFetcherTest scenario step by step against
// a fetcher driven by a simulated clock and a seeded randomizer.
func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
	// Create a fetcher and hook into its simulated fields
	clock := new(mclock.Simulated)
	wait := make(chan struct{})

	fetcher := tt.init()
	fetcher.clock = clock
	fetcher.step = wait
	fetcher.rand = rand.New(rand.NewSource(0x3a29))

	fetcher.Start()
	defer fetcher.Stop()

	defer func() { // drain the wait chan on exit
		for {
			select {
			case <-wait:
			default:
				return
			}
		}
	}()

	// Crunch through all the test steps and execute them
	for i, step := range tt.steps {
		// Process the original or expanded steps
		switch step := step.(type) {
		case doTxNotify:
			if err := fetcher.Notify(step.peer, step.types, step.sizes, step.hashes); err != nil {
				t.Errorf("step %d: %v", i, err)
			}
			<-wait // Fetcher needs to process this, wait until it's done
			select {
			case <-wait:
				panic("wtf")
			case <-time.After(time.Millisecond):
			}

		case doTxEnqueue:
			if err := fetcher.Enqueue(step.peer, step.txs, step.direct); err != nil {
				t.Errorf("step %d: %v", i, err)
			}
			<-wait // Fetcher needs to process this, wait until it's done

		case doWait:
			clock.Run(step.time)
			if step.step {
				<-wait // Fetcher supposed to do something, wait until it's done
			}

		case doDrop:
			if err := fetcher.Drop(string(step)); err != nil {
				t.Errorf("step %d: %v", i, err)
			}
			<-wait // Fetcher needs to process this, wait until it's done

		case doFunc:
			step()

		case isWaiting:
			// We need to check that the
waiting list (stage 1) internals 1900 // match with the expected set. Check the peer->hash mappings 1901 // first. 1902 for peer, announces := range step { 1903 waiting := fetcher.waitslots[peer] 1904 if waiting == nil { 1905 t.Errorf("step %d: peer %s missing from waitslots", i, peer) 1906 continue 1907 } 1908 for _, ann := range announces { 1909 if meta, ok := waiting[ann.hash]; !ok { 1910 t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, ann.hash) 1911 } else { 1912 if meta.kind != ann.kind || meta.size != ann.size { 1913 t.Errorf("step %d, peer %s, hash %x: waitslot metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, ann.kind, ann.size) 1914 } 1915 } 1916 } 1917 for hash, meta := range waiting { 1918 ann := announce{hash: hash, kind: meta.kind, size: meta.size} 1919 if !containsAnnounce(announces, ann) { 1920 t.Errorf("step %d, peer %s: announce %v extra in waitslots", i, peer, ann) 1921 } 1922 } 1923 } 1924 for peer := range fetcher.waitslots { 1925 if _, ok := step[peer]; !ok { 1926 t.Errorf("step %d: peer %s extra in waitslots", i, peer) 1927 } 1928 } 1929 // Peer->hash sets correct, check the hash->peer and timeout sets 1930 for peer, announces := range step { 1931 for _, ann := range announces { 1932 if _, ok := fetcher.waitlist[ann.hash][peer]; !ok { 1933 t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, ann.hash, peer) 1934 } 1935 if _, ok := fetcher.waittime[ann.hash]; !ok { 1936 t.Errorf("step %d: hash %x missing from waittime", i, ann.hash) 1937 } 1938 } 1939 } 1940 for hash, peers := range fetcher.waitlist { 1941 if len(peers) == 0 { 1942 t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash) 1943 } 1944 for peer := range peers { 1945 if !containsHashInAnnounces(step[peer], hash) { 1946 t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer) 1947 } 1948 } 1949 } 1950 for hash := range fetcher.waittime { 1951 var found bool 1952 for _, announces := range step { 1953 if 
containsHashInAnnounces(announces, hash) { 1954 found = true 1955 break 1956 } 1957 } 1958 if !found { 1959 t.Errorf("step %d,: hash %x extra in waittime", i, hash) 1960 } 1961 } 1962 1963 case isScheduled: 1964 // Check that all scheduled announces are accounted for and no 1965 // extra ones are present. 1966 for peer, announces := range step.tracking { 1967 scheduled := fetcher.announces[peer] 1968 if scheduled == nil { 1969 t.Errorf("step %d: peer %s missing from announces", i, peer) 1970 continue 1971 } 1972 for _, ann := range announces { 1973 if meta, ok := scheduled[ann.hash]; !ok { 1974 t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, ann.hash) 1975 } else { 1976 if meta.kind != ann.kind || meta.size != ann.size { 1977 t.Errorf("step %d, peer %s, hash %x: announce metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, ann.kind, ann.size) 1978 } 1979 } 1980 } 1981 for hash, meta := range scheduled { 1982 ann := announce{hash: hash, kind: meta.kind, size: meta.size} 1983 if !containsAnnounce(announces, ann) { 1984 t.Errorf("step %d, peer %s: announce %x extra in announces", i, peer, hash) 1985 } 1986 } 1987 } 1988 for peer := range fetcher.announces { 1989 if _, ok := step.tracking[peer]; !ok { 1990 t.Errorf("step %d: peer %s extra in announces", i, peer) 1991 } 1992 } 1993 // Check that all announces required to be fetching are in the 1994 // appropriate sets 1995 for peer, hashes := range step.fetching { 1996 request := fetcher.requests[peer] 1997 if request == nil { 1998 t.Errorf("step %d: peer %s missing from requests", i, peer) 1999 continue 2000 } 2001 for _, hash := range hashes { 2002 if !slices.Contains(request.hashes, hash) { 2003 t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash) 2004 } 2005 } 2006 for _, hash := range request.hashes { 2007 if !slices.Contains(hashes, hash) { 2008 t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash) 2009 } 2010 } 2011 } 2012 for peer := range 
fetcher.requests { 2013 if _, ok := step.fetching[peer]; !ok { 2014 if _, ok := step.dangling[peer]; !ok { 2015 t.Errorf("step %d: peer %s extra in requests", i, peer) 2016 } 2017 } 2018 } 2019 for peer, hashes := range step.fetching { 2020 for _, hash := range hashes { 2021 if _, ok := fetcher.fetching[hash]; !ok { 2022 t.Errorf("step %d, peer %s: hash %x missing from fetching", i, peer, hash) 2023 } 2024 } 2025 } 2026 for hash := range fetcher.fetching { 2027 var found bool 2028 for _, req := range fetcher.requests { 2029 if slices.Contains(req.hashes, hash) { 2030 found = true 2031 break 2032 } 2033 } 2034 if !found { 2035 t.Errorf("step %d: hash %x extra in fetching", i, hash) 2036 } 2037 } 2038 for _, hashes := range step.fetching { 2039 for _, hash := range hashes { 2040 alternates := fetcher.alternates[hash] 2041 if alternates == nil { 2042 t.Errorf("step %d: hash %x missing from alternates", i, hash) 2043 continue 2044 } 2045 for peer := range alternates { 2046 if _, ok := fetcher.announces[peer]; !ok { 2047 t.Errorf("step %d: peer %s extra in alternates", i, peer) 2048 continue 2049 } 2050 if _, ok := fetcher.announces[peer][hash]; !ok { 2051 t.Errorf("step %d, peer %s: hash %x extra in alternates", i, hash, peer) 2052 continue 2053 } 2054 } 2055 for p := range fetcher.announced[hash] { 2056 if _, ok := alternates[p]; !ok { 2057 t.Errorf("step %d, hash %x: peer %s missing from alternates", i, hash, p) 2058 continue 2059 } 2060 } 2061 } 2062 } 2063 for peer, hashes := range step.dangling { 2064 request := fetcher.requests[peer] 2065 if request == nil { 2066 t.Errorf("step %d: peer %s missing from requests", i, peer) 2067 continue 2068 } 2069 for _, hash := range hashes { 2070 if !slices.Contains(request.hashes, hash) { 2071 t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash) 2072 } 2073 } 2074 for _, hash := range request.hashes { 2075 if !slices.Contains(hashes, hash) { 2076 t.Errorf("step %d, peer %s: hash %x extra in requests", i, 
peer, hash) 2077 } 2078 } 2079 } 2080 // Check that all transaction announces that are scheduled for 2081 // retrieval but not actively being downloaded are tracked only 2082 // in the stage 2 `announced` map. 2083 var queued []common.Hash 2084 for _, announces := range step.tracking { 2085 for _, ann := range announces { 2086 var found bool 2087 for _, hs := range step.fetching { 2088 if slices.Contains(hs, ann.hash) { 2089 found = true 2090 break 2091 } 2092 } 2093 if !found { 2094 queued = append(queued, ann.hash) 2095 } 2096 } 2097 } 2098 for _, hash := range queued { 2099 if _, ok := fetcher.announced[hash]; !ok { 2100 t.Errorf("step %d: hash %x missing from announced", i, hash) 2101 } 2102 } 2103 for hash := range fetcher.announced { 2104 if !slices.Contains(queued, hash) { 2105 t.Errorf("step %d: hash %x extra in announced", i, hash) 2106 } 2107 } 2108 2109 case isUnderpriced: 2110 if fetcher.underpriced.Len() != int(step) { 2111 t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Len(), step) 2112 } 2113 2114 default: 2115 t.Fatalf("step %d: unknown step type %T", i, step) 2116 } 2117 // After every step, cross validate the internal uniqueness invariants 2118 // between stage one and stage two. 2119 for hash := range fetcher.waittime { 2120 if _, ok := fetcher.announced[hash]; ok { 2121 t.Errorf("step %d: hash %s present in both stage 1 and 2", i, hash) 2122 } 2123 } 2124 } 2125 } 2126 2127 // containsAnnounce returns whether an announcement is contained within a slice 2128 // of announcements. 2129 func containsAnnounce(slice []announce, ann announce) bool { 2130 for _, have := range slice { 2131 if have.hash == ann.hash { 2132 if have.kind != ann.kind { 2133 return false 2134 } 2135 if have.size != ann.size { 2136 return false 2137 } 2138 return true 2139 } 2140 } 2141 return false 2142 } 2143 2144 // containsHashInAnnounces returns whether a hash is contained within a slice 2145 // of announcements. 
2146 func containsHashInAnnounces(slice []announce, hash common.Hash) bool { 2147 for _, have := range slice { 2148 if have.hash == hash { 2149 return true 2150 } 2151 } 2152 return false 2153 } 2154 2155 // TestTransactionForgotten verifies that underpriced transactions are properly 2156 // forgotten after the timeout period, testing both the exact timeout boundary 2157 // and the cleanup of the underpriced cache. 2158 func TestTransactionForgotten(t *testing.T) { 2159 // Test ensures that underpriced transactions are properly forgotten after a timeout period, 2160 // including checks for timeout boundary and cache cleanup. 2161 t.Parallel() 2162 2163 // Create a mock clock for deterministic time control 2164 mockClock := new(mclock.Simulated) 2165 mockTime := func() time.Time { 2166 nanoTime := int64(mockClock.Now()) 2167 return time.Unix(nanoTime/1000000000, nanoTime%1000000000) 2168 } 2169 2170 fetcher := NewTxFetcherForTests( 2171 func(common.Hash) bool { return false }, 2172 func(txs []*types.Transaction) []error { 2173 errs := make([]error, len(txs)) 2174 for i := 0; i < len(errs); i++ { 2175 errs[i] = txpool.ErrUnderpriced 2176 } 2177 return errs 2178 }, 2179 func(string, []common.Hash) error { return nil }, 2180 func(string) {}, 2181 mockClock, 2182 mockTime, 2183 rand.New(rand.NewSource(0)), // Use fixed seed for deterministic behavior 2184 ) 2185 fetcher.Start() 2186 defer fetcher.Stop() 2187 2188 // Create two test transactions with the same timestamp 2189 tx1 := types.NewTransaction(0, common.Address{}, big.NewInt(100), 21000, big.NewInt(1), nil) 2190 tx2 := types.NewTransaction(1, common.Address{}, big.NewInt(100), 21000, big.NewInt(1), nil) 2191 2192 now := mockTime() 2193 tx1.SetTime(now) 2194 tx2.SetTime(now) 2195 2196 // Initial state: both transactions should be marked as underpriced 2197 if err := fetcher.Enqueue("peer", []*types.Transaction{tx1, tx2}, false); err != nil { 2198 t.Fatal(err) 2199 } 2200 if !fetcher.isKnownUnderpriced(tx1.Hash()) 
{ 2201 t.Error("tx1 should be underpriced") 2202 } 2203 if !fetcher.isKnownUnderpriced(tx2.Hash()) { 2204 t.Error("tx2 should be underpriced") 2205 } 2206 2207 // Verify cache size 2208 if size := fetcher.underpriced.Len(); size != 2 { 2209 t.Errorf("wrong underpriced cache size: got %d, want %d", size, 2) 2210 } 2211 2212 // Just before timeout: transactions should still be underpriced 2213 mockClock.Run(maxTxUnderpricedTimeout - time.Second) 2214 if !fetcher.isKnownUnderpriced(tx1.Hash()) { 2215 t.Error("tx1 should still be underpriced before timeout") 2216 } 2217 if !fetcher.isKnownUnderpriced(tx2.Hash()) { 2218 t.Error("tx2 should still be underpriced before timeout") 2219 } 2220 2221 // Exactly at timeout boundary: transactions should still be present 2222 mockClock.Run(time.Second) 2223 if !fetcher.isKnownUnderpriced(tx1.Hash()) { 2224 t.Error("tx1 should be present exactly at timeout") 2225 } 2226 if !fetcher.isKnownUnderpriced(tx2.Hash()) { 2227 t.Error("tx2 should be present exactly at timeout") 2228 } 2229 2230 // After timeout: transactions should be forgotten 2231 mockClock.Run(time.Second) 2232 if fetcher.isKnownUnderpriced(tx1.Hash()) { 2233 t.Error("tx1 should be forgotten after timeout") 2234 } 2235 if fetcher.isKnownUnderpriced(tx2.Hash()) { 2236 t.Error("tx2 should be forgotten after timeout") 2237 } 2238 2239 // Verify cache is empty 2240 if size := fetcher.underpriced.Len(); size != 0 { 2241 t.Errorf("wrong underpriced cache size after timeout: got %d, want 0", size) 2242 } 2243 2244 // Re-enqueue tx1 with updated timestamp 2245 tx1.SetTime(mockTime()) 2246 if err := fetcher.Enqueue("peer", []*types.Transaction{tx1}, false); err != nil { 2247 t.Fatal(err) 2248 } 2249 if !fetcher.isKnownUnderpriced(tx1.Hash()) { 2250 t.Error("tx1 should be underpriced after re-enqueueing with new timestamp") 2251 } 2252 if fetcher.isKnownUnderpriced(tx2.Hash()) { 2253 t.Error("tx2 should remain forgotten") 2254 } 2255 2256 // Verify final cache state 2257 if size 
:= fetcher.underpriced.Len(); size != 1 { 2258 t.Errorf("wrong final underpriced cache size: got %d, want 1", size) 2259 } 2260 }