github.com/theQRL/go-zond@v0.2.1/zond/fetcher/tx_fetcher_test.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package fetcher

import (
	"errors"
	"math/big"
	"math/rand"
	"slices"
	"testing"
	"time"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/common/mclock"
	"github.com/theQRL/go-zond/core/txpool"
	"github.com/theQRL/go-zond/core/types"
)

var (
	to0 = common.Address{0x0f}
	to1 = common.Address{0xbb}
	to2 = common.Address{0x86}
	to3 = common.Address{0xac}
	// testTxs is a set of transactions to use during testing that have meaningful hashes.
	testTxs = []*types.Transaction{
		types.NewTx(&types.DynamicFeeTx{Nonce: 15352856648520921629, To: &to1, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
		types.NewTx(&types.DynamicFeeTx{Nonce: 5577006791947779410, To: &to0, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
		types.NewTx(&types.DynamicFeeTx{Nonce: 3916589616287113937, To: &to2, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
		types.NewTx(&types.DynamicFeeTx{Nonce: 9828766684487745566, To: &to3, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil}),
	}
	// testTxsHashes are the hashes of the test transactions above
	testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
)

// announce is a transaction announcement as tracked by the tests: a hash with
// optional type and size metadata.
type announce struct {
	hash common.Hash
	kind *byte
	size *uint32
}

// typeptr and sizeptr return pointers to literal metadata values.
func typeptr(t byte) *byte     { return &t }
func sizeptr(n uint32) *uint32 { return &n }

// doTxNotify is a test step announcing a batch of hashes from a peer.
type doTxNotify struct {
	peer   string
	hashes []common.Hash
	types  []byte
	sizes  []uint32
}

// doTxEnqueue is a test step delivering a batch of transactions from a peer,
// either as a direct reply or as a broadcast.
type doTxEnqueue struct {
	peer   string
	txs    []*types.Transaction
	direct bool
}

// doWait is a test step advancing the simulated clock; step signals whether
// the fetcher is expected to react to the elapsed time.
type doWait struct {
	time time.Duration
	step bool
}

// doDrop is a test step disconnecting a peer from the fetcher.
type doDrop string

// doFunc is a test step running an arbitrary callback.
type doFunc func()

// isWaitingWithMeta and isWaiting assert the per-peer contents of the waiting
// stage, with and without announcement metadata.
type isWaitingWithMeta map[string][]announce
type isWaiting map[string][]common.Hash

// isScheduledWithMeta and isScheduled assert the per-peer contents of the
// scheduling stage: tracked announcements, in-flight retrievals and stale
// (dangling) requests.
type isScheduledWithMeta struct {
	tracking map[string][]announce
	fetching map[string][]common.Hash
	dangling map[string][]common.Hash
}
type isScheduled struct {
	tracking map[string][]common.Hash
	fetching map[string][]common.Hash
	dangling map[string][]common.Hash
}

// isUnderpriced asserts the number of transactions tracked as underpriced.
type isUnderpriced int

// txFetcherTest represents a test scenario that can be executed by the test
// runner.
type txFetcherTest struct {
	init  func() *TxFetcher
	steps []interface{}
}

// Tests that transaction announcements are added to a waitlist, and none
// of them are scheduled for retrieval until the wait expires.
100 func TestTransactionFetcherWaiting(t *testing.T) { 101 testTransactionFetcherParallel(t, txFetcherTest{ 102 init: func() *TxFetcher { 103 return NewTxFetcher( 104 func(common.Hash) bool { return false }, 105 nil, 106 func(string, []common.Hash) error { return nil }, 107 nil, 108 ) 109 }, 110 steps: []interface{}{ 111 // Initial announcement to get something into the waitlist 112 doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}}, 113 isWaiting(map[string][]common.Hash{ 114 "A": {{0x01}, {0x02}}, 115 }), 116 // Announce from a new peer to check that no overwrite happens 117 doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}}, 118 isWaiting(map[string][]common.Hash{ 119 "A": {{0x01}, {0x02}}, 120 "B": {{0x03}, {0x04}}, 121 }), 122 // Announce clashing hashes but unique new peer 123 doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}}, 124 isWaiting(map[string][]common.Hash{ 125 "A": {{0x01}, {0x02}}, 126 "B": {{0x03}, {0x04}}, 127 "C": {{0x01}, {0x04}}, 128 }), 129 // Announce existing and clashing hashes from existing peer 130 doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}}, 131 isWaiting(map[string][]common.Hash{ 132 "A": {{0x01}, {0x02}, {0x03}, {0x05}}, 133 "B": {{0x03}, {0x04}}, 134 "C": {{0x01}, {0x04}}, 135 }), 136 isScheduled{tracking: nil, fetching: nil}, 137 138 // Wait for the arrival timeout which should move all expired items 139 // from the wait list to the scheduler 140 doWait{time: txArriveTimeout, step: true}, 141 isWaiting(nil), 142 isScheduled{ 143 tracking: map[string][]common.Hash{ 144 "A": {{0x01}, {0x02}, {0x03}, {0x05}}, 145 "B": {{0x03}, {0x04}}, 146 "C": {{0x01}, {0x04}}, 147 }, 148 fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer 149 "A": {{0x02}, {0x03}, {0x05}}, 150 "C": {{0x01}, {0x04}}, 151 }, 152 }, 153 // Queue up a non-fetchable transaction and then trigger it with a new 154 // peer (weird case to test 1 line in the fetcher) 155 doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}}, 156 isWaiting(map[string][]common.Hash{ 157 "C": {{0x06}, {0x07}}, 158 }), 159 doWait{time: txArriveTimeout, step: true}, 160 isScheduled{ 161 tracking: map[string][]common.Hash{ 162 "A": {{0x01}, {0x02}, {0x03}, {0x05}}, 163 "B": {{0x03}, {0x04}}, 164 "C": {{0x01}, {0x04}, {0x06}, {0x07}}, 165 }, 166 fetching: map[string][]common.Hash{ 167 "A": {{0x02}, {0x03}, {0x05}}, 168 "C": {{0x01}, {0x04}}, 169 }, 170 }, 171 doTxNotify{peer: "D", hashes: []common.Hash{{0x06}, {0x07}}}, 172 isScheduled{ 173 tracking: map[string][]common.Hash{ 174 "A": {{0x01}, {0x02}, {0x03}, {0x05}}, 175 "B": {{0x03}, {0x04}}, 176 "C": {{0x01}, {0x04}, {0x06}, {0x07}}, 177 "D": {{0x06}, {0x07}}, 178 }, 179 fetching: map[string][]common.Hash{ 180 "A": {{0x02}, {0x03}, {0x05}}, 181 "C": {{0x01}, {0x04}}, 182 "D": {{0x06}, {0x07}}, 183 }, 184 }, 185 }, 186 }) 187 } 188 189 // Tests that transaction announcements with associated metadata are added to a 190 // waitlist, and none of them are scheduled for retrieval until the wait expires. 191 // 192 // This test is an extended version of TestTransactionFetcherWaiting. It's mostly 193 // to cover the metadata checks without bloating up the basic behavioral tests 194 // with all the useless extra fields. 
195 func TestTransactionFetcherWaitingWithMeta(t *testing.T) { 196 testTransactionFetcherParallel(t, txFetcherTest{ 197 init: func() *TxFetcher { 198 return NewTxFetcher( 199 func(common.Hash) bool { return false }, 200 nil, 201 func(string, []common.Hash) error { return nil }, 202 nil, 203 ) 204 }, 205 steps: []interface{}{ 206 // Initial announcement to get something into the waitlist 207 doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.DynamicFeeTxType, types.DynamicFeeTxType}, sizes: []uint32{111, 222}}, 208 isWaitingWithMeta(map[string][]announce{ 209 "A": { 210 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 211 {common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(222)}, 212 }, 213 }), 214 // Announce from a new peer to check that no overwrite happens 215 doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}, types: []byte{types.DynamicFeeTxType, types.DynamicFeeTxType}, sizes: []uint32{333, 444}}, 216 isWaitingWithMeta(map[string][]announce{ 217 "A": { 218 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 219 {common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(222)}, 220 }, 221 "B": { 222 {common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(333)}, 223 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 224 }, 225 }), 226 // Announce clashing hashes but unique new peer 227 doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}, types: []byte{types.DynamicFeeTxType, types.DynamicFeeTxType}, sizes: []uint32{111, 444}}, 228 isWaitingWithMeta(map[string][]announce{ 229 "A": { 230 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 231 {common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(222)}, 232 }, 233 "B": { 234 {common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(333)}, 235 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 236 }, 237 "C": { 238 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 239 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 240 }, 241 }), 242 // Announce existing and clashing hashes from existing peer. Clashes 243 // should not overwrite previous announcements. 244 doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}, types: []byte{types.DynamicFeeTxType, types.DynamicFeeTxType, types.DynamicFeeTxType}, sizes: []uint32{999, 333, 555}}, 245 isWaitingWithMeta(map[string][]announce{ 246 "A": { 247 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 248 {common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(222)}, 249 {common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(333)}, 250 {common.Hash{0x05}, typeptr(types.DynamicFeeTxType), sizeptr(555)}, 251 }, 252 "B": { 253 {common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(333)}, 254 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 255 }, 256 "C": { 257 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 258 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 259 }, 260 }), 261 // Announce clashing hashes with conflicting metadata. Somebody will 262 // be in the wrong, but we don't know yet who. 
263 doTxNotify{peer: "D", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.DynamicFeeTxType, types.DynamicFeeTxType}, sizes: []uint32{999, 222}}, 264 isWaitingWithMeta(map[string][]announce{ 265 "A": { 266 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 267 {common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(222)}, 268 {common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(333)}, 269 {common.Hash{0x05}, typeptr(types.DynamicFeeTxType), sizeptr(555)}, 270 }, 271 "B": { 272 {common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(333)}, 273 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 274 }, 275 "C": { 276 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 277 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 278 }, 279 "D": { 280 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(999)}, 281 {common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(222)}, 282 }, 283 }), 284 isScheduled{tracking: nil, fetching: nil}, 285 286 // Wait for the arrival timeout which should move all expired items 287 // from the wait list to the scheduler 288 doWait{time: txArriveTimeout, step: true}, 289 isWaiting(nil), 290 isScheduledWithMeta{ 291 tracking: map[string][]announce{ 292 "A": { 293 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 294 {common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(222)}, 295 {common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(333)}, 296 {common.Hash{0x05}, typeptr(types.DynamicFeeTxType), sizeptr(555)}, 297 }, 298 "B": { 299 {common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(333)}, 300 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 301 }, 302 "C": { 303 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 304 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 305 }, 306 "D": { 307 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(999)}, 308 {common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(222)}, 309 }, 310 }, 311 fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer 312 "A": {{0x03}, {0x05}}, 313 "C": {{0x01}, {0x04}}, 314 "D": {{0x02}}, 315 }, 316 }, 317 // Queue up a non-fetchable transaction and then trigger it with a new 318 // peer (weird case to test 1 line in the fetcher) 319 doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.DynamicFeeTxType, types.DynamicFeeTxType}, sizes: []uint32{666, 777}}, 320 isWaitingWithMeta(map[string][]announce{ 321 "C": { 322 {common.Hash{0x06}, typeptr(types.DynamicFeeTxType), sizeptr(666)}, 323 {common.Hash{0x07}, typeptr(types.DynamicFeeTxType), sizeptr(777)}, 324 }, 325 }), 326 doWait{time: txArriveTimeout, step: true}, 327 isScheduledWithMeta{ 328 tracking: map[string][]announce{ 329 "A": { 330 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 331 {common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(222)}, 332 {common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(333)}, 333 {common.Hash{0x05}, typeptr(types.DynamicFeeTxType), sizeptr(555)}, 334 }, 335 "B": { 336 {common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(333)}, 337 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 338 }, 339 "C": { 340 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 341 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 342 {common.Hash{0x06}, 
typeptr(types.DynamicFeeTxType), sizeptr(666)}, 343 {common.Hash{0x07}, typeptr(types.DynamicFeeTxType), sizeptr(777)}, 344 }, 345 "D": { 346 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(999)}, 347 {common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(222)}, 348 }, 349 }, 350 fetching: map[string][]common.Hash{ 351 "A": {{0x03}, {0x05}}, 352 "C": {{0x01}, {0x04}}, 353 "D": {{0x02}}, 354 }, 355 }, 356 doTxNotify{peer: "E", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.DynamicFeeTxType, types.DynamicFeeTxType}, sizes: []uint32{666, 777}}, 357 isScheduledWithMeta{ 358 tracking: map[string][]announce{ 359 "A": { 360 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 361 {common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(222)}, 362 {common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(333)}, 363 {common.Hash{0x05}, typeptr(types.DynamicFeeTxType), sizeptr(555)}, 364 }, 365 "B": { 366 {common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(333)}, 367 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 368 }, 369 "C": { 370 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(111)}, 371 {common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(444)}, 372 {common.Hash{0x06}, typeptr(types.DynamicFeeTxType), sizeptr(666)}, 373 {common.Hash{0x07}, typeptr(types.DynamicFeeTxType), sizeptr(777)}, 374 }, 375 "D": { 376 {common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(999)}, 377 {common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(222)}, 378 }, 379 "E": { 380 {common.Hash{0x06}, typeptr(types.DynamicFeeTxType), sizeptr(666)}, 381 {common.Hash{0x07}, typeptr(types.DynamicFeeTxType), sizeptr(777)}, 382 }, 383 }, 384 fetching: map[string][]common.Hash{ 385 "A": {{0x03}, {0x05}}, 386 "C": {{0x01}, {0x04}}, 387 "D": {{0x02}}, 388 "E": {{0x06}, {0x07}}, 389 }, 390 }, 391 }, 392 }) 393 } 394 395 // Tests that transaction announcements skip the waiting list if they are 396 // already scheduled. 
397 func TestTransactionFetcherSkipWaiting(t *testing.T) { 398 testTransactionFetcherParallel(t, txFetcherTest{ 399 init: func() *TxFetcher { 400 return NewTxFetcher( 401 func(common.Hash) bool { return false }, 402 nil, 403 func(string, []common.Hash) error { return nil }, 404 nil, 405 ) 406 }, 407 steps: []interface{}{ 408 // Push an initial announcement through to the scheduled stage 409 doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}}, 410 isWaiting(map[string][]common.Hash{ 411 "A": {{0x01}, {0x02}}, 412 }), 413 isScheduled{tracking: nil, fetching: nil}, 414 415 doWait{time: txArriveTimeout, step: true}, 416 isWaiting(nil), 417 isScheduled{ 418 tracking: map[string][]common.Hash{ 419 "A": {{0x01}, {0x02}}, 420 }, 421 fetching: map[string][]common.Hash{ 422 "A": {{0x01}, {0x02}}, 423 }, 424 }, 425 // Announce overlaps from the same peer, ensure the new ones end up 426 // in stage one, and clashing ones don't get double tracked 427 doTxNotify{peer: "A", hashes: []common.Hash{{0x02}, {0x03}}}, 428 isWaiting(map[string][]common.Hash{ 429 "A": {{0x03}}, 430 }), 431 isScheduled{ 432 tracking: map[string][]common.Hash{ 433 "A": {{0x01}, {0x02}}, 434 }, 435 fetching: map[string][]common.Hash{ 436 "A": {{0x01}, {0x02}}, 437 }, 438 }, 439 // Announce overlaps from a new peer, ensure new transactions end up 440 // in stage one and clashing ones get tracked for the new peer 441 doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x04}}}, 442 isWaiting(map[string][]common.Hash{ 443 "A": {{0x03}}, 444 "B": {{0x03}, {0x04}}, 445 }), 446 isScheduled{ 447 tracking: map[string][]common.Hash{ 448 "A": {{0x01}, {0x02}}, 449 "B": {{0x02}}, 450 }, 451 fetching: map[string][]common.Hash{ 452 "A": {{0x01}, {0x02}}, 453 }, 454 }, 455 }, 456 }) 457 } 458 459 // Tests that only a single transaction request gets scheduled to a peer 460 // and subsequent announces block or get allotted to someone else. 
func TestTransactionFetcherSingletonRequesting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce a new set of transactions from the same peer and ensure
			// they do not start fetching since the peer is already busy
			doTxNotify{peer: "A", hashes: []common.Hash{{0x03}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x03}, {0x04}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x04}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce a duplicate set of transactions from a new peer and ensure
			// uniquely new ones start downloading, even if clashing.
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x05}, {0x06}}},
			isWaiting(map[string][]common.Hash{
				"B": {{0x05}, {0x06}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x04}},
					"B": {{0x02}, {0x03}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
					"B": {{0x03}},
				},
			},
		},
	})
}

// Tests that if a transaction retrieval fails, all the transactions get
// instantly scheduled back to someone else, or the announcements dropped
// if no alternate source is available.
func TestTransactionFetcherFailedRescheduling(t *testing.T) {
	// Create a channel to control when tx requests can fail
	proceed := make(chan struct{})
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(origin string, hashes []common.Hash) error {
					<-proceed
					return errors.New("peer disconnected")
				},
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// While the original peer is stuck in the request, push in a second
			// data source.
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Wait until the original request fails and check that transactions
			// are either rescheduled or dropped
			doFunc(func() {
				proceed <- struct{}{} // Allow peer A to return the failure
			}),
			doWait{time: 0, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"B": {{0x02}},
				},
			},
			doFunc(func() {
				proceed <- struct{}{} // Allow peer B to return the failure
			}),
			doWait{time: 0, step: true},
			isWaiting(nil),
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that if a transaction retrieval succeeds, all alternate origins
// are cleaned up.
func TestTransactionFetcherCleanup(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Request should be delivered
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that if a transaction retrieval succeeds, but the response is empty
// (no transactions available), then all are nuked instead of being rescheduled
// (yes, this was a bug).
func TestTransactionFetcherCleanupEmpty(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver an empty response and ensure the transaction is cleared, not rescheduled
			doTxEnqueue{peer: "A", txs: []*types.Transaction{}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that non-returned transactions are either re-scheduled from a
// different peer, or from the same peer if they are after the cutoff point.
688 func TestTransactionFetcherMissingRescheduling(t *testing.T) { 689 testTransactionFetcherParallel(t, txFetcherTest{ 690 init: func() *TxFetcher { 691 return NewTxFetcher( 692 func(common.Hash) bool { return false }, 693 func(txs []*types.Transaction) []error { 694 return make([]error, len(txs)) 695 }, 696 func(string, []common.Hash) error { return nil }, 697 nil, 698 ) 699 }, 700 steps: []interface{}{ 701 // Push an initial announcement through to the scheduled stage 702 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}}, 703 isWaiting(map[string][]common.Hash{ 704 "A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}, 705 }), 706 isScheduled{tracking: nil, fetching: nil}, 707 708 doWait{time: txArriveTimeout, step: true}, 709 isWaiting(nil), 710 isScheduled{ 711 tracking: map[string][]common.Hash{ 712 "A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}, 713 }, 714 fetching: map[string][]common.Hash{ 715 "A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}, 716 }, 717 }, 718 // Deliver the middle transaction requested, the one before which 719 // should be dropped and the one after re-requested. 720 doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true}, // This depends on the deterministic random 721 isScheduled{ 722 tracking: map[string][]common.Hash{ 723 "A": {testTxsHashes[2]}, 724 }, 725 fetching: map[string][]common.Hash{ 726 "A": {testTxsHashes[2]}, 727 }, 728 }, 729 }, 730 }) 731 } 732 733 // Tests that out of two transactions, if one is missing and the last is 734 // delivered, the peer gets properly cleaned out from the internal state. 735 func TestTransactionFetcherMissingCleanup(t *testing.T) { 736 testTransactionFetcherParallel(t, txFetcherTest{ 737 init: func() *TxFetcher { 738 return NewTxFetcher( 739 func(common.Hash) bool { return false }, 740 func(txs []*types.Transaction) []error { 741 return make([]error, len(txs)) 742 }, 743 func(string, []common.Hash) error { return nil }, 744 nil, 745 ) 746 }, 747 steps: []interface{}{ 748 // Push an initial announcement through to the scheduled stage 749 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}}, 750 isWaiting(map[string][]common.Hash{ 751 "A": {testTxsHashes[0], testTxsHashes[1]}, 752 }), 753 isScheduled{tracking: nil, fetching: nil}, 754 755 doWait{time: txArriveTimeout, step: true}, 756 isWaiting(nil), 757 isScheduled{ 758 tracking: map[string][]common.Hash{ 759 "A": {testTxsHashes[0], testTxsHashes[1]}, 760 }, 761 fetching: map[string][]common.Hash{ 762 "A": {testTxsHashes[0], testTxsHashes[1]}, 763 }, 764 }, 765 // Deliver the middle transaction requested, the one before which 766 // should be dropped and the one after re-requested. 767 doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true}, // This depends on the deterministic random 768 isScheduled{nil, nil, nil}, 769 }, 770 }) 771 } 772 773 // Tests that transaction broadcasts properly clean up announcements. 
func TestTransactionFetcherBroadcasts(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Set up three transactions to be in different states: waiting, queued and fetching
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},

			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[2]},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Broadcast all the transactions and ensure everything gets cleaned
			// up, but the dangling request is left alone to avoid doing multiple
			// concurrent requests.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: false},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver the requested hashes
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that the waiting list timers properly reset and reschedule.
func TestTransactionFetcherWaitTimerResets(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}},
			}),
			isScheduled{nil, nil, nil},
			doWait{time: txArriveTimeout / 2, step: false},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}},
			}),
			isScheduled{nil, nil, nil},

			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{nil, nil, nil},
			doWait{time: txArriveTimeout / 2, step: true},
			isWaiting(map[string][]common.Hash{
				"A": {{0x02}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},

			doWait{time: txArriveTimeout / 2, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},
		},
	})
}

// Tests that if a transaction request is not replied to, it will time
// out and be re-scheduled for someone else.
881 func TestTransactionFetcherTimeoutRescheduling(t *testing.T) { 882 testTransactionFetcherParallel(t, txFetcherTest{ 883 init: func() *TxFetcher { 884 return NewTxFetcher( 885 func(common.Hash) bool { return false }, 886 func(txs []*types.Transaction) []error { 887 return make([]error, len(txs)) 888 }, 889 func(string, []common.Hash) error { return nil }, 890 nil, 891 ) 892 }, 893 steps: []interface{}{ 894 // Push an initial announcement through to the scheduled stage 895 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}}, 896 isWaiting(map[string][]common.Hash{ 897 "A": {testTxsHashes[0]}, 898 }), 899 isScheduled{tracking: nil, fetching: nil}, 900 901 doWait{time: txArriveTimeout, step: true}, 902 isWaiting(nil), 903 isScheduled{ 904 tracking: map[string][]common.Hash{ 905 "A": {testTxsHashes[0]}, 906 }, 907 fetching: map[string][]common.Hash{ 908 "A": {testTxsHashes[0]}, 909 }, 910 }, 911 // Wait until the delivery times out, everything should be cleaned up 912 doWait{time: txFetchTimeout, step: true}, 913 isWaiting(nil), 914 isScheduled{ 915 tracking: nil, 916 fetching: nil, 917 dangling: map[string][]common.Hash{ 918 "A": {}, 919 }, 920 }, 921 // Ensure that followup announcements don't get scheduled 922 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}}, 923 doWait{time: txArriveTimeout, step: true}, 924 isScheduled{ 925 tracking: map[string][]common.Hash{ 926 "A": {testTxsHashes[1]}, 927 }, 928 fetching: nil, 929 dangling: map[string][]common.Hash{ 930 "A": {}, 931 }, 932 }, 933 // If the dangling request arrives a bit later, do not choke 934 doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true}, 935 isWaiting(nil), 936 isScheduled{ 937 tracking: map[string][]common.Hash{ 938 "A": {testTxsHashes[1]}, 939 }, 940 fetching: map[string][]common.Hash{ 941 "A": {testTxsHashes[1]}, 942 }, 943 }, 944 }, 945 }) 946 } 947 948 // Tests that the fetching timeout timers properly reset and reschedule. 949 func TestTransactionFetcherTimeoutTimerResets(t *testing.T) { 950 testTransactionFetcherParallel(t, txFetcherTest{ 951 init: func() *TxFetcher { 952 return NewTxFetcher( 953 func(common.Hash) bool { return false }, 954 nil, 955 func(string, []common.Hash) error { return nil }, 956 nil, 957 ) 958 }, 959 steps: []interface{}{ 960 doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}}, 961 doWait{time: txArriveTimeout, step: true}, 962 doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}}, 963 doWait{time: txArriveTimeout, step: true}, 964 965 isWaiting(nil), 966 isScheduled{ 967 tracking: map[string][]common.Hash{ 968 "A": {{0x01}}, 969 "B": {{0x02}}, 970 }, 971 fetching: map[string][]common.Hash{ 972 "A": {{0x01}}, 973 "B": {{0x02}}, 974 }, 975 }, 976 doWait{time: txFetchTimeout - txArriveTimeout, step: true}, 977 isScheduled{ 978 tracking: map[string][]common.Hash{ 979 "B": {{0x02}}, 980 }, 981 fetching: map[string][]common.Hash{ 982 "B": {{0x02}}, 983 }, 984 dangling: map[string][]common.Hash{ 985 "A": {}, 986 }, 987 }, 988 doWait{time: txArriveTimeout, step: true}, 989 isScheduled{ 990 tracking: nil, 991 fetching: nil, 992 dangling: map[string][]common.Hash{ 993 "A": {}, 994 "B": {}, 995 }, 996 }, 997 }, 998 }) 999 } 1000 1001 // Tests that if thousands of transactions are announced, only a small 1002 // number of them will be requested at a time. 
func TestTransactionFetcherRateLimiting(t *testing.T) {
	// Create a slew of transactions and announce them
	var hashes []common.Hash
	for i := 0; i < maxTxAnnounces; i++ {
		hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)})
	}
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Announce all the transactions, wait a bit and ensure only a small
			// percentage gets requested
			doTxNotify{peer: "A", hashes: hashes},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": hashes,
				},
				fetching: map[string][]common.Hash{
					"A": hashes[1643 : 1643+maxTxRetrievals],
				},
			},
		},
	})
}

// Tests that if huge transactions are announced, only a small number of them will
// be requested at a time, to keep the responses below a reasonable level.
func TestTransactionFetcherBandwidthLimiting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Announce mid-size transactions from A to verify that multiple
			// ones can be piled into a single request.
			doTxNotify{peer: "A",
				hashes: []common.Hash{{0x01}, {0x02}, {0x03}, {0x04}},
				types:  []byte{types.DynamicFeeTxType, types.DynamicFeeTxType, types.DynamicFeeTxType, types.DynamicFeeTxType},
				sizes:  []uint32{48 * 1024, 48 * 1024, 48 * 1024, 48 * 1024},
			},
			// Announce transactions exactly at the size limit to see that only one
			// gets requested
			doTxNotify{peer: "B",
				hashes: []common.Hash{{0x05}, {0x06}},
				types:  []byte{types.DynamicFeeTxType, types.DynamicFeeTxType},
				sizes:  []uint32{maxTxRetrievalSize, maxTxRetrievalSize},
			},
			// Announce oversized transactions to see that overflows are ok
			doTxNotify{peer: "C",
				hashes: []common.Hash{{0x07}, {0x08}},
				types:  []byte{types.DynamicFeeTxType, types.DynamicFeeTxType},
				sizes:  []uint32{6 * (1 << 17), 6 * (1 << 17)},
			},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduledWithMeta{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, typeptr(types.DynamicFeeTxType), sizeptr(48 * 1024)},
						{common.Hash{0x02}, typeptr(types.DynamicFeeTxType), sizeptr(48 * 1024)},
						{common.Hash{0x03}, typeptr(types.DynamicFeeTxType), sizeptr(48 * 1024)},
						{common.Hash{0x04}, typeptr(types.DynamicFeeTxType), sizeptr(48 * 1024)},
					},
					"B": {
						{common.Hash{0x05}, typeptr(types.DynamicFeeTxType), sizeptr(maxTxRetrievalSize)},
						{common.Hash{0x06}, typeptr(types.DynamicFeeTxType), sizeptr(maxTxRetrievalSize)},
					},
					"C": {
						{common.Hash{0x07}, typeptr(types.DynamicFeeTxType), sizeptr(6 * (1 << 17))},
						{common.Hash{0x08}, typeptr(types.DynamicFeeTxType), sizeptr(6 * (1 << 17))},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x02}, {0x03}, {0x04}},
					"B": {{0x06}},
					"C": {{0x08}},
				},
			},
		},
	})
}

// Tests that the number of transactions a peer is allowed to announce and/or
// request at the same time is hard capped.
func TestTransactionFetcherDoSProtection(t *testing.T) {
	// Create a slew of transactions to announce them
	var hashesA []common.Hash
	for i := 0; i < maxTxAnnounces+1; i++ {
		hashesA = append(hashesA, common.Hash{0x01, byte(i / 256), byte(i % 256)})
	}
	var hashesB []common.Hash
	for i := 0; i < maxTxAnnounces+1; i++ {
		hashesB = append(hashesB, common.Hash{0x02, byte(i / 256), byte(i % 256)})
	}
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Announce half of the transactions and wait for them to be scheduled
			doTxNotify{peer: "A", hashes: hashesA[:maxTxAnnounces/2]},
			doTxNotify{peer: "B", hashes: hashesB[:maxTxAnnounces/2-1]},
			doWait{time: txArriveTimeout, step: true},

			// Announce the second half and keep them in the wait list
			doTxNotify{peer: "A", hashes: hashesA[maxTxAnnounces/2 : maxTxAnnounces]},
			doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1]},

			// Ensure the hashes are split half and half
			isWaiting(map[string][]common.Hash{
				"A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
				"B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1],
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": hashesA[:maxTxAnnounces/2],
					"B": hashesB[:maxTxAnnounces/2-1],
				},
				fetching: map[string][]common.Hash{
					"A": hashesA[1643 : 1643+maxTxRetrievals],
					"B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
				},
			},
			// Ensure that adding even one more hash results in dropping the hash
			doTxNotify{peer: "A", hashes: []common.Hash{hashesA[maxTxAnnounces]}},
			doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces-1 : maxTxAnnounces+1]},

			isWaiting(map[string][]common.Hash{
				"A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
				"B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces],
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": hashesA[:maxTxAnnounces/2],
					"B": hashesB[:maxTxAnnounces/2-1],
				},
				fetching: map[string][]common.Hash{
					"A": hashesA[1643 : 1643+maxTxRetrievals],
					"B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
				},
			},
		},
	})
}

// Tests that underpriced transactions don't get rescheduled after being rejected.
1167 func TestTransactionFetcherUnderpricedDedup(t *testing.T) { 1168 testTransactionFetcherParallel(t, txFetcherTest{ 1169 init: func() *TxFetcher { 1170 return NewTxFetcher( 1171 func(common.Hash) bool { return false }, 1172 func(txs []*types.Transaction) []error { 1173 errs := make([]error, len(txs)) 1174 for i := 0; i < len(errs); i++ { 1175 if i%2 == 0 { 1176 errs[i] = txpool.ErrUnderpriced 1177 } else { 1178 errs[i] = txpool.ErrReplaceUnderpriced 1179 } 1180 } 1181 return errs 1182 }, 1183 func(string, []common.Hash) error { return nil }, 1184 nil, 1185 ) 1186 }, 1187 steps: []interface{}{ 1188 // Deliver a transaction through the fetcher, but reject as underpriced 1189 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}}, 1190 doWait{time: txArriveTimeout, step: true}, 1191 doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}, direct: true}, 1192 isScheduled{nil, nil, nil}, 1193 1194 // Try to announce the transaction again, ensure it's not scheduled back 1195 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}}, // [2] is needed to force a step in the fetcher 1196 isWaiting(map[string][]common.Hash{ 1197 "A": {testTxsHashes[2]}, 1198 }), 1199 isScheduled{nil, nil, nil}, 1200 }, 1201 }) 1202 } 1203 1204 // Tests that underpriced transactions don't get rescheduled after being rejected, 1205 // but at the same time there's a hard cap on the number of transactions that are 1206 // tracked. 1207 func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) { 1208 // Temporarily disable fetch timeouts as they massively mess up the simulated clock 1209 defer func(timeout time.Duration) { txFetchTimeout = timeout }(txFetchTimeout) 1210 txFetchTimeout = 24 * time.Hour 1211 1212 // Create a slew of transactions to max out the underpriced set 1213 var txs []*types.Transaction 1214 for i := 0; i < maxTxUnderpricedSetSize+1; i++ { 1215 to := common.Address{byte(rand.Intn(256))} 1216 txs = append(txs, types.NewTx(&types.DynamicFeeTx{Nonce: rand.Uint64(), To: &to, Value: new(big.Int), Gas: 0, GasFeeCap: new(big.Int), Data: nil})) 1217 } 1218 hashes := make([]common.Hash, len(txs)) 1219 for i, tx := range txs { 1220 hashes[i] = tx.Hash() 1221 } 1222 // Generate a set of steps to announce and deliver the entire set of transactions 1223 var steps []interface{} 1224 for i := 0; i < maxTxUnderpricedSetSize/maxTxRetrievals; i++ { 1225 steps = append(steps, doTxNotify{peer: "A", hashes: hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals]}) 1226 steps = append(steps, isWaiting(map[string][]common.Hash{ 1227 "A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals], 1228 })) 1229 steps = append(steps, doWait{time: txArriveTimeout, step: true}) 1230 steps = append(steps, isScheduled{ 1231 tracking: map[string][]common.Hash{ 1232 "A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals], 1233 }, 1234 fetching: map[string][]common.Hash{ 1235 "A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals], 1236 }, 1237 }) 1238 steps = append(steps, doTxEnqueue{peer: "A", txs: txs[i*maxTxRetrievals : (i+1)*maxTxRetrievals], direct: true}) 1239 steps = append(steps, isWaiting(nil)) 1240 steps = append(steps, isScheduled{nil, nil, nil}) 1241 steps = append(steps, isUnderpriced((i+1)*maxTxRetrievals)) 1242 } 1243 testTransactionFetcher(t, txFetcherTest{ 1244 init: func() *TxFetcher { 1245 return NewTxFetcher( 1246 func(common.Hash) bool { return false }, 1247 func(txs []*types.Transaction) []error { 1248 errs := make([]error, 
len(txs)) 1249 for i := 0; i < len(errs); i++ { 1250 errs[i] = txpool.ErrUnderpriced 1251 } 1252 return errs 1253 }, 1254 func(string, []common.Hash) error { return nil }, 1255 nil, 1256 ) 1257 }, 1258 steps: append(steps, []interface{}{ 1259 // The preparation of the test has already been done in `steps`, add the last check 1260 doTxNotify{peer: "A", hashes: []common.Hash{hashes[maxTxUnderpricedSetSize]}}, 1261 doWait{time: txArriveTimeout, step: true}, 1262 doTxEnqueue{peer: "A", txs: []*types.Transaction{txs[maxTxUnderpricedSetSize]}, direct: true}, 1263 isUnderpriced(maxTxUnderpricedSetSize), 1264 }...), 1265 }) 1266 } 1267 1268 // Tests that unexpected deliveries don't corrupt the internal state. 1269 func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) { 1270 testTransactionFetcherParallel(t, txFetcherTest{ 1271 init: func() *TxFetcher { 1272 return NewTxFetcher( 1273 func(common.Hash) bool { return false }, 1274 func(txs []*types.Transaction) []error { 1275 return make([]error, len(txs)) 1276 }, 1277 func(string, []common.Hash) error { return nil }, 1278 nil, 1279 ) 1280 }, 1281 steps: []interface{}{ 1282 // Deliver something out of the blue 1283 isWaiting(nil), 1284 isScheduled{nil, nil, nil}, 1285 doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: false}, 1286 isWaiting(nil), 1287 isScheduled{nil, nil, nil}, 1288 1289 // Set up a few hashes into various stages 1290 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}}, 1291 doWait{time: txArriveTimeout, step: true}, 1292 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}}, 1293 doWait{time: txArriveTimeout, step: true}, 1294 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}}, 1295 1296 isWaiting(map[string][]common.Hash{ 1297 "A": {testTxsHashes[2]}, 1298 }), 1299 isScheduled{ 1300 tracking: map[string][]common.Hash{ 1301 "A": {testTxsHashes[0], testTxsHashes[1]}, 1302 }, 1303 fetching: map[string][]common.Hash{ 1304 "A": {testTxsHashes[0]}, 1305 }, 1306 }, 1307 // Deliver everything and more out of the blue 1308 doTxEnqueue{peer: "B", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2], testTxs[3]}, direct: true}, 1309 isWaiting(nil), 1310 isScheduled{ 1311 tracking: nil, 1312 fetching: nil, 1313 dangling: map[string][]common.Hash{ 1314 "A": {testTxsHashes[0]}, 1315 }, 1316 }, 1317 }, 1318 }) 1319 } 1320 1321 // Tests that dropping a peer cleans out all internal data structures in all the 1322 // live or dangling stages. 
1323 func TestTransactionFetcherDrop(t *testing.T) { 1324 testTransactionFetcherParallel(t, txFetcherTest{ 1325 init: func() *TxFetcher { 1326 return NewTxFetcher( 1327 func(common.Hash) bool { return false }, 1328 func(txs []*types.Transaction) []error { 1329 return make([]error, len(txs)) 1330 }, 1331 func(string, []common.Hash) error { return nil }, 1332 nil, 1333 ) 1334 }, 1335 steps: []interface{}{ 1336 // Set up a few hashes into various stages 1337 doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}}, 1338 doWait{time: txArriveTimeout, step: true}, 1339 doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}}, 1340 doWait{time: txArriveTimeout, step: true}, 1341 doTxNotify{peer: "A", hashes: []common.Hash{{0x03}}}, 1342 1343 isWaiting(map[string][]common.Hash{ 1344 "A": {{0x03}}, 1345 }), 1346 isScheduled{ 1347 tracking: map[string][]common.Hash{ 1348 "A": {{0x01}, {0x02}}, 1349 }, 1350 fetching: map[string][]common.Hash{ 1351 "A": {{0x01}}, 1352 }, 1353 }, 1354 // Drop the peer and ensure everything's cleaned out 1355 doDrop("A"), 1356 isWaiting(nil), 1357 isScheduled{nil, nil, nil}, 1358 1359 // Push the node into a dangling (timeout) state 1360 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}}, 1361 doWait{time: txArriveTimeout, step: true}, 1362 isWaiting(nil), 1363 isScheduled{ 1364 tracking: map[string][]common.Hash{ 1365 "A": {testTxsHashes[0]}, 1366 }, 1367 fetching: map[string][]common.Hash{ 1368 "A": {testTxsHashes[0]}, 1369 }, 1370 }, 1371 doWait{time: txFetchTimeout, step: true}, 1372 isWaiting(nil), 1373 isScheduled{ 1374 tracking: nil, 1375 fetching: nil, 1376 dangling: map[string][]common.Hash{ 1377 "A": {}, 1378 }, 1379 }, 1380 // Drop the peer and ensure everything's cleaned out 1381 doDrop("A"), 1382 isWaiting(nil), 1383 isScheduled{nil, nil, nil}, 1384 }, 1385 }) 1386 } 1387 1388 // Tests that dropping a peer instantly reschedules failed announcements to any 1389 // available peer. 1390 func TestTransactionFetcherDropRescheduling(t *testing.T) { 1391 testTransactionFetcherParallel(t, txFetcherTest{ 1392 init: func() *TxFetcher { 1393 return NewTxFetcher( 1394 func(common.Hash) bool { return false }, 1395 func(txs []*types.Transaction) []error { 1396 return make([]error, len(txs)) 1397 }, 1398 func(string, []common.Hash) error { return nil }, 1399 nil, 1400 ) 1401 }, 1402 steps: []interface{}{ 1403 // Set up a few hashes into various stages 1404 doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}}, 1405 doWait{time: txArriveTimeout, step: true}, 1406 doTxNotify{peer: "B", hashes: []common.Hash{{0x01}}}, 1407 1408 isWaiting(nil), 1409 isScheduled{ 1410 tracking: map[string][]common.Hash{ 1411 "A": {{0x01}}, 1412 "B": {{0x01}}, 1413 }, 1414 fetching: map[string][]common.Hash{ 1415 "A": {{0x01}}, 1416 }, 1417 }, 1418 // Drop the peer and ensure everything's cleaned out 1419 doDrop("A"), 1420 isWaiting(nil), 1421 isScheduled{ 1422 tracking: map[string][]common.Hash{ 1423 "B": {{0x01}}, 1424 }, 1425 fetching: map[string][]common.Hash{ 1426 "B": {{0x01}}, 1427 }, 1428 }, 1429 }, 1430 }) 1431 } 1432 1433 // Tests that announced transactions with the wrong transaction type or size will 1434 // result in a dropped peer. 
1435 func TestInvalidAnnounceMetadata(t *testing.T) { 1436 drop := make(chan string, 2) 1437 testTransactionFetcherParallel(t, txFetcherTest{ 1438 init: func() *TxFetcher { 1439 return NewTxFetcher( 1440 func(common.Hash) bool { return false }, 1441 func(txs []*types.Transaction) []error { 1442 return make([]error, len(txs)) 1443 }, 1444 func(string, []common.Hash) error { return nil }, 1445 func(peer string) { drop <- peer }, 1446 ) 1447 }, 1448 steps: []interface{}{ 1449 // Initial announcement to get something into the waitlist 1450 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}, types: []byte{testTxs[0].Type(), testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[0].Size()), uint32(testTxs[1].Size())}}, 1451 isWaitingWithMeta(map[string][]announce{ 1452 "A": { 1453 {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))}, 1454 {testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))}, 1455 }, 1456 }), 1457 // Announce from new peers conflicting transactions 1458 doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{1024 + uint32(testTxs[0].Size())}}, 1459 doTxNotify{peer: "C", hashes: []common.Hash{testTxsHashes[1]}, types: []byte{1 + testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[1].Size())}}, 1460 isWaitingWithMeta(map[string][]announce{ 1461 "A": { 1462 {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))}, 1463 {testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))}, 1464 }, 1465 "B": { 1466 {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(1024 + uint32(testTxs[0].Size()))}, 1467 }, 1468 "C": { 1469 {testTxsHashes[1], typeptr(1 + testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))}, 1470 }, 1471 }), 1472 // Schedule all the transactions for retrieval 1473 doWait{time: txArriveTimeout, step: true}, 1474 isWaitingWithMeta(nil), 1475 isScheduledWithMeta{ 1476 tracking: map[string][]announce{ 1477 "A": { 1478 {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))}, 1479 {testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))}, 1480 }, 1481 "B": { 1482 {testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(1024 + uint32(testTxs[0].Size()))}, 1483 }, 1484 "C": { 1485 {testTxsHashes[1], typeptr(1 + testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))}, 1486 }, 1487 }, 1488 fetching: map[string][]common.Hash{ 1489 "A": {testTxsHashes[0]}, 1490 "C": {testTxsHashes[1]}, 1491 }, 1492 }, 1493 // Deliver the transactions and wait for B to be dropped 1494 doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}}, 1495 doFunc(func() { <-drop }), 1496 doFunc(func() { <-drop }), 1497 }, 1498 }) 1499 } 1500 1501 // This test reproduces a crash caught by the fuzzer. The root cause was a 1502 // dangling transaction timing out and clashing on re-add with a concurrently 1503 // announced one. 
1504 func TestTransactionFetcherFuzzCrash01(t *testing.T) { 1505 testTransactionFetcherParallel(t, txFetcherTest{ 1506 init: func() *TxFetcher { 1507 return NewTxFetcher( 1508 func(common.Hash) bool { return false }, 1509 func(txs []*types.Transaction) []error { 1510 return make([]error, len(txs)) 1511 }, 1512 func(string, []common.Hash) error { return nil }, 1513 nil, 1514 ) 1515 }, 1516 steps: []interface{}{ 1517 // Get a transaction into fetching mode and make it dangling with a broadcast 1518 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}}, 1519 doWait{time: txArriveTimeout, step: true}, 1520 doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}}, 1521 1522 // Notify the dangling transaction once more and crash via a timeout 1523 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}}, 1524 doWait{time: txFetchTimeout, step: true}, 1525 }, 1526 }) 1527 } 1528 1529 // This test reproduces a crash caught by the fuzzer. The root cause was a 1530 // dangling transaction getting peer-dropped and clashing on re-add with a 1531 // concurrently announced one. 1532 func TestTransactionFetcherFuzzCrash02(t *testing.T) { 1533 testTransactionFetcherParallel(t, txFetcherTest{ 1534 init: func() *TxFetcher { 1535 return NewTxFetcher( 1536 func(common.Hash) bool { return false }, 1537 func(txs []*types.Transaction) []error { 1538 return make([]error, len(txs)) 1539 }, 1540 func(string, []common.Hash) error { return nil }, 1541 nil, 1542 ) 1543 }, 1544 steps: []interface{}{ 1545 // Get a transaction into fetching mode and make it dangling with a broadcast 1546 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}}, 1547 doWait{time: txArriveTimeout, step: true}, 1548 doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}}, 1549 1550 // Notify the dangling transaction once more, re-fetch, and crash via a drop and timeout 1551 doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}}, 1552 doWait{time: txArriveTimeout, step: true}, 1553 doDrop("A"), 1554 doWait{time: txFetchTimeout, step: true}, 1555 }, 1556 }) 1557 } 1558 1559 // This test reproduces a crash caught by the fuzzer. The root cause was a 1560 // dangling transaction getting rescheduled via a partial delivery, clashing 1561 // with a concurrent notify. 1562 func TestTransactionFetcherFuzzCrash03(t *testing.T) { 1563 testTransactionFetcherParallel(t, txFetcherTest{ 1564 init: func() *TxFetcher { 1565 return NewTxFetcher( 1566 func(common.Hash) bool { return false }, 1567 func(txs []*types.Transaction) []error { 1568 return make([]error, len(txs)) 1569 }, 1570 func(string, []common.Hash) error { return nil }, 1571 nil, 1572 ) 1573 }, 1574 steps: []interface{}{ 1575 // Get a transaction into fetching mode and make it dangling with a broadcast 1576 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}}, 1577 doWait{time: txFetchTimeout, step: true}, 1578 doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}}, 1579 1580 // Notify the dangling transaction once more, partially deliver, clash&crash with a timeout 1581 doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}}, 1582 doWait{time: txArriveTimeout, step: true}, 1583 1584 doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true}, 1585 doWait{time: txFetchTimeout, step: true}, 1586 }, 1587 }) 1588 } 1589 1590 // This test reproduces a crash caught by the fuzzer. 
// The root cause was a dangling transaction getting rescheduled via a disconnect,
// clashing with a concurrent notify.
func TestTransactionFetcherFuzzCrash04(t *testing.T) {
	// Create a channel to control when tx requests can fail
	proceed := make(chan struct{})

	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error {
					<-proceed
					return errors.New("peer disconnected")
				},
				nil,
			)
		},
		steps: []interface{}{
			// Get a transaction into fetching mode and make it dangling with a broadcast
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},

			// Notify the dangling transaction once more, re-fetch, and crash via an in-flight disconnect
			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doFunc(func() {
				proceed <- struct{}{} // Allow peer A to return the failure
			}),
			doWait{time: 0, step: true},
			doWait{time: txFetchTimeout, step: true},
		},
	})
}

func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) {
	t.Parallel()
	testTransactionFetcher(t, tt)
}

func testTransactionFetcher(t *testing.T, tt txFetcherTest) {
	// Create a fetcher and hook into its simulated fields
	clock := new(mclock.Simulated)
	wait := make(chan struct{})

	fetcher := tt.init()
	fetcher.clock = clock
	fetcher.step = wait
	fetcher.rand = rand.New(rand.NewSource(0x3a29))

	fetcher.Start()
	defer fetcher.Stop()

	defer func() { // drain the wait chan on exit
		for {
			select {
			case <-wait:
			default:
				return
			}
		}
	}()

	// Crunch through all the test steps and execute them
	for i, step := range tt.steps {
		// Auto-expand certain steps to ones with metadata
		switch old := step.(type) {
		case isWaiting:
			new := make(isWaitingWithMeta)
			for peer, hashes := range old {
				for _, hash := range hashes {
					new[peer] = append(new[peer], announce{hash, nil, nil})
				}
			}
			step = new

		case isScheduled:
			new := isScheduledWithMeta{
				tracking: make(map[string][]announce),
				fetching: old.fetching,
				dangling: old.dangling,
			}
			for peer, hashes := range old.tracking {
				for _, hash := range hashes {
					new.tracking[peer] = append(new.tracking[peer], announce{hash, nil, nil})
				}
			}
			step = new
		}
		// Process the original or expanded steps
		switch step := step.(type) {
		case doTxNotify:
			if err := fetcher.Notify(step.peer, step.types, step.sizes, step.hashes); err != nil {
				t.Errorf("step %d: %v", i, err)
			}
			<-wait // Fetcher needs to process this, wait until it's done
			select {
			case <-wait:
				panic("wtf")
			case <-time.After(time.Millisecond):
			}

		case doTxEnqueue:
			if err := fetcher.Enqueue(step.peer, step.txs, step.direct); err != nil {
				t.Errorf("step %d: %v", i, err)
			}
			<-wait // Fetcher needs to process this, wait until it's done

		case doWait:
			clock.Run(step.time)
			if step.step {
				<-wait //
1707 1708 case doDrop: 1709 if err := fetcher.Drop(string(step)); err != nil { 1710 t.Errorf("step %d: %v", i, err) 1711 } 1712 <-wait // Fetcher needs to process this, wait until it's done 1713 1714 case doFunc: 1715 step() 1716 1717 case isWaitingWithMeta: 1718 // We need to check that the waiting list (stage 1) internals 1719 // match with the expected set. Check the peer->hash mappings 1720 // first. 1721 for peer, announces := range step { 1722 waiting := fetcher.waitslots[peer] 1723 if waiting == nil { 1724 t.Errorf("step %d: peer %s missing from waitslots", i, peer) 1725 continue 1726 } 1727 for _, ann := range announces { 1728 if meta, ok := waiting[ann.hash]; !ok { 1729 t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, ann.hash) 1730 } else { 1731 if (meta == nil && (ann.kind != nil || ann.size != nil)) || 1732 (meta != nil && (ann.kind == nil || ann.size == nil)) || 1733 (meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) { 1734 t.Errorf("step %d, peer %s, hash %x: waitslot metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, *ann.kind, *ann.size) 1735 } 1736 } 1737 } 1738 for hash, meta := range waiting { 1739 ann := announce{hash: hash} 1740 if meta != nil { 1741 ann.kind, ann.size = &meta.kind, &meta.size 1742 } 1743 if !containsAnnounce(announces, ann) { 1744 t.Errorf("step %d, peer %s: announce %v extra in waitslots", i, peer, ann) 1745 } 1746 } 1747 } 1748 for peer := range fetcher.waitslots { 1749 if _, ok := step[peer]; !ok { 1750 t.Errorf("step %d: peer %s extra in waitslots", i, peer) 1751 } 1752 } 1753 // Peer->hash sets are correct; check the hash->peer and timeout sets 1754 for peer, announces := range step { 1755 for _, ann := range announces { 1756 if _, ok := fetcher.waitlist[ann.hash][peer]; !ok { 1757 t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, ann.hash, peer) 1758 } 1759 if _, ok := fetcher.waittime[ann.hash]; !ok { 1760 t.Errorf("step %d: hash %x missing from waittime", i, ann.hash) 1761 } 1762 } 1763 } 1764 for hash, peers := range fetcher.waitlist { 1765 if len(peers) == 0 { 1766 t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash) 1767 } 1768 for peer := range peers { 1769 if !containsHashInAnnounces(step[peer], hash) { 1770 t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer) 1771 } 1772 } 1773 } 1774 for hash := range fetcher.waittime { 1775 var found bool 1776 for _, announces := range step { 1777 if containsHashInAnnounces(announces, hash) { 1778 found = true 1779 break 1780 } 1781 } 1782 if !found { 1783 t.Errorf("step %d: hash %x extra in waittime", i, hash) 1784 } 1785 } 1786
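// The checks above validate the three stage-1 (waitlist) indexes in lockstep:
// waitslots (peer -> hash -> metadata), waitlist (hash -> peer set) and
// waittime (hash -> arrival time). Every expected announcement must appear in
// all of them, and nothing beyond the expected set may linger. The next case
// repeats the exercise for the stage-2 scheduler indexes (announces, requests,
// fetching, alternates and announced).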
1787 case isScheduledWithMeta: 1788 // Check that all scheduled announces are accounted for and no 1789 // extra ones are present. 1790 for peer, announces := range step.tracking { 1791 scheduled := fetcher.announces[peer] 1792 if scheduled == nil { 1793 t.Errorf("step %d: peer %s missing from announces", i, peer) 1794 continue 1795 } 1796 for _, ann := range announces { 1797 if meta, ok := scheduled[ann.hash]; !ok { 1798 t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, ann.hash) 1799 } else { 1800 if (meta == nil && (ann.kind != nil || ann.size != nil)) || 1801 (meta != nil && (ann.kind == nil || ann.size == nil)) || 1802 (meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) { 1803 t.Errorf("step %d, peer %s, hash %x: announce metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, *ann.kind, *ann.size) 1804 } 1805 } 1806 } 1807 for hash, meta := range scheduled { 1808 ann := announce{hash: hash} 1809 if meta != nil { 1810 ann.kind, ann.size = &meta.kind, &meta.size 1811 } 1812 if !containsAnnounce(announces, ann) { 1813 t.Errorf("step %d, peer %s: announce %x extra in announces", i, peer, hash) 1814 } 1815 } 1816 } 1817 for peer := range fetcher.announces { 1818 if _, ok := step.tracking[peer]; !ok { 1819 t.Errorf("step %d: peer %s extra in announces", i, peer) 1820 } 1821 } 1822 // Check that all announces required to be fetching are in the 1823 // appropriate sets 1824 for peer, hashes := range step.fetching { 1825 request := fetcher.requests[peer] 1826 if request == nil { 1827 t.Errorf("step %d: peer %s missing from requests", i, peer) 1828 continue 1829 } 1830 for _, hash := range hashes { 1831 if !slices.Contains(request.hashes, hash) { 1832 t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash) 1833 } 1834 } 1835 for _, hash := range request.hashes { 1836 if !slices.Contains(hashes, hash) { 1837 t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash) 1838 } 1839 } 1840 } 1841 for peer := range fetcher.requests { 1842 if _, ok := step.fetching[peer]; !ok { 1843 if _, ok := step.dangling[peer]; !ok { 1844 t.Errorf("step %d: peer %s extra in requests", i, peer) 1845 } 1846 } 1847 } 1848 for peer, hashes := range step.fetching { 1849 for _, hash := range hashes { 1850 if _, ok := fetcher.fetching[hash]; !ok { 1851 t.Errorf("step %d, peer %s: hash %x missing from fetching", i, peer, hash) 1852 } 1853 } 1854 } 1855 for hash := range fetcher.fetching { 1856 var found bool 1857 for _, req := range fetcher.requests { 1858 if slices.Contains(req.hashes, hash) { 1859 found = true 1860 break 1861 } 1862 } 1863 if !found { 1864 t.Errorf("step %d: hash %x extra in fetching", i, hash) 1865 } 1866 } 1867 for _, hashes := range step.fetching { 1868 for _, hash := range hashes { 1869 alternates := fetcher.alternates[hash] 1870 if alternates == nil { 1871 t.Errorf("step %d: hash %x missing from alternates", i, hash) 1872 continue 1873 } 1874 for peer := range alternates { 1875 if _, ok := fetcher.announces[peer]; !ok { 1876 t.Errorf("step %d: peer %s extra in alternates", i, peer) 1877 continue 1878 } 1879 if _, ok := fetcher.announces[peer][hash]; !ok { 1880 t.Errorf("step %d, hash %x: peer %s extra in alternates", i, hash, peer) 1881 continue 1882 } 1883 } 1884 for p := range fetcher.announced[hash] { 1885 if _, ok := alternates[p]; !ok { 1886 t.Errorf("step %d, hash %x: peer %s missing from alternates", i, hash, p) 1887 continue 1888 } 1889 } 1890 } 1891 } 1892 for peer, hashes := range step.dangling { 1893 request := fetcher.requests[peer] 1894 if request == nil { 1895 t.Errorf("step %d: peer %s missing from requests", i, peer) 1896 continue 1897
} 1898 for _, hash := range hashes { 1899 if !slices.Contains(request.hashes, hash) { 1900 t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash) 1901 } 1902 } 1903 for _, hash := range request.hashes { 1904 if !slices.Contains(hashes, hash) { 1905 t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash) 1906 } 1907 } 1908 } 1909 // Check that all transaction announces that are scheduled for 1910 // retrieval but not actively being downloaded are tracked only 1911 // in the stage 2 `announced` map. 1912 var queued []common.Hash 1913 for _, announces := range step.tracking { 1914 for _, ann := range announces { 1915 var found bool 1916 for _, hs := range step.fetching { 1917 if slices.Contains(hs, ann.hash) { 1918 found = true 1919 break 1920 } 1921 } 1922 if !found { 1923 queued = append(queued, ann.hash) 1924 } 1925 } 1926 } 1927 for _, hash := range queued { 1928 if _, ok := fetcher.announced[hash]; !ok { 1929 t.Errorf("step %d: hash %x missing from announced", i, hash) 1930 } 1931 } 1932 for hash := range fetcher.announced { 1933 if !slices.Contains(queued, hash) { 1934 t.Errorf("step %d: hash %x extra in announced", i, hash) 1935 } 1936 } 1937 1938 case isUnderpriced: 1939 if fetcher.underpriced.Len() != int(step) { 1940 t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Len(), step) 1941 } 1942 1943 default: 1944 t.Fatalf("step %d: unknown step type %T", i, step) 1945 } 1946 // After every step, cross validate the internal uniqueness invariants 1947 // between stage one and stage two. 1948 for hash := range fetcher.waittime { 1949 if _, ok := fetcher.announced[hash]; ok { 1950 t.Errorf("step %d: hash %s present in both stage 1 and 2", i, hash) 1951 } 1952 } 1953 } 1954 } 1955 1956 // containsAnnounce returns whether an announcement is contained within a slice 1957 // of announcements. 1958 func containsAnnounce(slice []announce, ann announce) bool { 1959 for _, have := range slice { 1960 if have.hash == ann.hash { 1961 if have.kind == nil || ann.kind == nil { 1962 if have.kind != ann.kind { 1963 return false 1964 } 1965 } else if *have.kind != *ann.kind { 1966 return false 1967 } 1968 if have.size == nil || ann.size == nil { 1969 if have.size != ann.size { 1970 return false 1971 } 1972 } else if *have.size != *ann.size { 1973 return false 1974 } 1975 return true 1976 } 1977 } 1978 return false 1979 } 1980 1981 // containsHashInAnnounces returns whether a hash is contained within a slice 1982 // of announcements. 1983 func containsHashInAnnounces(slice []announce, hash common.Hash) bool { 1984 for _, have := range slice { 1985 if have.hash == hash { 1986 return true 1987 } 1988 } 1989 return false 1990 } 1991 1992 // Tests that a transaction is forgotten after the timeout. 1993 func TestTransactionForgotten(t *testing.T) { 1994 fetcher := NewTxFetcher( 1995 func(common.Hash) bool { return false }, 1996 func(txs []*types.Transaction) []error { 1997 errs := make([]error, len(txs)) 1998 for i := 0; i < len(errs); i++ { 1999 errs[i] = txpool.ErrUnderpriced 2000 } 2001 return errs 2002 }, 2003 func(string, []common.Hash) error { return nil }, 2004 func(string) {}, 2005 ) 2006 fetcher.Start() 2007 defer fetcher.Stop() 2008 // Create one TX which is 5 minutes old, and one which is recent 2009 tx1 := types.NewTx(&types.DynamicFeeTx{Nonce: 0}) 2010 tx1.SetTime(time.Now().Add(-maxTxUnderpricedTimeout - 1*time.Second)) 2011 tx2 := types.NewTx(&types.DynamicFeeTx{Nonce: 1}) 2012 2013 // Enqueue both in the fetcher. 
They will be immediately tagged as underpriced 2014 if err := fetcher.Enqueue("asdf", []*types.Transaction{tx1, tx2}, false); err != nil { 2015 t.Fatal(err) 2016 } 2017 // isKnownUnderpriced should trigger removal of the first tx, which is no longer considered underpriced 2018 if fetcher.isKnownUnderpriced(tx1.Hash()) { 2019 t.Fatal("transaction should be forgotten by now") 2020 } 2021 // isKnownUnderpriced should not trigger removal of the second tx 2022 if !fetcher.isKnownUnderpriced(tx2.Hash()) { 2023 t.Fatal("transaction should be known underpriced") 2024 } 2025 }
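// The scenario runner above makes new regression cases cheap to add. The test
// below is an illustrative sketch only (it is not part of the original suite,
// and the expected underpriced count of one is an assumption): it shows how the
// step DSL can be combined with a pool callback that rejects everything as
// underpriced, driving a delivered hash into the fetcher's underpriced cache.
func TestTransactionFetcherUnderpricedSketch(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					// Reject every delivered transaction as underpriced
					errs := make([]error, len(txs))
					for i := range errs {
						errs[i] = txpool.ErrUnderpriced
					}
					return errs
				},
				func(string, []common.Hash) error { return nil },
				func(string) {},
			)
		},
		steps: []interface{}{
			// Announce a transaction and let it graduate from the waitlist into a live request
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			// Deliver it directly; the pool rejects it, so its hash should land in the underpriced set
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
			isUnderpriced(1),
		},
	})
}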