// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package fetcher

import (
	"errors"
	"math/big"
	"math/rand"
	"slices"
	"testing"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/mclock"
	"github.com/ethereum/go-ethereum/core/txpool"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/params"
)

var (
	// testTxs is a set of transactions to use during testing that have meaningful hashes.
	testTxs = []*types.Transaction{
		types.NewTransaction(5577006791947779410, common.Address{0x0f}, new(big.Int), 0, new(big.Int), nil),
		types.NewTransaction(15352856648520921629, common.Address{0xbb}, new(big.Int), 0, new(big.Int), nil),
		types.NewTransaction(3916589616287113937, common.Address{0x86}, new(big.Int), 0, new(big.Int), nil),
		types.NewTransaction(9828766684487745566, common.Address{0xac}, new(big.Int), 0, new(big.Int), nil),
	}
	// testTxsHashes are the hashes of the test transactions above.
	testTxsHashes = []common.Hash{testTxs[0].Hash(), testTxs[1].Hash(), testTxs[2].Hash(), testTxs[3].Hash()}
)

// announce represents an expected transaction announcement in the test
// assertions below. The kind and size pointers are optional: nil means
// "no metadata expected" for that field.
type announce struct {
	hash common.Hash // announced transaction hash
	kind *byte       // announced transaction type, nil if no metadata
	size *uint32     // announced transaction size, nil if no metadata
}

// typeptr and sizeptr are tiny helpers to obtain pointers to literal
// metadata values when building announce expectations.
func typeptr(t byte) *byte     { return &t }
func sizeptr(n uint32) *uint32 { return &n }

// doTxNotify is a test step that announces a set of transaction hashes
// (with optional type/size metadata) on behalf of a named peer.
type doTxNotify struct {
	peer   string
	hashes []common.Hash
	types  []byte
	sizes  []uint32
}

// doTxEnqueue is a test step that delivers transactions from a named peer,
// either as a direct (requested) reply or as an unsolicited broadcast.
type doTxEnqueue struct {
	peer   string
	txs    []*types.Transaction
	direct bool
}

// doWait is a test step that advances the simulated clock; if step is set,
// the fetcher is expected to act on the elapsed time.
type doWait struct {
	time time.Duration
	step bool
}

// doDrop is a test step naming a peer to drop; doFunc runs an arbitrary
// callback in the middle of a scenario.
type doDrop string
type doFunc func()

// isWaitingWithMeta and isWaiting assert the exact contents of the
// fetcher's waitlist (with or without metadata checks), keyed by peer.
type isWaitingWithMeta map[string][]announce
type isWaiting map[string][]common.Hash

// isScheduledWithMeta and isScheduled assert the fetcher's scheduler
// state: announcements tracked per peer, hashes actively being fetched,
// and dangling (in-flight but no longer tracked) requests.
type isScheduledWithMeta struct {
	tracking map[string][]announce
	fetching map[string][]common.Hash
	dangling map[string][]common.Hash
}
type isScheduled struct {
	tracking map[string][]common.Hash
	fetching map[string][]common.Hash
	dangling map[string][]common.Hash
}
type isUnderpriced int

// txFetcherTest represents a test scenario that can be executed by the test
// runner.
type txFetcherTest struct {
	init  func() *TxFetcher
	steps []interface{}
}

// Tests that transaction announcements are added to a waitlist, and none
// of them are scheduled for retrieval until the wait expires.
func TestTransactionFetcherWaiting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false }, // nothing is ever known locally
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Initial announcement to get something into the waitlist
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			// Announce from a new peer to check that no overwrite happens
			doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
				"B": {{0x03}, {0x04}},
			}),
			// Announce clashing hashes but unique new peer
			doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
				"B": {{0x03}, {0x04}},
				"C": {{0x01}, {0x04}},
			}),
			// Announce existing and clashing hashes from existing peer
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}, {0x03}, {0x05}},
				"B": {{0x03}, {0x04}},
				"C": {{0x01}, {0x04}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			// Wait for the arrival timeout which should move all expired items
			// from the wait list to the scheduler
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
					"B": {{0x03}, {0x04}},
					"C": {{0x01}, {0x04}},
				},
				fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
					"A": {{0x02}, {0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
				},
			},
			// Queue up a non-fetchable transaction and then trigger it with a new
			// peer (weird case to test 1 line in the fetcher)
			doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}},
			isWaiting(map[string][]common.Hash{
				"C": {{0x06}, {0x07}},
			}),
			doWait{time: txArriveTimeout, step: true},
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
					"B": {{0x03}, {0x04}},
					"C": {{0x01}, {0x04}, {0x06}, {0x07}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x02}, {0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
				},
			},
			doTxNotify{peer: "D", hashes: []common.Hash{{0x06}, {0x07}}},
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x05}},
					"B": {{0x03}, {0x04}},
					"C": {{0x01}, {0x04}, {0x06}, {0x07}},
					"D": {{0x06}, {0x07}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x02}, {0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
					"D": {{0x06}, {0x07}},
				},
			},
		},
	})
}

// Tests that transaction announcements with associated metadata are added to a
// waitlist, and none of them are scheduled for retrieval until the wait expires.
//
// This test is an extended version of TestTransactionFetcherWaiting. It's mostly
// to cover the metadata checks without bloating up the basic behavioral tests
// with all the useless extra fields.
func TestTransactionFetcherWaitingWithMeta(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false }, // nothing is ever known locally
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Initial announcement to get something into the waitlist
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 222}},
			isWaitingWithMeta(map[string][]announce{
				"A": {
					{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
					{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
				},
			}),
			// Announce from a new peer to check that no overwrite happens
			doTxNotify{peer: "B", hashes: []common.Hash{{0x03}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{333, 444}},
			isWaitingWithMeta(map[string][]announce{
				"A": {
					{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
					{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
				},
				"B": {
					{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
					{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
				},
			}),
			// Announce clashing hashes but unique new peer
			doTxNotify{peer: "C", hashes: []common.Hash{{0x01}, {0x04}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{111, 444}},
			isWaitingWithMeta(map[string][]announce{
				"A": {
					{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
					{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
				},
				"B": {
					{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
					{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
				},
				"C": {
					{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
					{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
				},
			}),
			// Announce existing and clashing hashes from existing peer. Clashes
			// should not overwrite previous announcements.
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x03}, {0x05}}, types: []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{999, 333, 555}},
			isWaitingWithMeta(map[string][]announce{
				"A": {
					{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
					{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
					{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
					{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
				},
				"B": {
					{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
					{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
				},
				"C": {
					{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
					{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
				},
			}),
			// Announce clashing hashes with conflicting metadata. Somebody will
			// be in the wrong, but we don't know yet who.
			doTxNotify{peer: "D", hashes: []common.Hash{{0x01}, {0x02}}, types: []byte{types.LegacyTxType, types.BlobTxType}, sizes: []uint32{999, 222}},
			isWaitingWithMeta(map[string][]announce{
				"A": {
					{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
					{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
					{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
					{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
				},
				"B": {
					{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
					{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
				},
				"C": {
					{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
					{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
				},
				"D": {
					{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
					{common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
				},
			}),
			isScheduled{tracking: nil, fetching: nil},

			// Wait for the arrival timeout which should move all expired items
			// from the wait list to the scheduler
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduledWithMeta{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
						{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
						{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
						{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
					},
					"B": {
						{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
						{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
					},
					"C": {
						{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
						{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
					},
					"D": {
						{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
						{common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
					},
				},
				fetching: map[string][]common.Hash{ // Depends on deterministic test randomizer
					"A": {{0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
					"D": {{0x02}},
				},
			},
			// Queue up a non-fetchable transaction and then trigger it with a new
			// peer (weird case to test 1 line in the fetcher)
			doTxNotify{peer: "C", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{666, 777}},
			isWaitingWithMeta(map[string][]announce{
				"C": {
					{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
					{common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
				},
			}),
			doWait{time: txArriveTimeout, step: true},
			isScheduledWithMeta{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
						{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
						{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
						{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
					},
					"B": {
						{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
						{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
					},
					"C": {
						{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
						{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
						{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
						{common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
					},
					"D": {
						{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
						{common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
					"D": {{0x02}},
				},
			},
			doTxNotify{peer: "E", hashes: []common.Hash{{0x06}, {0x07}}, types: []byte{types.LegacyTxType, types.LegacyTxType}, sizes: []uint32{666, 777}},
			isScheduledWithMeta{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
						{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(222)},
						{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
						{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(555)},
					},
					"B": {
						{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(333)},
						{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
					},
					"C": {
						{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(111)},
						{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(444)},
						{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
						{common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
					},
					"D": {
						{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(999)},
						{common.Hash{0x02}, typeptr(types.BlobTxType), sizeptr(222)},
					},
					"E": {
						{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(666)},
						{common.Hash{0x07}, typeptr(types.LegacyTxType), sizeptr(777)},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x03}, {0x05}},
					"C": {{0x01}, {0x04}},
					"D": {{0x02}},
					"E": {{0x06}, {0x07}},
				},
			},
		},
	})
}

// Tests that transaction announcements skip the waiting list if they are
// already scheduled.
func TestTransactionFetcherSkipWaiting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false }, // nothing is ever known locally
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce overlaps from the same peer, ensure the new ones end up
			// in stage one, and clashing ones don't get double tracked
			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}, {0x03}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x03}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce overlaps from a new peer, ensure new transactions end up
			// in stage one and clashing ones get tracked for the new peer
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x03}},
				"B": {{0x03}, {0x04}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
		},
	})
}

// Tests that only a single transaction request gets scheduled to a peer
// and subsequent announces block or get allotted to someone else.
func TestTransactionFetcherSingletonRequesting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false }, // nothing is ever known locally
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce a new set of transactions from the same peer and ensure
			// they do not start fetching since the peer is already busy
			doTxNotify{peer: "A", hashes: []common.Hash{{0x03}, {0x04}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x03}, {0x04}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x04}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Announce a duplicate set of transactions from a new peer and ensure
			// uniquely new ones start downloading, even if clashing.
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}, {0x03}, {0x05}, {0x06}}},
			isWaiting(map[string][]common.Hash{
				"B": {{0x05}, {0x06}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}, {0x03}, {0x04}},
					"B": {{0x02}, {0x03}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
					"B": {{0x03}},
				},
			},
		},
	})
}

// Tests that if a transaction retrieval fails, all the transactions get
// instantly scheduled back to someone else or the announcements dropped
// if no alternate source is available.
func TestTransactionFetcherFailedRescheduling(t *testing.T) {
	// Create a channel to control when tx requests can fail
	proceed := make(chan struct{})
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false }, // nothing is ever known locally
				nil,
				func(origin string, hashes []common.Hash) error {
					// Block until the test explicitly releases the request,
					// then report the peer as disconnected.
					<-proceed
					return errors.New("peer disconnected")
				},
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}, {0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// While the original peer is stuck in the request, push in a second
			// data source.
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
			},
			// Wait until the original request fails and check that transactions
			// are either rescheduled or dropped
			doFunc(func() {
				proceed <- struct{}{} // Allow peer A to return the failure
			}),
			doWait{time: 0, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"B": {{0x02}},
				},
			},
			doFunc(func() {
				proceed <- struct{}{} // Allow peer B to return the failure
			}),
			doWait{time: 0, step: true},
			isWaiting(nil),
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that if a transaction retrieval succeeds, all alternate origins
// are cleaned up.
func TestTransactionFetcherCleanup(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false }, // nothing is ever known locally
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs)) // accept every delivered transaction
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Request should be delivered
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that if a transaction retrieval succeeds, but the response is empty
// (no transactions available), then all are nuked instead of being rescheduled
// (yes, this was a bug).
func TestTransactionFetcherCleanupEmpty(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false }, // nothing is ever known locally
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs)) // accept every delivered transaction
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver an empty response and ensure the transaction is cleared, not rescheduled
			doTxEnqueue{peer: "A", txs: []*types.Transaction{}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that non-returned transactions are either re-scheduled from a
// different peer, or self if they are after the cutoff point.
func TestTransactionFetcherMissingRescheduling(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false }, // nothing is ever known locally
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs)) // accept every delivered transaction
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]},
				},
			},
			// Deliver the middle transaction requested, the one before which
			// should be dropped and the one after re-requested.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true}, // This depends on the deterministic random
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[2]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[2]},
				},
			},
		},
	})
}

// Tests that out of two transactions, if one is missing and the last is
// delivered, the peer gets properly cleaned out from the internal state.
func TestTransactionFetcherMissingCleanup(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false }, // nothing is ever known locally
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs)) // accept every delivered transaction
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0], testTxsHashes[1]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
			},
			// Deliver the middle transaction requested, the one before which
			// should be dropped and the one after re-requested.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true}, // This depends on the deterministic random
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that transaction broadcasts properly clean up announcements.
func TestTransactionFetcherBroadcasts(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false }, // nothing is ever known locally
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs)) // accept every delivered transaction
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Set up three transactions to be in different stats, waiting, queued and fetching
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},

			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[2]},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Broadcast all the transactions and ensure everything gets cleaned
			// up, but the dangling request is left alone to avoid doing multiple
			// concurrent requests.
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: false},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver the requested hashes
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2]}, direct: true},
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that the waiting list timers properly reset and reschedule.
func TestTransactionFetcherWaitTimerResets(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false }, // nothing is ever known locally
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}},
			}),
			isScheduled{nil, nil, nil},
			// Half the arrival window elapses: nothing should move yet
			doWait{time: txArriveTimeout / 2, step: false},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}},
			}),
			isScheduled{nil, nil, nil},

			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
			isWaiting(map[string][]common.Hash{
				"A": {{0x01}, {0x02}},
			}),
			isScheduled{nil, nil, nil},
			// The first announcement's full window has now elapsed, the second's has not
			doWait{time: txArriveTimeout / 2, step: true},
			isWaiting(map[string][]common.Hash{
				"A": {{0x02}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},

			doWait{time: txArriveTimeout / 2, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},
		},
	})
}

// Tests that if a transaction request is not replied to, it will time
// out and be re-scheduled for someone else.
func TestTransactionFetcherTimeoutRescheduling(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false }, // nothing is ever known locally
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs)) // accept every delivered transaction
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Push an initial announcement through to the scheduled stage
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[0]},
			}),
			isScheduled{tracking: nil, fetching: nil},

			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Wait until the delivery times out, everything should be cleaned up
			doWait{time: txFetchTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {},
				},
			},
			// Ensure that followup announcements don't get scheduled
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
			doWait{time: txArriveTimeout, step: true},
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[1]},
				},
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {},
				},
			},
			// If the dangling request arrives a bit later, do not choke
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[1]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[1]},
				},
			},
		},
	})
}

// Tests that the fetching timeout timers properly reset and reschedule.
func TestTransactionFetcherTimeoutTimerResets(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Schedule two fetches from two peers, staggered by txArriveTimeout
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "B", hashes: []common.Hash{{0x02}}},
			doWait{time: txArriveTimeout, step: true},

			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}},
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
					"B": {{0x02}},
				},
			},
			// A's request times out first (it was started earlier), B's keeps running
			doWait{time: txFetchTimeout - txArriveTimeout, step: true},
			isScheduled{
				tracking: map[string][]common.Hash{
					"B": {{0x02}},
				},
				fetching: map[string][]common.Hash{
					"B": {{0x02}},
				},
				dangling: map[string][]common.Hash{
					"A": {},
				},
			},
			// B's request times out on its own (reset) timer, one period later
			doWait{time: txArriveTimeout, step: true},
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {},
					"B": {},
				},
			},
		},
	})
}

// Tests that if thousands of transactions are announced, only a small
// number of them will be requested at a time.
func TestTransactionFetcherRateLimiting(t *testing.T) {
	// Create a slew of transactions and announce them
	var hashes []common.Hash
	for i := 0; i < maxTxAnnounces; i++ {
		hashes = append(hashes, common.Hash{byte(i / 256), byte(i % 256)})
	}
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Announce all the transactions, wait a bit and ensure only a small
			// percentage gets requested
			doTxNotify{peer: "A", hashes: hashes},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": hashes,
				},
				fetching: map[string][]common.Hash{
					// 1643 is the random start offset produced by the test
					// runner's seeded RNG (rand.NewSource(0x3a29))
					"A": hashes[1643 : 1643+maxTxRetrievals],
				},
			},
		},
	})
}

// Tests that if huge transactions are announced, only a small number of them will
// be requested at a time, to keep the responses below a reasonable level.
func TestTransactionFetcherBandwidthLimiting(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Announce mid size transactions from A to verify that multiple
			// ones can be piled into a single request.
			doTxNotify{peer: "A",
				hashes: []common.Hash{{0x01}, {0x02}, {0x03}, {0x04}},
				types:  []byte{types.LegacyTxType, types.LegacyTxType, types.LegacyTxType, types.LegacyTxType},
				sizes:  []uint32{48 * 1024, 48 * 1024, 48 * 1024, 48 * 1024},
			},
			// Announce exactly on the limit transactions to see that only one
			// gets requested
			doTxNotify{peer: "B",
				hashes: []common.Hash{{0x05}, {0x06}},
				types:  []byte{types.LegacyTxType, types.LegacyTxType},
				sizes:  []uint32{maxTxRetrievalSize, maxTxRetrievalSize},
			},
			// Announce oversized blob transactions to see that overflows are ok
			doTxNotify{peer: "C",
				hashes: []common.Hash{{0x07}, {0x08}},
				types:  []byte{types.BlobTxType, types.BlobTxType},
				sizes:  []uint32{params.MaxBlobGasPerBlock, params.MaxBlobGasPerBlock},
			},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduledWithMeta{
				tracking: map[string][]announce{
					"A": {
						{common.Hash{0x01}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
						{common.Hash{0x02}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
						{common.Hash{0x03}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
						{common.Hash{0x04}, typeptr(types.LegacyTxType), sizeptr(48 * 1024)},
					},
					"B": {
						{common.Hash{0x05}, typeptr(types.LegacyTxType), sizeptr(maxTxRetrievalSize)},
						{common.Hash{0x06}, typeptr(types.LegacyTxType), sizeptr(maxTxRetrievalSize)},
					},
					"C": {
						{common.Hash{0x07}, typeptr(types.BlobTxType), sizeptr(params.MaxBlobGasPerBlock)},
						{common.Hash{0x08}, typeptr(types.BlobTxType), sizeptr(params.MaxBlobGasPerBlock)},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x02}, {0x03}, {0x04}},
					"B": {{0x06}},
					"C": {{0x08}},
				},
			},
		},
	})
}

// Tests that the number of transactions a peer is allowed to announce and/or
// request at the same time is hard capped.
func TestTransactionFetcherDoSProtection(t *testing.T) {
	// Create a slew of transactions to announce
	var hashesA []common.Hash
	for i := 0; i < maxTxAnnounces+1; i++ {
		hashesA = append(hashesA, common.Hash{0x01, byte(i / 256), byte(i % 256)})
	}
	var hashesB []common.Hash
	for i := 0; i < maxTxAnnounces+1; i++ {
		hashesB = append(hashesB, common.Hash{0x02, byte(i / 256), byte(i % 256)})
	}
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				nil,
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Announce half of the transaction and wait for them to be scheduled
			doTxNotify{peer: "A", hashes: hashesA[:maxTxAnnounces/2]},
			doTxNotify{peer: "B", hashes: hashesB[:maxTxAnnounces/2-1]},
			doWait{time: txArriveTimeout, step: true},

			// Announce the second half and keep them in the wait list
			doTxNotify{peer: "A", hashes: hashesA[maxTxAnnounces/2 : maxTxAnnounces]},
			doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1]},

			// Ensure the hashes are split half and half
			isWaiting(map[string][]common.Hash{
				"A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
				"B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces-1],
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": hashesA[:maxTxAnnounces/2],
					"B": hashesB[:maxTxAnnounces/2-1],
				},
				fetching: map[string][]common.Hash{
					// Offsets below depend on the runner's seeded RNG — see
					// rand.NewSource(0x3a29) in testTransactionFetcher
					"A": hashesA[1643 : 1643+maxTxRetrievals],
					"B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
				},
			},
			// Ensure that adding even one more hash results in dropping the hash
			doTxNotify{peer: "A", hashes: []common.Hash{hashesA[maxTxAnnounces]}},
			doTxNotify{peer: "B", hashes: hashesB[maxTxAnnounces-1 : maxTxAnnounces+1]},

			isWaiting(map[string][]common.Hash{
				"A": hashesA[maxTxAnnounces/2 : maxTxAnnounces],
				"B": hashesB[maxTxAnnounces/2-1 : maxTxAnnounces],
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": hashesA[:maxTxAnnounces/2],
					"B": hashesB[:maxTxAnnounces/2-1],
				},
				fetching: map[string][]common.Hash{
					"A": hashesA[1643 : 1643+maxTxRetrievals],
					"B": append(append([]common.Hash{}, hashesB[maxTxAnnounces/2-3:maxTxAnnounces/2-1]...), hashesB[:maxTxRetrievals-2]...),
				},
			},
		},
	})
}

// Tests that underpriced transactions don't get rescheduled after being rejected.
func TestTransactionFetcherUnderpricedDedup(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					// Reject every delivery, alternating between the two
					// underpriced error flavors
					errs := make([]error, len(txs))
					for i := 0; i < len(errs); i++ {
						if i%2 == 0 {
							errs[i] = txpool.ErrUnderpriced
						} else {
							errs[i] = txpool.ErrReplaceUnderpriced
						}
					}
					return errs
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Deliver a transaction through the fetcher, but reject as underpriced
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}, direct: true},
			isScheduled{nil, nil, nil},

			// Try to announce the transaction again, ensure it's not scheduled back
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1], testTxsHashes[2]}}, // [2] is needed to force a step in the fetcher
			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[2]},
			}),
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that underpriced transactions don't get rescheduled after being rejected,
// but at the same time there's a hard cap on the number of transactions that are
// tracked.
func TestTransactionFetcherUnderpricedDoSProtection(t *testing.T) {
	// Temporarily disable fetch timeouts as they massively mess up the simulated clock
	defer func(timeout time.Duration) { txFetchTimeout = timeout }(txFetchTimeout)
	txFetchTimeout = 24 * time.Hour

	// Create a slew of transactions to max out the underpriced set
	var txs []*types.Transaction
	for i := 0; i < maxTxUnderpricedSetSize+1; i++ {
		txs = append(txs, types.NewTransaction(rand.Uint64(), common.Address{byte(rand.Intn(256))}, new(big.Int), 0, new(big.Int), nil))
	}
	hashes := make([]common.Hash, len(txs))
	for i, tx := range txs {
		hashes[i] = tx.Hash()
	}
	// Generate a set of steps to announce and deliver the entire set of transactions
	var steps []interface{}
	for i := 0; i < maxTxUnderpricedSetSize/maxTxRetrievals; i++ {
		steps = append(steps, doTxNotify{peer: "A", hashes: hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals]})
		steps = append(steps, isWaiting(map[string][]common.Hash{
			"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
		}))
		steps = append(steps, doWait{time: txArriveTimeout, step: true})
		steps = append(steps, isScheduled{
			tracking: map[string][]common.Hash{
				"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
			},
			fetching: map[string][]common.Hash{
				"A": hashes[i*maxTxRetrievals : (i+1)*maxTxRetrievals],
			},
		})
		steps = append(steps, doTxEnqueue{peer: "A", txs: txs[i*maxTxRetrievals : (i+1)*maxTxRetrievals], direct: true})
		steps = append(steps, isWaiting(nil))
		steps = append(steps, isScheduled{nil, nil, nil})
		steps = append(steps, isUnderpriced((i+1)*maxTxRetrievals))
	}
	testTransactionFetcher(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					// Reject everything as underpriced to grow the tracked set
					errs := make([]error, len(txs))
					for i := 0; i < len(errs); i++ {
						errs[i] = txpool.ErrUnderpriced
					}
					return errs
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: append(steps, []interface{}{
			// The preparation of the test has already been done in `steps`, add the last check
			doTxNotify{peer: "A", hashes: []common.Hash{hashes[maxTxUnderpricedSetSize]}},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{txs[maxTxUnderpricedSetSize]}, direct: true},
			// The set must not grow past its hard cap
			isUnderpriced(maxTxUnderpricedSetSize),
		}...),
	})
}

// Tests that unexpected deliveries don't corrupt the internal state.
func TestTransactionFetcherOutOfBoundDeliveries(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Deliver something out of the blue
			isWaiting(nil),
			isScheduled{nil, nil, nil},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}, direct: false},
			isWaiting(nil),
			isScheduled{nil, nil, nil},

			// Set up a few hashes into various stages
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[1]}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[2]}},

			isWaiting(map[string][]common.Hash{
				"A": {testTxsHashes[2]},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0], testTxsHashes[1]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			// Deliver everything and more out of the blue
			doTxEnqueue{peer: "B", txs: []*types.Transaction{testTxs[0], testTxs[1], testTxs[2], testTxs[3]}, direct: true},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
		},
	})
}

// Tests that dropping a peer cleans out all internal data structures in all the
// live or dangling stages.
func TestTransactionFetcherDrop(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Set up a few hashes into various stages
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{{0x02}}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "A", hashes: []common.Hash{{0x03}}},

			isWaiting(map[string][]common.Hash{
				"A": {{0x03}},
			}),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}, {0x02}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},
			// Drop the peer and ensure everything's cleaned out
			doDrop("A"),
			isWaiting(nil),
			isScheduled{nil, nil, nil},

			// Push the node into a dangling (timeout) state
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
				},
			},
			doWait{time: txFetchTimeout, step: true},
			isWaiting(nil),
			isScheduled{
				tracking: nil,
				fetching: nil,
				dangling: map[string][]common.Hash{
					"A": {},
				},
			},
			// Drop the peer and ensure everything's cleaned out
			doDrop("A"),
			isWaiting(nil),
			isScheduled{nil, nil, nil},
		},
	})
}

// Tests that dropping a peer instantly reschedules failed announcements to any
// available peer.
func TestTransactionFetcherDropRescheduling(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Have the same hash known by two peers, fetched from A
			doTxNotify{peer: "A", hashes: []common.Hash{{0x01}}},
			doWait{time: txArriveTimeout, step: true},
			doTxNotify{peer: "B", hashes: []common.Hash{{0x01}}},

			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"A": {{0x01}},
					"B": {{0x01}},
				},
				fetching: map[string][]common.Hash{
					"A": {{0x01}},
				},
			},
			// Drop the peer and ensure the fetch moves over to B
			doDrop("A"),
			isWaiting(nil),
			isScheduled{
				tracking: map[string][]common.Hash{
					"B": {{0x01}},
				},
				fetching: map[string][]common.Hash{
					"B": {{0x01}},
				},
			},
		},
	})
}

// Tests that announced transactions with the wrong transaction type or size will
// result in a dropped peer.
func TestInvalidAnnounceMetadata(t *testing.T) {
	drop := make(chan string, 2)
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				func(peer string) { drop <- peer }, // record which peers get dropped
			)
		},
		steps: []interface{}{
			// Initial announcement to get something into the waitlist
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}, types: []byte{testTxs[0].Type(), testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[0].Size()), uint32(testTxs[1].Size())}},
			isWaitingWithMeta(map[string][]announce{
				"A": {
					{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))},
					{testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
				},
			}),
			// Announce from new peers conflicting transactions: B lies about
			// the size, C lies about the type
			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}, types: []byte{testTxs[0].Type()}, sizes: []uint32{1024 + uint32(testTxs[0].Size())}},
			doTxNotify{peer: "C", hashes: []common.Hash{testTxsHashes[1]}, types: []byte{1 + testTxs[1].Type()}, sizes: []uint32{uint32(testTxs[1].Size())}},
			isWaitingWithMeta(map[string][]announce{
				"A": {
					{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))},
					{testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
				},
				"B": {
					{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(1024 + uint32(testTxs[0].Size()))},
				},
				"C": {
					{testTxsHashes[1], typeptr(1 + testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
				},
			}),
			// Schedule all the transactions for retrieval
			doWait{time: txArriveTimeout, step: true},
			isWaitingWithMeta(nil),
			isScheduledWithMeta{
				tracking: map[string][]announce{
					"A": {
						{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(uint32(testTxs[0].Size()))},
						{testTxsHashes[1], typeptr(testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
					},
					"B": {
						{testTxsHashes[0], typeptr(testTxs[0].Type()), sizeptr(1024 + uint32(testTxs[0].Size()))},
					},
					"C": {
						{testTxsHashes[1], typeptr(1 + testTxs[1].Type()), sizeptr(uint32(testTxs[1].Size()))},
					},
				},
				fetching: map[string][]common.Hash{
					"A": {testTxsHashes[0]},
					"C": {testTxsHashes[1]},
				},
			},
			// Deliver the transactions and wait for B to be dropped
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}},
			doFunc(func() { <-drop }),
			doFunc(func() { <-drop }),
		},
	})
}

// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction timing out and clashing on re-add with a concurrently
// announced one.
func TestTransactionFetcherFuzzCrash01(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Get a transaction into fetching mode and make it dangling with a broadcast
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},

			// Notify the dangling transaction once more and crash via a timeout
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txFetchTimeout, step: true},
		},
	})
}

// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction getting peer-dropped and clashing on re-add with a
// concurrently announced one.
func TestTransactionFetcherFuzzCrash02(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Get a transaction into fetching mode and make it dangling with a broadcast
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}},

			// Notify the dangling transaction once more, re-fetch, and crash via a drop and timeout
			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},
			doDrop("A"),
			doWait{time: txFetchTimeout, step: true},
		},
	})
}

// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction getting rescheduled via a partial delivery, clashing
// with a concurrent notify.
func TestTransactionFetcherFuzzCrash03(t *testing.T) {
	testTransactionFetcherParallel(t, txFetcherTest{
		init: func() *TxFetcher {
			return NewTxFetcher(
				func(common.Hash) bool { return false },
				func(txs []*types.Transaction) []error {
					return make([]error, len(txs))
				},
				func(string, []common.Hash) error { return nil },
				nil,
			)
		},
		steps: []interface{}{
			// Get a transaction into fetching mode and make it dangling with a broadcast
			doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0], testTxsHashes[1]}},
			doWait{time: txFetchTimeout, step: true},
			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0], testTxs[1]}},

			// Notify the dangling transaction once more, partially deliver, clash&crash with a timeout
			doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}},
			doWait{time: txArriveTimeout, step: true},

			doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[1]}, direct: true},
			doWait{time: txFetchTimeout, step: true},
		},
	})
}

// This test reproduces a crash caught by the fuzzer. The root cause was a
// dangling transaction getting rescheduled via a disconnect, clashing with
// a concurrent notify.
1589 func TestTransactionFetcherFuzzCrash04(t *testing.T) { 1590 // Create a channel to control when tx requests can fail 1591 proceed := make(chan struct{}) 1592 1593 testTransactionFetcherParallel(t, txFetcherTest{ 1594 init: func() *TxFetcher { 1595 return NewTxFetcher( 1596 func(common.Hash) bool { return false }, 1597 func(txs []*types.Transaction) []error { 1598 return make([]error, len(txs)) 1599 }, 1600 func(string, []common.Hash) error { 1601 <-proceed 1602 return errors.New("peer disconnected") 1603 }, 1604 nil, 1605 ) 1606 }, 1607 steps: []interface{}{ 1608 // Get a transaction into fetching mode and make it dangling with a broadcast 1609 doTxNotify{peer: "A", hashes: []common.Hash{testTxsHashes[0]}}, 1610 doWait{time: txArriveTimeout, step: true}, 1611 doTxEnqueue{peer: "A", txs: []*types.Transaction{testTxs[0]}}, 1612 1613 // Notify the dangling transaction once more, re-fetch, and crash via an in-flight disconnect 1614 doTxNotify{peer: "B", hashes: []common.Hash{testTxsHashes[0]}}, 1615 doWait{time: txArriveTimeout, step: true}, 1616 doFunc(func() { 1617 proceed <- struct{}{} // Allow peer A to return the failure 1618 }), 1619 doWait{time: 0, step: true}, 1620 doWait{time: txFetchTimeout, step: true}, 1621 }, 1622 }) 1623 } 1624 1625 func testTransactionFetcherParallel(t *testing.T, tt txFetcherTest) { 1626 t.Parallel() 1627 testTransactionFetcher(t, tt) 1628 } 1629 1630 func testTransactionFetcher(t *testing.T, tt txFetcherTest) { 1631 // Create a fetcher and hook into it's simulated fields 1632 clock := new(mclock.Simulated) 1633 wait := make(chan struct{}) 1634 1635 fetcher := tt.init() 1636 fetcher.clock = clock 1637 fetcher.step = wait 1638 fetcher.rand = rand.New(rand.NewSource(0x3a29)) 1639 1640 fetcher.Start() 1641 defer fetcher.Stop() 1642 1643 defer func() { // drain the wait chan on exit 1644 for { 1645 select { 1646 case <-wait: 1647 default: 1648 return 1649 } 1650 } 1651 }() 1652 1653 // Crunch through all the test steps and execute them 
1654 for i, step := range tt.steps { 1655 // Auto-expand certain steps to ones with metadata 1656 switch old := step.(type) { 1657 case isWaiting: 1658 new := make(isWaitingWithMeta) 1659 for peer, hashes := range old { 1660 for _, hash := range hashes { 1661 new[peer] = append(new[peer], announce{hash, nil, nil}) 1662 } 1663 } 1664 step = new 1665 1666 case isScheduled: 1667 new := isScheduledWithMeta{ 1668 tracking: make(map[string][]announce), 1669 fetching: old.fetching, 1670 dangling: old.dangling, 1671 } 1672 for peer, hashes := range old.tracking { 1673 for _, hash := range hashes { 1674 new.tracking[peer] = append(new.tracking[peer], announce{hash, nil, nil}) 1675 } 1676 } 1677 step = new 1678 } 1679 // Process the original or expanded steps 1680 switch step := step.(type) { 1681 case doTxNotify: 1682 if err := fetcher.Notify(step.peer, step.types, step.sizes, step.hashes); err != nil { 1683 t.Errorf("step %d: %v", i, err) 1684 } 1685 <-wait // Fetcher needs to process this, wait until it's done 1686 select { 1687 case <-wait: 1688 panic("wtf") 1689 case <-time.After(time.Millisecond): 1690 } 1691 1692 case doTxEnqueue: 1693 if err := fetcher.Enqueue(step.peer, step.txs, step.direct); err != nil { 1694 t.Errorf("step %d: %v", i, err) 1695 } 1696 <-wait // Fetcher needs to process this, wait until it's done 1697 1698 case doWait: 1699 clock.Run(step.time) 1700 if step.step { 1701 <-wait // Fetcher supposed to do something, wait until it's done 1702 } 1703 1704 case doDrop: 1705 if err := fetcher.Drop(string(step)); err != nil { 1706 t.Errorf("step %d: %v", i, err) 1707 } 1708 <-wait // Fetcher needs to process this, wait until it's done 1709 1710 case doFunc: 1711 step() 1712 1713 case isWaitingWithMeta: 1714 // We need to check that the waiting list (stage 1) internals 1715 // match with the expected set. Check the peer->hash mappings 1716 // first. 
1717 for peer, announces := range step { 1718 waiting := fetcher.waitslots[peer] 1719 if waiting == nil { 1720 t.Errorf("step %d: peer %s missing from waitslots", i, peer) 1721 continue 1722 } 1723 for _, ann := range announces { 1724 if meta, ok := waiting[ann.hash]; !ok { 1725 t.Errorf("step %d, peer %s: hash %x missing from waitslots", i, peer, ann.hash) 1726 } else { 1727 if (meta == nil && (ann.kind != nil || ann.size != nil)) || 1728 (meta != nil && (ann.kind == nil || ann.size == nil)) || 1729 (meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) { 1730 t.Errorf("step %d, peer %s, hash %x: waitslot metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, *ann.kind, *ann.size) 1731 } 1732 } 1733 } 1734 for hash, meta := range waiting { 1735 ann := announce{hash: hash} 1736 if meta != nil { 1737 ann.kind, ann.size = &meta.kind, &meta.size 1738 } 1739 if !containsAnnounce(announces, ann) { 1740 t.Errorf("step %d, peer %s: announce %v extra in waitslots", i, peer, ann) 1741 } 1742 } 1743 } 1744 for peer := range fetcher.waitslots { 1745 if _, ok := step[peer]; !ok { 1746 t.Errorf("step %d: peer %s extra in waitslots", i, peer) 1747 } 1748 } 1749 // Peer->hash sets correct, check the hash->peer and timeout sets 1750 for peer, announces := range step { 1751 for _, ann := range announces { 1752 if _, ok := fetcher.waitlist[ann.hash][peer]; !ok { 1753 t.Errorf("step %d, hash %x: peer %s missing from waitlist", i, ann.hash, peer) 1754 } 1755 if _, ok := fetcher.waittime[ann.hash]; !ok { 1756 t.Errorf("step %d: hash %x missing from waittime", i, ann.hash) 1757 } 1758 } 1759 } 1760 for hash, peers := range fetcher.waitlist { 1761 if len(peers) == 0 { 1762 t.Errorf("step %d, hash %x: empty peerset in waitlist", i, hash) 1763 } 1764 for peer := range peers { 1765 if !containsHashInAnnounces(step[peer], hash) { 1766 t.Errorf("step %d, hash %x: peer %s extra in waitlist", i, hash, peer) 1767 } 1768 } 1769 } 1770 for hash := range fetcher.waittime { 
1771 var found bool 1772 for _, announces := range step { 1773 if containsHashInAnnounces(announces, hash) { 1774 found = true 1775 break 1776 } 1777 } 1778 if !found { 1779 t.Errorf("step %d,: hash %x extra in waittime", i, hash) 1780 } 1781 } 1782 1783 case isScheduledWithMeta: 1784 // Check that all scheduled announces are accounted for and no 1785 // extra ones are present. 1786 for peer, announces := range step.tracking { 1787 scheduled := fetcher.announces[peer] 1788 if scheduled == nil { 1789 t.Errorf("step %d: peer %s missing from announces", i, peer) 1790 continue 1791 } 1792 for _, ann := range announces { 1793 if meta, ok := scheduled[ann.hash]; !ok { 1794 t.Errorf("step %d, peer %s: hash %x missing from announces", i, peer, ann.hash) 1795 } else { 1796 if (meta == nil && (ann.kind != nil || ann.size != nil)) || 1797 (meta != nil && (ann.kind == nil || ann.size == nil)) || 1798 (meta != nil && (meta.kind != *ann.kind || meta.size != *ann.size)) { 1799 t.Errorf("step %d, peer %s, hash %x: announce metadata mismatch: want %v, have %v/%v", i, peer, ann.hash, meta, *ann.kind, *ann.size) 1800 } 1801 } 1802 } 1803 for hash, meta := range scheduled { 1804 ann := announce{hash: hash} 1805 if meta != nil { 1806 ann.kind, ann.size = &meta.kind, &meta.size 1807 } 1808 if !containsAnnounce(announces, ann) { 1809 t.Errorf("step %d, peer %s: announce %x extra in announces", i, peer, hash) 1810 } 1811 } 1812 } 1813 for peer := range fetcher.announces { 1814 if _, ok := step.tracking[peer]; !ok { 1815 t.Errorf("step %d: peer %s extra in announces", i, peer) 1816 } 1817 } 1818 // Check that all announces required to be fetching are in the 1819 // appropriate sets 1820 for peer, hashes := range step.fetching { 1821 request := fetcher.requests[peer] 1822 if request == nil { 1823 t.Errorf("step %d: peer %s missing from requests", i, peer) 1824 continue 1825 } 1826 for _, hash := range hashes { 1827 if !slices.Contains(request.hashes, hash) { 1828 t.Errorf("step %d, peer %s: 
hash %x missing from requests", i, peer, hash) 1829 } 1830 } 1831 for _, hash := range request.hashes { 1832 if !slices.Contains(hashes, hash) { 1833 t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash) 1834 } 1835 } 1836 } 1837 for peer := range fetcher.requests { 1838 if _, ok := step.fetching[peer]; !ok { 1839 if _, ok := step.dangling[peer]; !ok { 1840 t.Errorf("step %d: peer %s extra in requests", i, peer) 1841 } 1842 } 1843 } 1844 for peer, hashes := range step.fetching { 1845 for _, hash := range hashes { 1846 if _, ok := fetcher.fetching[hash]; !ok { 1847 t.Errorf("step %d, peer %s: hash %x missing from fetching", i, peer, hash) 1848 } 1849 } 1850 } 1851 for hash := range fetcher.fetching { 1852 var found bool 1853 for _, req := range fetcher.requests { 1854 if slices.Contains(req.hashes, hash) { 1855 found = true 1856 break 1857 } 1858 } 1859 if !found { 1860 t.Errorf("step %d: hash %x extra in fetching", i, hash) 1861 } 1862 } 1863 for _, hashes := range step.fetching { 1864 for _, hash := range hashes { 1865 alternates := fetcher.alternates[hash] 1866 if alternates == nil { 1867 t.Errorf("step %d: hash %x missing from alternates", i, hash) 1868 continue 1869 } 1870 for peer := range alternates { 1871 if _, ok := fetcher.announces[peer]; !ok { 1872 t.Errorf("step %d: peer %s extra in alternates", i, peer) 1873 continue 1874 } 1875 if _, ok := fetcher.announces[peer][hash]; !ok { 1876 t.Errorf("step %d, peer %s: hash %x extra in alternates", i, hash, peer) 1877 continue 1878 } 1879 } 1880 for p := range fetcher.announced[hash] { 1881 if _, ok := alternates[p]; !ok { 1882 t.Errorf("step %d, hash %x: peer %s missing from alternates", i, hash, p) 1883 continue 1884 } 1885 } 1886 } 1887 } 1888 for peer, hashes := range step.dangling { 1889 request := fetcher.requests[peer] 1890 if request == nil { 1891 t.Errorf("step %d: peer %s missing from requests", i, peer) 1892 continue 1893 } 1894 for _, hash := range hashes { 1895 if 
!slices.Contains(request.hashes, hash) { 1896 t.Errorf("step %d, peer %s: hash %x missing from requests", i, peer, hash) 1897 } 1898 } 1899 for _, hash := range request.hashes { 1900 if !slices.Contains(hashes, hash) { 1901 t.Errorf("step %d, peer %s: hash %x extra in requests", i, peer, hash) 1902 } 1903 } 1904 } 1905 // Check that all transaction announces that are scheduled for 1906 // retrieval but not actively being downloaded are tracked only 1907 // in the stage 2 `announced` map. 1908 var queued []common.Hash 1909 for _, announces := range step.tracking { 1910 for _, ann := range announces { 1911 var found bool 1912 for _, hs := range step.fetching { 1913 if slices.Contains(hs, ann.hash) { 1914 found = true 1915 break 1916 } 1917 } 1918 if !found { 1919 queued = append(queued, ann.hash) 1920 } 1921 } 1922 } 1923 for _, hash := range queued { 1924 if _, ok := fetcher.announced[hash]; !ok { 1925 t.Errorf("step %d: hash %x missing from announced", i, hash) 1926 } 1927 } 1928 for hash := range fetcher.announced { 1929 if !slices.Contains(queued, hash) { 1930 t.Errorf("step %d: hash %x extra in announced", i, hash) 1931 } 1932 } 1933 1934 case isUnderpriced: 1935 if fetcher.underpriced.Len() != int(step) { 1936 t.Errorf("step %d: underpriced set size mismatch: have %d, want %d", i, fetcher.underpriced.Len(), step) 1937 } 1938 1939 default: 1940 t.Fatalf("step %d: unknown step type %T", i, step) 1941 } 1942 // After every step, cross validate the internal uniqueness invariants 1943 // between stage one and stage two. 1944 for hash := range fetcher.waittime { 1945 if _, ok := fetcher.announced[hash]; ok { 1946 t.Errorf("step %d: hash %s present in both stage 1 and 2", i, hash) 1947 } 1948 } 1949 } 1950 } 1951 1952 // containsAnnounce returns whether an announcement is contained within a slice 1953 // of announcements. 
1954 func containsAnnounce(slice []announce, ann announce) bool { 1955 for _, have := range slice { 1956 if have.hash == ann.hash { 1957 if have.kind == nil || ann.kind == nil { 1958 if have.kind != ann.kind { 1959 return false 1960 } 1961 } else if *have.kind != *ann.kind { 1962 return false 1963 } 1964 if have.size == nil || ann.size == nil { 1965 if have.size != ann.size { 1966 return false 1967 } 1968 } else if *have.size != *ann.size { 1969 return false 1970 } 1971 return true 1972 } 1973 } 1974 return false 1975 } 1976 1977 // containsHashInAnnounces returns whether a hash is contained within a slice 1978 // of announcements. 1979 func containsHashInAnnounces(slice []announce, hash common.Hash) bool { 1980 for _, have := range slice { 1981 if have.hash == hash { 1982 return true 1983 } 1984 } 1985 return false 1986 } 1987 1988 // Tests that a transaction is forgotten after the timeout. 1989 func TestTransactionForgotten(t *testing.T) { 1990 fetcher := NewTxFetcher( 1991 func(common.Hash) bool { return false }, 1992 func(txs []*types.Transaction) []error { 1993 errs := make([]error, len(txs)) 1994 for i := 0; i < len(errs); i++ { 1995 errs[i] = txpool.ErrUnderpriced 1996 } 1997 return errs 1998 }, 1999 func(string, []common.Hash) error { return nil }, 2000 func(string) {}, 2001 ) 2002 fetcher.Start() 2003 defer fetcher.Stop() 2004 // Create one TX which is 5 minutes old, and one which is recent 2005 tx1 := types.NewTx(&types.LegacyTx{Nonce: 0}) 2006 tx1.SetTime(time.Now().Add(-maxTxUnderpricedTimeout - 1*time.Second)) 2007 tx2 := types.NewTx(&types.LegacyTx{Nonce: 1}) 2008 2009 // Enqueue both in the fetcher. 
They will be immediately tagged as underpriced 2010 if err := fetcher.Enqueue("asdf", []*types.Transaction{tx1, tx2}, false); err != nil { 2011 t.Fatal(err) 2012 } 2013 // isKnownUnderpriced should trigger removal of the first tx (no longer be known underpriced) 2014 if fetcher.isKnownUnderpriced(tx1.Hash()) { 2015 t.Fatal("transaction should be forgotten by now") 2016 } 2017 // isKnownUnderpriced should not trigger removal of the second 2018 if !fetcher.isKnownUnderpriced(tx2.Hash()) { 2019 t.Fatal("transaction should be known underpriced") 2020 } 2021 }