github.com/jonkofee/go-ethereum@v1.11.1/eth/fetcher/tx_fetcher.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package fetcher

import (
	"bytes"
	"errors"
	"fmt"
	mrand "math/rand"
	"sort"
	"time"

	mapset "github.com/deckarep/golang-set"
	"github.com/jonkofee/go-ethereum/common"
	"github.com/jonkofee/go-ethereum/common/mclock"
	"github.com/jonkofee/go-ethereum/core"
	"github.com/jonkofee/go-ethereum/core/types"
	"github.com/jonkofee/go-ethereum/log"
	"github.com/jonkofee/go-ethereum/metrics"
)

const (
	// maxTxAnnounces is the maximum number of unique transactions a peer
	// can announce in a short time.
	maxTxAnnounces = 4096

	// maxTxRetrievals is the maximum number of transactions that can be fetched
	// in one request. The rationale for picking 256 is:
	//   - In the eth protocol, the softResponseLimit is 2MB. Nowadays, according
	//     to Etherscan, the average transaction size is around 200B, so in theory
	//     we could include a lot of transactions in a single protocol packet.
	//   - However, the maximum size of a single transaction is raised to 128KB,
	//     so pick a middle value here to ensure we can maximize the efficiency
	//     of the retrieval and a response size overflow won't happen in most cases.
	maxTxRetrievals = 256

	// maxTxUnderpricedSetSize is the size of the underpriced transaction set that
	// is used to track recent transactions that have been dropped so we don't
	// re-request them.
	maxTxUnderpricedSetSize = 32768

	// txArriveTimeout is the time allowance before an announced transaction is
	// explicitly requested.
	txArriveTimeout = 500 * time.Millisecond

	// txGatherSlack is the interval used to collate almost-expired announces
	// with network fetches.
	txGatherSlack = 100 * time.Millisecond
)
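
// A rough sizing sketch for the limits above (illustrative arithmetic only; the
// exact response limits live in the eth protocol handler): with ~200B average
// transactions, a full request of 256 hashes yields roughly 256 * 200B ≈ 50KB,
// comfortably below the 2MB soft response limit. In the pathological case of
// 128KB transactions, a handful of them already saturate that limit, so the
// responding peer is expected to cut the reply short and the fetcher reschedules
// whatever is missing (see the response cutoff handling in the delivery path).
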
var (
	// txFetchTimeout is the maximum allotted time to return an explicitly
	// requested transaction.
	txFetchTimeout = 5 * time.Second
)

var (
	txAnnounceInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/in", nil)
	txAnnounceKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/known", nil)
	txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/underpriced", nil)
	txAnnounceDOSMeter         = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/dos", nil)

	txBroadcastInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/in", nil)
	txBroadcastKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/known", nil)
	txBroadcastUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/underpriced", nil)
	txBroadcastOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/otherreject", nil)

	txRequestOutMeter     = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/out", nil)
	txRequestFailMeter    = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/fail", nil)
	txRequestDoneMeter    = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/done", nil)
	txRequestTimeoutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/timeout", nil)

	txReplyInMeter          = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/in", nil)
	txReplyKnownMeter       = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/known", nil)
	txReplyUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/underpriced", nil)
	txReplyOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/otherreject", nil)

	txFetcherWaitingPeers   = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/peers", nil)
	txFetcherWaitingHashes  = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/hashes", nil)
	txFetcherQueueingPeers  = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/peers", nil)
	txFetcherQueueingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/hashes", nil)
	txFetcherFetchingPeers  = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/peers", nil)
	txFetcherFetchingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/hashes", nil)
)

// txAnnounce is the notification of the availability of a batch
// of new transactions in the network.
type txAnnounce struct {
	origin string        // Identifier of the peer originating the notification
	hashes []common.Hash // Batch of transaction hashes being announced
}

// txRequest represents an in-flight transaction retrieval request destined to
// a specific peer.
type txRequest struct {
	hashes []common.Hash            // Transactions having been requested
	stolen map[common.Hash]struct{} // Deliveries by someone else (don't re-request)
	time   mclock.AbsTime           // Timestamp of the request
}

// txDelivery is the notification that a batch of transactions have been added
// to the pool and should be untracked.
type txDelivery struct {
	origin string        // Identifier of the peer originating the notification
	hashes []common.Hash // Batch of transaction hashes having been delivered
	direct bool          // Whether this is a direct reply or a broadcast
}

// txDrop is the notification that a peer has disconnected.
type txDrop struct {
	peer string
}

// TxFetcher is responsible for retrieving new transactions based on announcements.
//
// The fetcher operates in 3 stages:
//   - Transactions that are newly discovered are moved into a wait list.
//   - After ~500ms passes, transactions from the wait list that have not been
//     broadcast to us in whole are moved into a queueing area.
//   - When a connected peer doesn't have in-flight retrieval requests, any
//     transaction queued up (and announced by the peer) are allocated to the
//     peer and moved into a fetching status until it's fulfilled or fails.
//
// The invariants of the fetcher are:
//   - Each tracked transaction (hash) must only be present in one of the
//     three stages. This ensures that the fetcher operates akin to a finite
//     state automaton and there's no data leak.
//   - Each peer that announced transactions may be scheduled for retrievals, but
//     only ever one concurrently. This ensures we can immediately know what is
//     missing from a reply and reschedule it.
type TxFetcher struct {
	notify  chan *txAnnounce
	cleanup chan *txDelivery
	drop    chan *txDrop
	quit    chan struct{}

	underpriced mapset.Set // Transactions discarded as too cheap (don't re-fetch)

	// Stage 1: Waiting lists for newly discovered transactions that might be
	// broadcast without needing explicit request/reply round trips.
	waitlist  map[common.Hash]map[string]struct{} // Transactions waiting for a potential broadcast
	waittime  map[common.Hash]mclock.AbsTime      // Timestamps when transactions were added to the waitlist
	waitslots map[string]map[common.Hash]struct{} // Waiting announcements grouped by peer (DoS protection)

	// Stage 2: Queue of transactions that are waiting to be allocated to some
	// peer to be retrieved directly.
	announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer
	announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash

	// Stage 3: Set of transactions currently being retrieved, some of which may be
	// fulfilled and some rescheduled. Note, this step shares 'announces' from the
	// previous stage to avoid having to duplicate it (need it for DoS checks).
	fetching   map[common.Hash]string              // Transaction set currently being retrieved
	requests   map[string]*txRequest               // In-flight transaction retrievals
	alternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails

	// Callbacks
	hasTx    func(common.Hash) bool             // Reports whether a tx is already present in the local txpool
	addTxs   func([]*types.Transaction) []error // Insert a batch of transactions into the local txpool
	fetchTxs func(string, []common.Hash) error  // Retrieves a set of txs from a remote peer

	step  chan struct{} // Notification channel when the fetcher loop iterates
	clock mclock.Clock  // Time wrapper to simulate in tests
	rand  *mrand.Rand   // Randomizer to use in tests instead of map range loops (soft-random)
}

// NewTxFetcher creates a transaction fetcher to retrieve transactions
// based on hash announcements.
func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher {
	return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil)
}

// NewTxFetcherForTests is a testing method to mock out the realtime clock with
// a simulated version and the internal randomness with a deterministic one.
func NewTxFetcherForTests(
	hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error,
	clock mclock.Clock, rand *mrand.Rand) *TxFetcher {
	return &TxFetcher{
		notify:      make(chan *txAnnounce),
		cleanup:     make(chan *txDelivery),
		drop:        make(chan *txDrop),
		quit:        make(chan struct{}),
		waitlist:    make(map[common.Hash]map[string]struct{}),
		waittime:    make(map[common.Hash]mclock.AbsTime),
		waitslots:   make(map[string]map[common.Hash]struct{}),
		announces:   make(map[string]map[common.Hash]struct{}),
		announced:   make(map[common.Hash]map[string]struct{}),
		fetching:    make(map[common.Hash]string),
		requests:    make(map[string]*txRequest),
		alternates:  make(map[common.Hash]map[string]struct{}),
		underpriced: mapset.NewSet(),
		hasTx:       hasTx,
		addTxs:      addTxs,
		fetchTxs:    fetchTxs,
		clock:       clock,
		rand:        rand,
	}
}
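
// A minimal wiring sketch (illustrative only; the real wiring lives in the eth
// protocol handler, and the pool/peer helper names below are assumptions, not
// part of this file):
//
//	fetchTx := func(peer string, hashes []common.Hash) error {
//		p := peers.peer(peer) // hypothetical peer registry lookup
//		if p == nil {
//			return errors.New("unknown peer")
//		}
//		return p.RequestTxs(hashes) // issues a GetPooledTransactions request
//	}
//	f := NewTxFetcher(pool.Has, pool.AddRemotes, fetchTx)
//	f.Start()
//	defer f.Stop()
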
// Notify announces the fetcher of the potential availability of a new batch of
// transactions in the network.
func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error {
	// Keep track of all the announced transactions
	txAnnounceInMeter.Mark(int64(len(hashes)))

	// Skip any transaction announcements that we already know of, or that we've
	// previously marked as cheap and discarded. This check is of course racy,
	// because multiple concurrent notifies will still manage to pass it, but it's
	// still valuable to check here because it runs concurrently with the internal
	// loop, so anything caught here is time saved internally.
	var (
		unknowns               = make([]common.Hash, 0, len(hashes))
		duplicate, underpriced int64
	)
	for _, hash := range hashes {
		switch {
		case f.hasTx(hash):
			duplicate++

		case f.underpriced.Contains(hash):
			underpriced++

		default:
			unknowns = append(unknowns, hash)
		}
	}
	txAnnounceKnownMeter.Mark(duplicate)
	txAnnounceUnderpricedMeter.Mark(underpriced)

	// If anything's left to announce, push it into the internal loop
	if len(unknowns) == 0 {
		return nil
	}
	announce := &txAnnounce{
		origin: peer,
		hashes: unknowns,
	}
	select {
	case f.notify <- announce:
		return nil
	case <-f.quit:
		return errTerminated
	}
}

// Enqueue imports a batch of received transactions into the transaction pool
// and the fetcher. This method may be called by both transaction broadcasts and
// direct request replies. The differentiation is important so the fetcher can
// re-schedule missing transactions as soon as possible.
func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error {
	// Keep track of all the propagated transactions
	if direct {
		txReplyInMeter.Mark(int64(len(txs)))
	} else {
		txBroadcastInMeter.Mark(int64(len(txs)))
	}
	// Push all the transactions into the pool, tracking underpriced ones to avoid
	// re-requesting them and dropping the peer in case of malicious transfers.
	var (
		added       = make([]common.Hash, 0, len(txs))
		duplicate   int64
		underpriced int64
		otherreject int64
	)
	errs := f.addTxs(txs)
	for i, err := range errs {
		// Track the transaction hash if the price is too low for us.
		// Avoid re-requesting this transaction when we receive another
		// announcement.
		if errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced) {
			for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize {
				f.underpriced.Pop()
			}
			f.underpriced.Add(txs[i].Hash())
		}
		// Track a few interesting failure types
		switch {
		case err == nil: // Noop, but need to handle to not count these

		case errors.Is(err, core.ErrAlreadyKnown):
			duplicate++

		case errors.Is(err, core.ErrUnderpriced) || errors.Is(err, core.ErrReplaceUnderpriced):
			underpriced++

		default:
			otherreject++
		}
		added = append(added, txs[i].Hash())
	}
	if direct {
		txReplyKnownMeter.Mark(duplicate)
		txReplyUnderpricedMeter.Mark(underpriced)
		txReplyOtherRejectMeter.Mark(otherreject)
	} else {
		txBroadcastKnownMeter.Mark(duplicate)
		txBroadcastUnderpricedMeter.Mark(underpriced)
		txBroadcastOtherRejectMeter.Mark(otherreject)
	}
	select {
	case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}:
		return nil
	case <-f.quit:
		return errTerminated
	}
}
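
// Usage sketch for the two delivery paths (illustrative; the variable names are
// assumptions): transactions arriving via a Transactions broadcast message are
// fed in with direct=false, while the payload of a PooledTransactions reply to
// our own GetPooledTransactions request is fed in with direct=true, so the
// fetcher can match it against the outstanding request and reschedule whatever
// is missing:
//
//	_ = f.Enqueue(peerID, broadcastTxs, false) // broadcast path
//	_ = f.Enqueue(peerID, repliedTxs, true)    // reply to an explicit request
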
// Drop should be called when a peer disconnects. It cleans up all the internal
// data structures of the given node.
func (f *TxFetcher) Drop(peer string) error {
	select {
	case f.drop <- &txDrop{peer: peer}:
		return nil
	case <-f.quit:
		return errTerminated
	}
}

// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination is requested.
func (f *TxFetcher) Start() {
	go f.loop()
}

// Stop terminates the announcement based synchroniser, canceling all pending
// operations.
func (f *TxFetcher) Stop() {
	close(f.quit)
}
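
// loop is the event loop of the fetcher: it reacts to incoming announcements
// (notify), wait-list expiries (waitTrigger), retrieval timeouts
// (timeoutTrigger), pool deliveries (cleanup) and peer disconnects (drop),
// shuffling transaction hashes between the waiting, queued and fetching stages
// described on TxFetcher.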
func (f *TxFetcher) loop() {
	var (
		waitTimer    = new(mclock.Timer)
		timeoutTimer = new(mclock.Timer)

		waitTrigger    = make(chan struct{}, 1)
		timeoutTrigger = make(chan struct{}, 1)
	)
	for {
		select {
		case ann := <-f.notify:
			// Drop part of the new announcements if there are too many accumulated.
			// Note, we could but do not filter already known transactions here as
			// the probability of something arriving between this call and the pre-
			// filter outside is essentially zero.
			used := len(f.waitslots[ann.origin]) + len(f.announces[ann.origin])
			if used >= maxTxAnnounces {
				// This can happen if a set of transactions is requested but not
				// all fulfilled, so the remainder are rescheduled without the cap
				// check. Should be fine as the limit is in the thousands and the
				// request size in the hundreds.
				txAnnounceDOSMeter.Mark(int64(len(ann.hashes)))
				break
			}
			want := used + len(ann.hashes)
			if want > maxTxAnnounces {
				txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces))
				ann.hashes = ann.hashes[:want-maxTxAnnounces]
			}
			// All is well, schedule the remainder of the transactions
			idleWait := len(f.waittime) == 0
			_, oldPeer := f.announces[ann.origin]

			for _, hash := range ann.hashes {
				// If the transaction is already downloading, add it to the list
				// of possible alternates (in case the current retrieval fails) and
				// also account it for the peer.
				if f.alternates[hash] != nil {
					f.alternates[hash][ann.origin] = struct{}{}

					// Stage 2 and 3 share the set of origins per tx
					if announces := f.announces[ann.origin]; announces != nil {
						announces[hash] = struct{}{}
					} else {
						f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
					}
					continue
				}
				// If the transaction is not downloading, but is already queued
				// from a different peer, track it for the new peer too.
				if f.announced[hash] != nil {
					f.announced[hash][ann.origin] = struct{}{}

					// Stage 2 and 3 share the set of origins per tx
					if announces := f.announces[ann.origin]; announces != nil {
						announces[hash] = struct{}{}
					} else {
						f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}}
					}
					continue
				}
				// If the transaction is already known to the fetcher, but not
				// yet downloading, add the peer as an alternate origin in the
				// waiting list.
				if f.waitlist[hash] != nil {
					f.waitlist[hash][ann.origin] = struct{}{}

					if waitslots := f.waitslots[ann.origin]; waitslots != nil {
						waitslots[hash] = struct{}{}
					} else {
						f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
					}
					continue
				}
				// Transaction unknown to the fetcher, insert it into the waiting list
				f.waitlist[hash] = map[string]struct{}{ann.origin: {}}
				f.waittime[hash] = f.clock.Now()

				if waitslots := f.waitslots[ann.origin]; waitslots != nil {
					waitslots[hash] = struct{}{}
				} else {
					f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}}
				}
			}
			// If a new item was added to the waitlist, schedule it into the fetcher
			if idleWait && len(f.waittime) > 0 {
				f.rescheduleWait(waitTimer, waitTrigger)
			}
			// If this peer is new and announced something already queued, maybe
			// request transactions from them
			if !oldPeer && len(f.announces[ann.origin]) > 0 {
				f.scheduleFetches(timeoutTimer, timeoutTrigger, map[string]struct{}{ann.origin: {}})
			}

		case <-waitTrigger:
			// At least one transaction's waiting time ran out, push all expired
			// ones into the retrieval queues
			actives := make(map[string]struct{})
			for hash, instance := range f.waittime {
				if time.Duration(f.clock.Now()-instance)+txGatherSlack > txArriveTimeout {
					// Transaction expired without propagation, schedule for retrieval
					if f.announced[hash] != nil {
						panic("announce tracker already contains waitlist item")
					}
					f.announced[hash] = f.waitlist[hash]
					for peer := range f.waitlist[hash] {
						if announces := f.announces[peer]; announces != nil {
							announces[hash] = struct{}{}
						} else {
							f.announces[peer] = map[common.Hash]struct{}{hash: {}}
						}
						delete(f.waitslots[peer], hash)
						if len(f.waitslots[peer]) == 0 {
							delete(f.waitslots, peer)
						}
						actives[peer] = struct{}{}
					}
					delete(f.waittime, hash)
					delete(f.waitlist, hash)
				}
			}
			// If transactions are still waiting for propagation, reschedule the wait timer
			if len(f.waittime) > 0 {
				f.rescheduleWait(waitTimer, waitTrigger)
			}
			// If any peers became active and are idle, request transactions from them
			if len(actives) > 0 {
				f.scheduleFetches(timeoutTimer, timeoutTrigger, actives)
			}

		case <-timeoutTrigger:
			// Clean up any expired retrievals and avoid re-requesting them from the
			// same peer (either overloaded or malicious, useless in both cases). We
			// could also penalize (Drop), but there's nothing to gain, and it could
			// possibly further increase the load on it.
			for peer, req := range f.requests {
				if time.Duration(f.clock.Now()-req.time)+txGatherSlack > txFetchTimeout {
					txRequestTimeoutMeter.Mark(int64(len(req.hashes)))

					// Reschedule all the not-yet-delivered fetches to alternate peers
					for _, hash := range req.hashes {
						// Skip rescheduling hashes already delivered by someone else
						if req.stolen != nil {
							if _, ok := req.stolen[hash]; ok {
								continue
							}
						}
						// Move the delivery back from fetching to queued
						if _, ok := f.announced[hash]; ok {
							panic("announced tracker already contains alternate item")
						}
						if f.alternates[hash] != nil { // nil if tx was broadcast during fetch
							f.announced[hash] = f.alternates[hash]
						}
						delete(f.announced[hash], peer)
						if len(f.announced[hash]) == 0 {
							delete(f.announced, hash)
						}
						delete(f.announces[peer], hash)
						delete(f.alternates, hash)
						delete(f.fetching, hash)
					}
					if len(f.announces[peer]) == 0 {
						delete(f.announces, peer)
					}
					// Keep track of the request as dangling, but never expire
					f.requests[peer].hashes = nil
				}
			}
			// Schedule a new transaction retrieval
			f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)

			// No idea if we scheduled something or not, trigger the timer if needed
			// TODO(karalabe): this is kind of lame, can't we dump it into scheduleFetches somehow?
			f.rescheduleTimeout(timeoutTimer, timeoutTrigger)

		case delivery := <-f.cleanup:
			// Regardless of whether the delivery was direct or broadcast, remove all
			// traces of the hash from internal trackers
			for _, hash := range delivery.hashes {
				if _, ok := f.waitlist[hash]; ok {
					for peer, txset := range f.waitslots {
						delete(txset, hash)
						if len(txset) == 0 {
							delete(f.waitslots, peer)
						}
					}
					delete(f.waitlist, hash)
					delete(f.waittime, hash)
				} else {
					for peer, txset := range f.announces {
						delete(txset, hash)
						if len(txset) == 0 {
							delete(f.announces, peer)
						}
					}
					delete(f.announced, hash)
					delete(f.alternates, hash)

					// If a transaction currently being fetched from a different
					// origin was delivered (delivery stolen), mark it so the
					// actual delivery won't double schedule it.
					if origin, ok := f.fetching[hash]; ok && (origin != delivery.origin || !delivery.direct) {
						stolen := f.requests[origin].stolen
						if stolen == nil {
							f.requests[origin].stolen = make(map[common.Hash]struct{})
							stolen = f.requests[origin].stolen
						}
						stolen[hash] = struct{}{}
					}
					delete(f.fetching, hash)
				}
			}
			// In case of a direct delivery, also reschedule anything missing
			// from the original query
			if delivery.direct {
				// Mark the request as successful (independent of individual status)
				txRequestDoneMeter.Mark(int64(len(delivery.hashes)))

				// Make sure something was pending, nuke it
				req := f.requests[delivery.origin]
				if req == nil {
					log.Warn("Unexpected transaction delivery", "peer", delivery.origin)
					break
				}
				delete(f.requests, delivery.origin)

				// Anything not delivered should be re-scheduled (with or without
				// this peer, depending on the response cutoff)
				delivered := make(map[common.Hash]struct{})
				for _, hash := range delivery.hashes {
					delivered[hash] = struct{}{}
				}
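				// Example of the cutoff semantics below (hashes A..D are
				// hypothetical): if req.hashes = [A, B, C, D] and only C arrived,
				// cutoff becomes the index of C. A and B sit before the cutoff, so
				// the peer evidently skipped them and is dropped as a source for
				// those hashes; D sits after the cutoff and may merely have been
				// truncated by the response size limit, so the peer stays a
				// candidate. Undelivered hashes with remaining alternate sources
				// are pushed back into the announced queue. If nothing at all was
				// delivered, cutoff stays len(req.hashes) and the peer is dropped
				// as a source for every requested hash.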
				cutoff := len(req.hashes) // If nothing is delivered, assume everything is missing, don't retry!!!
				for i, hash := range req.hashes {
					if _, ok := delivered[hash]; ok {
						cutoff = i
					}
				}
				// Reschedule missing hashes from alternates, not-fulfilled from alt+self
				for i, hash := range req.hashes {
					// Skip rescheduling hashes already delivered by someone else
					if req.stolen != nil {
						if _, ok := req.stolen[hash]; ok {
							continue
						}
					}
					if _, ok := delivered[hash]; !ok {
						if i < cutoff {
							delete(f.alternates[hash], delivery.origin)
							delete(f.announces[delivery.origin], hash)
							if len(f.announces[delivery.origin]) == 0 {
								delete(f.announces, delivery.origin)
							}
						}
						if len(f.alternates[hash]) > 0 {
							if _, ok := f.announced[hash]; ok {
								panic(fmt.Sprintf("announced tracker already contains alternate item: %v", f.announced[hash]))
							}
							f.announced[hash] = f.alternates[hash]
						}
					}
					delete(f.alternates, hash)
					delete(f.fetching, hash)
				}
				// Something was delivered, try to reschedule requests
				f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) // Partial delivery may enable others to deliver too
			}

		case drop := <-f.drop:
			// A peer was dropped, remove all traces of it
			if _, ok := f.waitslots[drop.peer]; ok {
				for hash := range f.waitslots[drop.peer] {
					delete(f.waitlist[hash], drop.peer)
					if len(f.waitlist[hash]) == 0 {
						delete(f.waitlist, hash)
						delete(f.waittime, hash)
					}
				}
				delete(f.waitslots, drop.peer)
				if len(f.waitlist) > 0 {
					f.rescheduleWait(waitTimer, waitTrigger)
				}
			}
			// Clean up any active requests
			var request *txRequest
			if request = f.requests[drop.peer]; request != nil {
				for _, hash := range request.hashes {
					// Skip rescheduling hashes already delivered by someone else
					if request.stolen != nil {
						if _, ok := request.stolen[hash]; ok {
							continue
						}
					}
					// Undelivered hash, reschedule if there's an alternative origin available
					delete(f.alternates[hash], drop.peer)
					if len(f.alternates[hash]) == 0 {
						delete(f.alternates, hash)
					} else {
						f.announced[hash] = f.alternates[hash]
						delete(f.alternates, hash)
					}
					delete(f.fetching, hash)
				}
				delete(f.requests, drop.peer)
			}
			// Clean up general announcement tracking
			if _, ok := f.announces[drop.peer]; ok {
				for hash := range f.announces[drop.peer] {
					delete(f.announced[hash], drop.peer)
					if len(f.announced[hash]) == 0 {
						delete(f.announced, hash)
					}
				}
				delete(f.announces, drop.peer)
			}
			// If a request was cancelled, check if anything needs to be rescheduled
			if request != nil {
				f.scheduleFetches(timeoutTimer, timeoutTrigger, nil)
				f.rescheduleTimeout(timeoutTimer, timeoutTrigger)
			}

		case <-f.quit:
			return
		}
		// No idea what happened, but bump some sanity metrics
		txFetcherWaitingPeers.Update(int64(len(f.waitslots)))
		txFetcherWaitingHashes.Update(int64(len(f.waitlist)))
		txFetcherQueueingPeers.Update(int64(len(f.announces) - len(f.requests)))
		txFetcherQueueingHashes.Update(int64(len(f.announced)))
		txFetcherFetchingPeers.Update(int64(len(f.requests)))
		txFetcherFetchingHashes.Update(int64(len(f.fetching)))

		// Loop did something, ping the step notifier if needed (tests)
		if f.step != nil {
			f.step <- struct{}{}
		}
	}
}

// rescheduleWait iterates over all the transactions currently in the waitlist
// and schedules the movement into the fetcher for the earliest.
//
// The method has a granularity of 'gatherSlack', since there's not much point in
// spinning over all the transactions just to maybe find one that should trigger
// a few ms earlier.
func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) {
	if *timer != nil {
		(*timer).Stop()
	}
	now := f.clock.Now()

	earliest := now
	for _, instance := range f.waittime {
		if earliest > instance {
			earliest = instance
			if txArriveTimeout-time.Duration(now-earliest) < gatherSlack {
				break
			}
		}
	}
	*timer = f.clock.AfterFunc(txArriveTimeout-time.Duration(now-earliest), func() {
		trigger <- struct{}{}
	})
}

// rescheduleTimeout iterates over all the transactions currently in flight and
// schedules a cleanup run when the first would trigger.
//
// The method has a granularity of 'gatherSlack', since there's not much point in
// spinning over all the transactions just to maybe find one that should trigger
// a few ms earlier.
//
// This method is a bit "flaky" "by design". In theory the timeout timer only ever
// should be rescheduled if some request is pending. In practice, a timeout will
// cause the timer to be rescheduled every 5 secs (until the peer comes through or
// disconnects). This is a limitation of the fetcher code because we don't track
// pending requests and timed out requests separately. Without double tracking, if
// we simply didn't reschedule the timer on all-timeout then the timer would never
// be set again since len(request) > 0 => something's running.
func (f *TxFetcher) rescheduleTimeout(timer *mclock.Timer, trigger chan struct{}) {
	if *timer != nil {
		(*timer).Stop()
	}
	now := f.clock.Now()

	earliest := now
	for _, req := range f.requests {
		// If this request already timed out, skip it altogether
		if req.hashes == nil {
			continue
		}
		if earliest > req.time {
			earliest = req.time
			if txFetchTimeout-time.Duration(now-earliest) < gatherSlack {
				break
			}
		}
	}
	*timer = f.clock.AfterFunc(txFetchTimeout-time.Duration(now-earliest), func() {
		trigger <- struct{}{}
	})
}
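
// A worked example of the slack batching above (timings per the constants at
// the top of the file): with txArriveTimeout = 500ms and a 100ms slack, a hash
// announced 450ms ago has only 50ms left, so rescheduleWait stops searching for
// anything earlier and arms the timer for roughly 50ms. When the waitTrigger
// fires, every entry that is within 100ms of expiring is flushed into the
// retrieval queue in the same pass, instead of re-arming the timer for each
// hash individually. rescheduleTimeout applies the same idea to the 5s
// txFetchTimeout for in-flight requests.
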
// scheduleFetches starts a batch of retrievals for all available idle peers.
func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{}, whitelist map[string]struct{}) {
	// Gather the set of peers we want to retrieve from (default to all)
	actives := whitelist
	if actives == nil {
		actives = make(map[string]struct{})
		for peer := range f.announces {
			actives[peer] = struct{}{}
		}
	}
	if len(actives) == 0 {
		return
	}
	// For each active peer, try to schedule some transaction fetches
	idle := len(f.requests) == 0

	f.forEachPeer(actives, func(peer string) {
		if f.requests[peer] != nil {
			return // continue in the for-each
		}
		if len(f.announces[peer]) == 0 {
			return // continue in the for-each
		}
		hashes := make([]common.Hash, 0, maxTxRetrievals)
		f.forEachHash(f.announces[peer], func(hash common.Hash) bool {
			if _, ok := f.fetching[hash]; !ok {
				// Mark the hash as fetching and stash away possible alternates
				f.fetching[hash] = peer

				if _, ok := f.alternates[hash]; ok {
					panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash]))
				}
				f.alternates[hash] = f.announced[hash]
				delete(f.announced, hash)

				// Accumulate the hash and stop if the limit was reached
				hashes = append(hashes, hash)
				if len(hashes) >= maxTxRetrievals {
					return false // break in the for-each
				}
			}
			return true // continue in the for-each
		})
		// If any hashes were allocated, request them from the peer
		if len(hashes) > 0 {
			f.requests[peer] = &txRequest{hashes: hashes, time: f.clock.Now()}
			txRequestOutMeter.Mark(int64(len(hashes)))

			go func(peer string, hashes []common.Hash) {
				// Try to fetch the transactions, but in case of a request
				// failure (e.g. peer disconnected), reschedule the hashes.
				if err := f.fetchTxs(peer, hashes); err != nil {
					txRequestFailMeter.Mark(int64(len(hashes)))
					f.Drop(peer)
				}
			}(peer, hashes)
		}
	})
	// If a new request was fired, schedule a timeout timer
	if idle && len(f.requests) > 0 {
		f.rescheduleTimeout(timer, timeout)
	}
}

// forEachPeer does a range loop over a map of peers in production, but during
// testing it does a deterministic sorted-random iteration to allow reproducing issues.
func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string)) {
	// If we're running in production, use whatever Go's map iteration gives us
	if f.rand == nil {
		for peer := range peers {
			do(peer)
		}
		return
	}
	// We're running the test suite, make iteration deterministic
	list := make([]string, 0, len(peers))
	for peer := range peers {
		list = append(list, peer)
	}
	sort.Strings(list)
	rotateStrings(list, f.rand.Intn(len(list)))
	for _, peer := range list {
		do(peer)
	}
}

// forEachHash does a range loop over a map of hashes in production, but during
// testing it does a deterministic sorted-random iteration to allow reproducing issues.
func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) {
	// If we're running in production, use whatever Go's map iteration gives us
	if f.rand == nil {
		for hash := range hashes {
			if !do(hash) {
				return
			}
		}
		return
	}
	// We're running the test suite, make iteration deterministic
	list := make([]common.Hash, 0, len(hashes))
	for hash := range hashes {
		list = append(list, hash)
	}
	sortHashes(list)
	rotateHashes(list, f.rand.Intn(len(list)))
	for _, hash := range list {
		if !do(hash) {
			return
		}
	}
}

// rotateStrings rotates the contents of a slice by n steps. This method is only
// used in tests to simulate random map iteration while keeping it deterministic.
func rotateStrings(slice []string, n int) {
	orig := make([]string, len(slice))
	copy(orig, slice)

	for i := 0; i < len(orig); i++ {
		slice[i] = orig[(i+n)%len(orig)]
	}
}

// sortHashes sorts a slice of hashes. This method is only used in tests in order
// to simulate random map iteration while keeping it deterministic.
func sortHashes(slice []common.Hash) {
	for i := 0; i < len(slice); i++ {
		for j := i + 1; j < len(slice); j++ {
			if bytes.Compare(slice[i][:], slice[j][:]) > 0 {
				slice[i], slice[j] = slice[j], slice[i]
			}
		}
	}
}

// rotateHashes rotates the contents of a slice by n steps. This method is only
// used in tests to simulate random map iteration while keeping it deterministic.
func rotateHashes(slice []common.Hash, n int) {
	orig := make([]common.Hash, len(slice))
	copy(orig, slice)

	for i := 0; i < len(orig); i++ {
		slice[i] = orig[(i+n)%len(orig)]
	}
}
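
// A sketch of how the deterministic hooks above can be exercised in tests
// (illustrative; the fetcher's own test suite follows this pattern, but the
// variable names and the seed here are assumptions):
//
//	clock := new(mclock.Simulated)
//	rng := mrand.New(mrand.NewSource(0x3a29))
//	f := NewTxFetcherForTests(hasTx, addTxs, fetchTxs, clock, rng)
//	f.Start()
//	clock.Run(txArriveTimeout) // advance simulated time to expire the waitlist
//	f.Stop()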