github.com/aswedchain/aswed@v1.0.1/eth/fetcher/tx_fetcher.go (about) 1 // Copyright 2020 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package fetcher 18 19 import ( 20 "bytes" 21 "fmt" 22 mrand "math/rand" 23 "sort" 24 "time" 25 26 mapset "github.com/deckarep/golang-set" 27 "github.com/aswedchain/aswed/common" 28 "github.com/aswedchain/aswed/common/mclock" 29 "github.com/aswedchain/aswed/core" 30 "github.com/aswedchain/aswed/core/types" 31 "github.com/aswedchain/aswed/log" 32 "github.com/aswedchain/aswed/metrics" 33 ) 34 35 const ( 36 // maxTxAnnounces is the maximum number of unique transaction a peer 37 // can announce in a short time. 38 maxTxAnnounces = 4096 39 40 // maxTxRetrievals is the maximum transaction number can be fetched in one 41 // request. The rationale to pick 256 is: 42 // - In eth protocol, the softResponseLimit is 2MB. Nowadays according to 43 // Etherscan the average transaction size is around 200B, so in theory 44 // we can include lots of transaction in a single protocol packet. 45 // - However the maximum size of a single transaction is raised to 128KB, 46 // so pick a middle value here to ensure we can maximize the efficiency 47 // of the retrieval and response size overflow won't happen in most cases. 48 maxTxRetrievals = 256 49 50 // maxTxUnderpricedSetSize is the size of the underpriced transaction set that 51 // is used to track recent transactions that have been dropped so we don't 52 // re-request them. 53 maxTxUnderpricedSetSize = 32768 54 55 // txArriveTimeout is the time allowance before an announced transaction is 56 // explicitly requested. 57 txArriveTimeout = 500 * time.Millisecond 58 59 // txGatherSlack is the interval used to collate almost-expired announces 60 // with network fetches. 61 txGatherSlack = 100 * time.Millisecond 62 ) 63 64 var ( 65 // txFetchTimeout is the maximum allotted time to return an explicitly 66 // requested transaction. 67 txFetchTimeout = 5 * time.Second 68 ) 69 70 var ( 71 txAnnounceInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/in", nil) 72 txAnnounceKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/known", nil) 73 txAnnounceUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/underpriced", nil) 74 txAnnounceDOSMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/announces/dos", nil) 75 76 txBroadcastInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/in", nil) 77 txBroadcastKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/known", nil) 78 txBroadcastUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/underpriced", nil) 79 txBroadcastOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/broadcasts/otherreject", nil) 80 81 txRequestOutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/out", nil) 82 txRequestFailMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/fail", nil) 83 txRequestDoneMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/done", nil) 84 txRequestTimeoutMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/request/timeout", nil) 85 86 txReplyInMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/in", nil) 87 txReplyKnownMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/known", nil) 88 txReplyUnderpricedMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/underpriced", nil) 89 txReplyOtherRejectMeter = metrics.NewRegisteredMeter("eth/fetcher/transaction/replies/otherreject", nil) 90 91 txFetcherWaitingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/peers", nil) 92 txFetcherWaitingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/waiting/hashes", nil) 93 txFetcherQueueingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/peers", nil) 94 txFetcherQueueingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/queueing/hashes", nil) 95 txFetcherFetchingPeers = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/peers", nil) 96 txFetcherFetchingHashes = metrics.NewRegisteredGauge("eth/fetcher/transaction/fetching/hashes", nil) 97 ) 98 99 // txAnnounce is the notification of the availability of a batch 100 // of new transactions in the network. 101 type txAnnounce struct { 102 origin string // Identifier of the peer originating the notification 103 hashes []common.Hash // Batch of transaction hashes being announced 104 } 105 106 // txRequest represents an in-flight transaction retrieval request destined to 107 // a specific peers. 108 type txRequest struct { 109 hashes []common.Hash // Transactions having been requested 110 stolen map[common.Hash]struct{} // Deliveries by someone else (don't re-request) 111 time mclock.AbsTime // Timestamp of the request 112 } 113 114 // txDelivery is the notification that a batch of transactions have been added 115 // to the pool and should be untracked. 116 type txDelivery struct { 117 origin string // Identifier of the peer originating the notification 118 hashes []common.Hash // Batch of transaction hashes having been delivered 119 direct bool // Whether this is a direct reply or a broadcast 120 } 121 122 // txDrop is the notiication that a peer has disconnected. 123 type txDrop struct { 124 peer string 125 } 126 127 // TxFetcher is responsible for retrieving new transaction based on announcements. 128 // 129 // The fetcher operates in 3 stages: 130 // - Transactions that are newly discovered are moved into a wait list. 131 // - After ~500ms passes, transactions from the wait list that have not been 132 // broadcast to us in whole are moved into a queueing area. 133 // - When a connected peer doesn't have in-flight retrieval requests, any 134 // transaction queued up (and announced by the peer) are allocated to the 135 // peer and moved into a fetching status until it's fulfilled or fails. 136 // 137 // The invariants of the fetcher are: 138 // - Each tracked transaction (hash) must only be present in one of the 139 // three stages. This ensures that the fetcher operates akin to a finite 140 // state automata and there's do data leak. 141 // - Each peer that announced transactions may be scheduled retrievals, but 142 // only ever one concurrently. This ensures we can immediately know what is 143 // missing from a reply and reschedule it. 144 type TxFetcher struct { 145 notify chan *txAnnounce 146 cleanup chan *txDelivery 147 drop chan *txDrop 148 quit chan struct{} 149 150 underpriced mapset.Set // Transactions discarded as too cheap (don't re-fetch) 151 152 // Stage 1: Waiting lists for newly discovered transactions that might be 153 // broadcast without needing explicit request/reply round trips. 154 waitlist map[common.Hash]map[string]struct{} // Transactions waiting for an potential broadcast 155 waittime map[common.Hash]mclock.AbsTime // Timestamps when transactions were added to the waitlist 156 waitslots map[string]map[common.Hash]struct{} // Waiting announcement sgroupped by peer (DoS protection) 157 158 // Stage 2: Queue of transactions that waiting to be allocated to some peer 159 // to be retrieved directly. 160 announces map[string]map[common.Hash]struct{} // Set of announced transactions, grouped by origin peer 161 announced map[common.Hash]map[string]struct{} // Set of download locations, grouped by transaction hash 162 163 // Stage 3: Set of transactions currently being retrieved, some which may be 164 // fulfilled and some rescheduled. Note, this step shares 'announces' from the 165 // previous stage to avoid having to duplicate (need it for DoS checks). 166 fetching map[common.Hash]string // Transaction set currently being retrieved 167 requests map[string]*txRequest // In-flight transaction retrievals 168 alternates map[common.Hash]map[string]struct{} // In-flight transaction alternate origins if retrieval fails 169 170 // Callbacks 171 hasTx func(common.Hash) bool // Retrieves a tx from the local txpool 172 addTxs func([]*types.Transaction) []error // Insert a batch of transactions into local txpool 173 fetchTxs func(string, []common.Hash) error // Retrieves a set of txs from a remote peer 174 175 step chan struct{} // Notification channel when the fetcher loop iterates 176 clock mclock.Clock // Time wrapper to simulate in tests 177 rand *mrand.Rand // Randomizer to use in tests instead of map range loops (soft-random) 178 } 179 180 // NewTxFetcher creates a transaction fetcher to retrieve transaction 181 // based on hash announcements. 182 func NewTxFetcher(hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error) *TxFetcher { 183 return NewTxFetcherForTests(hasTx, addTxs, fetchTxs, mclock.System{}, nil) 184 } 185 186 // NewTxFetcherForTests is a testing method to mock out the realtime clock with 187 // a simulated version and the internal randomness with a deterministic one. 188 func NewTxFetcherForTests( 189 hasTx func(common.Hash) bool, addTxs func([]*types.Transaction) []error, fetchTxs func(string, []common.Hash) error, 190 clock mclock.Clock, rand *mrand.Rand) *TxFetcher { 191 return &TxFetcher{ 192 notify: make(chan *txAnnounce), 193 cleanup: make(chan *txDelivery), 194 drop: make(chan *txDrop), 195 quit: make(chan struct{}), 196 waitlist: make(map[common.Hash]map[string]struct{}), 197 waittime: make(map[common.Hash]mclock.AbsTime), 198 waitslots: make(map[string]map[common.Hash]struct{}), 199 announces: make(map[string]map[common.Hash]struct{}), 200 announced: make(map[common.Hash]map[string]struct{}), 201 fetching: make(map[common.Hash]string), 202 requests: make(map[string]*txRequest), 203 alternates: make(map[common.Hash]map[string]struct{}), 204 underpriced: mapset.NewSet(), 205 hasTx: hasTx, 206 addTxs: addTxs, 207 fetchTxs: fetchTxs, 208 clock: clock, 209 rand: rand, 210 } 211 } 212 213 // Notify announces the fetcher of the potential availability of a new batch of 214 // transactions in the network. 215 func (f *TxFetcher) Notify(peer string, hashes []common.Hash) error { 216 // Keep track of all the announced transactions 217 txAnnounceInMeter.Mark(int64(len(hashes))) 218 219 // Skip any transaction announcements that we already know of, or that we've 220 // previously marked as cheap and discarded. This check is of course racey, 221 // because multiple concurrent notifies will still manage to pass it, but it's 222 // still valuable to check here because it runs concurrent to the internal 223 // loop, so anything caught here is time saved internally. 224 var ( 225 unknowns = make([]common.Hash, 0, len(hashes)) 226 duplicate, underpriced int64 227 ) 228 for _, hash := range hashes { 229 switch { 230 case f.hasTx(hash): 231 duplicate++ 232 233 case f.underpriced.Contains(hash): 234 underpriced++ 235 236 default: 237 unknowns = append(unknowns, hash) 238 } 239 } 240 txAnnounceKnownMeter.Mark(duplicate) 241 txAnnounceUnderpricedMeter.Mark(underpriced) 242 243 // If anything's left to announce, push it into the internal loop 244 if len(unknowns) == 0 { 245 return nil 246 } 247 announce := &txAnnounce{ 248 origin: peer, 249 hashes: unknowns, 250 } 251 select { 252 case f.notify <- announce: 253 return nil 254 case <-f.quit: 255 return errTerminated 256 } 257 } 258 259 // Enqueue imports a batch of received transaction into the transaction pool 260 // and the fetcher. This method may be called by both transaction broadcasts and 261 // direct request replies. The differentiation is important so the fetcher can 262 // re-shedule missing transactions as soon as possible. 263 func (f *TxFetcher) Enqueue(peer string, txs []*types.Transaction, direct bool) error { 264 // Keep track of all the propagated transactions 265 if direct { 266 txReplyInMeter.Mark(int64(len(txs))) 267 } else { 268 txBroadcastInMeter.Mark(int64(len(txs))) 269 } 270 // Push all the transactions into the pool, tracking underpriced ones to avoid 271 // re-requesting them and dropping the peer in case of malicious transfers. 272 var ( 273 added = make([]common.Hash, 0, len(txs)) 274 duplicate int64 275 underpriced int64 276 otherreject int64 277 ) 278 errs := f.addTxs(txs) 279 for i, err := range errs { 280 if err != nil { 281 // Track the transaction hash if the price is too low for us. 282 // Avoid re-request this transaction when we receive another 283 // announcement. 284 if err == core.ErrUnderpriced || err == core.ErrReplaceUnderpriced { 285 for f.underpriced.Cardinality() >= maxTxUnderpricedSetSize { 286 f.underpriced.Pop() 287 } 288 f.underpriced.Add(txs[i].Hash()) 289 } 290 // Track a few interesting failure types 291 switch err { 292 case nil: // Noop, but need to handle to not count these 293 294 case core.ErrAlreadyKnown: 295 duplicate++ 296 297 case core.ErrUnderpriced, core.ErrReplaceUnderpriced: 298 underpriced++ 299 300 default: 301 otherreject++ 302 } 303 } 304 added = append(added, txs[i].Hash()) 305 } 306 if direct { 307 txReplyKnownMeter.Mark(duplicate) 308 txReplyUnderpricedMeter.Mark(underpriced) 309 txReplyOtherRejectMeter.Mark(otherreject) 310 } else { 311 txBroadcastKnownMeter.Mark(duplicate) 312 txBroadcastUnderpricedMeter.Mark(underpriced) 313 txBroadcastOtherRejectMeter.Mark(otherreject) 314 } 315 select { 316 case f.cleanup <- &txDelivery{origin: peer, hashes: added, direct: direct}: 317 return nil 318 case <-f.quit: 319 return errTerminated 320 } 321 } 322 323 // Drop should be called when a peer disconnects. It cleans up all the internal 324 // data structures of the given node. 325 func (f *TxFetcher) Drop(peer string) error { 326 select { 327 case f.drop <- &txDrop{peer: peer}: 328 return nil 329 case <-f.quit: 330 return errTerminated 331 } 332 } 333 334 // Start boots up the announcement based synchroniser, accepting and processing 335 // hash notifications and block fetches until termination requested. 336 func (f *TxFetcher) Start() { 337 go f.loop() 338 } 339 340 // Stop terminates the announcement based synchroniser, canceling all pending 341 // operations. 342 func (f *TxFetcher) Stop() { 343 close(f.quit) 344 } 345 346 func (f *TxFetcher) loop() { 347 var ( 348 waitTimer = new(mclock.Timer) 349 timeoutTimer = new(mclock.Timer) 350 351 waitTrigger = make(chan struct{}, 1) 352 timeoutTrigger = make(chan struct{}, 1) 353 ) 354 for { 355 select { 356 case ann := <-f.notify: 357 // Drop part of the new announcements if there are too many accumulated. 358 // Note, we could but do not filter already known transactions here as 359 // the probability of something arriving between this call and the pre- 360 // filter outside is essentially zero. 361 used := len(f.waitslots[ann.origin]) + len(f.announces[ann.origin]) 362 if used >= maxTxAnnounces { 363 // This can happen if a set of transactions are requested but not 364 // all fulfilled, so the remainder are rescheduled without the cap 365 // check. Should be fine as the limit is in the thousands and the 366 // request size in the hundreds. 367 txAnnounceDOSMeter.Mark(int64(len(ann.hashes))) 368 break 369 } 370 want := used + len(ann.hashes) 371 if want > maxTxAnnounces { 372 txAnnounceDOSMeter.Mark(int64(want - maxTxAnnounces)) 373 ann.hashes = ann.hashes[:want-maxTxAnnounces] 374 } 375 // All is well, schedule the remainder of the transactions 376 idleWait := len(f.waittime) == 0 377 _, oldPeer := f.announces[ann.origin] 378 379 for _, hash := range ann.hashes { 380 // If the transaction is already downloading, add it to the list 381 // of possible alternates (in case the current retrieval fails) and 382 // also account it for the peer. 383 if f.alternates[hash] != nil { 384 f.alternates[hash][ann.origin] = struct{}{} 385 386 // Stage 2 and 3 share the set of origins per tx 387 if announces := f.announces[ann.origin]; announces != nil { 388 announces[hash] = struct{}{} 389 } else { 390 f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}} 391 } 392 continue 393 } 394 // If the transaction is not downloading, but is already queued 395 // from a different peer, track it for the new peer too. 396 if f.announced[hash] != nil { 397 f.announced[hash][ann.origin] = struct{}{} 398 399 // Stage 2 and 3 share the set of origins per tx 400 if announces := f.announces[ann.origin]; announces != nil { 401 announces[hash] = struct{}{} 402 } else { 403 f.announces[ann.origin] = map[common.Hash]struct{}{hash: {}} 404 } 405 continue 406 } 407 // If the transaction is already known to the fetcher, but not 408 // yet downloading, add the peer as an alternate origin in the 409 // waiting list. 410 if f.waitlist[hash] != nil { 411 f.waitlist[hash][ann.origin] = struct{}{} 412 413 if waitslots := f.waitslots[ann.origin]; waitslots != nil { 414 waitslots[hash] = struct{}{} 415 } else { 416 f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}} 417 } 418 continue 419 } 420 // Transaction unknown to the fetcher, insert it into the waiting list 421 f.waitlist[hash] = map[string]struct{}{ann.origin: {}} 422 f.waittime[hash] = f.clock.Now() 423 424 if waitslots := f.waitslots[ann.origin]; waitslots != nil { 425 waitslots[hash] = struct{}{} 426 } else { 427 f.waitslots[ann.origin] = map[common.Hash]struct{}{hash: {}} 428 } 429 } 430 // If a new item was added to the waitlist, schedule it into the fetcher 431 if idleWait && len(f.waittime) > 0 { 432 f.rescheduleWait(waitTimer, waitTrigger) 433 } 434 // If this peer is new and announced something already queued, maybe 435 // request transactions from them 436 if !oldPeer && len(f.announces[ann.origin]) > 0 { 437 f.scheduleFetches(timeoutTimer, timeoutTrigger, map[string]struct{}{ann.origin: {}}) 438 } 439 440 case <-waitTrigger: 441 // At least one transaction's waiting time ran out, push all expired 442 // ones into the retrieval queues 443 actives := make(map[string]struct{}) 444 for hash, instance := range f.waittime { 445 if time.Duration(f.clock.Now()-instance)+txGatherSlack > txArriveTimeout { 446 // Transaction expired without propagation, schedule for retrieval 447 if f.announced[hash] != nil { 448 panic("announce tracker already contains waitlist item") 449 } 450 f.announced[hash] = f.waitlist[hash] 451 for peer := range f.waitlist[hash] { 452 if announces := f.announces[peer]; announces != nil { 453 announces[hash] = struct{}{} 454 } else { 455 f.announces[peer] = map[common.Hash]struct{}{hash: {}} 456 } 457 delete(f.waitslots[peer], hash) 458 if len(f.waitslots[peer]) == 0 { 459 delete(f.waitslots, peer) 460 } 461 actives[peer] = struct{}{} 462 } 463 delete(f.waittime, hash) 464 delete(f.waitlist, hash) 465 } 466 } 467 // If transactions are still waiting for propagation, reschedule the wait timer 468 if len(f.waittime) > 0 { 469 f.rescheduleWait(waitTimer, waitTrigger) 470 } 471 // If any peers became active and are idle, request transactions from them 472 if len(actives) > 0 { 473 f.scheduleFetches(timeoutTimer, timeoutTrigger, actives) 474 } 475 476 case <-timeoutTrigger: 477 // Clean up any expired retrievals and avoid re-requesting them from the 478 // same peer (either overloaded or malicious, useless in both cases). We 479 // could also penalize (Drop), but there's nothing to gain, and if could 480 // possibly further increase the load on it. 481 for peer, req := range f.requests { 482 if time.Duration(f.clock.Now()-req.time)+txGatherSlack > txFetchTimeout { 483 txRequestTimeoutMeter.Mark(int64(len(req.hashes))) 484 485 // Reschedule all the not-yet-delivered fetches to alternate peers 486 for _, hash := range req.hashes { 487 // Skip rescheduling hashes already delivered by someone else 488 if req.stolen != nil { 489 if _, ok := req.stolen[hash]; ok { 490 continue 491 } 492 } 493 // Move the delivery back from fetching to queued 494 if _, ok := f.announced[hash]; ok { 495 panic("announced tracker already contains alternate item") 496 } 497 if f.alternates[hash] != nil { // nil if tx was broadcast during fetch 498 f.announced[hash] = f.alternates[hash] 499 } 500 delete(f.announced[hash], peer) 501 if len(f.announced[hash]) == 0 { 502 delete(f.announced, hash) 503 } 504 delete(f.announces[peer], hash) 505 delete(f.alternates, hash) 506 delete(f.fetching, hash) 507 } 508 if len(f.announces[peer]) == 0 { 509 delete(f.announces, peer) 510 } 511 // Keep track of the request as dangling, but never expire 512 f.requests[peer].hashes = nil 513 } 514 } 515 // Schedule a new transaction retrieval 516 f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) 517 518 // No idea if we scheduled something or not, trigger the timer if needed 519 // TODO(karalabe): this is kind of lame, can't we dump it into scheduleFetches somehow? 520 f.rescheduleTimeout(timeoutTimer, timeoutTrigger) 521 522 case delivery := <-f.cleanup: 523 // Independent if the delivery was direct or broadcast, remove all 524 // traces of the hash from internal trackers 525 for _, hash := range delivery.hashes { 526 if _, ok := f.waitlist[hash]; ok { 527 for peer, txset := range f.waitslots { 528 delete(txset, hash) 529 if len(txset) == 0 { 530 delete(f.waitslots, peer) 531 } 532 } 533 delete(f.waitlist, hash) 534 delete(f.waittime, hash) 535 } else { 536 for peer, txset := range f.announces { 537 delete(txset, hash) 538 if len(txset) == 0 { 539 delete(f.announces, peer) 540 } 541 } 542 delete(f.announced, hash) 543 delete(f.alternates, hash) 544 545 // If a transaction currently being fetched from a different 546 // origin was delivered (delivery stolen), mark it so the 547 // actual delivery won't double schedule it. 548 if origin, ok := f.fetching[hash]; ok && (origin != delivery.origin || !delivery.direct) { 549 stolen := f.requests[origin].stolen 550 if stolen == nil { 551 f.requests[origin].stolen = make(map[common.Hash]struct{}) 552 stolen = f.requests[origin].stolen 553 } 554 stolen[hash] = struct{}{} 555 } 556 delete(f.fetching, hash) 557 } 558 } 559 // In case of a direct delivery, also reschedule anything missing 560 // from the original query 561 if delivery.direct { 562 // Mark the reqesting successful (independent of individual status) 563 txRequestDoneMeter.Mark(int64(len(delivery.hashes))) 564 565 // Make sure something was pending, nuke it 566 req := f.requests[delivery.origin] 567 if req == nil { 568 log.Warn("Unexpected transaction delivery", "peer", delivery.origin) 569 break 570 } 571 delete(f.requests, delivery.origin) 572 573 // Anything not delivered should be re-scheduled (with or without 574 // this peer, depending on the response cutoff) 575 delivered := make(map[common.Hash]struct{}) 576 for _, hash := range delivery.hashes { 577 delivered[hash] = struct{}{} 578 } 579 cutoff := len(req.hashes) // If nothing is delivered, assume everything is missing, don't retry!!! 580 for i, hash := range req.hashes { 581 if _, ok := delivered[hash]; ok { 582 cutoff = i 583 } 584 } 585 // Reschedule missing hashes from alternates, not-fulfilled from alt+self 586 for i, hash := range req.hashes { 587 // Skip rescheduling hashes already delivered by someone else 588 if req.stolen != nil { 589 if _, ok := req.stolen[hash]; ok { 590 continue 591 } 592 } 593 if _, ok := delivered[hash]; !ok { 594 if i < cutoff { 595 delete(f.alternates[hash], delivery.origin) 596 delete(f.announces[delivery.origin], hash) 597 if len(f.announces[delivery.origin]) == 0 { 598 delete(f.announces, delivery.origin) 599 } 600 } 601 if len(f.alternates[hash]) > 0 { 602 if _, ok := f.announced[hash]; ok { 603 panic(fmt.Sprintf("announced tracker already contains alternate item: %v", f.announced[hash])) 604 } 605 f.announced[hash] = f.alternates[hash] 606 } 607 } 608 delete(f.alternates, hash) 609 delete(f.fetching, hash) 610 } 611 // Something was delivered, try to rechedule requests 612 f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) // Partial delivery may enable others to deliver too 613 } 614 615 case drop := <-f.drop: 616 // A peer was dropped, remove all traces of it 617 if _, ok := f.waitslots[drop.peer]; ok { 618 for hash := range f.waitslots[drop.peer] { 619 delete(f.waitlist[hash], drop.peer) 620 if len(f.waitlist[hash]) == 0 { 621 delete(f.waitlist, hash) 622 delete(f.waittime, hash) 623 } 624 } 625 delete(f.waitslots, drop.peer) 626 if len(f.waitlist) > 0 { 627 f.rescheduleWait(waitTimer, waitTrigger) 628 } 629 } 630 // Clean up any active requests 631 var request *txRequest 632 if request = f.requests[drop.peer]; request != nil { 633 for _, hash := range request.hashes { 634 // Skip rescheduling hashes already delivered by someone else 635 if request.stolen != nil { 636 if _, ok := request.stolen[hash]; ok { 637 continue 638 } 639 } 640 // Undelivered hash, reschedule if there's an alternative origin available 641 delete(f.alternates[hash], drop.peer) 642 if len(f.alternates[hash]) == 0 { 643 delete(f.alternates, hash) 644 } else { 645 f.announced[hash] = f.alternates[hash] 646 delete(f.alternates, hash) 647 } 648 delete(f.fetching, hash) 649 } 650 delete(f.requests, drop.peer) 651 } 652 // Clean up general announcement tracking 653 if _, ok := f.announces[drop.peer]; ok { 654 for hash := range f.announces[drop.peer] { 655 delete(f.announced[hash], drop.peer) 656 if len(f.announced[hash]) == 0 { 657 delete(f.announced, hash) 658 } 659 } 660 delete(f.announces, drop.peer) 661 } 662 // If a request was cancelled, check if anything needs to be rescheduled 663 if request != nil { 664 f.scheduleFetches(timeoutTimer, timeoutTrigger, nil) 665 f.rescheduleTimeout(timeoutTimer, timeoutTrigger) 666 } 667 668 case <-f.quit: 669 return 670 } 671 // No idea what happened, but bump some sanity metrics 672 txFetcherWaitingPeers.Update(int64(len(f.waitslots))) 673 txFetcherWaitingHashes.Update(int64(len(f.waitlist))) 674 txFetcherQueueingPeers.Update(int64(len(f.announces) - len(f.requests))) 675 txFetcherQueueingHashes.Update(int64(len(f.announced))) 676 txFetcherFetchingPeers.Update(int64(len(f.requests))) 677 txFetcherFetchingHashes.Update(int64(len(f.fetching))) 678 679 // Loop did something, ping the step notifier if needed (tests) 680 if f.step != nil { 681 f.step <- struct{}{} 682 } 683 } 684 } 685 686 // rescheduleWait iterates over all the transactions currently in the waitlist 687 // and schedules the movement into the fetcher for the earliest. 688 // 689 // The method has a granularity of 'gatherSlack', since there's not much point in 690 // spinning over all the transactions just to maybe find one that should trigger 691 // a few ms earlier. 692 func (f *TxFetcher) rescheduleWait(timer *mclock.Timer, trigger chan struct{}) { 693 if *timer != nil { 694 (*timer).Stop() 695 } 696 now := f.clock.Now() 697 698 earliest := now 699 for _, instance := range f.waittime { 700 if earliest > instance { 701 earliest = instance 702 if txArriveTimeout-time.Duration(now-earliest) < gatherSlack { 703 break 704 } 705 } 706 } 707 *timer = f.clock.AfterFunc(txArriveTimeout-time.Duration(now-earliest), func() { 708 trigger <- struct{}{} 709 }) 710 } 711 712 // rescheduleTimeout iterates over all the transactions currently in flight and 713 // schedules a cleanup run when the first would trigger. 714 // 715 // The method has a granularity of 'gatherSlack', since there's not much point in 716 // spinning over all the transactions just to maybe find one that should trigger 717 // a few ms earlier. 718 // 719 // This method is a bit "flaky" "by design". In theory the timeout timer only ever 720 // should be rescheduled if some request is pending. In practice, a timeout will 721 // cause the timer to be rescheduled every 5 secs (until the peer comes through or 722 // disconnects). This is a limitation of the fetcher code because we don't trac 723 // pending requests and timed out requests separatey. Without double tracking, if 724 // we simply didn't reschedule the timer on all-timeout then the timer would never 725 // be set again since len(request) > 0 => something's running. 726 func (f *TxFetcher) rescheduleTimeout(timer *mclock.Timer, trigger chan struct{}) { 727 if *timer != nil { 728 (*timer).Stop() 729 } 730 now := f.clock.Now() 731 732 earliest := now 733 for _, req := range f.requests { 734 // If this request already timed out, skip it altogether 735 if req.hashes == nil { 736 continue 737 } 738 if earliest > req.time { 739 earliest = req.time 740 if txFetchTimeout-time.Duration(now-earliest) < gatherSlack { 741 break 742 } 743 } 744 } 745 *timer = f.clock.AfterFunc(txFetchTimeout-time.Duration(now-earliest), func() { 746 trigger <- struct{}{} 747 }) 748 } 749 750 // scheduleFetches starts a batch of retrievals for all available idle peers. 751 func (f *TxFetcher) scheduleFetches(timer *mclock.Timer, timeout chan struct{}, whitelist map[string]struct{}) { 752 // Gather the set of peers we want to retrieve from (default to all) 753 actives := whitelist 754 if actives == nil { 755 actives = make(map[string]struct{}) 756 for peer := range f.announces { 757 actives[peer] = struct{}{} 758 } 759 } 760 if len(actives) == 0 { 761 return 762 } 763 // For each active peer, try to schedule some transaction fetches 764 idle := len(f.requests) == 0 765 766 f.forEachPeer(actives, func(peer string) { 767 if f.requests[peer] != nil { 768 return // continue in the for-each 769 } 770 if len(f.announces[peer]) == 0 { 771 return // continue in the for-each 772 } 773 hashes := make([]common.Hash, 0, maxTxRetrievals) 774 f.forEachHash(f.announces[peer], func(hash common.Hash) bool { 775 if _, ok := f.fetching[hash]; !ok { 776 // Mark the hash as fetching and stash away possible alternates 777 f.fetching[hash] = peer 778 779 if _, ok := f.alternates[hash]; ok { 780 panic(fmt.Sprintf("alternate tracker already contains fetching item: %v", f.alternates[hash])) 781 } 782 f.alternates[hash] = f.announced[hash] 783 delete(f.announced, hash) 784 785 // Accumulate the hash and stop if the limit was reached 786 hashes = append(hashes, hash) 787 if len(hashes) >= maxTxRetrievals { 788 return false // break in the for-each 789 } 790 } 791 return true // continue in the for-each 792 }) 793 // If any hashes were allocated, request them from the peer 794 if len(hashes) > 0 { 795 f.requests[peer] = &txRequest{hashes: hashes, time: f.clock.Now()} 796 txRequestOutMeter.Mark(int64(len(hashes))) 797 798 go func(peer string, hashes []common.Hash) { 799 // Try to fetch the transactions, but in case of a request 800 // failure (e.g. peer disconnected), reschedule the hashes. 801 if err := f.fetchTxs(peer, hashes); err != nil { 802 txRequestFailMeter.Mark(int64(len(hashes))) 803 f.Drop(peer) 804 } 805 }(peer, hashes) 806 } 807 }) 808 // If a new request was fired, schedule a timeout timer 809 if idle && len(f.requests) > 0 { 810 f.rescheduleTimeout(timer, timeout) 811 } 812 } 813 814 // forEachPeer does a range loop over a map of peers in production, but during 815 // testing it does a deterministic sorted random to allow reproducing issues. 816 func (f *TxFetcher) forEachPeer(peers map[string]struct{}, do func(peer string)) { 817 // If we're running production, use whatever Go's map gives us 818 if f.rand == nil { 819 for peer := range peers { 820 do(peer) 821 } 822 return 823 } 824 // We're running the test suite, make iteration deterministic 825 list := make([]string, 0, len(peers)) 826 for peer := range peers { 827 list = append(list, peer) 828 } 829 sort.Strings(list) 830 rotateStrings(list, f.rand.Intn(len(list))) 831 for _, peer := range list { 832 do(peer) 833 } 834 } 835 836 // forEachHash does a range loop over a map of hashes in production, but during 837 // testing it does a deterministic sorted random to allow reproducing issues. 838 func (f *TxFetcher) forEachHash(hashes map[common.Hash]struct{}, do func(hash common.Hash) bool) { 839 // If we're running production, use whatever Go's map gives us 840 if f.rand == nil { 841 for hash := range hashes { 842 if !do(hash) { 843 return 844 } 845 } 846 return 847 } 848 // We're running the test suite, make iteration deterministic 849 list := make([]common.Hash, 0, len(hashes)) 850 for hash := range hashes { 851 list = append(list, hash) 852 } 853 sortHashes(list) 854 rotateHashes(list, f.rand.Intn(len(list))) 855 for _, hash := range list { 856 if !do(hash) { 857 return 858 } 859 } 860 } 861 862 // rotateStrings rotates the contents of a slice by n steps. This method is only 863 // used in tests to simulate random map iteration but keep it deterministic. 864 func rotateStrings(slice []string, n int) { 865 orig := make([]string, len(slice)) 866 copy(orig, slice) 867 868 for i := 0; i < len(orig); i++ { 869 slice[i] = orig[(i+n)%len(orig)] 870 } 871 } 872 873 // sortHashes sorts a slice of hashes. This method is only used in tests in order 874 // to simulate random map iteration but keep it deterministic. 875 func sortHashes(slice []common.Hash) { 876 for i := 0; i < len(slice); i++ { 877 for j := i + 1; j < len(slice); j++ { 878 if bytes.Compare(slice[i][:], slice[j][:]) > 0 { 879 slice[i], slice[j] = slice[j], slice[i] 880 } 881 } 882 } 883 } 884 885 // rotateHashes rotates the contents of a slice by n steps. This method is only 886 // used in tests to simulate random map iteration but keep it deterministic. 887 func rotateHashes(slice []common.Hash, n int) { 888 orig := make([]common.Hash, len(slice)) 889 copy(orig, slice) 890 891 for i := 0; i < len(orig); i++ { 892 slice[i] = orig[(i+n)%len(orig)] 893 } 894 }