git.pirl.io/community/pirl@v0.0.0-20201111064343-9d3d31ff74be/eth/fetcher/block_fetcher.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package fetcher contains the announcement based blocks or transaction synchronisation. 
package fetcher

import (
	"errors"
	"math/rand"
	"time"

	"git.pirl.io/community/pirl/common"
	"git.pirl.io/community/pirl/common/prque"
	"git.pirl.io/community/pirl/consensus"
	"git.pirl.io/community/pirl/core/types"
	"git.pirl.io/community/pirl/log"
	"git.pirl.io/community/pirl/metrics"
)

const (
	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block/transaction
)

const (
	maxUncleDist = 7   // Maximum allowed backward distance from the chain head
	maxQueueDist = 32  // Maximum allowed distance from the chain head to queue
	hashLimit    = 256 // Maximum number of unique blocks a peer may have announced
	blockLimit   = 64  // Maximum number of unique blocks a peer may have delivered
)

// Metrics gauging announcement, broadcast and filtering traffic through the fetcher.
var (
	blockAnnounceInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil)
	blockAnnounceOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil)
	blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil)
	blockAnnounceDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil)

	blockBroadcastInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil)
	blockBroadcastOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil)
	blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil)
	blockBroadcastDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/dos", nil)

	headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil)
	bodyFetchMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil)

	headerFilterInMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil)
	headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil)
	bodyFilterInMeter    = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil)
	bodyFilterOutMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil)
)

var (
	// errTerminated is returned by the public entry points when the fetcher's
	// quit channel has been closed (Stop was called).
	errTerminated = errors.New("terminated")
)

// blockRetrievalFn is a callback type for retrieving a block from the local chain.
type blockRetrievalFn func(common.Hash) *types.Block

// headerRequesterFn is a callback type for sending a header retrieval request.
type headerRequesterFn func(common.Hash) error

// bodyRequesterFn is a callback type for sending a body retrieval request.
type bodyRequesterFn func([]common.Hash) error

// headerVerifierFn is a callback type to verify a block's header for fast propagation.
type headerVerifierFn func(header *types.Header) error

// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
type blockBroadcasterFn func(block *types.Block, propagate bool)

// chainHeightFn is a callback type to retrieve the current chain height.
type chainHeightFn func() uint64

// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
type chainInsertFn func(types.Blocks) (int, error)

// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)

// blockAnnounce is the hash notification of the availability of a new block in the
// network.
type blockAnnounce struct {
	hash   common.Hash   // Hash of the block being announced
	number uint64        // Number of the block being announced (0 = unknown | old protocol)
	header *types.Header // Header of the block partially reassembled (new protocol)
	time   time.Time     // Timestamp of the announcement

	origin string // Identifier of the peer originating the notification

	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
}

// headerFilterTask represents a batch of headers needing fetcher filtering.
type headerFilterTask struct {
	peer    string          // The source peer of block headers
	headers []*types.Header // Collection of headers to filter
	time    time.Time       // Arrival time of the headers
}

// bodyFilterTask represents a batch of block bodies (transactions and uncles)
// needing fetcher filtering.
type bodyFilterTask struct {
	peer         string                 // The source peer of block bodies
	transactions [][]*types.Transaction // Collection of transactions per block bodies
	uncles       [][]*types.Header      // Collection of uncles per block bodies
	time         time.Time              // Arrival time of the blocks' contents
}

// blockInject represents a scheduled block import operation.
type blockInject struct {
	origin string       // Identifier of the peer that delivered the block
	block  *types.Block // The block itself, queued for import
}

// BlockFetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
type BlockFetcher struct {
	// Various event channels
	notify chan *blockAnnounce // Incoming block announcements from peers
	inject chan *blockInject   // Incoming directly-propagated blocks

	headerFilter chan chan *headerFilterTask // Handshake channel for header filtering requests
	bodyFilter   chan chan *bodyFilterTask   // Handshake channel for body filtering requests

	done chan common.Hash // Signals a finished import so state can be cleaned up
	quit chan struct{}    // Closed on Stop to terminate the loop and all public calls

	// Announce states
	announces  map[string]int                   // Per peer blockAnnounce counts to prevent memory exhaustion
	announced  map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
	fetching   map[common.Hash]*blockAnnounce   // Announced blocks, currently fetching
	fetched    map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
	completing map[common.Hash]*blockAnnounce   // Blocks with headers, currently body-completing

	// Block cache
	queue  *prque.Prque                 // Queue containing the import operations (block number sorted)
	queues map[string]int               // Per peer block counts to prevent memory exhaustion
	queued map[common.Hash]*blockInject // Set of already queued blocks (to dedupe imports)

	// Callbacks
	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
	verifyHeader   headerVerifierFn   // Checks if a block's headers have a valid proof of work
	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
	chainHeight    chainHeightFn      // Retrieves the current chain's height
	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
	dropPeer       peerDropFn         // Drops a peer for misbehaving

	// Testing hooks
	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list
	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
	fetchingHook       func([]common.Hash)     // Method to call upon starting a block (eth/61) or header (eth/62) fetch
	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (eth/62)
	importedHook       func(*types.Block)      // Method to call upon successful block import (both eth/61 and eth/62)
}

// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
// The supplied callbacks connect the fetcher to the local chain and the peer set; the
// returned fetcher is idle until Start is called.
func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
	return &BlockFetcher{
		notify:         make(chan *blockAnnounce),
		inject:         make(chan *blockInject),
		headerFilter:   make(chan chan *headerFilterTask),
		bodyFilter:     make(chan chan *bodyFilterTask),
		done:           make(chan common.Hash),
		quit:           make(chan struct{}),
		announces:      make(map[string]int),
		announced:      make(map[common.Hash][]*blockAnnounce),
		fetching:       make(map[common.Hash]*blockAnnounce),
		fetched:        make(map[common.Hash][]*blockAnnounce),
		completing:     make(map[common.Hash]*blockAnnounce),
		queue:          prque.New(nil),
		queues:         make(map[string]int),
		queued:         make(map[common.Hash]*blockInject),
		getBlock:       getBlock,
		verifyHeader:   verifyHeader,
		broadcastBlock: broadcastBlock,
		chainHeight:    chainHeight,
		insertChain:    insertChain,
		dropPeer:       dropPeer,
	}
}

// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination requested.
func (f *BlockFetcher) Start() {
	go f.loop()
}

// Stop terminates the announcement based synchroniser, canceling all pending
// operations.
func (f *BlockFetcher) Stop() {
	close(f.quit)
}

// Notify announces the fetcher of the potential availability of a new block in
// the network.
211 func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time, 212 headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error { 213 block := &blockAnnounce{ 214 hash: hash, 215 number: number, 216 time: time, 217 origin: peer, 218 fetchHeader: headerFetcher, 219 fetchBodies: bodyFetcher, 220 } 221 select { 222 case f.notify <- block: 223 return nil 224 case <-f.quit: 225 return errTerminated 226 } 227 } 228 229 // Enqueue tries to fill gaps the fetcher's future import queue. 230 func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error { 231 op := &blockInject{ 232 origin: peer, 233 block: block, 234 } 235 select { 236 case f.inject <- op: 237 return nil 238 case <-f.quit: 239 return errTerminated 240 } 241 } 242 243 // FilterHeaders extracts all the headers that were explicitly requested by the fetcher, 244 // returning those that should be handled differently. 245 func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header { 246 log.Trace("Filtering headers", "peer", peer, "headers", len(headers)) 247 248 // Send the filter channel to the fetcher 249 filter := make(chan *headerFilterTask) 250 251 select { 252 case f.headerFilter <- filter: 253 case <-f.quit: 254 return nil 255 } 256 // Request the filtering of the header list 257 select { 258 case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}: 259 case <-f.quit: 260 return nil 261 } 262 // Retrieve the headers remaining after filtering 263 select { 264 case task := <-filter: 265 return task.headers 266 case <-f.quit: 267 return nil 268 } 269 } 270 271 // FilterBodies extracts all the block bodies that were explicitly requested by 272 // the fetcher, returning those that should be handled differently. 
func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))

	// Send the filter channel to the fetcher
	filter := make(chan *bodyFilterTask)

	select {
	case f.bodyFilter <- filter:
	case <-f.quit:
		return nil, nil
	}
	// Request the filtering of the body list
	select {
	case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}:
	case <-f.quit:
		return nil, nil
	}
	// Retrieve the bodies remaining after filtering
	select {
	case task := <-filter:
		return task.transactions, task.uncles
	case <-f.quit:
		return nil, nil
	}
}

// loop is the main fetcher loop, checking and processing various notification
// events. It owns all fetcher state (announced/fetching/fetched/completing and
// the import queue); the public methods only communicate with it via channels.
func (f *BlockFetcher) loop() {
	// Iterate the block fetching until a quit is requested
	fetchTimer := time.NewTimer(0)
	completeTimer := time.NewTimer(0)

	for {
		// Clean up any expired block fetches
		for hash, announce := range f.fetching {
			if time.Since(announce.time) > fetchTimeout {
				f.forgetHash(hash)
			}
		}
		// Import any queued blocks that could potentially fit
		height := f.chainHeight()
		for !f.queue.Empty() {
			op := f.queue.PopItem().(*blockInject)
			hash := op.block.Hash()
			if f.queueChangeHook != nil {
				f.queueChangeHook(hash, false)
			}
			// If too high up the chain or phase, continue later
			number := op.block.NumberU64()
			if number > height+1 {
				// Not yet importable: push back (negative number keeps the queue
				// ordered lowest-block-first) and stop draining for this round.
				f.queue.Push(op, -int64(number))
				if f.queueChangeHook != nil {
					f.queueChangeHook(hash, true)
				}
				break
			}
			// Otherwise if fresh and still unknown, try and import
			if number+maxUncleDist < height || f.getBlock(hash) != nil {
				f.forgetBlock(hash)
				continue
			}
			f.insert(op.origin, op.block)
		}
		// Wait for an outside event to occur
		select {
		case <-f.quit:
			// BlockFetcher terminating, abort all operations
			return

		case notification := <-f.notify:
			// A block was announced, make sure the peer isn't DOSing us
			blockAnnounceInMeter.Mark(1)

			count := f.announces[notification.origin] + 1
			if count > hashLimit {
				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
				blockAnnounceDOSMeter.Mark(1)
				break
			}
			// If we have a valid block number, check that it's potentially useful
			if notification.number > 0 {
				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
					blockAnnounceDropMeter.Mark(1)
					break
				}
			}
			// All is well, schedule the announce if block's not yet downloading
			if _, ok := f.fetching[notification.hash]; ok {
				break
			}
			if _, ok := f.completing[notification.hash]; ok {
				break
			}
			f.announces[notification.origin] = count
			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
				f.announceChangeHook(notification.hash, true)
			}
			if len(f.announced) == 1 {
				// First pending announcement: the fetch timer was idle, arm it.
				f.rescheduleFetch(fetchTimer)
			}

		case op := <-f.inject:
			// A direct block insertion was requested, try and fill any pending gaps
			blockBroadcastInMeter.Mark(1)
			f.enqueue(op.origin, op.block)

		case hash := <-f.done:
			// A pending import finished, remove all traces of the notification
			f.forgetHash(hash)
			f.forgetBlock(hash)

		case <-fetchTimer.C:
			// At least one block's timer ran out, check for needing retrieval
			request := make(map[string][]common.Hash)

			for hash, announces := range f.announced {
				if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
					// Pick a random peer to retrieve from, reset all others
					announce := announces[rand.Intn(len(announces))]
					f.forgetHash(hash)

					// If the block still didn't arrive, queue for fetching
					if f.getBlock(hash) == nil {
						request[announce.origin] = append(request[announce.origin], hash)
						f.fetching[hash] = announce
					}
				}
			}
			// Send out all block header requests
			for peer, hashes := range request {
				log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)

				// Create a closure of the fetch and schedule it on a new thread
				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
				go func() {
					if f.fetchingHook != nil {
						f.fetchingHook(hashes)
					}
					for _, hash := range hashes {
						headerFetchMeter.Mark(1)
						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
					}
				}()
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleFetch(fetchTimer)

		case <-completeTimer.C:
			// At least one header's timer ran out, retrieve everything
			request := make(map[string][]common.Hash)

			for hash, announces := range f.fetched {
				// Pick a random peer to retrieve from, reset all others
				announce := announces[rand.Intn(len(announces))]
				f.forgetHash(hash)

				// If the block still didn't arrive, queue for completion
				if f.getBlock(hash) == nil {
					request[announce.origin] = append(request[announce.origin], hash)
					f.completing[hash] = announce
				}
			}
			// Send out all block body requests
			for peer, hashes := range request {
				log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)

				// Create a closure of the fetch and schedule it on a new thread
				if f.completingHook != nil {
					f.completingHook(hashes)
				}
				bodyFetchMeter.Mark(int64(len(hashes)))
				go f.completing[hashes[0]].fetchBodies(hashes)
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleComplete(completeTimer)

		case filter := <-f.headerFilter:
			// Headers arrived from a remote peer. Extract those that were explicitly
			// requested by the fetcher, and return everything else so it's delivered
			// to other parts of the system.
			var task *headerFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			headerFilterInMeter.Mark(int64(len(task.headers)))

			// Split the batch of headers into unknown ones (to return to the caller),
			// known incomplete ones (requiring body retrievals) and completed blocks.
			unknown, incomplete, complete := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}
			for _, header := range task.headers {
				hash := header.Hash()

				// Filter fetcher-requested headers from other synchronisation algorithms
				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
					// If the delivered header does not match the promised number, drop the announcer
					if header.Number.Uint64() != announce.number {
						log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
						f.dropPeer(announce.origin)
						f.forgetHash(hash)
						continue
					}
					// Only keep if not imported by other means
					if f.getBlock(hash) == nil {
						announce.header = header
						announce.time = task.time

						// If the block is empty (header only), short circuit into the final import queue
						if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
							log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())

							block := types.NewBlockWithHeader(header)
							block.ReceivedAt = task.time

							complete = append(complete, block)
							f.completing[hash] = announce
							continue
						}
						// Otherwise add to the list of blocks needing completion
						incomplete = append(incomplete, announce)
					} else {
						log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
						f.forgetHash(hash)
					}
				} else {
					// BlockFetcher doesn't know about it, add to the return list
					unknown = append(unknown, header)
				}
			}
			headerFilterOutMeter.Mark(int64(len(unknown)))
			select {
			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
			case <-f.quit:
				return
			}
			// Schedule the retrieved headers for body completion
			for _, announce := range incomplete {
				hash := announce.header.Hash()
				if _, ok := f.completing[hash]; ok {
					continue
				}
				f.fetched[hash] = append(f.fetched[hash], announce)
				if len(f.fetched) == 1 {
					// First pending completion: the complete timer was idle, arm it.
					f.rescheduleComplete(completeTimer)
				}
			}
			// Schedule the header-only blocks for import
			for _, block := range complete {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}

		case filter := <-f.bodyFilter:
			// Block bodies arrived, extract any explicitly requested blocks, return the rest
			var task *bodyFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			bodyFilterInMeter.Mark(int64(len(task.transactions)))

			blocks := []*types.Block{}
			for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
				// Match up a body to any possible completion request
				matched := false

				for hash, announce := range f.completing {
					if f.queued[hash] == nil {
						txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
						uncleHash := types.CalcUncleHash(task.uncles[i])

						if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash && announce.origin == task.peer {
							// Mark the body matched, reassemble if still unknown
							matched = true

							if f.getBlock(hash) == nil {
								block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
								block.ReceivedAt = task.time

								blocks = append(blocks, block)
							} else {
								f.forgetHash(hash)
							}
						}
					}
				}
				if matched {
					// The body was consumed by the fetcher: remove it from the task
					// (the remainder is returned to the caller) and re-examine index i.
					task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
					task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
					i--
					continue
				}
			}

			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
			select {
			case filter <- task:
			case <-f.quit:
				return
			}
			// Schedule the retrieved blocks for ordered import
			for _, block := range blocks {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}
		}
	}
}

// rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.
func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
	// Short circuit if no blocks are announced
	if len(f.announced) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.announced {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	fetch.Reset(arriveTimeout - time.Since(earliest))
}

// rescheduleComplete resets the specified completion timer to the next fetch timeout.
func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) {
	// Short circuit if no headers are fetched
	if len(f.fetched) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.fetched {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	complete.Reset(gatherSlack - time.Since(earliest))
}

// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen. Blocks from over-active peers or too far from the
// current head are dropped instead of queued.
func (f *BlockFetcher) enqueue(peer string, block *types.Block) {
	hash := block.Hash()

	// Ensure the peer isn't DOSing us
	count := f.queues[peer] + 1
	if count > blockLimit {
		log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit)
		blockBroadcastDOSMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Discard any past or too distant blocks
	if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
		log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist)
		blockBroadcastDropMeter.Mark(1)
		f.forgetHash(hash)
		return
	}
	// Schedule the block for future importing
	if _, ok := f.queued[hash]; !ok {
		op := &blockInject{
			origin: peer,
			block:  block,
		}
		f.queues[peer] = count
		f.queued[hash] = op
		// Negative priority keeps the queue ordered lowest block number first.
		f.queue.Push(op, -int64(block.NumberU64()))
		if f.queueChangeHook != nil {
			f.queueChangeHook(op.block.Hash(), true)
		}
		log.Debug("Queued propagated block", "peer", peer, "number", block.Number(), "hash", hash, "queued", f.queue.Size())
	}
}

// insert spawns a new goroutine to run a block insertion into the chain. If the
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly.
func (f *BlockFetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
	log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
	go func() {
		// Always signal completion so the loop can clean up its state,
		// regardless of how the import turned out.
		defer func() { f.done <- hash }()

		// If the parent's unknown, abort insertion
		parent := f.getBlock(block.ParentHash())
		if parent == nil {
			log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
			return
		}
		// Quickly validate the header and propagate the block if it passes
		switch err := f.verifyHeader(block.Header()); err {
		case nil:
			// All ok, quickly propagate to our peers
			blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
			go f.broadcastBlock(block, true)

		case consensus.ErrFutureBlock:
			// Weird future block, don't fail, but neither propagate

		default:
			// Something went very wrong, drop the peer
			log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
			f.dropPeer(peer)
			return
		}
		// Run the actual import and log any issues
		if _, err := f.insertChain(types.Blocks{block}); err != nil {
			log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
			return
		}
		// If import succeeded, broadcast the block
		blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(block)
		}
	}()
}

// forgetHash removes all traces of a block announcement from the fetcher's
// internal state.
708 func (f *BlockFetcher) forgetHash(hash common.Hash) { 709 // Remove all pending announces and decrement DOS counters 710 for _, announce := range f.announced[hash] { 711 f.announces[announce.origin]-- 712 if f.announces[announce.origin] <= 0 { 713 delete(f.announces, announce.origin) 714 } 715 } 716 delete(f.announced, hash) 717 if f.announceChangeHook != nil { 718 f.announceChangeHook(hash, false) 719 } 720 // Remove any pending fetches and decrement the DOS counters 721 if announce := f.fetching[hash]; announce != nil { 722 f.announces[announce.origin]-- 723 if f.announces[announce.origin] <= 0 { 724 delete(f.announces, announce.origin) 725 } 726 delete(f.fetching, hash) 727 } 728 729 // Remove any pending completion requests and decrement the DOS counters 730 for _, announce := range f.fetched[hash] { 731 f.announces[announce.origin]-- 732 if f.announces[announce.origin] <= 0 { 733 delete(f.announces, announce.origin) 734 } 735 } 736 delete(f.fetched, hash) 737 738 // Remove any pending completions and decrement the DOS counters 739 if announce := f.completing[hash]; announce != nil { 740 f.announces[announce.origin]-- 741 if f.announces[announce.origin] <= 0 { 742 delete(f.announces, announce.origin) 743 } 744 delete(f.completing, hash) 745 } 746 } 747 748 // forgetBlock removes all traces of a queued block from the fetcher's internal 749 // state. 750 func (f *BlockFetcher) forgetBlock(hash common.Hash) { 751 if insert := f.queued[hash]; insert != nil { 752 f.queues[insert.origin]-- 753 if f.queues[insert.origin] == 0 { 754 delete(f.queues, insert.origin) 755 } 756 delete(f.queued, hash) 757 } 758 }