github.com/Debrief-BC/go-debrief@v0.0.0-20200420203408-0c26ca968123/eth/fetcher/block_fetcher.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package fetcher contains the announcement based blocks or transaction synchronisation. 
package fetcher

import (
	"errors"
	"math/rand"
	"time"

	"github.com/Debrief-BC/go-debrief/common"
	"github.com/Debrief-BC/go-debrief/common/prque"
	"github.com/Debrief-BC/go-debrief/consensus"
	"github.com/Debrief-BC/go-debrief/core/types"
	"github.com/Debrief-BC/go-debrief/log"
	"github.com/Debrief-BC/go-debrief/metrics"
)

const (
	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block/transaction is explicitly requested
	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block/transaction
)

const (
	maxUncleDist = 7   // Maximum allowed backward distance from the chain head
	maxQueueDist = 32  // Maximum allowed distance from the chain head to queue
	hashLimit    = 256 // Maximum number of unique blocks a peer may have announced
	blockLimit   = 64  // Maximum number of unique blocks a peer may have delivered
)

// Metrics gathered by the block fetcher: announcement and broadcast ingress,
// drop/DOS counters, and in/out counts for the header and body filters.
var (
	blockAnnounceInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/announces/in", nil)
	blockAnnounceOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/announces/out", nil)
	blockAnnounceDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/announces/drop", nil)
	blockAnnounceDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/announces/dos", nil)

	blockBroadcastInMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/in", nil)
	blockBroadcastOutTimer  = metrics.NewRegisteredTimer("eth/fetcher/block/broadcasts/out", nil)
	blockBroadcastDropMeter = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/drop", nil)
	blockBroadcastDOSMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/broadcasts/dos", nil)

	headerFetchMeter = metrics.NewRegisteredMeter("eth/fetcher/block/headers", nil)
	bodyFetchMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/bodies", nil)

	headerFilterInMeter  = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/in", nil)
	headerFilterOutMeter = metrics.NewRegisteredMeter("eth/fetcher/block/filter/headers/out", nil)
	bodyFilterInMeter    = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/in", nil)
	bodyFilterOutMeter   = metrics.NewRegisteredMeter("eth/fetcher/block/filter/bodies/out", nil)
)

var (
	// errTerminated is returned by the public entry points when the fetcher
	// has been stopped (f.quit closed) before the request could be queued.
	errTerminated = errors.New("terminated")
)

// blockRetrievalFn is a callback type for retrieving a block from the local chain.
type blockRetrievalFn func(common.Hash) *types.Block

// headerRequesterFn is a callback type for sending a header retrieval request.
type headerRequesterFn func(common.Hash) error

// bodyRequesterFn is a callback type for sending a body retrieval request.
type bodyRequesterFn func([]common.Hash) error

// headerVerifierFn is a callback type to verify a block's header for fast propagation.
type headerVerifierFn func(header *types.Header) error

// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
type blockBroadcasterFn func(block *types.Block, propagate bool)

// chainHeightFn is a callback type to retrieve the current chain height.
type chainHeightFn func() uint64

// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
type chainInsertFn func(types.Blocks) (int, error)

// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)

// blockAnnounce is the hash notification of the availability of a new block in the
// network.
type blockAnnounce struct {
	hash   common.Hash   // Hash of the block being announced
	number uint64        // Number of the block being announced (0 = unknown | old protocol)
	header *types.Header // Header of the block partially reassembled (new protocol)
	time   time.Time     // Timestamp of the announcement

	origin string // Identifier of the peer originating the notification

	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
}

// headerFilterTask represents a batch of headers needing fetcher filtering.
type headerFilterTask struct {
	peer    string          // The source peer of block headers
	headers []*types.Header // Collection of headers to filter
	time    time.Time       // Arrival time of the headers
}

// bodyFilterTask represents a batch of block bodies (transactions and uncles)
// needing fetcher filtering.
type bodyFilterTask struct {
	peer         string                 // The source peer of block bodies
	transactions [][]*types.Transaction // Collection of transactions per block bodies
	uncles       [][]*types.Header      // Collection of uncles per block bodies
	time         time.Time              // Arrival time of the blocks' contents
}

// blockInject represents a scheduled import operation.
type blockInject struct {
	origin string       // Identifier of the peer the block came from
	block  *types.Block // Block to be imported into the local chain
}

// BlockFetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
type BlockFetcher struct {
	// Various event channels
	notify chan *blockAnnounce // Hash announcements arriving from the network
	inject chan *blockInject   // Complete blocks arriving via direct propagation

	headerFilter chan chan *headerFilterTask // Channel-of-channels used to hand header batches to the loop
	bodyFilter   chan chan *bodyFilterTask   // Channel-of-channels used to hand body batches to the loop

	done chan common.Hash // Signals a finished import so state can be cleaned up
	quit chan struct{}    // Closed on Stop to terminate all operations

	// Announce states
	announces  map[string]int                   // Per peer blockAnnounce counts to prevent memory exhaustion
	announced  map[common.Hash][]*blockAnnounce // Announced blocks, scheduled for fetching
	fetching   map[common.Hash]*blockAnnounce   // Announced blocks, currently fetching
	fetched    map[common.Hash][]*blockAnnounce // Blocks with headers fetched, scheduled for body retrieval
	completing map[common.Hash]*blockAnnounce   // Blocks with headers, currently body-completing

	// Block cache
	queue  *prque.Prque                 // Queue containing the import operations (block number sorted)
	queues map[string]int               // Per peer block counts to prevent memory exhaustion
	queued map[common.Hash]*blockInject // Set of already queued blocks (to dedupe imports)

	// Callbacks
	getBlock       blockRetrievalFn   // Retrieves a block from the local chain
	verifyHeader   headerVerifierFn   // Checks if a block's headers have a valid proof of work
	broadcastBlock blockBroadcasterFn // Broadcasts a block to connected peers
	chainHeight    chainHeightFn      // Retrieves the current chain's height
	insertChain    chainInsertFn      // Injects a batch of blocks into the chain
	dropPeer       peerDropFn         // Drops a peer for misbehaving

	// Testing hooks
	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the blockAnnounce list
	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
	fetchingHook       func([]common.Hash)     // Method to call upon starting a block (eth/61) or header (eth/62) fetch
	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (eth/62)
	importedHook       func(*types.Block)      // Method to call upon successful block import (both eth/61 and eth/62)
}

// NewBlockFetcher creates a block fetcher to retrieve blocks based on hash announcements.
func NewBlockFetcher(getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *BlockFetcher {
	return &BlockFetcher{
		notify:         make(chan *blockAnnounce),
		inject:         make(chan *blockInject),
		headerFilter:   make(chan chan *headerFilterTask),
		bodyFilter:     make(chan chan *bodyFilterTask),
		done:           make(chan common.Hash),
		quit:           make(chan struct{}),
		announces:      make(map[string]int),
		announced:      make(map[common.Hash][]*blockAnnounce),
		fetching:       make(map[common.Hash]*blockAnnounce),
		fetched:        make(map[common.Hash][]*blockAnnounce),
		completing:     make(map[common.Hash]*blockAnnounce),
		queue:          prque.New(nil),
		queues:         make(map[string]int),
		queued:         make(map[common.Hash]*blockInject),
		getBlock:       getBlock,
		verifyHeader:   verifyHeader,
		broadcastBlock: broadcastBlock,
		chainHeight:    chainHeight,
		insertChain:    insertChain,
		dropPeer:       dropPeer,
	}
}

// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination requested.
func (f *BlockFetcher) Start() {
	go f.loop()
}

// Stop terminates the announcement based synchroniser, canceling all pending
// operations.
func (f *BlockFetcher) Stop() {
	close(f.quit)
}

// Notify announces the fetcher of the potential availability of a new block in
// the network.
211 func (f *BlockFetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time, 212 headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error { 213 block := &blockAnnounce{ 214 hash: hash, 215 number: number, 216 time: time, 217 origin: peer, 218 fetchHeader: headerFetcher, 219 fetchBodies: bodyFetcher, 220 } 221 select { 222 case f.notify <- block: 223 return nil 224 case <-f.quit: 225 return errTerminated 226 } 227 } 228 229 // Enqueue tries to fill gaps the fetcher's future import queue. 230 func (f *BlockFetcher) Enqueue(peer string, block *types.Block) error { 231 op := &blockInject{ 232 origin: peer, 233 block: block, 234 } 235 select { 236 case f.inject <- op: 237 return nil 238 case <-f.quit: 239 return errTerminated 240 } 241 } 242 243 // FilterHeaders extracts all the headers that were explicitly requested by the fetcher, 244 // returning those that should be handled differently. 245 func (f *BlockFetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header { 246 log.Trace("Filtering headers", "peer", peer, "headers", len(headers)) 247 248 // Send the filter channel to the fetcher 249 filter := make(chan *headerFilterTask) 250 251 select { 252 case f.headerFilter <- filter: 253 case <-f.quit: 254 return nil 255 } 256 // Request the filtering of the header list 257 select { 258 case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}: 259 case <-f.quit: 260 return nil 261 } 262 // Retrieve the headers remaining after filtering 263 select { 264 case task := <-filter: 265 return task.headers 266 case <-f.quit: 267 return nil 268 } 269 } 270 271 // FilterBodies extracts all the block bodies that were explicitly requested by 272 // the fetcher, returning those that should be handled differently. 
func (f *BlockFetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) {
	log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles))

	// Send the filter channel to the fetcher
	filter := make(chan *bodyFilterTask)

	select {
	case f.bodyFilter <- filter:
	case <-f.quit:
		return nil, nil
	}
	// Request the filtering of the body list
	select {
	case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}:
	case <-f.quit:
		return nil, nil
	}
	// Retrieve the bodies remaining after filtering
	select {
	case task := <-filter:
		return task.transactions, task.uncles
	case <-f.quit:
		return nil, nil
	}
}

// loop is the main fetcher loop, checking and processing various notification
// events. All fetcher state (the announce/fetch/complete maps and the import
// queue) is owned by this goroutine; other goroutines interact with it only
// through the event channels.
func (f *BlockFetcher) loop() {
	// Iterate the block fetching until a quit is requested
	fetchTimer := time.NewTimer(0)
	completeTimer := time.NewTimer(0)
	defer fetchTimer.Stop()
	defer completeTimer.Stop()

	for {
		// Clean up any expired block fetches
		for hash, announce := range f.fetching {
			if time.Since(announce.time) > fetchTimeout {
				f.forgetHash(hash)
			}
		}
		// Import any queued blocks that could potentially fit
		height := f.chainHeight()
		for !f.queue.Empty() {
			op := f.queue.PopItem().(*blockInject)
			hash := op.block.Hash()
			if f.queueChangeHook != nil {
				f.queueChangeHook(hash, false)
			}
			// If too high up the chain or phase, continue later
			number := op.block.NumberU64()
			if number > height+1 {
				f.queue.Push(op, -int64(number))
				if f.queueChangeHook != nil {
					f.queueChangeHook(hash, true)
				}
				break
			}
			// Otherwise if fresh and still unknown, try and import
			if number+maxUncleDist < height || f.getBlock(hash) != nil {
				f.forgetBlock(hash)
				continue
			}
			f.insert(op.origin, op.block)
		}
		// Wait for an outside event to occur
		select {
		case <-f.quit:
			// BlockFetcher terminating, abort all operations
			return

		case notification := <-f.notify:
			// A block was announced, make sure the peer isn't DOSing us
			blockAnnounceInMeter.Mark(1)

			count := f.announces[notification.origin] + 1
			if count > hashLimit {
				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
				blockAnnounceDOSMeter.Mark(1)
				break
			}
			// If we have a valid block number, check that it's potentially useful
			if notification.number > 0 {
				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
					blockAnnounceDropMeter.Mark(1)
					break
				}
			}
			// All is well, schedule the announce if block's not yet downloading
			if _, ok := f.fetching[notification.hash]; ok {
				break
			}
			if _, ok := f.completing[notification.hash]; ok {
				break
			}
			f.announces[notification.origin] = count
			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
				f.announceChangeHook(notification.hash, true)
			}
			// First pending announce: arm the fetch timer.
			if len(f.announced) == 1 {
				f.rescheduleFetch(fetchTimer)
			}

		case op := <-f.inject:
			// A direct block insertion was requested, try and fill any pending gaps
			blockBroadcastInMeter.Mark(1)
			f.enqueue(op.origin, op.block)

		case hash := <-f.done:
			// A pending import finished, remove all traces of the notification
			f.forgetHash(hash)
			f.forgetBlock(hash)

		case <-fetchTimer.C:
			// At least one block's timer ran out, check for needing retrieval
			request := make(map[string][]common.Hash)

			for hash, announces := range f.announced {
				if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
					// Pick a random peer to retrieve from, reset all others
					announce := announces[rand.Intn(len(announces))]
					f.forgetHash(hash)

					// If the block still didn't arrive, queue for fetching
					if f.getBlock(hash) == nil {
						request[announce.origin] = append(request[announce.origin], hash)
						f.fetching[hash] = announce
					}
				}
			}
			// Send out all block header requests
			for peer, hashes := range request {
				log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)

				// Create a closure of the fetch and schedule in on a new thread
				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
				go func() {
					if f.fetchingHook != nil {
						f.fetchingHook(hashes)
					}
					for _, hash := range hashes {
						headerFetchMeter.Mark(1)
						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
					}
				}()
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleFetch(fetchTimer)

		case <-completeTimer.C:
			// At least one header's timer ran out, retrieve everything
			request := make(map[string][]common.Hash)

			for hash, announces := range f.fetched {
				// Pick a random peer to retrieve from, reset all others
				announce := announces[rand.Intn(len(announces))]
				f.forgetHash(hash)

				// If the block still didn't arrive, queue for completion
				if f.getBlock(hash) == nil {
					request[announce.origin] = append(request[announce.origin], hash)
					f.completing[hash] = announce
				}
			}
			// Send out all block body requests
			for peer, hashes := range request {
				log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)

				// Create a closure of the fetch and schedule in on a new thread
				if f.completingHook != nil {
					f.completingHook(hashes)
				}
				bodyFetchMeter.Mark(int64(len(hashes)))
				go f.completing[hashes[0]].fetchBodies(hashes)
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleComplete(completeTimer)

		case filter := <-f.headerFilter:
			// Headers arrived from a remote peer. Extract those that were explicitly
			// requested by the fetcher, and return everything else so it's delivered
			// to other parts of the system.
			var task *headerFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			headerFilterInMeter.Mark(int64(len(task.headers)))

			// Split the batch of headers into unknown ones (to return to the caller),
			// known incomplete ones (requiring body retrievals) and completed blocks.
			unknown, incomplete, complete := []*types.Header{}, []*blockAnnounce{}, []*types.Block{}
			for _, header := range task.headers {
				hash := header.Hash()

				// Filter fetcher-requested headers from other synchronisation algorithms
				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
					// If the delivered header does not match the promised number, drop the announcer
					if header.Number.Uint64() != announce.number {
						log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
						f.dropPeer(announce.origin)
						f.forgetHash(hash)
						continue
					}
					// Only keep if not imported by other means
					if f.getBlock(hash) == nil {
						announce.header = header
						announce.time = task.time

						// If the block is empty (header only), short circuit into the final import queue
						if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
							log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())

							block := types.NewBlockWithHeader(header)
							block.ReceivedAt = task.time

							complete = append(complete, block)
							f.completing[hash] = announce
							continue
						}
						// Otherwise add to the list of blocks needing completion
						incomplete = append(incomplete, announce)
					} else {
						log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
						f.forgetHash(hash)
					}
				} else {
					// BlockFetcher doesn't know about it, add to the return list
					unknown = append(unknown, header)
				}
			}
			headerFilterOutMeter.Mark(int64(len(unknown)))
			select {
			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
			case <-f.quit:
				return
			}
			// Schedule the retrieved headers for body completion
			for _, announce := range incomplete {
				hash := announce.header.Hash()
				if _, ok := f.completing[hash]; ok {
					continue
				}
				f.fetched[hash] = append(f.fetched[hash], announce)
				if len(f.fetched) == 1 {
					f.rescheduleComplete(completeTimer)
				}
			}
			// Schedule the header-only blocks for import
			for _, block := range complete {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}

		case filter := <-f.bodyFilter:
			// Block bodies arrived, extract any explicitly requested blocks, return the rest
			var task *bodyFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			bodyFilterInMeter.Mark(int64(len(task.transactions)))

			blocks := []*types.Block{}
			for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
				// Match up a body to any possible completion request
				matched := false

				for hash, announce := range f.completing {
					if f.queued[hash] == nil {
						txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
						uncleHash := types.CalcUncleHash(task.uncles[i])

						if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash && announce.origin == task.peer {
							// Mark the body matched, reassemble if still unknown
							matched = true

							if f.getBlock(hash) == nil {
								block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
								block.ReceivedAt = task.time

								blocks = append(blocks, block)
							} else {
								f.forgetHash(hash)
							}
						}
					}
				}
				// Matched bodies are removed from the task; i is rewound so the
				// element shifted into slot i is examined on the next iteration.
				if matched {
					task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
					task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
					i--
					continue
				}
			}

			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
			select {
			case filter <- task:
			case <-f.quit:
				return
			}
			// Schedule the retrieved blocks for ordered import
			for _, block := range blocks {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}
		}
	}
}

// rescheduleFetch resets the specified fetch timer to the next blockAnnounce timeout.
func (f *BlockFetcher) rescheduleFetch(fetch *time.Timer) {
	// Short circuit if no blocks are announced
	if len(f.announced) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.announced {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	fetch.Reset(arriveTimeout - time.Since(earliest))
}

// rescheduleComplete resets the specified completion timer to the next fetch timeout.
608 func (f *BlockFetcher) rescheduleComplete(complete *time.Timer) { 609 // Short circuit if no headers are fetched 610 if len(f.fetched) == 0 { 611 return 612 } 613 // Otherwise find the earliest expiring announcement 614 earliest := time.Now() 615 for _, announces := range f.fetched { 616 if earliest.After(announces[0].time) { 617 earliest = announces[0].time 618 } 619 } 620 complete.Reset(gatherSlack - time.Since(earliest)) 621 } 622 623 // enqueue schedules a new future import operation, if the block to be imported 624 // has not yet been seen. 625 func (f *BlockFetcher) enqueue(peer string, block *types.Block) { 626 hash := block.Hash() 627 628 // Ensure the peer isn't DOSing us 629 count := f.queues[peer] + 1 630 if count > blockLimit { 631 log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit) 632 blockBroadcastDOSMeter.Mark(1) 633 f.forgetHash(hash) 634 return 635 } 636 // Discard any past or too distant blocks 637 if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { 638 log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist) 639 blockBroadcastDropMeter.Mark(1) 640 f.forgetHash(hash) 641 return 642 } 643 // Schedule the block for future importing 644 if _, ok := f.queued[hash]; !ok { 645 op := &blockInject{ 646 origin: peer, 647 block: block, 648 } 649 f.queues[peer] = count 650 f.queued[hash] = op 651 f.queue.Push(op, -int64(block.NumberU64())) 652 if f.queueChangeHook != nil { 653 f.queueChangeHook(op.block.Hash(), true) 654 } 655 log.Debug("Queued propagated block", "peer", peer, "number", block.Number(), "hash", hash, "queued", f.queue.Size()) 656 } 657 } 658 659 // insert spawns a new goroutine to run a block insertion into the chain. 
// block's number is at the same height as the current import phase, it updates
// the phase states accordingly.
func (f *BlockFetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
	log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
	go func() {
		// Always signal completion so the main loop cleans up its state.
		defer func() { f.done <- hash }()

		// If the parent's unknown, abort insertion
		parent := f.getBlock(block.ParentHash())
		if parent == nil {
			log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
			return
		}
		// Quickly validate the header and propagate the block if it passes
		switch err := f.verifyHeader(block.Header()); err {
		case nil:
			// All ok, quickly propagate to our peers
			blockBroadcastOutTimer.UpdateSince(block.ReceivedAt)
			go f.broadcastBlock(block, true)

		case consensus.ErrFutureBlock:
			// Weird future block, don't fail, but neither propagate

		default:
			// Something went very wrong, drop the peer
			log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
			f.dropPeer(peer)
			return
		}
		// Run the actual import and log any issues
		if _, err := f.insertChain(types.Blocks{block}); err != nil {
			log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
			return
		}
		// If import succeeded, broadcast the block
		blockAnnounceOutTimer.UpdateSince(block.ReceivedAt)
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(block)
		}
	}()
}

// forgetHash removes all traces of a block announcement from the fetcher's
// internal state.
710 func (f *BlockFetcher) forgetHash(hash common.Hash) { 711 // Remove all pending announces and decrement DOS counters 712 for _, announce := range f.announced[hash] { 713 f.announces[announce.origin]-- 714 if f.announces[announce.origin] <= 0 { 715 delete(f.announces, announce.origin) 716 } 717 } 718 delete(f.announced, hash) 719 if f.announceChangeHook != nil { 720 f.announceChangeHook(hash, false) 721 } 722 // Remove any pending fetches and decrement the DOS counters 723 if announce := f.fetching[hash]; announce != nil { 724 f.announces[announce.origin]-- 725 if f.announces[announce.origin] <= 0 { 726 delete(f.announces, announce.origin) 727 } 728 delete(f.fetching, hash) 729 } 730 731 // Remove any pending completion requests and decrement the DOS counters 732 for _, announce := range f.fetched[hash] { 733 f.announces[announce.origin]-- 734 if f.announces[announce.origin] <= 0 { 735 delete(f.announces, announce.origin) 736 } 737 } 738 delete(f.fetched, hash) 739 740 // Remove any pending completions and decrement the DOS counters 741 if announce := f.completing[hash]; announce != nil { 742 f.announces[announce.origin]-- 743 if f.announces[announce.origin] <= 0 { 744 delete(f.announces, announce.origin) 745 } 746 delete(f.completing, hash) 747 } 748 } 749 750 // forgetBlock removes all traces of a queued block from the fetcher's internal 751 // state. 752 func (f *BlockFetcher) forgetBlock(hash common.Hash) { 753 if insert := f.queued[hash]; insert != nil { 754 f.queues[insert.origin]-- 755 if f.queues[insert.origin] == 0 { 756 delete(f.queues, insert.origin) 757 } 758 delete(f.queued, hash) 759 } 760 }