gitlab.com/aquachain/aquachain@v1.17.16-rc3.0.20221018032414-e3ddf1e1c055/aqua/fetcher/fetcher.go (about) 1 // Copyright 2018 The aquachain Authors 2 // This file is part of the aquachain library. 3 // 4 // The aquachain library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The aquachain library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the aquachain library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package fetcher contains the block announcement based synchronisation. 
package fetcher

import (
	"errors"
	"math/big"
	"math/rand"
	"sync"
	"time"

	"gitlab.com/aquachain/aquachain/common"
	"gitlab.com/aquachain/aquachain/common/log"
	"gitlab.com/aquachain/aquachain/common/prque"
	"gitlab.com/aquachain/aquachain/consensus"
	"gitlab.com/aquachain/aquachain/core/types"
	"gitlab.com/aquachain/aquachain/params"
)

const (
	arriveTimeout = 500 * time.Millisecond // Time allowance before an announced block is explicitly requested
	gatherSlack   = 100 * time.Millisecond // Interval used to collate almost-expired announces with fetches
	fetchTimeout  = 5 * time.Second        // Maximum allotted time to return an explicitly requested block
	maxUncleDist  = 7                      // Maximum allowed backward distance from the chain head
	maxQueueDist  = 32                     // Maximum allowed distance from the chain head to queue
	hashLimit     = 256                    // Maximum number of unique blocks a peer may have announced
	blockLimit    = 64                     // Maximum number of unique blocks a peer may have delivered
)

var (
	// errTerminated is returned by the fetcher's public entry points
	// (Notify, Enqueue) once Stop has closed the quit channel.
	errTerminated = errors.New("terminated")
)

// blockRetrievalFn is a callback type for retrieving a block from the local chain.
type blockRetrievalFn func(common.Hash) *types.Block

// headerRequesterFn is a callback type for sending a header retrieval request.
type headerRequesterFn func(common.Hash) error

// bodyRequesterFn is a callback type for sending a body retrieval request.
type bodyRequesterFn func([]common.Hash) error

// headerVerifierFn is a callback type to verify a block's header for fast propagation.
type headerVerifierFn func(header *types.Header) error

// blockBroadcasterFn is a callback type for broadcasting a block to connected peers.
type blockBroadcasterFn func(block *types.Block, propagate bool)

// chainHeightFn is a callback type to retrieve the current chain height.
type chainHeightFn func() uint64

// chainInsertFn is a callback type to insert a batch of blocks into the local chain.
type chainInsertFn func(types.Blocks) (int, error)

// peerDropFn is a callback type for dropping a peer detected as malicious.
type peerDropFn func(id string)

// announce is the hash notification of the availability of a new block in the
// network.
type announce struct {
	hash   common.Hash   // Hash of the block being announced
	number uint64        // Number of the block being announced (0 = unknown | old protocol)
	header *types.Header // Header of the block partially reassembled (new protocol)
	time   time.Time     // Timestamp of the announcement

	origin string // Identifier of the peer originating the notification

	fetchHeader headerRequesterFn // Fetcher function to retrieve the header of an announced block
	fetchBodies bodyRequesterFn   // Fetcher function to retrieve the body of an announced block
}

// headerFilterTask represents a batch of headers needing fetcher filtering.
type headerFilterTask struct {
	peer    string          // The source peer of block headers
	headers []*types.Header // Collection of headers to filter
	time    time.Time       // Arrival time of the headers
}

// bodyFilterTask represents a batch of block bodies (transactions and uncles)
// needing fetcher filtering.
type bodyFilterTask struct {
	peer         string                 // The source peer of block bodies
	transactions [][]*types.Transaction // Collection of transactions per block bodies
	uncles       [][]*types.Header      // Collection of uncles per block bodies
	time         time.Time              // Arrival time of the blocks' contents
}

// inject represents a scheduled import operation.
type inject struct {
	origin string       // Identifier of the peer the block came from
	block  *types.Block // Block to be imported
}

// Fetcher is responsible for accumulating block announcements from various peers
// and scheduling them for retrieval.
type Fetcher struct {
	// Various event channels
	notify chan *announce // Block hash announcements arriving via Notify
	inject chan *inject   // Complete propagated blocks arriving via Enqueue

	blockFilter  chan chan []*types.Block    // NOTE(review): no consumer visible in this file — presumably a leftover of the aqua/61 block filter; confirm before use
	headerFilter chan chan *headerFilterTask // Handshake channel used by FilterHeaders
	bodyFilter   chan chan *bodyFilterTask   // Handshake channel used by FilterBodies

	done chan common.Hash // Signals a finished import so loop can forget all traces of the hash
	quit chan struct{}    // Closed by Stop to terminate all operations

	// Announce states
	announces  map[string]int              // Per peer announce counts to prevent memory exhaustion
	announced  map[common.Hash][]*announce // Announced blocks, scheduled for fetching
	fetching   map[common.Hash]*announce   // Announced blocks, currently fetching
	fetched    map[common.Hash][]*announce // Blocks with headers fetched, scheduled for body retrieval
	completing map[common.Hash]*announce   // Blocks with headers, currently body-completing
	mu         *sync.Mutex                 // Guards writes to the announce-state maps; NOTE(review): several reads in loop() happen without it — verify intended

	// Block cache
	queue  *prque.Prque            // Queue containing the import operations (block number sorted)
	queues map[string]int          // Per peer block counts to prevent memory exhaustion
	queued map[common.Hash]*inject // Set of already queued blocks (to dedup imports)

	// Callbacks
	getBlock       blockRetrievalFn                    // Retrieves a block from the local chain
	verifyHeader   headerVerifierFn                    // Checks if a block's headers have a valid proof of work
	broadcastBlock blockBroadcasterFn                  // Broadcasts a block to connected peers
	chainHeight    chainHeightFn                       // Retrieves the current chain's height
	insertChain    chainInsertFn                       // Injects a batch of blocks into the chain
	dropPeer       peerDropFn                          // Drops a peer for misbehaving
	hashFunc       func(*big.Int) params.HeaderVersion // Chooses a hashing algorithm ("header version") based on block number

	// Testing hooks
	announceChangeHook func(common.Hash, bool) // Method to call upon adding or deleting a hash from the announce list
	queueChangeHook    func(common.Hash, bool) // Method to call upon adding or deleting a block from the import queue
	fetchingHook       func([]common.Hash)     // Method to call upon starting a block (aqua/61) or header (aqua/62) fetch
	completingHook     func([]common.Hash)     // Method to call upon starting a block body fetch (aqua/62)
	importedHook       func(*types.Block)      // Method to call upon successful block import (both aqua/61 and aqua/62)
}

// New creates a block fetcher to retrieve blocks based on hash announcements.
//
// hashfunc selects the header version (hash algorithm) for a block number; the
// remaining arguments are callbacks into the local chain and networking layer.
// The returned fetcher is inert until Start is called.
func New(hashfunc func(*big.Int) params.HeaderVersion, getBlock blockRetrievalFn, verifyHeader headerVerifierFn, broadcastBlock blockBroadcasterFn, chainHeight chainHeightFn, insertChain chainInsertFn, dropPeer peerDropFn) *Fetcher {
	return &Fetcher{
		hashFunc:       hashfunc,
		notify:         make(chan *announce),
		inject:         make(chan *inject),
		blockFilter:    make(chan chan []*types.Block),
		headerFilter:   make(chan chan *headerFilterTask),
		bodyFilter:     make(chan chan *bodyFilterTask),
		done:           make(chan common.Hash),
		quit:           make(chan struct{}),
		announces:      make(map[string]int),
		announced:      make(map[common.Hash][]*announce),
		fetching:       make(map[common.Hash]*announce),
		fetched:        make(map[common.Hash][]*announce),
		completing:     make(map[common.Hash]*announce),
		queue:          prque.New(nil),
		queues:         make(map[string]int),
		queued:         make(map[common.Hash]*inject),
		getBlock:       getBlock,
		verifyHeader:   verifyHeader,
		broadcastBlock: broadcastBlock,
		chainHeight:    chainHeight,
		insertChain:    insertChain,
		dropPeer:       dropPeer,
		mu:             new(sync.Mutex),
	}
}

// Start boots up the announcement based synchroniser, accepting and processing
// hash notifications and block fetches until termination requested.
func (f *Fetcher) Start() {
	go f.loop()
}

// Stop terminates the announcement based synchroniser, canceling all pending
// operations.
func (f *Fetcher) Stop() {
	close(f.quit)
}

// Notify announces the fetcher of the potential availability of a new block in
// the network.
196 func (f *Fetcher) Notify(peer string, hash common.Hash, number uint64, time time.Time, 197 headerFetcher headerRequesterFn, bodyFetcher bodyRequesterFn) error { 198 block := &announce{ 199 hash: hash, 200 number: number, 201 time: time, 202 origin: peer, 203 fetchHeader: headerFetcher, 204 fetchBodies: bodyFetcher, 205 } 206 select { 207 case f.notify <- block: 208 return nil 209 case <-f.quit: 210 return errTerminated 211 } 212 } 213 214 // Enqueue tries to fill gaps the the fetcher's future import queue. 215 func (f *Fetcher) Enqueue(peer string, block *types.Block) error { 216 op := &inject{ 217 origin: peer, 218 block: block, 219 } 220 select { 221 case f.inject <- op: 222 return nil 223 case <-f.quit: 224 return errTerminated 225 } 226 } 227 228 // FilterHeaders extracts all the headers that were explicitly requested by the fetcher, 229 // returning those that should be handled differently. 230 func (f *Fetcher) FilterHeaders(peer string, headers []*types.Header, time time.Time) []*types.Header { 231 log.Trace("Filtering headers", "peer", peer, "headers", len(headers)) 232 233 // Send the filter channel to the fetcher 234 filter := make(chan *headerFilterTask) 235 236 select { 237 case f.headerFilter <- filter: 238 case <-f.quit: 239 return nil 240 } 241 // Request the filtering of the header list 242 select { 243 case filter <- &headerFilterTask{peer: peer, headers: headers, time: time}: 244 case <-f.quit: 245 return nil 246 } 247 // Retrieve the headers remaining after filtering 248 select { 249 case task := <-filter: 250 return task.headers 251 case <-f.quit: 252 return nil 253 } 254 } 255 256 // FilterBodies extracts all the block bodies that were explicitly requested by 257 // the fetcher, returning those that should be handled differently. 
258 func (f *Fetcher) FilterBodies(peer string, transactions [][]*types.Transaction, uncles [][]*types.Header, time time.Time) ([][]*types.Transaction, [][]*types.Header) { 259 log.Trace("Filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles)) 260 defer log.Trace("Done filtering bodies", "peer", peer, "txs", len(transactions), "uncles", len(uncles)) 261 262 // Send the filter channel to the fetcher 263 filter := make(chan *bodyFilterTask) 264 265 select { 266 case f.bodyFilter <- filter: 267 case <-f.quit: 268 return nil, nil 269 } 270 // Request the filtering of the body list 271 select { 272 case filter <- &bodyFilterTask{peer: peer, transactions: transactions, uncles: uncles, time: time}: 273 case <-f.quit: 274 return nil, nil 275 } 276 // Retrieve the bodies remaining after filtering 277 select { 278 case task := <-filter: 279 return task.transactions, task.uncles 280 case <-f.quit: 281 return nil, nil 282 } 283 } 284 285 // Loop is the main fetcher loop, checking and processing various notification 286 // events. 
func (f *Fetcher) loop() {
	// Iterate the block fetching until a quit is requested.
	// Both timers are reused across iterations via rescheduleFetch /
	// rescheduleComplete instead of allocating new ones.
	fetchTimer := time.NewTimer(0)
	completeTimer := time.NewTimer(0)

	for {
		// Clean up any expired block fetches
		// (deleting from f.fetching while ranging is safe in Go).
		for hash, announce := range f.fetching {
			if time.Since(announce.time) > fetchTimeout {
				f.forgetHash(hash)
			}
		}
		// Import any queued blocks that could potentially fit
		height := f.chainHeight()
		for !f.queue.Empty() {
			op := f.queue.PopItem().(*inject)
			if f.queueChangeHook != nil {
				f.queueChangeHook(op.block.Hash(), false)
			}
			// If too high up the chain or phase, continue later
			number := op.block.NumberU64()
			if number > height+1 {
				// Re-queue with the same priority (negated number = min-heap on number)
				f.queue.Push(op, -int64(op.block.NumberU64()))
				if f.queueChangeHook != nil {
					f.queueChangeHook(op.block.Hash(), true)
				}
				break
			}
			// Otherwise if fresh and still unknown, try and import
			hash := op.block.Hash()
			if number+maxUncleDist < height || f.getBlock(hash) != nil {
				f.forgetBlock(hash)
				continue
			}
			f.insert(op.origin, op.block)
		}
		// Wait for an outside event to occur
		select {
		case <-f.quit:
			// Fetcher terminating, abort all operations
			return

		case notification := <-f.notify:
			// A block was announced, make sure the peer isn't DOSing us
			propAnnounceInMeter.Mark(1)

			count := f.announces[notification.origin] + 1
			if count > hashLimit {
				log.Debug("Peer exceeded outstanding announces", "peer", notification.origin, "limit", hashLimit)
				propAnnounceDOSMeter.Mark(1)
				break
			}
			// If we have a valid block number, check that it's potentially useful
			if notification.number > 0 {
				if dist := int64(notification.number) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist {
					log.Debug("Peer discarded announcement", "peer", notification.origin, "number", notification.number, "hash", notification.hash, "distance", dist)
					propAnnounceDropMeter.Mark(1)
					break
				}
			}
			// All is well, schedule the announce if block's not yet downloading
			if _, ok := f.fetching[notification.hash]; ok {
				break
			}
			if _, ok := f.completing[notification.hash]; ok {
				break
			}
			log.Trace("block announced, getting lock")
			f.mu.Lock()
			f.announces[notification.origin] = count
			f.announced[notification.hash] = append(f.announced[notification.hash], notification)
			f.mu.Unlock()
			log.Trace("block announced, unlocked")
			if f.announceChangeHook != nil && len(f.announced[notification.hash]) == 1 {
				f.announceChangeHook(notification.hash, true)
			}
			// First pending announce: arm the fetch timer
			if len(f.announced) == 1 {
				f.rescheduleFetch(fetchTimer)
			}

		case op := <-f.inject:
			// A direct block insertion was requested, try and fill any pending gaps
			propBroadcastInMeter.Mark(1)
			f.enqueue(op.origin, op.block)

		case hash := <-f.done:
			// A pending import finished, remove all traces of the notification
			f.forgetHash(hash)
			f.forgetBlock(hash)

		case <-fetchTimer.C:
			// At least one block's timer ran out, check for needing retrieval
			request := make(map[string][]common.Hash)

			for hash, announces := range f.announced {
				if time.Since(announces[0].time) > arriveTimeout-gatherSlack {
					// Pick a random peer to retrieve from, reset all others
					announce := announces[rand.Intn(len(announces))]
					f.forgetHash(hash)

					// If the block still didn't arrive, queue for fetching
					if f.getBlock(hash) == nil {
						request[announce.origin] = append(request[announce.origin], hash)
						f.fetching[hash] = announce
					}
				}
			}
			// Send out all block header requests
			for peer, hashes := range request {
				log.Trace("Fetching scheduled headers", "peer", peer, "list", hashes)

				// Create a closure of the fetch and schedule in on a new thread
				fetchHeader, hashes := f.fetching[hashes[0]].fetchHeader, hashes
				go func() {
					// NOTE(review): holding f.mu for the duration of the network
					// requests below serializes all header fetches — verify intended
					f.mu.Lock()
					defer f.mu.Unlock()
					if f.fetchingHook != nil {
						f.fetchingHook(hashes)
					}
					for _, hash := range hashes {
						headerFetchMeter.Mark(1)
						fetchHeader(hash) // Suboptimal, but protocol doesn't allow batch header retrievals
					}
				}()
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleFetch(fetchTimer)

		case <-completeTimer.C:
			// At least one header's timer ran out, retrieve everything
			request := make(map[string][]common.Hash)

			for hash, announces := range f.fetched {
				// Pick a random peer to retrieve from, reset all others
				announce := announces[rand.Intn(len(announces))]
				f.forgetHash(hash)

				// If the block still didn't arrive, queue for completion
				if f.getBlock(hash) == nil {
					log.Trace("timer ran out, getting lock")
					f.mu.Lock()
					request[announce.origin] = append(request[announce.origin], hash)
					f.completing[hash] = announce
					f.mu.Unlock()
					log.Trace("timer ran out, released lock")
				}
			}
			// Send out all block body requests
			for peer, hashes := range request {
				log.Trace("Fetching scheduled bodies", "peer", peer, "list", hashes)

				// Create a closure of the fetch and schedule in on a new thread
				if f.completingHook != nil {
					f.completingHook(hashes)
				}
				bodyFetchMeter.Mark(int64(len(hashes)))
				go f.completing[hashes[0]].fetchBodies(hashes)
			}
			// Schedule the next fetch if blocks are still pending
			f.rescheduleComplete(completeTimer)

		case filter := <-f.headerFilter:
			// Headers arrived from a remote peer. Extract those that were explicitly
			// requested by the fetcher, and return everything else so it's delivered
			// to other parts of the system.
			var task *headerFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			headerFilterInMeter.Mark(int64(len(task.headers)))

			// Split the batch of headers into unknown ones (to return to the caller),
			// known incomplete ones (requiring body retrievals) and completed blocks.
			unknown, incomplete, complete := []*types.Header{}, []*announce{}, []*types.Block{}
			for _, header := range task.headers {
				// Stamp the header version before hashing, since the hash
				// depends on the selected algorithm
				header.SetVersion(byte(f.hashFunc(header.Number)))
				hash := header.Hash()

				// Filter fetcher-requested headers from other synchronisation algorithms
				if announce := f.fetching[hash]; announce != nil && announce.origin == task.peer && f.fetched[hash] == nil && f.completing[hash] == nil && f.queued[hash] == nil {
					// If the delivered header does not match the promised number, drop the announcer
					if header.Number.Uint64() != announce.number {
						log.Trace("Invalid block number fetched", "peer", announce.origin, "hash", header.Hash(), "announced", announce.number, "provided", header.Number)
						f.dropPeer(announce.origin)
						f.forgetHash(hash)
						continue
					}
					// Only keep if not imported by other means
					if f.getBlock(hash) == nil {
						announce.header = header
						announce.time = task.time

						// If the block is empty (header only), short circuit into the final import queue
						if header.TxHash == types.DeriveSha(types.Transactions{}) && header.UncleHash == types.CalcUncleHash([]*types.Header{}) {
							log.Trace("Block empty, skipping body retrieval", "peer", announce.origin, "number", header.Number, "hash", header.Hash())

							block := types.NewBlockWithHeader(header)
							block.ReceivedAt = task.time

							complete = append(complete, block)
							log.Trace("getting lock")
							f.mu.Lock()
							f.completing[hash] = announce
							f.mu.Unlock()
							log.Trace("lock done")
							continue
						}
						// Otherwise add to the list of blocks needing completion
						incomplete = append(incomplete, announce)
					} else {
						log.Trace("Block already imported, discarding header", "peer", announce.origin, "number", header.Number, "hash", header.Hash())
						f.forgetHash(hash)
					}
				} else {
					// Fetcher doesn't know about it, add to the return list
					unknown = append(unknown, header)
				}
			}
			headerFilterOutMeter.Mark(int64(len(unknown)))
			select {
			case filter <- &headerFilterTask{headers: unknown, time: task.time}:
			case <-f.quit:
				return
			}
			// Schedule the retrieved headers for body completion
			for _, announce := range incomplete {
				hash := announce.header.Hash()
				if _, ok := f.completing[hash]; ok {
					continue
				}

				log.Trace("body getting lock")
				f.mu.Lock()
				f.fetched[hash] = append(f.fetched[hash], announce)
				f.mu.Unlock()
				log.Trace("body released lock")
				// First fetched header: arm the completion timer
				if len(f.fetched) == 1 {
					f.rescheduleComplete(completeTimer)
				}
			}
			// Schedule the header-only blocks for import
			for _, block := range complete {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}

		case filter := <-f.bodyFilter:
			// Block bodies arrived, extract any explicitly requested blocks, return the rest
			var task *bodyFilterTask
			select {
			case task = <-filter:
			case <-f.quit:
				return
			}
			bodyFilterInMeter.Mark(int64(len(task.transactions)))

			blocks := []*types.Block{}
			for i := 0; i < len(task.transactions) && i < len(task.uncles); i++ {
				// Match up a body to any possible completion request
				matched := false

				for hash, announce := range f.completing {
					if f.queued[hash] == nil {
						txnHash := types.DeriveSha(types.Transactions(task.transactions[i]))
						uncleHash := types.CalcUncleHash(task.uncles[i])

						if txnHash == announce.header.TxHash && uncleHash == announce.header.UncleHash && announce.origin == task.peer {
							// Mark the body matched, reassemble if still unknown
							matched = true

							if f.getBlock(hash) == nil {
								block := types.NewBlockWithHeader(announce.header).WithBody(task.transactions[i], task.uncles[i])
								block.ReceivedAt = task.time

								blocks = append(blocks, block)
							} else {
								f.forgetHash(hash)
							}
						}
					}
				}
				if matched {
					// Remove the consumed body from the task and revisit index i,
					// which now holds the next element
					task.transactions = append(task.transactions[:i], task.transactions[i+1:]...)
					task.uncles = append(task.uncles[:i], task.uncles[i+1:]...)
					i--
					continue
				}
			}

			bodyFilterOutMeter.Mark(int64(len(task.transactions)))
			select {
			case filter <- task:
			case <-f.quit:
				return
			}
			// Schedule the retrieved blocks for ordered import
			for _, block := range blocks {
				if announce := f.completing[block.Hash()]; announce != nil {
					f.enqueue(announce.origin, block)
				}
			}
		}
	}
}

// rescheduleFetch resets the specified fetch timer to the next announce timeout.
func (f *Fetcher) rescheduleFetch(fetch *time.Timer) {
	// Short circuit if no blocks are announced
	if len(f.announced) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.announced {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	fetch.Reset(arriveTimeout - time.Since(earliest))
}

// rescheduleComplete resets the specified completion timer to the next
// fetched-header timeout (gatherSlack past the earliest arrival).
func (f *Fetcher) rescheduleComplete(complete *time.Timer) {
	// Short circuit if no headers are fetched
	if len(f.fetched) == 0 {
		return
	}
	// Otherwise find the earliest expiring announcement
	earliest := time.Now()
	for _, announces := range f.fetched {
		if earliest.After(announces[0].time) {
			earliest = announces[0].time
		}
	}
	complete.Reset(gatherSlack - time.Since(earliest))
}

// enqueue schedules a new future import operation, if the block to be imported
// has not yet been seen.
629 func (f *Fetcher) enqueue(peer string, block *types.Block) { 630 hash := block.Hash() 631 632 // Ensure the peer isn't DOSing us 633 count := f.queues[peer] + 1 634 if count > blockLimit { 635 log.Debug("Discarded propagated block, exceeded allowance", "peer", peer, "number", block.Number(), "hash", hash, "limit", blockLimit) 636 propBroadcastDOSMeter.Mark(1) 637 f.forgetHash(hash) 638 return 639 } 640 // Discard any past or too distant blocks 641 if dist := int64(block.NumberU64()) - int64(f.chainHeight()); dist < -maxUncleDist || dist > maxQueueDist { 642 log.Debug("Discarded propagated block, too far away", "peer", peer, "number", block.Number(), "hash", hash, "distance", dist) 643 propBroadcastDropMeter.Mark(1) 644 f.forgetHash(hash) 645 return 646 } 647 // Schedule the block for future importing 648 if _, ok := f.queued[hash]; !ok { 649 op := &inject{ 650 origin: peer, 651 block: block, 652 } 653 f.queues[peer] = count 654 f.queued[hash] = op 655 f.queue.Push(op, -int64(block.NumberU64())) 656 if f.queueChangeHook != nil { 657 f.queueChangeHook(op.block.Hash(), true) 658 } 659 log.Debug("Queued propagated block", "peer", peer, "number", block.Number(), "hash", hash, "queued", f.queue.Size()) 660 } 661 } 662 663 // insert spawns a new goroutine to run a block insertion into the chain. If the 664 // block's number is at the same height as the current import phase, if updates 665 // the phase states accordingly. 
func (f *Fetcher) insert(peer string, block *types.Block) {
	hash := block.Hash()

	// Run the import on a new thread
	log.Debug("Importing propagated block", "peer", peer, "number", block.Number(), "hash", hash)
	go func() {
		// Always signal completion so loop() can clear all announce/queue
		// traces of this hash, whatever the outcome of the import.
		defer func() { f.done <- hash }()

		// If the parent's unknown, abort insertion
		parent := f.getBlock(block.ParentHash())
		if parent == nil {
			log.Debug("Unknown parent of propagated block", "peer", peer, "number", block.Number(), "hash", hash, "parent", block.ParentHash())
			return
		}
		// Quickly validate the header and propagate the block if it passes
		switch err := f.verifyHeader(block.Header()); err {
		case nil:
			// All ok, quickly propagate to our peers
			// (full propagation happens before the import so peers learn of
			// the block as early as possible)
			propBroadcastOutTimer.UpdateSince(block.ReceivedAt)
			go f.broadcastBlock(block, true)

		case consensus.ErrFutureBlock:
			// Weird future block, don't fail, but neither propagate

		default:
			// Something went very wrong, drop the peer
			log.Debug("Propagated block verification failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
			f.dropPeer(peer)
			return
		}
		// Run the actual import and log any issues
		if _, err := f.insertChain(types.Blocks{block}); err != nil {
			log.Debug("Propagated block import failed", "peer", peer, "number", block.Number(), "hash", hash, "err", err)
			return
		}
		// If import succeeded, broadcast the block (announce-only pass)
		propAnnounceOutTimer.UpdateSince(block.ReceivedAt)
		go f.broadcastBlock(block, false)

		// Invoke the testing hook if needed
		if f.importedHook != nil {
			f.importedHook(block)
		}
	}()
}

// forgetHash removes all traces of a block announcement from the fetcher's
// internal state.
714 func (f *Fetcher) forgetHash(hash common.Hash) { 715 f.mu.Lock() 716 defer f.mu.Unlock() 717 // Remove all pending announces and decrement DOS counters 718 for _, announce := range f.announced[hash] { 719 f.announces[announce.origin]-- 720 if f.announces[announce.origin] == 0 { 721 delete(f.announces, announce.origin) 722 } 723 } 724 delete(f.announced, hash) 725 if f.announceChangeHook != nil { 726 f.announceChangeHook(hash, false) 727 } 728 // Remove any pending fetches and decrement the DOS counters 729 if announce := f.fetching[hash]; announce != nil { 730 f.announces[announce.origin]-- 731 if f.announces[announce.origin] == 0 { 732 delete(f.announces, announce.origin) 733 } 734 delete(f.fetching, hash) 735 } 736 737 // Remove any pending completion requests and decrement the DOS counters 738 for _, announce := range f.fetched[hash] { 739 f.announces[announce.origin]-- 740 if f.announces[announce.origin] == 0 { 741 delete(f.announces, announce.origin) 742 } 743 } 744 delete(f.fetched, hash) 745 746 // Remove any pending completions and decrement the DOS counters 747 if announce := f.completing[hash]; announce != nil { 748 f.announces[announce.origin]-- 749 if f.announces[announce.origin] == 0 { 750 delete(f.announces, announce.origin) 751 } 752 delete(f.completing, hash) 753 } 754 } 755 756 // forgetBlock removes all traces of a queued block from the fetcher's internal 757 // state. 758 func (f *Fetcher) forgetBlock(hash common.Hash) { 759 f.mu.Lock() 760 defer f.mu.Unlock() 761 if insert := f.queued[hash]; insert != nil { 762 f.queues[insert.origin]-- 763 if f.queues[insert.origin] == 0 { 764 delete(f.queues, insert.origin) 765 } 766 delete(f.queued, hash) 767 } 768 }