github.com/Blockdaemon/celo-blockchain@v0.0.0-20200129231733-e667f6b08419/eth/downloader/downloader.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package downloader contains the manual full chain synchronisation.
package downloader

import (
	"errors"
	"fmt"
	"math"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/params"
)

var (
	MaxHashFetch        = 512 // Amount of hashes to be fetched per retrieval request
	MaxBlockFetch       = 128 // Amount of blocks to be fetched per retrieval request
	MaxHeaderFetch      = 192 // Amount of block headers to be fetched per retrieval request
	MaxEpochHeaderFetch = 192 // Number of epoch block headers to fetch (only used in IBFT consensus + UltraLight sync mode)
	MaxSkeletonSize     = 128 // Number of header fetches needed for a skeleton assembly
	MaxBodyFetch        = 128 // Amount of block bodies to be fetched per retrieval request
	MaxReceiptFetch     = 256 // Amount of transaction receipts to allow fetching per request
	MaxStateFetch       = 384 // Amount of node state values to allow fetching per request

	MaxForkAncestry  = 3 * params.EpochDuration // Maximum chain reorganisation
	rttMinEstimate   = 2 * time.Second          // Minimum round-trip time to target for download requests
	rttMaxEstimate   = 20 * time.Second         // Maximum round-trip time to target for download requests
	rttMinConfidence = 0.1                      // Worst confidence factor in our estimated RTT value
	ttlScaling       = 3                        // Constant scaling factor for RTT -> TTL conversion
	ttlLimit         = time.Minute              // Maximum TTL allowance to prevent reaching crazy timeouts

	qosTuningPeers   = 5    // Number of peers to tune based on (best peers)
	qosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence
	qosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value

	maxQueuedHeaders  = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
	maxHeadersProcess = 2048      // Number of header download results to import at once into the chain
	maxResultsProcess = 2048      // Number of content download results to import at once into the chain

	reorgProtThreshold   = 48 // Threshold number of recent blocks to disable mini reorg protection
	reorgProtHeaderDelay = 2  // Number of headers to delay delivering to cover mini reorgs

	fsHeaderCheckFrequency = 100             // Verification frequency of the downloaded headers during fast sync
	fsHeaderSafetyNet      = 2048            // Number of headers to discard in case a chain violation is detected
	fsHeaderForceVerify    = 24              // Number of headers to verify before and after the pivot to accept it
	fsHeaderContCheck      = 3 * time.Second // Time interval to check for header continuations during state download
	fsMinFullBlocks        = 64              // Number of blocks to retrieve fully even in fast sync
)

var (
	errBusy                    = errors.New("busy")
	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
	errBadPeer                 = errors.New("action from bad peer ignored")
	errStallingPeer            = errors.New("peer is stalling")
	errNoPeers                 = errors.New("no peers to keep download active")
	errTimeout                 = errors.New("timeout")
	errEmptyHeaderSet          = errors.New("empty header set by peer")
	errPeersUnavailable        = errors.New("no peers available or all tried for download")
	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
	errInvalidChain            = errors.New("retrieved hash chain is invalid")
	errInvalidBlock            = errors.New("retrieved block is invalid")
	errInvalidBody             = errors.New("retrieved block body is invalid")
	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
	errCancelBlockFetch        = errors.New("block download canceled (requested)")
	errCancelHeaderFetch       = errors.New("block header download canceled (requested)")
	errCancelBodyFetch         = errors.New("block body download canceled (requested)")
	errCancelReceiptFetch      = errors.New("receipt download canceled (requested)")
	errCancelStateFetch        = errors.New("state data download canceled (requested)")
	errCancelHeaderProcessing  = errors.New("header processing canceled (requested)")
	errCancelContentProcessing = errors.New("content processing canceled (requested)")
	errNoSyncActive            = errors.New("no sync active")
	errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
)

// If you are adding a new field, add it at the bottom. Otherwise you can end up
// making some uint64 field unaligned to an 8-byte boundary. That seems fine on ARM,
// but on X86 (emulator) atomic loading of 64-bit variables causes a confusing crash.
// Some fields, like rttEstimate, are loaded atomically with atomic.LoadUint64().
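//
// Illustrative sketch (hypothetical layout, not part of the original source):
// Go's sync/atomic package only guarantees atomic access to 64-bit words that
// are 64-bit aligned, and on 32-bit platforms struct fields get no more than
// 32-bit alignment by default. A layout such as
//
//	type bad struct {
//		flag    int32
//		counter uint64 // may land on a 4-byte boundary: atomic.LoadUint64 can fault
//	}
//
// shows why new fields should be appended after the atomically accessed ones
// rather than inserted above them.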

type Downloader struct {
	Mode SyncMode       // Synchronisation mode defining the strategy used (per sync cycle)
	mux  *event.TypeMux // Event multiplexer to announce sync operation events

	genesis uint64   // Genesis block number to limit sync to (e.g. light client CHT)
	queue   *queue   // Scheduler for selecting the hashes to download
	peers   *peerSet // Set of active peers from which download can proceed
	stateDB ethdb.Database

	rttEstimate   uint64 // Round trip time to target for download requests
	rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)

	// Statistics
	syncStatsChainOrigin uint64 // Origin block number where syncing started at
	syncStatsChainHeight uint64 // Highest block number known when syncing started
	syncStatsState       stateSyncStats
	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields

	lightchain LightChain
	blockchain BlockChain

	// Callbacks
	dropPeer peerDropFn // Drops a peer for misbehaving

	// Status
	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
	synchronising   int32
	notified        int32
	committed       int32

	// Channels
	headerCh      chan dataPack        // [eth/62] Channel receiving inbound block headers
	bodyCh        chan dataPack        // [eth/62] Channel receiving inbound block bodies
	receiptCh     chan dataPack        // [eth/63] Channel receiving inbound receipts
	bodyWakeCh    chan bool            // [eth/62] Channel to signal the block body fetcher of new tasks
	receiptWakeCh chan bool            // [eth/63] Channel to signal the receipt fetcher of new tasks
	headerProcCh  chan []*types.Header // [eth/62] Channel to feed the header processor new tasks

	// for stateFetcher
	stateSyncStart chan *stateSync
	trackStateReq  chan *stateReq
	stateCh        chan dataPack // [eth/63] Channel receiving inbound node state data

	// Cancellation and termination
	cancelPeer string         // Identifier of the peer currently being used as the master (cancel on drop)
	cancelCh   chan struct{}  // Channel to cancel mid-flight syncs
	cancelLock sync.RWMutex   // Lock to protect the cancel channel and peer in delivers
	cancelWg   sync.WaitGroup // Make sure all fetcher goroutines have exited.

	quitCh        chan struct{} // Quit channel to signal termination
	quitLock      sync.RWMutex  // Lock to prevent double closes
	epoch         uint64        // Epoch value is useful in IBFT consensus
	ibftConsensus bool          // True if we are in IBFT consensus mode

	// Testing hooks
	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
}

// LightChain encapsulates functions required to synchronise a light chain.
type LightChain interface {
	// HasHeader verifies a header's presence in the local chain.
	HasHeader(common.Hash, uint64) bool

	// GetHeaderByHash retrieves a header from the local chain.
	GetHeaderByHash(common.Hash) *types.Header

	// CurrentHeader retrieves the head header from the local chain.
	CurrentHeader() *types.Header

	// GetTd returns the total difficulty of a local block.
	GetTd(common.Hash, uint64) *big.Int

	// InsertHeaderChain inserts a batch of headers into the local chain.
	InsertHeaderChain([]*types.Header, int, bool) (int, error)

	// Rollback removes a few recently added elements from the local chain.
	Rollback([]common.Hash, bool)

	Config() *params.ChainConfig
}
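
// Illustrative note (hypothetical, not present in this file): the concrete
// chains used by the node are expected to satisfy these interfaces, and a
// consuming package could document that with a compile-time assertion such as
//
//	var _ downloader.BlockChain = (*core.BlockChain)(nil)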

// BlockChain encapsulates functions required to sync a (full or fast) blockchain.
type BlockChain interface {
	LightChain

	// HasBlock verifies a block's presence in the local chain.
	HasBlock(common.Hash, uint64) bool

	// HasFastBlock verifies a fast block's presence in the local chain.
	HasFastBlock(common.Hash, uint64) bool

	// GetBlockByHash retrieves a block from the local chain.
	GetBlockByHash(common.Hash) *types.Block

	// CurrentBlock retrieves the head block from the local chain.
	CurrentBlock() *types.Block

	// CurrentFastBlock retrieves the head fast block from the local chain.
	CurrentFastBlock() *types.Block

	// FastSyncCommitHead directly commits the head block to a certain entity.
	FastSyncCommitHead(common.Hash) error

	// InsertChain inserts a batch of blocks into the local chain.
	InsertChain(types.Blocks) (int, error)

	// InsertReceiptChain inserts a batch of receipts into the local chain.
	InsertReceiptChain(types.Blocks, []types.Receipts) (int, error)
}

// New creates a new downloader to fetch hashes and blocks from remote peers.
func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain,
	dropPeer peerDropFn) *Downloader {
	if lightchain == nil {
		lightchain = chain
	}
	ibftConsensus := false
	epoch := uint64(0)
	if chain != nil && chain.Config() != nil && chain.Config().Istanbul != nil {
		epoch = chain.Config().Istanbul.Epoch
		ibftConsensus = true
	} else if lightchain != nil && lightchain.Config() != nil && lightchain.Config().Istanbul != nil {
		epoch = lightchain.Config().Istanbul.Epoch
		ibftConsensus = true
	}
	if epoch > math.MaxInt32 {
		panic(fmt.Sprintf("epoch is too big(%d), the code to fetch epoch headers casts epoch to an int to calculate value for skip variable", epoch))
	}

	dl := &Downloader{
		Mode:           mode,
		stateDB:        stateDb,
		mux:            mux,
		queue:          newQueue(),
		peers:          newPeerSet(),
		rttEstimate:    uint64(rttMaxEstimate),
		rttConfidence:  uint64(1000000),
		blockchain:     chain,
		lightchain:     lightchain,
		dropPeer:       dropPeer,
		headerCh:       make(chan dataPack, 1),
		bodyCh:         make(chan dataPack, 1),
		receiptCh:      make(chan dataPack, 1),
		bodyWakeCh:     make(chan bool, 1),
		receiptWakeCh:  make(chan bool, 1),
		headerProcCh:   make(chan []*types.Header, 1),
		quitCh:         make(chan struct{}),
		stateCh:        make(chan dataPack),
		stateSyncStart: make(chan *stateSync),
		syncStatsState: stateSyncStats{
			processed: rawdb.ReadFastTrieProgress(stateDb),
		},
		trackStateReq: make(chan *stateReq),
		ibftConsensus: ibftConsensus,
		epoch:         epoch,
	}
	go dl.qosTuner()
	go dl.stateFetcher()
	return dl
}
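
// Illustrative sketch of typical construction (hypothetical wiring and names;
// the real call site lives in the eth protocol manager, not in this file):
//
//	dl := downloader.New(downloader.FastSync, chainDb, eventMux, blockchain, nil, removePeer)
//
// where removePeer is a peerDropFn that disconnects a misbehaving peer by id.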

// Progress retrieves the synchronisation boundaries, specifically the origin
// block where synchronisation started at (may have failed/suspended); the block
// or header sync is currently at; and the latest known block which the sync targets.
//
// In addition, during the state download phase of fast synchronisation the number
// of processed and the total number of known states are also returned. Otherwise
// these are zero.
func (d *Downloader) Progress() ethereum.SyncProgress {
	// Lock the current stats and return the progress
	d.syncStatsLock.RLock()
	defer d.syncStatsLock.RUnlock()

	current := uint64(0)
	switch d.Mode {
	case FullSync:
		current = d.blockchain.CurrentBlock().NumberU64()
	case FastSync:
		current = d.blockchain.CurrentFastBlock().NumberU64()
	case LightSync:
		fallthrough
	case UltraLightSync:
		current = d.lightchain.CurrentHeader().Number.Uint64()
	}
	log.Debug(fmt.Sprintf("Current head is %v", current))
	return ethereum.SyncProgress{
		StartingBlock: d.syncStatsChainOrigin,
		CurrentBlock:  current,
		HighestBlock:  d.syncStatsChainHeight,
		PulledStates:  d.syncStatsState.processed,
		KnownStates:   d.syncStatsState.processed + d.syncStatsState.pending,
	}
}

// Synchronising returns whether the downloader is currently retrieving blocks.
func (d *Downloader) Synchronising() bool {
	return atomic.LoadInt32(&d.synchronising) > 0
}

// RegisterPeer injects a new download peer into the set of block sources to be
// used for fetching hashes and blocks from.
func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
	logger := log.New("peer", id)
	logger.Trace("Registering sync peer")
	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
		logger.Error("Failed to register sync peer", "err", err)
		return err
	}
	d.qosReduceConfidence()

	return nil
}

// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error {
	return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
}

// UnregisterPeer removes a peer from the known list, preventing any action from
// the specified peer. An effort is also made to return any pending fetches into
// the queue.
func (d *Downloader) UnregisterPeer(id string) error {
	// Unregister the peer from the active peer set and revoke any fetch tasks
	logger := log.New("peer", id)
	logger.Trace("Unregistering sync peer")
	if err := d.peers.Unregister(id); err != nil {
		logger.Error("Failed to unregister sync peer", "err", err)
		return err
	}
	d.queue.Revoke(id)

	// If this peer was the master peer, abort sync immediately
	d.cancelLock.RLock()
	master := id == d.cancelPeer
	d.cancelLock.RUnlock()

	if master {
		d.cancel()
	}
	return nil
}
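
// Illustrative sketch (hypothetical caller, not from this file): Progress above
// is the kind of snapshot that typically backs an eth_syncing style RPC answer:
//
//	p := dl.Progress()
//	syncing := p.CurrentBlock < p.HighestBlock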

// Synchronise tries to sync up our local blockchain with a remote peer, both
// adding various sanity checks and wrapping it with various log entries.
func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
	err := d.synchronise(id, head, td, mode)
	switch err {
	case nil:
	case errBusy:

	case errTimeout, errBadPeer, errStallingPeer,
		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
		errInvalidAncestor, errInvalidChain:
		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
		if d.dropPeer == nil {
			// The dropPeer method is nil when `--copydb` is used for a local copy.
			// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
			log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
		} else {
			d.dropPeer(id)
		}
	default:
		log.Warn("Synchronisation failed, retrying", "err", err)
	}
	return err
}

// synchronise will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronise if its TD is higher than our own. If any of the
// checks fail, an error will be returned. This method is synchronous.
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
	// Mock out the synchronisation if testing
	if d.synchroniseMock != nil {
		return d.synchroniseMock(id, hash)
	}
	// Make sure only one goroutine is ever allowed past this point at once
	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&d.synchronising, 0)

	// Post a user notification of the sync (only once per session)
	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
		log.Info("Block synchronisation started")
	}
	// Reset the queue, peer set and wake channels to clean any internal leftover state
	d.queue.Reset()
	d.peers.Reset()

	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
		select {
		case <-ch:
		default:
		}
	}
	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
		for empty := false; !empty; {
			select {
			case <-ch:
			default:
				empty = true
			}
		}
	}
	for empty := false; !empty; {
		select {
		case <-d.headerProcCh:
		default:
			empty = true
		}
	}
	// Create cancel channel for aborting mid-flight and mark the master peer
	d.cancelLock.Lock()
	d.cancelCh = make(chan struct{})
	d.cancelPeer = id
	d.cancelLock.Unlock()

	defer d.Cancel() // No matter what, we can't leave the cancel channel open

	// Set the requested sync mode, unless it's forbidden
	d.Mode = mode

	// Retrieve the origin peer and initiate the downloading process
	p := d.peers.Peer(id)
	if p == nil {
		return errUnknownPeer
	}
	return d.syncWithPeer(p, hash, td)
}
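
// Illustrative sketch (hypothetical caller and names): a protocol manager's
// sync loop would typically pick its best peer and invoke Synchronise roughly as
//
//	head, td := peer.Head()
//	if err := dl.Synchronise(peer.ID(), head, td, downloader.FullSync); err != nil {
//		// errBusy is benign; fatal peer errors were already logged and the peer dropped
//	}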

// syncWithPeer starts a block synchronisation based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
	d.mux.Post(StartEvent{})
	defer func() {
		// reset on error
		if err != nil {
			d.mux.Post(FailedEvent{err})
		} else {
			d.mux.Post(DoneEvent{})
		}
	}()
	if p.version < 62 {
		return errTooOld
	}

	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.Mode)
	defer func(start time.Time) {
		log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
	}(time.Now())

	// Look up the sync boundaries: the common ancestor and the target block
	latest, err := d.fetchHeight(p)
	if err != nil {
		return err
	}
	height := latest.Number.Uint64()

	origin, err := d.findAncestor(p, latest)
	if err != nil {
		return err
	}
	d.syncStatsLock.Lock()
	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
		d.syncStatsChainOrigin = origin
	}
	log.Debug(fmt.Sprintf("After the check origin is %d height is %d", origin, height))
	d.syncStatsChainHeight = height
	d.syncStatsLock.Unlock()

	// Ensure our origin point is below any fast sync pivot point
	pivot := uint64(0)
	if d.Mode == FastSync {
		if height <= uint64(fsMinFullBlocks) {
			origin = 0
		} else {
			pivot = height - uint64(fsMinFullBlocks)
			if pivot <= origin {
				origin = pivot - 1
			}
		}
	}
	d.committed = 1
	if d.Mode == FastSync && pivot != 0 {
		d.committed = 0
	}
	// Initiate the sync using a concurrent header and content retrieval algorithm
	d.queue.Prepare(origin+1, d.Mode)
	if d.syncInitHook != nil {
		d.syncInitHook(origin, height)
	}

	fetchers := []func() error{
		func() error { return d.fetchHeaders(p, origin+1, pivot, height) }, // Headers are always retrieved
		func() error { return d.fetchBodies(origin + 1) },                  // Bodies are retrieved during normal and fast sync
		func() error { return d.fetchReceipts(origin + 1) },                // Receipts are retrieved during fast sync
		func() error { return d.processHeaders(origin+1, pivot, td) },
	}
	if d.Mode == FastSync {
		fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) })
	} else if d.Mode == FullSync {
		fetchers = append(fetchers, d.processFullSyncContent)
	}
	return d.spawnSync(fetchers)
}

// spawnSync runs d.process and all given fetcher functions to completion in
// separate goroutines, returning the first error that appears.
func (d *Downloader) spawnSync(fetchers []func() error) error {
	errc := make(chan error, len(fetchers))
	d.cancelWg.Add(len(fetchers))
	for _, fn := range fetchers {
		fn := fn
		go func() { defer d.cancelWg.Done(); errc <- fn() }()
	}
	// Wait for the first error, then terminate the others.
	var err error
	for i := 0; i < len(fetchers); i++ {
		if i == len(fetchers)-1 {
			// Close the queue when all fetchers have exited.
			// This will cause the block processor to end when
			// it has processed the queue.
			d.queue.Close()
		}
		if err = <-errc; err != nil {
			break
		}
	}
	d.queue.Close()
	d.Cancel()
	return err
}
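
// Illustrative sketch (standalone version of the fan-out pattern used by
// spawnSync; assumed names): run workers concurrently, keep the first error.
//
//	func firstError(fns ...func() error) error {
//		errc := make(chan error, len(fns))
//		for _, fn := range fns {
//			fn := fn
//			go func() { errc <- fn() }()
//		}
//		var err error
//		for range fns {
//			if e := <-errc; e != nil && err == nil {
//				err = e
//			}
//		}
//		return err
//	}
//
// spawnSync differs in that it stops waiting at the first failure and relies
// on Cancel plus queue.Close to unblock the remaining fetchers.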

// cancel aborts all of the operations and resets the queue. However, cancel does
// not wait for the running download goroutines to finish. This method should be
// used when cancelling the downloads from inside the downloader.
func (d *Downloader) cancel() {
	// Close the current cancel channel
	d.cancelLock.Lock()
	if d.cancelCh != nil {
		select {
		case <-d.cancelCh:
			// Channel was already closed
		default:
			close(d.cancelCh)
		}
	}
	d.cancelLock.Unlock()
}

// Cancel aborts all of the operations and waits for all download goroutines to
// finish before returning.
func (d *Downloader) Cancel() {
	d.cancel()
	d.cancelWg.Wait()
}

// Terminate interrupts the downloader, canceling all pending operations.
// The downloader cannot be reused after calling Terminate.
func (d *Downloader) Terminate() {
	// Close the termination channel (make sure double close is allowed)
	d.quitLock.Lock()
	select {
	case <-d.quitCh:
	default:
		close(d.quitCh)
	}
	d.quitLock.Unlock()

	// Cancel any pending download requests
	d.Cancel()
}

// fetchHeight retrieves the head header of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
	p.log.Debug("Retrieving remote chain height")

	// Request the advertised remote head block and wait for the response
	head, _ := p.peer.Head()
	go p.peer.RequestHeadersByHash(head, 1, 0, false)

	ttl := d.requestTTL()
	timeout := time.After(ttl)
	for {
		select {
		case <-d.cancelCh:
			return nil, errCancelBlockFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) != 1 {
				p.log.Debug("Multiple headers for single request", "headers", len(headers))
				return nil, errBadPeer
			}
			head := headers[0]
			p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash())
			return head, nil

		case <-timeout:
			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
			return nil, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
}

// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
// common ancestor.
// It returns parameters to be used for peer.RequestHeadersByNumber:
//
//	from  - starting block number
//	count - number of headers to request
//	skip  - number of headers to skip
//
// and also returns 'max', the last block which is expected to be returned by the remote peers,
// given the (from, count, skip).
func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
	var (
		from     int
		count    int
		MaxCount = MaxHeaderFetch / 16
	)
	// requestHead is the highest block that we will ask for. If requestHead is not offset,
	// the highest block that we will get is 16 blocks back from head, which means we
	// will fetch 14 or 15 blocks unnecessarily in the case the height difference
	// between us and the peer is 1-2 blocks, which is most common.
	requestHead := int(remoteHeight) - 1
	if requestHead < 0 {
		requestHead = 0
	}
	// requestBottom is the lowest block we want included in the query.
	// Ideally, we want to include just below our own head.
	requestBottom := int(localHeight - 1)
	if requestBottom < 0 {
		requestBottom = 0
	}
	totalSpan := requestHead - requestBottom
	span := 1 + totalSpan/MaxCount
	if span < 2 {
		span = 2
	}
	if span > 16 {
		span = 16
	}

	count = 1 + totalSpan/span
	if count > MaxCount {
		count = MaxCount
	}
	if count < 2 {
		count = 2
	}
	from = requestHead - (count-1)*span
	if from < 0 {
		from = 0
	}
	max := from + (count-1)*span
	return int64(from), count, span - 1, uint64(max)
}
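
// Worked example (illustrative; the values follow mechanically from the logic
// above): with MaxHeaderFetch = 192 (so MaxCount = 12), remoteHeight = 1500
// and localHeight = 1000:
//
//	requestHead   = 1499
//	requestBottom = 999
//	totalSpan     = 500
//	span          = 1 + 500/12 = 42 -> capped to 16
//	count         = 1 + 500/16 = 32 -> capped to 12
//	from          = 1499 - 11*16 = 1323
//	max           = 1323 + 11*16 = 1499
//
// so the peer is asked for 12 headers starting at 1323, skipping 15 blocks
// between consecutive headers, with the reply expected to end at 1499.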

// findAncestor tries to locate the common ancestor link of the local chain and
// a remote peer's blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N links should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
	// Figure out the valid ancestor range to prevent rewrite attacks
	var (
		floor        = int64(-1)
		localHeight  uint64
		remoteHeight = remoteHeader.Number.Uint64()
	)
	switch d.Mode {
	case FullSync:
		localHeight = d.blockchain.CurrentBlock().NumberU64()
	case FastSync:
		localHeight = d.blockchain.CurrentFastBlock().NumberU64()
	default:
		localHeight = d.lightchain.CurrentHeader().Number.Uint64()
	}
	p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
	if localHeight >= MaxForkAncestry {
		// We're above the max reorg threshold, find the earliest fork point
		floor = int64(localHeight - MaxForkAncestry)

		// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
		// all headers before that point will be missing.
		if !d.Mode.SyncFullBlockChain() {
			// If we don't know the current CHT position, find it
			if d.genesis == 0 {
				header := d.lightchain.CurrentHeader()
				for header != nil {
					d.genesis = header.Number.Uint64()
					if floor >= int64(d.genesis)-1 {
						break
					}
					header = d.lightchain.GetHeaderByHash(header.ParentHash)
				}
			}
			// We already know the "genesis" block number, cap floor to that
			if floor < int64(d.genesis)-1 {
				floor = int64(d.genesis) - 1
			}
		}
	}
	from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)

	p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
	go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)

	// Wait for the remote response to the head fetch
	number, hash := uint64(0), common.Hash{}

	ttl := d.requestTTL()
	timeout := time.After(ttl)

	for finished := false; !finished; {
		select {
		case <-d.cancelCh:
			return 0, errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) == 0 {
				p.log.Warn("Empty head header set")
				return 0, errEmptyHeaderSet
			}
			// Make sure the peer's reply conforms to the request
			for i, header := range headers {
				expectNumber := from + int64(i)*int64(skip+1)
				if number := header.Number.Int64(); number != expectNumber {
					p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
					return 0, errInvalidChain
				}
			}
			// Check if a common ancestor was found
			finished = true
			for i := len(headers) - 1; i >= 0; i-- {
				// Skip any headers that underflow/overflow our requested set
				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
					continue
				}
				// Otherwise check if we already know the header or not
				h := headers[i].Hash()
				n := headers[i].Number.Uint64()

				var known bool
				switch d.Mode {
				case FullSync:
					known = d.blockchain.HasBlock(h, n)
				case FastSync:
					known = d.blockchain.HasFastBlock(h, n)
				default:
					known = d.lightchain.HasHeader(h, n)
				}
				if known {
					number, hash = n, h
					break
				}
			}

		case <-timeout:
			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
			return 0, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
	// If the head fetch already found an ancestor, return
	if hash != (common.Hash{}) {
		if int64(number) <= floor {
			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
			return 0, errInvalidAncestor
		}
		p.log.Debug("Found common ancestor", "number", number, "hash", hash)
		return number, nil
	}
	// Ancestor not found, we need to binary search over our chain
	start, end := uint64(0), remoteHeight
	if floor > 0 {
		start = uint64(floor)
	}
	p.log.Trace("Binary searching for common ancestor", "start", start, "end", end)

	for start+1 < end {
		// Split our chain interval in two, and request the hash to cross check
		check := (start + end) / 2

		ttl := d.requestTTL()
		timeout := time.After(ttl)

		go p.peer.RequestHeadersByNumber(check, 1, 0, false)

		// Wait until a reply arrives to this request
		for arrived := false; !arrived; {
			select {
			case <-d.cancelCh:
				return 0, errCancelHeaderFetch

			case packer := <-d.headerCh:
				// Discard anything not from the origin peer
				if packer.PeerId() != p.id {
					log.Debug("Received headers from incorrect peer", "peer", packer.PeerId())
					break
				}
				// Make sure the peer actually gave something valid
				headers := packer.(*headerPack).headers
				if len(headers) != 1 {
					p.log.Debug("Multiple headers for single request", "headers", len(headers))
					return 0, errBadPeer
				}
				arrived = true

				// Modify the search interval based on the response
				h := headers[0].Hash()
				n := headers[0].Number.Uint64()

				var known bool
				switch d.Mode {
				case FullSync:
					known = d.blockchain.HasBlock(h, n)
				case FastSync:
					known = d.blockchain.HasFastBlock(h, n)
				default:
					known = d.lightchain.HasHeader(h, n)
				}
				if !known {
					end = check
					break
				}
				header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
				if header.Number.Uint64() != check {
					p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
					return 0, errBadPeer
				}
				start = check
				hash = h

			case <-timeout:
				p.log.Debug("Waiting for search header timed out", "elapsed", ttl)
				return 0, errTimeout

			case <-d.bodyCh:
			case <-d.receiptCh:
				// Out of bounds delivery, ignore
			}
		}
	}
	// Ensure valid ancestry and return
	if int64(start) <= floor {
		p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
		return 0, errInvalidAncestor
	}
	p.log.Debug("Found common ancestor", "number", start, "hash", hash)
	return start, nil
}
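
// Illustrative note on the binary search above: the loop maintains the
// invariant that block `start` is known locally while `end` is not, so it
// converges on the highest shared block. Stripped of networking it is just
//
//	for start+1 < end {
//		check := (start + end) / 2
//		if knownLocally(check) {
//			start = check
//		} else {
//			end = check
//		}
//	}
//
// with knownLocally standing in for the mode-dependent
// HasBlock/HasFastBlock/HasHeader checks.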

// fetchHeaders keeps retrieving headers concurrently from the number
// requested, until no more are returned, potentially throttling on the way. To
// facilitate concurrency but still protect against malicious nodes sending bad
// headers, we construct a header chain skeleton using the "origin" peer we are
// syncing with, and fill in the missing headers using anyone else. Headers from
// other peers are only accepted if they map cleanly to the skeleton. If no one
// can fill in the skeleton - not even the origin peer - it's assumed invalid and
// the origin is dropped.
// height = latest block announced by the peer.
func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64, height uint64) error {
	p.log.Debug("fetchHeaders", "origin", from, "pivot", pivot, "height", height)
	defer p.log.Debug("Header download terminated")

	// Create a timeout timer, and the associated header fetcher
	skeleton := true            // Skeleton assembly phase or finishing up
	request := time.Now()       // Time of the last skeleton fetch request
	timeout := time.NewTimer(0) // Timer to dump a non-responsive active peer
	<-timeout.C                 // Timeout channel should be initially empty
	defer timeout.Stop()
	epoch := d.epoch

	var ttl time.Duration
	getHeaders := func(from uint64) {
		request = time.Now()

		ttl = d.requestTTL()
		timeout.Reset(ttl)

		if skeleton {
			p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
		} else {
			count := MaxHeaderFetch
			skip := 0
			p.log.Trace("Fetching full headers", "count", count, "from", from)
			go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, skip, false)
		}
	}

	getEpochHeaders := func(fromEpochBlock uint64) {
		if d.Mode != UltraLightSync {
			panic("This method should be called only in UltraLightSync mode")
		}
		if fromEpochBlock%epoch != 0 {
			panic(fmt.Sprintf(
				"Logic error: getEpochHeaders received a request to fetch non-epoch block %d with epoch %d",
				fromEpochBlock, epoch))
		}

		request = time.Now()

		ttl = d.requestTTL()
		timeout.Reset(ttl)

		// If epoch is 100 and we fetch from=1000 with skip=100, then we will get
		// 1000, 1101, 1202, 1303 ...
		// So skip has to be epoch - 1 to get the right set of blocks.
		skip := int(epoch - 1)
		count := MaxEpochHeaderFetch
		log.Trace("getEpochHeaders", "from", fromEpochBlock, "count", count, "skip", skip)
		p.log.Trace("Fetching full headers", "count", count, "from", fromEpochBlock)
		go p.peer.RequestHeadersByNumber(fromEpochBlock, count, skip, false)
	}

	// Returns true if a header fetch request was made, false if the syncing is finished.
	getEpochOrNormalHeaders := func(from uint64) bool {
		// Download the epoch headers including and beyond the current head.
		nextEpochBlock := (from-1)/epoch*epoch + epoch
		// If we're still not synced up to the latest epoch, sync only epoch headers.
		// Otherwise, sync block headers as we would normally in light sync.
		log.Trace("Getting headers in ultralight sync mode", "from", from, "height", height, "nextEpochBlock", nextEpochBlock, "epoch", epoch)
		if nextEpochBlock < height {
			getEpochHeaders(nextEpochBlock)
			return true
		} else if from <= height {
			getHeaders(height)
			return true
		} else {
			// During repeated invocations, "from" can exceed height, since blocks could
			// have been created after this method was invoked; in that case "from", which
			// is one beyond the last fetched header number, can be greater than the height.
			// If we have already fetched a header at or beyond the height, we declare the
			// sync finished and stop.
			return false
		}
	}
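	// Worked example of the epoch arithmetic above (illustrative): with
	// epoch = 100, from = 1001 gives nextEpochBlock = (1000/100)*100 + 100 = 1100,
	// while from = 1000 gives (999/100)*100 + 100 = 1000, i.e. an epoch block that
	// has not been fetched yet is requested immediately. The skip of epoch-1 = 99
	// then yields headers 1000, 1100, 1200, ... (consecutive numbers differ by skip+1).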

	if d.Mode == UltraLightSync {
		if epoch == 0 {
			panic("Epoch cannot be 0 in IBFT + UltraLightSync")
		}
		// Don't fetch the skeleton, only fetch the headers.
		skeleton = false
		getEpochOrNormalHeaders(from)
	} else {
		log.Trace("getHeaders#initialHeaderDownload", "from", from)
		getHeaders(from)
	}

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Make sure the active peer is giving us the skeleton headers
			if packet.PeerId() != p.id {
				log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
				break
			}
			headerReqTimer.UpdateSince(request)
			timeout.Stop()

			// If the skeleton's finished, pull any remaining head headers directly from the origin
			if packet.Items() == 0 && skeleton {
				skeleton = false
				log.Trace("getHeaders, skeleton finished, download remaining headers")
				getHeaders(from)
				continue
			}
			// If no more headers are inbound, notify the content fetchers and return
			if packet.Items() == 0 {
				// Don't abort header fetches while the pivot is downloading
				if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
					p.log.Debug("No headers, waiting for pivot commit")
					select {
					case <-time.After(fsHeaderContCheck):
						getHeaders(from)
						continue
					case <-d.cancelCh:
						return errCancelHeaderFetch
					}
				}
				// Pivot done (or not in fast sync) and no more headers, terminate the process
				p.log.Debug("No more headers available")
				select {
				case d.headerProcCh <- nil:
					return nil
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}
			// Received headers
			headers := packet.(*headerPack).headers

			// If we received a skeleton batch, resolve internals concurrently
			if skeleton {
				filled, proced, err := d.fillHeaderSkeleton(from, headers)
				if err != nil {
					p.log.Debug("Skeleton chain invalid", "err", err)
					return errInvalidChain
				}
				headers = filled[proced:]
				from += uint64(proced)
			} else {
				// If we're closing in on the chain head, but haven't yet reached it, delay
				// the last few headers so mini reorgs on the head don't cause invalid hash
				// chain errors.
				// Don't delay the last few headers in IBFT, since chain reorgs are not expected there.
				if !d.ibftConsensus {
					if n := len(headers); n > 0 {
						// Retrieve the current head we're at
						head := uint64(0)
						if !d.Mode.SyncFullBlockChain() {
							head = d.lightchain.CurrentHeader().Number.Uint64()
						} else {
							head = d.blockchain.CurrentFastBlock().NumberU64()
							if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
								head = full
							}
						}
						// If the head is way older than this batch, delay the last few headers
						if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
							delay := reorgProtHeaderDelay
							if delay > n {
								delay = n
							}
							log.Trace("Headers received", "received", len(headers), "kept from", 0, "kept till", n-delay)
							headers = headers[:n-delay]
						}
					}
				}
			}
			// Insert all the new headers and fetch the next batch
			if len(headers) > 0 {
				p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
				select {
				case d.headerProcCh <- headers:
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
				// In all other sync modes, we fetch the block immediately after the current block.
				// In ultralight sync mode, increment by epoch instead.
				if d.Mode == UltraLightSync {
					lastFetchedHeaderNumber := headers[len(headers)-1].Number.Uint64()
					moreHeaderFetchesPending := getEpochOrNormalHeaders(lastFetchedHeaderNumber + 1)
					if !moreHeaderFetchesPending {
						p.log.Debug("No more headers available")
						select {
						case d.headerProcCh <- nil:
							return nil
						case <-d.cancelCh:
							return errCancelHeaderFetch
						}
					}
				} else {
					from += uint64(len(headers))
					log.Trace("getHeaders#downloadMoreHeaders", "from", from)
					getHeaders(from)
				}
			} else {
				// No headers delivered, or all of them were delayed; sleep a bit and retry
				p.log.Trace("All headers delayed, waiting")
				select {
				case <-time.After(fsHeaderContCheck):
					if d.Mode == UltraLightSync {
						getEpochOrNormalHeaders(from)
					} else {
						getHeaders(from)
					}
					continue
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}

		case <-timeout.C:
			if d.dropPeer == nil {
				// The dropPeer method is nil when `--copydb` is used for a local copy.
				// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
				p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id)
				break
			}
			// Header retrieval timed out, consider the peer bad and drop
			p.log.Debug("Header request timed out", "elapsed", ttl)
			headerTimeoutMeter.Mark(1)
			d.dropPeer(p.id)

			// Finish the sync gracefully instead of dumping the gathered data though
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- false:
				case <-d.cancelCh:
				}
			}
			select {
			case d.headerProcCh <- nil:
			case <-d.cancelCh:
			}
			return errBadPeer
		}
	}
}
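
// Illustrative note on the skeleton scheme (numbers follow from the constants
// above): a single skeleton request starting at `from` asks for
// MaxSkeletonSize = 128 headers at numbers
//
//	from+191, from+383, from+575, ...
//
// i.e. every MaxHeaderFetch-th header. Each 191-header gap is then handed to
// fillHeaderSkeleton below, where idle peers fill it in and the result is only
// accepted if it hash-links cleanly into the two surrounding skeleton headers.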

// fillHeaderSkeleton concurrently retrieves headers from all our available peers
// and maps them to the provided skeleton header chain.
//
// Any partial results from the beginning of the skeleton are (if possible) forwarded
// immediately to the header processor to keep the rest of the pipeline full even
// in the case of header stalls.
//
// The method returns the entire filled skeleton and also the number of headers
// already forwarded for processing.
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
	log.Debug("Filling up skeleton", "from", from)
	d.queue.ScheduleSkeleton(from, skeleton)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*headerPack)
			return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)
		}
		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
		throttle = func() bool { return false }
		reserve  = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
			return d.queue.ReserveHeaders(p, count), false, nil
		}
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
		capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
	)
	err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")

	log.Debug("Skeleton fill terminated", "err", err)

	filled, proced := d.queue.RetrieveHeaders()
	return filled, proced, err
}

// fetchBodies iteratively downloads the scheduled block bodies, taking any
// available peers, reserving a chunk of blocks for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchBodies(from uint64) error {
	log.Debug("Downloading block bodies", "origin", from)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*bodyPack)
			return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles, pack.randomness, pack.epochSnarkData)
		}
		expire   = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
		capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
	)
	err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")

	log.Debug("Block body download terminated", "err", err)
	return err
}

// fetchReceipts iteratively downloads the scheduled block receipts, taking any
// available peers, reserving a chunk of receipts for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchReceipts(from uint64) error {
	log.Debug("Downloading transaction receipts", "origin", from)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*receiptPack)
			return d.queue.DeliverReceipts(pack.peerID, pack.receipts)
		}
		expire   = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
		capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
	)
	err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")

	log.Debug("Transaction receipt download terminated", "err", err)
	return err
}
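
// Illustrative sketch (hypothetical, for illustration only): adding a fourth
// data type would follow the same shape as fetchBodies/fetchReceipts above,
// supplying fetchParts with type-specific closures over a made-up "proofs" queue:
//
//	deliver := func(packet dataPack) (int, error) { ... }        // hand packets to the queue
//	expire := func() map[string]int { return expireProofs(ttl) } // fail slow requests
//	err := d.fetchParts(errCancelProofFetch, proofCh, deliver, proofWakeCh, expire, ...)
//
// None of these identifiers exist in this file; they only mirror the pattern.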

// fetchParts iteratively downloads scheduled block parts, taking any available
// peers, reserving a chunk of fetch requests for each, waiting for delivery and
// also periodically checking for timeouts.
//
// As the scheduling/timeout logic is mostly the same for all downloaded data
// types, this method is used by each for data gathering and is instrumented with
// various callbacks to handle the slight differences between processing them.
//
// The instrumentation parameters:
//   - errCancel:  error type to return if the fetch operation is cancelled (mostly makes logging nicer)
//   - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
//   - deliver:    processing callback to deliver data packets into type specific download queues (usually within `queue`)
//   - wakeCh:     notification channel for waking the fetcher when new tasks are available (or sync completed)
//   - expire:     task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
//   - pending:    task callback for the number of requests still needing download (detect completion/non-completability)
//   - inFlight:   task callback for the number of in-progress requests (wait for all active downloads to finish)
//   - throttle:   task callback to check if the processing queue is full and activate throttling (bound memory use)
//   - reserve:    task callback to reserve new download tasks to a particular peer (also signals partial completions)
//   - fetchHook:  tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
//   - fetch:      network callback to actually send a particular download request to a physical remote peer
//   - cancel:     task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
//   - capacity:   network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
//   - idle:       network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
//   - setIdle:    network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
//   - kind:       textual label of the type being downloaded to display in log messages
func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
	expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error),
	fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
	idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error {

	// Create a ticker to detect expired retrieval tasks
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	update := make(chan struct{}, 1)

	// Prepare the queue and fetch block parts until the block header fetcher's done
	finished := false
	for {
		select {
		case <-d.cancelCh:
			return errCancel

		case packet := <-deliveryCh:
			// If the peer was previously banned and failed to deliver its pack
			// in a reasonable time frame, ignore its message.
			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
				// Deliver the received chunk of data and check chain validity
				accepted, err := deliver(packet)
				if err == errInvalidChain {
					return err
				}
				// Unless a peer delivered something completely else than requested (usually
				// caused by a timed out request which came through in the end), set it to
				// idle. If the delivery's stale, the peer should have already been idled.
				if err != errStaleDelivery {
					setIdle(peer, accepted)
				}
				// Issue a log to the user to see what's going on
				switch {
				case err == nil && packet.Items() == 0:
					peer.log.Trace("Requested data not delivered", "type", kind)
				case err == nil:
					peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
				default:
					peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err)
				}
			}
			// Blocks assembled, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case cont := <-wakeCh:
			// The header fetcher sent a continuation flag, check if it's done
			if !cont {
				finished = true
			}
			// Headers arrive, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-ticker.C:
			// Sanity check update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-update:
			// Short circuit if we lost all our peers
			if d.peers.Len() == 0 {
				return errNoPeers
			}
			// Check for fetch request timeouts and demote the responsible peers
			for pid, fails := range expire() {
				if peer := d.peers.Peer(pid); peer != nil {
					// If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
					// ourselves. Only reset to minimal throughput, but don't drop just yet. If even the minimal times
					// out, then sync-wise we need to get rid of the peer.
					//
					// The reason the minimum threshold is 2 is that the downloader tries to estimate the bandwidth
					// and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
					// how response times react, so it always requests one more than the minimum (i.e. min 2).
					if fails > 2 {
						peer.log.Trace("Data delivery timed out", "type", kind)
						setIdle(peer, 0)
					} else {
						peer.log.Debug("Stalling delivery, dropping", "type", kind)
						if d.dropPeer == nil {
							// The dropPeer method is nil when `--copydb` is used for a local copy.
							// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
							peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid)
						} else {
							d.dropPeer(pid)
						}
					}
				}
			}
			// If there's nothing more to fetch, wait or terminate
			if pending() == 0 {
				if !inFlight() && finished {
					log.Debug("Data fetching completed", "type", kind)
					return nil
				}
				break
			}
			// Send a download request to all idle peers, until throttled
			progressed, throttled, running := false, false, inFlight()
			idles, total := idle()

			for _, peer := range idles {
				// Short circuit if throttling activated
				if throttle() {
					throttled = true
					break
				}
				// Short circuit if there are no more available tasks.
				if pending() == 0 {
					break
				}
				// Reserve a chunk of fetches for a peer. A nil can mean either that
				// no more headers are available, or that the peer is known not to
				// have them.
				request, progress, err := reserve(peer, capacity(peer))
				if err != nil {
					return err
				}
				if progress {
					progressed = true
				}
				if request == nil {
					continue
				}
				if request.From > 0 {
					peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From)
				} else {
					peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
				}
				// Fetch the chunk and make sure any errors return the hashes to the queue
				if fetchHook != nil {
					fetchHook(request.Headers)
				}
				if err := fetch(peer, request); err != nil {
					// Although we could try and make an attempt to fix this, this error really
					// means that we've double allocated a fetch task to a peer. If that is the
					// case, the internal state of the downloader and the queue is very wrong so
					// better hard crash and note the error instead of silently accumulating into
					// a much bigger issue.
					panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
				}
				running = true
			}
			// Make sure that we have peers available for fetching. If all peers have been tried
			// and all failed, throw an error
			if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
				return errPeersUnavailable
			}
		}
	}
}

// processHeaders takes batches of retrieved headers from an input channel and
// keeps processing and scheduling them into the header chain and downloader's
// queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
	// Keep a count of uncertain headers to roll back
	rollback := []*types.Header{}
	defer func() {
		if len(rollback) > 0 {
			// Flatten the headers and roll them back
			hashes := make([]common.Hash, len(rollback))
			for i, header := range rollback {
				hashes[i] = header.Hash()
			}
			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
			if d.Mode.SyncFullBlockChain() {
				lastFastBlock = d.blockchain.CurrentFastBlock().Number()
				lastBlock = d.blockchain.CurrentBlock().Number()
			}
			d.lightchain.Rollback(hashes, d.Mode.SyncFullHeaderChain())
			curFastBlock, curBlock := common.Big0, common.Big0
			if d.Mode.SyncFullBlockChain() {
				curFastBlock = d.blockchain.CurrentFastBlock().Number()
				curBlock = d.blockchain.CurrentBlock().Number()
			}
			log.Warn("Rolled back headers", "count", len(hashes),
				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
		}
	}()

	// Wait for batches of headers to process
	gotHeaders := false

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderProcessing

		case headers := <-d.headerProcCh:
			// Terminate header processing if we synced up
			if len(headers) == 0 {
				// Notify everyone that headers are fully processed
				for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
					select {
					case ch <- false:
					case <-d.cancelCh:
					}
				}
				// If no headers were retrieved at all, the peer violated its TD promise that it had a
				// better chain compared to ours. The only exception is if its promised blocks were
				// already imported by other means (e.g. fetcher):
				//
				// R <remote peer>, L <local node>: Both at block 10
				// R: Mine block 11, and propagate it to L
				// L: Queue block 11 for import
				// L: Notice that R's head and TD increased compared to ours, start sync
				// L: Import of block 11 finishes
				// L: Sync begins, and finds common ancestor at 11
				// L: Request new headers up from 11 (R's TD was higher, it must have something)
				// R: Nothing to give
				if d.Mode.SyncFullBlockChain() {
					head := d.blockchain.CurrentBlock()
					if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
						return errStallingPeer
					}
				}
				// If fast or light syncing, ensure promised headers are indeed delivered. This is
				// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
				// of delivering the post-pivot blocks that would flag the invalid content.
				//
				// This check cannot be executed "as is" for full imports, since blocks may still be
				// queued for processing when the header download completes. However, as long as the
				// peer gave us something useful, we're already happy/progressed (above check).
1477 1478 if d.Mode == FastSync || d.Mode == LightSync { 1479 head := d.lightchain.CurrentHeader() 1480 if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { 1481 return errStallingPeer 1482 } 1483 } 1484 // Disable any rollback and return 1485 rollback = nil 1486 return nil 1487 } 1488 // Otherwise split the chunk of headers into batches and process them 1489 gotHeaders = true 1490 1491 for len(headers) > 0 { 1492 // Terminate if something failed in between processing chunks 1493 select { 1494 case <-d.cancelCh: 1495 return errCancelHeaderProcessing 1496 default: 1497 } 1498 // Select the next chunk of headers to import 1499 limit := maxHeadersProcess 1500 if limit > len(headers) { 1501 limit = len(headers) 1502 } 1503 chunk := headers[:limit] 1504 1505 // In case of header-only syncing, validate the chunk immediately 1506 if d.Mode == FastSync || !d.Mode.SyncFullBlockChain() { 1507 // Collect the yet unknown headers to mark them as uncertain 1508 unknown := make([]*types.Header, 0, len(headers)) 1509 for _, header := range chunk { 1510 if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) { 1511 unknown = append(unknown, header) 1512 } 1513 } 1514 // If we're importing pure headers, verify based on their recentness 1515 frequency := fsHeaderCheckFrequency 1516 if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { 1517 frequency = 1 1518 } 1519 if n, err := d.lightchain.InsertHeaderChain(chunk, frequency, d.Mode.SyncFullHeaderChain()); err != nil { 1520 // If some headers were inserted, add them to the rollback list too 1521 if n > 0 { 1522 rollback = append(rollback, chunk[:n]...) 1523 } 1524 log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err) 1525 return errInvalidChain 1526 } 1527 // All verifications passed, store newly found uncertain headers 1528 log.Trace(fmt.Sprintf("Adding headers for potential rollback: %v", headersToNumbers(unknown))) 1529 rollback = append(rollback, unknown...) 1530 if len(rollback) > fsHeaderSafetyNet { 1531 log.Debug("Trimming header rollback list", "limit", fsHeaderSafetyNet) 1532 rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...) 1533 } 1534 } 1535 // Unless we're doing light chains, schedule the headers for associated content retrieval 1536 if d.Mode.SyncFullBlockChain() { 1537 // If we've reached the allowed number of pending headers, stall a bit 1538 for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { 1539 select { 1540 case <-d.cancelCh: 1541 return errCancelHeaderProcessing 1542 case <-time.After(time.Second): 1543 } 1544 } 1545 // Otherwise insert the headers for content retrieval 1546 inserts := d.queue.Schedule(chunk, origin) 1547 if len(inserts) != len(chunk) { 1548 log.Debug("Stale headers") 1549 return errBadPeer 1550 } 1551 } 1552 headers = headers[limit:] 1553 origin += uint64(limit) 1554 } 1555 1556 // Update the highest block number we know if a higher one is found.
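// (origin was advanced past every header scheduled above, so origin-1 is the
// number of the highest header processed in this batch.)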
1557 d.syncStatsLock.Lock() 1558 if d.syncStatsChainHeight < origin { 1559 d.syncStatsChainHeight = origin - 1 1560 } 1561 d.syncStatsLock.Unlock() 1562 1563 // Signal the content downloaders of the availability of new tasks 1564 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 1565 select { 1566 case ch <- true: 1567 default: 1568 } 1569 } 1570 } 1571 } 1572 } 1573 1574 func headersToNumbers(headers []*types.Header) []*big.Int { 1575 headerNumbers := make([]*big.Int, 0) 1576 for _, header := range headers { 1577 headerNumbers = append(headerNumbers, header.Number) 1578 } 1579 return headerNumbers 1580 } 1581 1582 // processFullSyncContent takes fetch results from the queue and imports them into the chain. 1583 func (d *Downloader) processFullSyncContent() error { 1584 for { 1585 results := d.queue.Results(true) 1586 if len(results) == 0 { 1587 return nil 1588 } 1589 if d.chainInsertHook != nil { 1590 d.chainInsertHook(results) 1591 } 1592 if err := d.importBlockResults(results); err != nil { 1593 return err 1594 } 1595 } 1596 } 1597 1598 func (d *Downloader) importBlockResults(results []*fetchResult) error { 1599 // Check for any early termination requests 1600 if len(results) == 0 { 1601 return nil 1602 } 1603 select { 1604 case <-d.quitCh: 1605 return errCancelContentProcessing 1606 default: 1607 } 1608 // Retrieve a batch of results to import 1609 first, last := results[0].Header, results[len(results)-1].Header 1610 log.Debug("Inserting downloaded chain", "items", len(results), 1611 "firstnum", first.Number, "firsthash", first.Hash(), 1612 "lastnum", last.Number, "lasthash", last.Hash(), 1613 ) 1614 blocks := make([]*types.Block, len(results)) 1615 for i, result := range results { 1616 blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles, result.Randomness, result.EpochSnarkData) 1617 } 1618 if index, err := d.blockchain.InsertChain(blocks); err != nil { 1619 if index < len(results) { 1620 log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) 1621 } else { 1622 // The InsertChain method in blockchain.go will sometimes return an out-of-bounds index 1623 // when it needs to preprocess blocks to import a sidechain. 1624 // The importer will put together a new list of blocks to import, which is a superset 1625 // of the blocks delivered from the downloader, and the indexing will be off. 1626 log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err) 1627 } 1628 return errInvalidChain 1629 } 1630 return nil 1631 } 1632 1633 // processFastSyncContent takes fetch results from the queue and writes them to the 1634 // database. It also controls the synchronisation of state nodes of the pivot block. 1635 func (d *Downloader) processFastSyncContent(latest *types.Header) error { 1636 // Start syncing state of the reported head block. This should get us most of 1637 // the state of the pivot block. 1638 stateSync := d.syncState(latest.Root) 1639 defer stateSync.Cancel() 1640 go func() { 1641 if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { 1642 d.queue.Close() // wake up Results 1643 } 1644 }() 1645 // Figure out the ideal pivot block. Note that this goalpost may move if the 1646 // sync takes long enough for the chain head to move significantly.
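// Illustrative numbers: with fsMinFullBlocks = 64 and a reported head at
// block 10,000, the initial pivot lands at 9,936. Blocks before the pivot are
// committed with fast-sync data (bodies and receipts), the pivot's state trie
// is downloaded, and everything after it is imported with full execution.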
1647 pivot := uint64(0) 1648 if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) { 1649 pivot = height - uint64(fsMinFullBlocks) 1650 } 1651 // To cater for moving pivot points, track the pivot block and subsequently 1652 // accumulated download results separately. 1653 var ( 1654 oldPivot *fetchResult // Locked in pivot block, might change eventually 1655 oldTail []*fetchResult // Downloaded content after the pivot 1656 ) 1657 for { 1658 // Wait for the next batch of downloaded data to be available, and if the pivot 1659 // block became stale, move the goalpost 1660 results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness 1661 if len(results) == 0 { 1662 // If pivot sync is done, stop 1663 if oldPivot == nil { 1664 return stateSync.Cancel() 1665 } 1666 // If sync failed, stop 1667 select { 1668 case <-d.cancelCh: 1669 return stateSync.Cancel() 1670 default: 1671 } 1672 } 1673 if d.chainInsertHook != nil { 1674 d.chainInsertHook(results) 1675 } 1676 if oldPivot != nil { 1677 results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) 1678 } 1679 // Split around the pivot block and process the two sides via fast/full sync 1680 if atomic.LoadInt32(&d.committed) == 0 { 1681 latest = results[len(results)-1].Header 1682 if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) { 1683 log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks)) 1684 pivot = height - uint64(fsMinFullBlocks) 1685 } 1686 } 1687 P, beforeP, afterP := splitAroundPivot(pivot, results) 1688 if err := d.commitFastSyncData(beforeP, stateSync); err != nil { 1689 return err 1690 } 1691 if P != nil { 1692 // If new pivot block found, cancel old state retrieval and restart 1693 if oldPivot != P { 1694 stateSync.Cancel() 1695 1696 stateSync = d.syncState(P.Header.Root) 1697 defer stateSync.Cancel() 1698 go func() { 1699 if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { 1700 d.queue.Close() // wake up Results 1701 } 1702 }() 1703 oldPivot = P 1704 } 1705 // Wait for completion, occasionally checking for pivot staleness 1706 select { 1707 case <-stateSync.done: 1708 if stateSync.err != nil { 1709 return stateSync.err 1710 } 1711 if err := d.commitPivotBlock(P); err != nil { 1712 return err 1713 } 1714 oldPivot = nil 1715 1716 case <-time.After(time.Second): 1717 oldTail = afterP 1718 continue 1719 } 1720 } 1721 // Fast sync done, pivot commit done, full import 1722 if err := d.importBlockResults(afterP); err != nil { 1723 return err 1724 } 1725 } 1726 } 1727 1728 func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) { 1729 for _, result := range results { 1730 num := result.Header.Number.Uint64() 1731 switch { 1732 case num < pivot: 1733 before = append(before, result) 1734 case num == pivot: 1735 p = result 1736 default: 1737 after = append(after, result) 1738 } 1739 } 1740 return p, before, after 1741 } 1742 1743 func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error { 1744 // Check for any early termination requests 1745 if len(results) == 0 { 1746 return nil 1747 } 1748 select { 1749 case <-d.quitCh: 1750 return errCancelContentProcessing 1751 case <-stateSync.done: 1752 if err := stateSync.Wait(); err != nil { 1753 return err 1754 } 1755 default: 1756 } 1757 // Retrieve a batch of results to import 1758 first, last := results[0].Header, results[len(results)-1].Header 1759 log.Debug("Inserting
fast-sync blocks", "items", len(results), 1760 "firstnum", first.Number, "firsthash", first.Hash(), 1761 "lastnumn", last.Number, "lasthash", last.Hash(), 1762 ) 1763 blocks := make([]*types.Block, len(results)) 1764 receipts := make([]types.Receipts, len(results)) 1765 for i, result := range results { 1766 blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles, result.Randomness, result.EpochSnarkData) 1767 receipts[i] = result.Receipts 1768 } 1769 if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil { 1770 log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) 1771 return errInvalidChain 1772 } 1773 return nil 1774 } 1775 1776 func (d *Downloader) commitPivotBlock(result *fetchResult) error { 1777 block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles, result.Randomness, result.EpochSnarkData) 1778 log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash()) 1779 if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}); err != nil { 1780 return err 1781 } 1782 if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil { 1783 return err 1784 } 1785 atomic.StoreInt32(&d.committed, 1) 1786 return nil 1787 } 1788 1789 // DeliverHeaders injects a new batch of block headers received from a remote 1790 // node into the download schedule. 1791 func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) { 1792 return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter) 1793 } 1794 1795 // DeliverBodies injects a new batch of block bodies received from a remote node. 1796 func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header, randomness []*types.Randomness, epochSnarkData []*types.EpochSnarkData) (err error) { 1797 return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles, randomness, epochSnarkData}, bodyInMeter, bodyDropMeter) 1798 } 1799 1800 // DeliverReceipts injects a new batch of receipts received from a remote node. 1801 func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) { 1802 return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter) 1803 } 1804 1805 // DeliverNodeData injects a new batch of node state data received from a remote node. 1806 func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) { 1807 return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter) 1808 } 1809 1810 // deliver injects a new batch of data received from a remote node. 
1811 func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) { 1812 // Update the delivery metrics for both good and failed deliveries 1813 inMeter.Mark(int64(packet.Items())) 1814 defer func() { 1815 if err != nil { 1816 dropMeter.Mark(int64(packet.Items())) 1817 } 1818 }() 1819 // Deliver or abort if the sync is canceled while queuing 1820 d.cancelLock.RLock() 1821 cancel := d.cancelCh 1822 d.cancelLock.RUnlock() 1823 if cancel == nil { 1824 return errNoSyncActive 1825 } 1826 select { 1827 case destCh <- packet: 1828 return nil 1829 case <-cancel: 1830 return errNoSyncActive 1831 } 1832 } 1833 1834 // qosTuner is the quality of service tuning loop that occasionally gathers the 1835 // peer latency statistics and updates the estimated request round trip time. 1836 func (d *Downloader) qosTuner() { 1837 for { 1838 // Retrieve the current median RTT and integrate into the previous target RTT 1839 rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT())) 1840 atomic.StoreUint64(&d.rttEstimate, uint64(rtt)) 1841 1842 // A new RTT cycle passed, increase our confidence in the estimated RTT 1843 conf := atomic.LoadUint64(&d.rttConfidence) 1844 conf = conf + (1000000-conf)/2 1845 atomic.StoreUint64(&d.rttConfidence, conf) 1846 1847 // Log the new QoS values and sleep until the next RTT 1848 log.Trace("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL()) 1849 select { 1850 case <-d.quitCh: 1851 return 1852 case <-time.After(rtt): 1853 } 1854 } 1855 } 1856 1857 // qosReduceConfidence is meant to be called when a new peer joins the downloader's 1858 // peer set, needing to reduce the confidence we have in our QoS estimates. 1859 func (d *Downloader) qosReduceConfidence() { 1860 // If we have a single peer, confidence is always 1 1861 peers := uint64(d.peers.Len()) 1862 if peers == 0 { 1863 // Ensure peer connectivity races don't catch us off guard 1864 return 1865 } 1866 if peers == 1 { 1867 atomic.StoreUint64(&d.rttConfidence, 1000000) 1868 return 1869 } 1870 // If we have a ton of peers, don't drop confidence 1871 if peers >= uint64(qosConfidenceCap) { 1872 return 1873 } 1874 // Otherwise drop the confidence factor 1875 conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers 1876 if float64(conf)/1000000 < rttMinConfidence { 1877 conf = uint64(rttMinConfidence * 1000000) 1878 } 1879 atomic.StoreUint64(&d.rttConfidence, conf) 1880 1881 rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate)) 1882 log.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL()) 1883 } 1884 1885 // requestRTT returns the current target round trip time for a download request 1886 // to complete in. 1887 // 1888 // Note: the returned RTT is 0.9 of the actual estimated RTT. The reason is that 1889 // the downloader tries to adapt queries to the RTT, so multiple RTT values can 1890 // be adapted to, but smaller ones are preferred (a stabler download stream). 1891 func (d *Downloader) requestRTT() time.Duration { 1892 return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10 1893 } 1894 1895 // requestTTL returns the current timeout allowance for a single download request 1896 // to finish under.
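// The timeout is the estimated RTT scaled by ttlScaling and inflated further
// when confidence is low (ttl = ttlScaling * rtt / confidence), capped at
// ttlLimit to avoid runaway values.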
1897 func (d *Downloader) requestTTL() time.Duration { 1898 var ( 1899 rtt = time.Duration(atomic.LoadUint64(&d.rttEstimate)) 1900 conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0 1901 ) 1902 ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf) 1903 if ttl > ttlLimit { 1904 ttl = ttlLimit 1905 } 1906 return ttl 1907 }
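The QoS machinery above boils down to two formulas: an exponentially weighted moving average that folds each cycle's median peer RTT into the running estimate (rtt' = (1-qosTuningImpact)*rtt + qosTuningImpact*median), and a confidence-scaled timeout (ttl = ttlScaling*rtt/conf, capped at ttlLimit). The standalone sketch below reproduces that arithmetic outside the Downloader so the numbers can be checked in isolation; the helper names (updateEstimate, deriveTTL) and the sample values are illustrative and not part of this file.

package main

import (
	"fmt"
	"time"
)

const (
	tuningImpact = 0.25        // mirrors qosTuningImpact
	scaling      = 3           // mirrors ttlScaling
	limit        = time.Minute // mirrors ttlLimit
)

// updateEstimate folds a new median RTT sample into the previous estimate,
// the same way qosTuner does: rtt' = (1-impact)*rtt + impact*sample.
func updateEstimate(prev, sample time.Duration) time.Duration {
	return time.Duration((1-tuningImpact)*float64(prev) + tuningImpact*float64(sample))
}

// deriveTTL converts an RTT estimate and a confidence factor in (0, 1] into a
// request timeout, the same way requestTTL does: ttl = scaling*rtt/conf, capped.
func deriveTTL(rtt time.Duration, conf float64) time.Duration {
	ttl := time.Duration(scaling) * time.Duration(float64(rtt)/conf)
	if ttl > limit {
		ttl = limit
	}
	return ttl
}

func main() {
	// Start from the pessimistic default (rttMaxEstimate) and feed in three
	// tuning cycles against peers with a one-second median RTT: the estimate
	// moves 25% of the way toward the sample on every cycle.
	rtt := 20 * time.Second
	for i := 1; i <= 3; i++ {
		rtt = updateEstimate(rtt, time.Second)
		fmt.Printf("cycle %d: rtt=%v ttl(conf=0.5)=%v\n", i, rtt, deriveTTL(rtt, 0.5))
	}
	// Prints rtt=15.25s, 11.6875s, 9.015625s; the first two TTLs hit the
	// one-minute cap, the third comes out at 54.09375s.
}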