github.com/shyftnetwork/go-empyrean@v1.8.3-0.20191127201940-fbfca9338f04/eth/downloader/downloader.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package downloader contains the manual full chain synchronisation.
package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	ethereum "github.com/ShyftNetwork/go-empyrean"
	"github.com/ShyftNetwork/go-empyrean/common"
	"github.com/ShyftNetwork/go-empyrean/core/rawdb"
	"github.com/ShyftNetwork/go-empyrean/core/types"
	"github.com/ShyftNetwork/go-empyrean/ethdb"
	"github.com/ShyftNetwork/go-empyrean/event"
	"github.com/ShyftNetwork/go-empyrean/log"
	"github.com/ShyftNetwork/go-empyrean/metrics"
	"github.com/ShyftNetwork/go-empyrean/params"
)

var (
	MaxHashFetch    = 512 // Amount of hashes to be fetched per retrieval request
	MaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request
	MaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request
	MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
	MaxBodyFetch    = 128 // Amount of block bodies to be fetched per retrieval request
	MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
	MaxStateFetch   = 384 // Amount of node state values to allow fetching per request

	MaxForkAncestry  = 3 * params.EpochDuration // Maximum chain reorganisation
	rttMinEstimate   = 2 * time.Second          // Minimum round-trip time to target for download requests
	rttMaxEstimate   = 20 * time.Second         // Maximum round-trip time to target for download requests
	rttMinConfidence = 0.1                      // Worst confidence factor in our estimated RTT value
	ttlScaling       = 3                        // Constant scaling factor for RTT -> TTL conversion
	ttlLimit         = time.Minute              // Maximum TTL allowance to prevent reaching crazy timeouts

	qosTuningPeers   = 5    // Number of peers to tune based on (best peers)
	qosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence
	qosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value

	maxQueuedHeaders  = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
	maxHeadersProcess = 2048      // Number of header download results to import at once into the chain
	maxResultsProcess = 2048      // Number of content download results to import at once into the chain

	reorgProtThreshold   = 48 // Threshold number of recent blocks to disable mini reorg protection
	reorgProtHeaderDelay = 2  // Number of headers to delay delivering to cover mini reorgs

	fsHeaderCheckFrequency = 100             // Verification frequency of the downloaded headers during fast sync
	fsHeaderSafetyNet      = 2048            // Number of headers to discard in case a chain violation is detected
	fsHeaderForceVerify    = 24              // Number of headers to verify before and after the pivot to accept it
	fsHeaderContCheck      = 3 * time.Second // Time interval to check for header continuations during state download
	fsMinFullBlocks        = 64              // Number of blocks to retrieve fully even in fast sync
)
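// The RTT estimate and its confidence feed the per-request TTL computed by
// requestTTL further down this file (beyond this excerpt). A minimal sketch of
// how these constants are assumed to combine, with the confidence stored in
// millionths to allow atomic access:
//
//	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
//	conf := float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
//	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
//	if ttl > ttlLimit {
//		ttl = ttlLimit // never wait longer than ttlLimit for a single request
//	}
//
// At the worst confidence of 0.1 this stretches the timeout up to 30x the raw
// RTT before the ttlLimit cap kicks in.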
var (
	errBusy                    = errors.New("busy")
	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
	errBadPeer                 = errors.New("action from bad peer ignored")
	errStallingPeer            = errors.New("peer is stalling")
	errNoPeers                 = errors.New("no peers to keep download active")
	errTimeout                 = errors.New("timeout")
	errEmptyHeaderSet          = errors.New("empty header set by peer")
	errPeersUnavailable        = errors.New("no peers available or all tried for download")
	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
	errInvalidChain            = errors.New("retrieved hash chain is invalid")
	errInvalidBlock            = errors.New("retrieved block is invalid")
	errInvalidBody             = errors.New("retrieved block body is invalid")
	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
	errCancelBlockFetch        = errors.New("block download canceled (requested)")
	errCancelHeaderFetch       = errors.New("block header download canceled (requested)")
	errCancelBodyFetch         = errors.New("block body download canceled (requested)")
	errCancelReceiptFetch      = errors.New("receipt download canceled (requested)")
	errCancelStateFetch        = errors.New("state data download canceled (requested)")
	errCancelHeaderProcessing  = errors.New("header processing canceled (requested)")
	errCancelContentProcessing = errors.New("content processing canceled (requested)")
	errNoSyncActive            = errors.New("no sync active")
	errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
)

type Downloader struct {
	mode SyncMode       // Synchronisation mode defining the strategy used (per sync cycle)
	mux  *event.TypeMux // Event multiplexer to announce sync operation events

	genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT)
	queue   *queue   // Scheduler for selecting the hashes to download
	peers   *peerSet // Set of active peers from which download can proceed
	stateDB ethdb.Database
	shyftDb ethdb.SDatabase

	rttEstimate   uint64 // Round trip time to target for download requests
	rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)

	// Statistics
	syncStatsChainOrigin uint64 // Origin block number where syncing started at
	syncStatsChainHeight uint64 // Highest block number known when syncing started
	syncStatsState       stateSyncStats
	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields

	lightchain LightChain
	blockchain BlockChain

	// Callbacks
	dropPeer peerDropFn // Drops a peer for misbehaving

	// Status
	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
	synchronising   int32
	notified        int32
	committed       int32

	// Channels
	headerCh      chan dataPack        // [eth/62] Channel receiving inbound block headers
	bodyCh        chan dataPack        // [eth/62] Channel receiving inbound block bodies
	receiptCh     chan dataPack        // [eth/63] Channel receiving inbound receipts
	bodyWakeCh    chan bool            // [eth/62] Channel to signal the block body fetcher of new tasks
	receiptWakeCh chan bool            // [eth/63] Channel to signal the receipt fetcher of new tasks
	headerProcCh  chan []*types.Header // [eth/62] Channel to feed the header processor new tasks

	// for stateFetcher
	stateSyncStart chan *stateSync
	trackStateReq  chan *stateReq
	stateCh        chan dataPack // [eth/63] Channel receiving inbound node state data

	// Cancellation and termination
	cancelPeer string         // Identifier of the peer currently being used as the master (cancel on drop)
	cancelCh   chan struct{}  // Channel to cancel mid-flight syncs
	cancelLock sync.RWMutex   // Lock to protect the cancel channel and peer in delivers
	cancelWg   sync.WaitGroup // Make sure all fetcher goroutines have exited.

	quitCh   chan struct{} // Quit channel to signal termination
	quitLock sync.RWMutex  // Lock to prevent double closes

	// Testing hooks
	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
}

// LightChain encapsulates functions required to synchronise a light chain.
type LightChain interface {
	// HasHeader verifies a header's presence in the local chain.
	HasHeader(common.Hash, uint64) bool

	// GetHeaderByHash retrieves a header from the local chain.
	GetHeaderByHash(common.Hash) *types.Header

	// CurrentHeader retrieves the head header from the local chain.
	CurrentHeader() *types.Header

	// GetTd returns the total difficulty of a local block.
	GetTd(common.Hash, uint64) *big.Int

	// InsertHeaderChain inserts a batch of headers into the local chain.
	InsertHeaderChain([]*types.Header, int) (int, error)

	// Rollback removes a few recently added elements from the local chain.
	Rollback([]common.Hash)
}
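// Any backend satisfying these six methods can drive a light sync. A
// hypothetical in-memory stub (illustration only; stubLightChain is not part
// of this package) shows the minimal shape:
//
//	type stubLightChain struct {
//		headers map[common.Hash]*types.Header
//		tds     map[common.Hash]*big.Int
//		head    *types.Header
//	}
//
//	func (c *stubLightChain) HasHeader(h common.Hash, _ uint64) bool      { return c.headers[h] != nil }
//	func (c *stubLightChain) GetHeaderByHash(h common.Hash) *types.Header { return c.headers[h] }
//	func (c *stubLightChain) CurrentHeader() *types.Header                { return c.head }
//	func (c *stubLightChain) GetTd(h common.Hash, _ uint64) *big.Int      { return c.tds[h] }
//
//	func (c *stubLightChain) InsertHeaderChain(hs []*types.Header, _ int) (int, error) {
//		for _, h := range hs {
//			c.headers[h.Hash()] = h
//			c.head = h
//		}
//		return 0, nil
//	}
//
//	func (c *stubLightChain) Rollback(hashes []common.Hash) {
//		for _, h := range hashes {
//			delete(c.headers, h)
//		}
//	}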
// BlockChain encapsulates functions required to sync a (full or fast) blockchain.
type BlockChain interface {
	LightChain

	// HasBlock verifies a block's presence in the local chain.
	HasBlock(common.Hash, uint64) bool

	// HasFastBlock verifies a fast block's presence in the local chain.
	HasFastBlock(common.Hash, uint64) bool

	// GetBlockByHash retrieves a block from the local chain.
	GetBlockByHash(common.Hash) *types.Block

	// CurrentBlock retrieves the head block from the local chain.
	CurrentBlock() *types.Block

	// CurrentFastBlock retrieves the head fast block from the local chain.
	CurrentFastBlock() *types.Block

	// FastSyncCommitHead directly commits the head block to a certain entity.
	FastSyncCommitHead(common.Hash) error

	// InsertChain inserts a batch of blocks into the local chain.
	InsertChain(types.Blocks) (int, error)

	// InsertReceiptChain inserts a batch of receipts into the local chain.
	InsertReceiptChain(types.Blocks, []types.Receipts) (int, error)
}

// New creates a new downloader to fetch hashes and blocks from remote peers.
func New(mode SyncMode, stateDb ethdb.Database, shyftDb ethdb.SDatabase, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
	if lightchain == nil {
		lightchain = chain
	}

	dl := &Downloader{
		mode:           mode,
		stateDB:        stateDb,
		shyftDb:        shyftDb,
		mux:            mux,
		queue:          newQueue(),
		peers:          newPeerSet(),
		rttEstimate:    uint64(rttMaxEstimate),
		rttConfidence:  uint64(1000000),
		blockchain:     chain,
		lightchain:     lightchain,
		dropPeer:       dropPeer,
		headerCh:       make(chan dataPack, 1),
		bodyCh:         make(chan dataPack, 1),
		receiptCh:      make(chan dataPack, 1),
		bodyWakeCh:     make(chan bool, 1),
		receiptWakeCh:  make(chan bool, 1),
		headerProcCh:   make(chan []*types.Header, 1),
		quitCh:         make(chan struct{}),
		stateCh:        make(chan dataPack),
		stateSyncStart: make(chan *stateSync),
		syncStatsState: stateSyncStats{
			processed: rawdb.ReadFastTrieProgress(stateDb),
		},
		trackStateReq: make(chan *stateReq),
	}
	go dl.qosTuner()
	go dl.stateFetcher()
	return dl
}

// Progress retrieves the synchronisation boundaries, specifically the origin
// block where synchronisation started at (may have failed/suspended); the block
// or header sync is currently at; and the latest known block which the sync targets.
//
// In addition, during the state download phase of fast synchronisation the number
// of processed and the total number of known states are also returned. Otherwise
// these are zero.
func (d *Downloader) Progress() ethereum.SyncProgress {
	// Lock the current stats and return the progress
	d.syncStatsLock.RLock()
	defer d.syncStatsLock.RUnlock()

	current := uint64(0)
	switch d.mode {
	case FullSync:
		current = d.blockchain.CurrentBlock().NumberU64()
	case FastSync:
		current = d.blockchain.CurrentFastBlock().NumberU64()
	case LightSync:
		current = d.lightchain.CurrentHeader().Number.Uint64()
	}
	return ethereum.SyncProgress{
		StartingBlock: d.syncStatsChainOrigin,
		CurrentBlock:  current,
		HighestBlock:  d.syncStatsChainHeight,
		PulledStates:  d.syncStatsState.processed,
		KnownStates:   d.syncStatsState.processed + d.syncStatsState.pending,
	}
}
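// A caller can poll Progress to render sync status. A hedged usage sketch
// (the dl variable and surrounding setup are assumed, not shown here):
//
//	p := dl.Progress()
//	if p.HighestBlock > p.StartingBlock && p.CurrentBlock < p.HighestBlock {
//		pct := float64(p.CurrentBlock-p.StartingBlock) /
//			float64(p.HighestBlock-p.StartingBlock) * 100
//		log.Info("Syncing", "block", p.CurrentBlock, "target", p.HighestBlock, "pct", pct)
//	}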
// Synchronising returns whether the downloader is currently retrieving blocks.
func (d *Downloader) Synchronising() bool {
	return atomic.LoadInt32(&d.synchronising) > 0
}

// RegisterPeer injects a new download peer into the set of block sources to be
// used for fetching hashes and blocks from.
func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
	logger := log.New("peer", id)
	logger.Trace("Registering sync peer")
	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
		logger.Error("Failed to register sync peer", "err", err)
		return err
	}
	d.qosReduceConfidence()

	return nil
}

// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error {
	return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
}

// UnregisterPeer removes a peer from the known list, preventing any action from
// the specified peer. An effort is also made to return any pending fetches into
// the queue.
func (d *Downloader) UnregisterPeer(id string) error {
	// Unregister the peer from the active peer set and revoke any fetch tasks
	logger := log.New("peer", id)
	logger.Trace("Unregistering sync peer")
	if err := d.peers.Unregister(id); err != nil {
		logger.Error("Failed to unregister sync peer", "err", err)
		return err
	}
	d.queue.Revoke(id)

	// If this peer was the master peer, abort sync immediately
	d.cancelLock.RLock()
	master := id == d.cancelPeer
	d.cancelLock.RUnlock()

	if master {
		d.cancel()
	}
	return nil
}

// Synchronise tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries.
func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
	err := d.synchronise(id, head, td, mode)
	switch err {
	case nil:
	case errBusy:

	case errTimeout, errBadPeer, errStallingPeer,
		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
		errInvalidAncestor, errInvalidChain:
		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
		if d.dropPeer == nil {
			// The dropPeer method is nil when `--copydb` is used for a local copy.
			// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
			log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
		} else {
			d.dropPeer(id)
		}
	default:
		log.Warn("Synchronisation failed, retrying", "err", err)
	}
	return err
}
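// Synchronise is typically invoked by the protocol handler whenever a peer
// advertises a higher total difficulty. A rough caller-side sketch, from
// outside this package (the peer accessors here are assumptions, not this
// package's API):
//
//	head, td := peer.Head()
//	if td.Cmp(localTd) > 0 {
//		// Error classification already happened inside Synchronise: bad
//		// peers were dropped, transient failures just retry next cycle.
//		_ = dl.Synchronise(peer.ID(), head, td, downloader.FastSync)
//	}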
// synchronise will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronise if its TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous.
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
	// Mock out the synchronisation if testing
	if d.synchroniseMock != nil {
		return d.synchroniseMock(id, hash)
	}
	// Make sure only one goroutine is ever allowed past this point at once
	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&d.synchronising, 0)

	// Post a user notification of the sync (only once per session)
	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
		log.Info("Block synchronisation started")
	}
	// Reset the queue, peer set and wake channels to clean any internal leftover state
	d.queue.Reset()
	d.peers.Reset()

	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
		select {
		case <-ch:
		default:
		}
	}
	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
		for empty := false; !empty; {
			select {
			case <-ch:
			default:
				empty = true
			}
		}
	}
	for empty := false; !empty; {
		select {
		case <-d.headerProcCh:
		default:
			empty = true
		}
	}
	// Create cancel channel for aborting mid-flight and mark the master peer
	d.cancelLock.Lock()
	d.cancelCh = make(chan struct{})
	d.cancelPeer = id
	d.cancelLock.Unlock()

	defer d.Cancel() // No matter what, we can't leave the cancel channel open

	// Set the requested sync mode, unless it's forbidden
	d.mode = mode

	// Retrieve the origin peer and initiate the downloading process
	p := d.peers.Peer(id)
	if p == nil {
		return errUnknownPeer
	}
	return d.syncWithPeer(p, hash, td)
}
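// The channel flushing above uses Go's non-blocking select idiom. Isolated
// into a helper, the pattern looks like this (generic sketch, not part of
// this file):
//
//	// drain discards any buffered values without ever blocking.
//	func drain(ch <-chan dataPack) {
//		for {
//			select {
//			case <-ch:
//			default:
//				return
//			}
//		}
//	}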
// syncWithPeer starts a block synchronisation based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
	d.mux.Post(StartEvent{})
	defer func() {
		// reset on error
		if err != nil {
			d.mux.Post(FailedEvent{err})
		} else {
			d.mux.Post(DoneEvent{})
		}
	}()
	if p.version < 62 {
		return errTooOld
	}

	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
	defer func(start time.Time) {
		log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
	}(time.Now())

	// Look up the sync boundaries: the common ancestor and the target block
	latest, err := d.fetchHeight(p)
	if err != nil {
		return err
	}
	height := latest.Number.Uint64()

	origin, err := d.findAncestor(p, latest)
	if err != nil {
		return err
	}
	d.syncStatsLock.Lock()
	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
		d.syncStatsChainOrigin = origin
	}
	d.syncStatsChainHeight = height
	d.syncStatsLock.Unlock()

	// Ensure our origin point is below any fast sync pivot point
	pivot := uint64(0)
	if d.mode == FastSync {
		if height <= uint64(fsMinFullBlocks) {
			origin = 0
		} else {
			pivot = height - uint64(fsMinFullBlocks)
			if pivot <= origin {
				origin = pivot - 1
			}
		}
	}
	d.committed = 1
	if d.mode == FastSync && pivot != 0 {
		d.committed = 0
	}
	// Initiate the sync using a concurrent header and content retrieval algorithm
	d.queue.Prepare(origin+1, d.mode)
	if d.syncInitHook != nil {
		d.syncInitHook(origin, height)
	}

	fetchers := []func() error{
		func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved
		func() error { return d.fetchBodies(origin + 1) },          // Bodies are retrieved during normal and fast sync
		func() error { return d.fetchReceipts(origin + 1) },        // Receipts are retrieved during fast sync
		func() error { return d.processHeaders(origin+1, pivot, td) },
	}
	if d.mode == FastSync {
		fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) })
	} else if d.mode == FullSync {
		fetchers = append(fetchers, d.processFullSyncContent)
	}
	return d.spawnSync(fetchers)
}

// spawnSync runs d.process and all given fetcher functions to completion in
// separate goroutines, returning the first error that appears.
func (d *Downloader) spawnSync(fetchers []func() error) error {
	errc := make(chan error, len(fetchers))
	d.cancelWg.Add(len(fetchers))
	for _, fn := range fetchers {
		fn := fn
		go func() { defer d.cancelWg.Done(); errc <- fn() }()
	}
	// Wait for the first error, then terminate the others.
	var err error
	for i := 0; i < len(fetchers); i++ {
		if i == len(fetchers)-1 {
			// Close the queue when all fetchers have exited.
			// This will cause the block processor to end when
			// it has processed the queue.
			d.queue.Close()
		}
		if err = <-errc; err != nil {
			break
		}
	}
	d.queue.Close()
	d.Cancel()
	return err
}
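// The fan-out/first-error pattern behind spawnSync can be distilled into a
// standalone sketch (illustrative only; spawnSync additionally closes the
// queue and cancels the remaining fetchers):
//
//	func runAll(fns []func() error) error {
//		errc := make(chan error, len(fns))
//		for _, fn := range fns {
//			fn := fn // capture the loop variable for the goroutine
//			go func() { errc <- fn() }()
//		}
//		var first error
//		for range fns {
//			if err := <-errc; err != nil && first == nil {
//				first = err // remember the first failure, drain the rest
//			}
//		}
//		return first
//	}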
// cancel aborts all of the operations and resets the queue. However, cancel does
// not wait for the running download goroutines to finish. This method should be
// used when cancelling the downloads from inside the downloader.
func (d *Downloader) cancel() {
	// Close the current cancel channel
	d.cancelLock.Lock()
	if d.cancelCh != nil {
		select {
		case <-d.cancelCh:
			// Channel was already closed
		default:
			close(d.cancelCh)
		}
	}
	d.cancelLock.Unlock()
}

// Cancel aborts all of the operations and waits for all download goroutines to
// finish before returning.
func (d *Downloader) Cancel() {
	d.cancel()
	d.cancelWg.Wait()
}

// Terminate interrupts the downloader, canceling all pending operations.
// The downloader cannot be reused after calling Terminate.
func (d *Downloader) Terminate() {
	// Close the termination channel (make sure double close is allowed)
	d.quitLock.Lock()
	select {
	case <-d.quitCh:
	default:
		close(d.quitCh)
	}
	d.quitLock.Unlock()

	// Cancel any pending download requests
	d.Cancel()
}

// fetchHeight retrieves the head header of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
	p.log.Debug("Retrieving remote chain height")

	// Request the advertised remote head block and wait for the response
	head, _ := p.peer.Head()
	go p.peer.RequestHeadersByHash(head, 1, 0, false)

	ttl := d.requestTTL()
	timeout := time.After(ttl)
	for {
		select {
		case <-d.cancelCh:
			return nil, errCancelBlockFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) != 1 {
				p.log.Debug("Multiple headers for single request", "headers", len(headers))
				return nil, errBadPeer
			}
			head := headers[0]
			p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash())
			return head, nil

		case <-timeout:
			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
			return nil, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
}
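// fetchHeight is the template for this package's request/response handling:
// fire the network request asynchronously, then select on delivery, an
// adaptive timeout and cancellation, discarding deliveries from other peers.
// Stripped to its skeleton (sketch; request and handle are placeholders):
//
//	go request()                          // async network call
//	timeout := time.After(d.requestTTL()) // RTT-derived deadline
//	for {
//		select {
//		case <-d.cancelCh:
//			return nil, errCancelBlockFetch
//		case packet := <-d.headerCh:
//			if packet.PeerId() != p.id {
//				continue // reply belongs to another exchange
//			}
//			return handle(packet)
//		case <-timeout:
//			return nil, errTimeout
//		}
//	}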
// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
// common ancestor.
// It returns parameters to be used for peer.RequestHeadersByNumber:
//   from  - starting block number
//   count - number of headers to request
//   skip  - number of headers to skip
// and also returns 'max', the last block which is expected to be returned by the remote peer,
// given the (from, count, skip).
func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
	var (
		from     int
		count    int
		MaxCount = MaxHeaderFetch / 16
	)
	// requestHead is the highest block that we will ask for. If requestHead is not offset,
	// the highest block that we will get is 16 blocks back from head, which means we
	// will fetch 14 or 15 blocks unnecessarily in the case the height difference
	// between us and the peer is 1-2 blocks, which is most common
	requestHead := int(remoteHeight) - 1
	if requestHead < 0 {
		requestHead = 0
	}
	// requestBottom is the lowest block we want included in the query
	// Ideally, we want to include just below own head
	requestBottom := int(localHeight - 1)
	if requestBottom < 0 {
		requestBottom = 0
	}
	totalSpan := requestHead - requestBottom
	span := 1 + totalSpan/MaxCount
	if span < 2 {
		span = 2
	}
	if span > 16 {
		span = 16
	}

	count = 1 + totalSpan/span
	if count > MaxCount {
		count = MaxCount
	}
	if count < 2 {
		count = 2
	}
	from = requestHead - (count-1)*span
	if from < 0 {
		from = 0
	}
	max := from + (count-1)*span
	return int64(from), count, span - 1, uint64(max)
}
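// A worked example of the span arithmetic, with MaxHeaderFetch = 192 (so
// MaxCount = 12):
//
//	from, count, skip, max := calculateRequestSpan(1000, 100)
//	// requestHead = 999, requestBottom = 99, totalSpan = 900
//	// span = 1 + 900/12 = 76, clamped to 16; count = 1 + 900/16 = 57, clamped to 12
//	// from = 999 - 11*16 = 823, skip = 15, max = 823 + 11*16 = 999
//	// i.e. the peer is asked for headers 823, 839, ..., 999
//
// In the common near-sync case (remote 102, local 100) the same formula
// degenerates to two headers, 99 and 101, bracketing our own head.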
// findAncestor tries to locate the common ancestor link of the local chain and
// a remote peer's blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N links should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
	// Figure out the valid ancestor range to prevent rewrite attacks
	var (
		floor        = int64(-1)
		localHeight  uint64
		remoteHeight = remoteHeader.Number.Uint64()
	)
	switch d.mode {
	case FullSync:
		localHeight = d.blockchain.CurrentBlock().NumberU64()
	case FastSync:
		localHeight = d.blockchain.CurrentFastBlock().NumberU64()
	default:
		localHeight = d.lightchain.CurrentHeader().Number.Uint64()
	}
	p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)
	if localHeight >= MaxForkAncestry {
		// We're above the max reorg threshold, find the earliest fork point
		floor = int64(localHeight - MaxForkAncestry)

		// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
		// all headers before that point will be missing.
		if d.mode == LightSync {
			// If we don't know the current CHT position, find it
			if d.genesis == 0 {
				header := d.lightchain.CurrentHeader()
				for header != nil {
					d.genesis = header.Number.Uint64()
					if floor >= int64(d.genesis)-1 {
						break
					}
					header = d.lightchain.GetHeaderByHash(header.ParentHash)
				}
			}
			// We already know the "genesis" block number, cap floor to that
			if floor < int64(d.genesis)-1 {
				floor = int64(d.genesis) - 1
			}
		}
	}
	from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)

	p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
	go p.peer.RequestHeadersByNumber(uint64(from), count, skip, false)

	// Wait for the remote response to the head fetch
	number, hash := uint64(0), common.Hash{}

	ttl := d.requestTTL()
	timeout := time.After(ttl)

	for finished := false; !finished; {
		select {
		case <-d.cancelCh:
			return 0, errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) == 0 {
				p.log.Warn("Empty head header set")
				return 0, errEmptyHeaderSet
			}
			// Make sure the peer's reply conforms to the request
			for i, header := range headers {
				expectNumber := from + int64(i)*int64(skip+1)
				if number := header.Number.Int64(); number != expectNumber {
					p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
					return 0, errInvalidChain
				}
			}
			// Check if a common ancestor was found
			finished = true
			for i := len(headers) - 1; i >= 0; i-- {
				// Skip any headers that underflow/overflow our requested set
				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
					continue
				}
				// Otherwise check if we already know the header or not
				h := headers[i].Hash()
				n := headers[i].Number.Uint64()

				var known bool
				switch d.mode {
				case FullSync:
					known = d.blockchain.HasBlock(h, n)
				case FastSync:
					known = d.blockchain.HasFastBlock(h, n)
				default:
					known = d.lightchain.HasHeader(h, n)
				}
				if known {
					number, hash = n, h
					break
				}
			}

		case <-timeout:
			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
			return 0, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
	// If the head fetch already found an ancestor, return
	if hash != (common.Hash{}) {
		if int64(number) <= floor {
			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
			return 0, errInvalidAncestor
		}
		p.log.Debug("Found common ancestor", "number", number, "hash", hash)
		return number, nil
	}
	// Ancestor not found, we need to binary search over our chain
	start, end := uint64(0), remoteHeight
	if floor > 0 {
		start = uint64(floor)
	}
	p.log.Trace("Binary searching for common ancestor", "start", start, "end", end)

	for start+1 < end {
		// Split our chain interval in two, and request the hash to cross check
		check := (start + end) / 2

		ttl := d.requestTTL()
		timeout := time.After(ttl)

		go p.peer.RequestHeadersByNumber(check, 1, 0, false)

		// Wait until a reply arrives to this request
		for arrived := false; !arrived; {
			select {
			case <-d.cancelCh:
				return 0, errCancelHeaderFetch

			case packet := <-d.headerCh:
				// Discard anything not from the origin peer
				if packet.PeerId() != p.id {
					log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
					break
				}
				// Make sure the peer actually gave something valid
				headers := packet.(*headerPack).headers
				if len(headers) != 1 {
					p.log.Debug("Multiple headers for single request", "headers", len(headers))
					return 0, errBadPeer
				}
				arrived = true

				// Modify the search interval based on the response
				h := headers[0].Hash()
				n := headers[0].Number.Uint64()

				var known bool
				switch d.mode {
				case FullSync:
					known = d.blockchain.HasBlock(h, n)
				case FastSync:
					known = d.blockchain.HasFastBlock(h, n)
				default:
					known = d.lightchain.HasHeader(h, n)
				}
				if !known {
					end = check
					break
				}
				header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
				if header.Number.Uint64() != check {
					p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
					return 0, errBadPeer
				}
				start = check
				hash = h

			case <-timeout:
				p.log.Debug("Waiting for search header timed out", "elapsed", ttl)
				return 0, errTimeout

			case <-d.bodyCh:
			case <-d.receiptCh:
				// Out of bounds delivery, ignore
			}
		}
	}
	// Ensure valid ancestry and return
	if int64(start) <= floor {
		p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
		return 0, errInvalidAncestor
	}
	p.log.Debug("Found common ancestor", "number", start, "hash", hash)
	return start, nil
}
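// The fallback above is a binary search over the monotonic predicate "block N
// is known locally": it holds for everything at or below the common ancestor
// and fails above it. With the networking stripped away (sketch; known stands
// in for the per-mode lookup):
//
//	start, end := uint64(0), remoteHeight
//	for start+1 < end {
//		check := (start + end) / 2
//		if known(check) { // fetch header `check` from the peer, look it up locally
//			start = check
//		} else {
//			end = check
//		}
//	}
//	// start is now the highest block shared with the peer's chain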
// fetchHeaders keeps retrieving headers concurrently from the number
// requested, until no more are returned, potentially throttling on the way. To
// facilitate concurrency but still protect against malicious nodes sending bad
// headers, we construct a header chain skeleton using the "origin" peer we are
// syncing with, and fill in the missing headers using anyone else. Headers from
// other peers are only accepted if they map cleanly to the skeleton. If no one
// can fill in the skeleton - not even the origin peer - it's assumed invalid and
// the origin is dropped.
func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64) error {
	p.log.Debug("Directing header downloads", "origin", from)
	defer p.log.Debug("Header download terminated")

	// Create a timeout timer, and the associated header fetcher
	skeleton := true            // Skeleton assembly phase or finishing up
	request := time.Now()       // time of the last skeleton fetch request
	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
	<-timeout.C                 // timeout channel should be initially empty
	defer timeout.Stop()

	var ttl time.Duration
	getHeaders := func(from uint64) {
		request = time.Now()

		ttl = d.requestTTL()
		timeout.Reset(ttl)

		if skeleton {
			p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
		} else {
			p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
		}
	}
	// Start pulling the header chain skeleton until all is done
	getHeaders(from)

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Make sure the active peer is giving us the skeleton headers
			if packet.PeerId() != p.id {
				log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
				break
			}
			headerReqTimer.UpdateSince(request)
			timeout.Stop()

			// If the skeleton's finished, pull any remaining head headers directly from the origin
			if packet.Items() == 0 && skeleton {
				skeleton = false
				getHeaders(from)
				continue
			}
			// If no more headers are inbound, notify the content fetchers and return
			if packet.Items() == 0 {
				// Don't abort header fetches while the pivot is downloading
				if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
					p.log.Debug("No headers, waiting for pivot commit")
					select {
					case <-time.After(fsHeaderContCheck):
						getHeaders(from)
						continue
					case <-d.cancelCh:
						return errCancelHeaderFetch
					}
				}
				// Pivot done (or not in fast sync) and no more headers, terminate the process
				p.log.Debug("No more headers available")
				select {
				case d.headerProcCh <- nil:
					return nil
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}
			headers := packet.(*headerPack).headers

			// If we received a skeleton batch, resolve internals concurrently
			if skeleton {
				filled, proced, err := d.fillHeaderSkeleton(from, headers)
				if err != nil {
					p.log.Debug("Skeleton chain invalid", "err", err)
					return errInvalidChain
				}
				headers = filled[proced:]
				from += uint64(proced)
			} else {
				// If we're closing in on the chain head, but haven't yet reached it, delay
				// the last few headers so mini reorgs on the head don't cause invalid hash
				// chain errors.
				if n := len(headers); n > 0 {
					// Retrieve the current head we're at
					head := uint64(0)
					if d.mode == LightSync {
						head = d.lightchain.CurrentHeader().Number.Uint64()
					} else {
						head = d.blockchain.CurrentFastBlock().NumberU64()
						if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
							head = full
						}
					}
					// If the head is way older than this batch, delay the last few headers
					if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
						delay := reorgProtHeaderDelay
						if delay > n {
							delay = n
						}
						headers = headers[:n-delay]
					}
				}
			}
			// Insert all the new headers and fetch the next batch
			if len(headers) > 0 {
				p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
				select {
				case d.headerProcCh <- headers:
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
				from += uint64(len(headers))
				getHeaders(from)
			} else {
				// No headers delivered, or all of them being delayed, sleep a bit and retry
				p.log.Trace("All headers delayed, waiting")
				select {
				case <-time.After(fsHeaderContCheck):
					getHeaders(from)
					continue
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}

		case <-timeout.C:
			if d.dropPeer == nil {
				// The dropPeer method is nil when `--copydb` is used for a local copy.
				// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
				p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id)
				break
			}
			// Header retrieval timed out, consider the peer bad and drop
			p.log.Debug("Header request timed out", "elapsed", ttl)
			headerTimeoutMeter.Mark(1)
			d.dropPeer(p.id)

			// Finish the sync gracefully instead of dumping the gathered data though
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- false:
				case <-d.cancelCh:
				}
			}
			select {
			case d.headerProcCh <- nil:
			case <-d.cancelCh:
			}
			return errBadPeer
		}
	}
}
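// The skeleton request arithmetic in getHeaders is easy to miss: one request
// pins anchors MaxHeaderFetch blocks apart, and fillHeaderSkeleton later fills
// the gaps from arbitrary peers. A worked sketch of the anchors for from = 1:
//
//	for i := 0; i < MaxSkeletonSize; i++ {
//		anchor := from + uint64(MaxHeaderFetch)*uint64(i+1) - 1
//		// from = 1: anchor = 192, 384, ..., 192*128 = 24576
//	}
//
// Each 191-header gap between anchors is then downloaded concurrently and
// only accepted if it hash-links cleanly into the pinned skeleton.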
// fillHeaderSkeleton concurrently retrieves headers from all our available peers
// and maps them to the provided skeleton header chain.
//
// Any partial results from the beginning of the skeleton are (if possible) forwarded
// immediately to the header processor to keep the rest of the pipeline full even
// in the case of header stalls.
//
// The method returns the entire filled skeleton and also the number of headers
// already forwarded for processing.
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
	log.Debug("Filling up skeleton", "from", from)
	d.queue.ScheduleSkeleton(from, skeleton)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*headerPack)
			return d.queue.DeliverHeaders(pack.peerID, pack.headers, d.headerProcCh)
		}
		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
		throttle = func() bool { return false }
		reserve  = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
			return d.queue.ReserveHeaders(p, count), false, nil
		}
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
		capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
	)
	err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")

	log.Debug("Skeleton fill terminated", "err", err)

	filled, proced := d.queue.RetrieveHeaders()
	return filled, proced, err
}

// fetchBodies iteratively downloads the scheduled block bodies, taking any
// available peers, reserving a chunk of blocks for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchBodies(from uint64) error {
	log.Debug("Downloading block bodies", "origin", from)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*bodyPack)
			return d.queue.DeliverBodies(pack.peerID, pack.transactions, pack.uncles)
		}
		expire   = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
		capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
	)
	err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")

	log.Debug("Block body download terminated", "err", err)
	return err
}
// fetchReceipts iteratively downloads the scheduled block receipts, taking any
// available peers, reserving a chunk of receipts for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchReceipts(from uint64) error {
	log.Debug("Downloading transaction receipts", "origin", from)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*receiptPack)
			return d.queue.DeliverReceipts(pack.peerID, pack.receipts)
		}
		expire   = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
		capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
	)
	err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")

	log.Debug("Transaction receipt download terminated", "err", err)
	return err
}

// fetchParts iteratively downloads scheduled block parts, taking any available
// peers, reserving a chunk of fetch requests for each, waiting for delivery and
// also periodically checking for timeouts.
//
// As the scheduling/timeout logic mostly is the same for all downloaded data
// types, this method is used by each for data gathering and is instrumented with
// various callbacks to handle the slight differences between processing them.
//
// The instrumentation parameters:
//   - errCancel:  error type to return if the fetch operation is cancelled (mostly makes logging nicer)
//   - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
//   - deliver:    processing callback to deliver data packets into type specific download queues (usually within `queue`)
//   - wakeCh:     notification channel for waking the fetcher when new tasks are available (or sync completed)
//   - expire:     task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
//   - pending:    task callback for the number of requests still needing download (detect completion/non-completability)
//   - inFlight:   task callback for the number of in-progress requests (wait for all active downloads to finish)
//   - throttle:   task callback to check if the processing queue is full and activate throttling (bound memory use)
//   - reserve:    task callback to reserve new download tasks to a particular peer (also signals partial completions)
//   - fetchHook:  tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
//   - fetch:      network callback to actually send a particular download request to a physical remote peer
//   - cancel:     task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
//   - capacity:   network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
//   - idle:       network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
//   - setIdle:    network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
//   - kind:       textual label of the type being downloaded to display in log messages
func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
	expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error),
	fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
	idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error {

	// Create a ticker to detect expired retrieval tasks
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	update := make(chan struct{}, 1)

	// Prepare the queue and fetch block parts until the block header fetcher's done
	finished := false
	for {
		select {
		case <-d.cancelCh:
			return errCancel

		case packet := <-deliveryCh:
			// If the peer was previously banned and failed to deliver its pack
			// in a reasonable time frame, ignore its message.
			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
				// Deliver the received chunk of data and check chain validity
				accepted, err := deliver(packet)
				if err == errInvalidChain {
					return err
				}
				// Unless a peer delivered something completely different from what was
				// requested (usually caused by a timed out request which came through
				// in the end), set it to idle. If the delivery's stale, the peer should
				// have already been idled.
				if err != errStaleDelivery {
					setIdle(peer, accepted)
				}
				// Issue a log to the user to see what's going on
				switch {
				case err == nil && packet.Items() == 0:
					peer.log.Trace("Requested data not delivered", "type", kind)
				case err == nil:
					peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
				default:
					peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err)
				}
			}
			// Blocks assembled, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case cont := <-wakeCh:
			// The header fetcher sent a continuation flag, check if it's done
			if !cont {
				finished = true
			}
			// Headers arrive, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-ticker.C:
			// Sanity check update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-update:
			// Short circuit if we lost all our peers
			if d.peers.Len() == 0 {
				return errNoPeers
			}
			// Check for fetch request timeouts and demote the responsible peers
			for pid, fails := range expire() {
				if peer := d.peers.Peer(pid); peer != nil {
					// If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
					// ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal times
					// out, the peer is useless for syncing and we need to get rid of it.
					//
					// The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth
					// and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
					// how response times react to it, so it always requests one more than the minimum (i.e. min 2).
					if fails > 2 {
						peer.log.Trace("Data delivery timed out", "type", kind)
						setIdle(peer, 0)
					} else {
						peer.log.Debug("Stalling delivery, dropping", "type", kind)
						if d.dropPeer == nil {
							// The dropPeer method is nil when `--copydb` is used for a local copy.
							// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
							peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid)
						} else {
							d.dropPeer(pid)
						}
					}
				}
			}
			// If there's nothing more to fetch, wait or terminate
			if pending() == 0 {
				if !inFlight() && finished {
					log.Debug("Data fetching completed", "type", kind)
					return nil
				}
				break
			}
			// Send a download request to all idle peers, until throttled
			progressed, throttled, running := false, false, inFlight()
			idles, total := idle()

			for _, peer := range idles {
				// Short circuit if throttling activated
				if throttle() {
					throttled = true
					break
				}
				// Short circuit if there is no more available task.
				if pending() == 0 {
					break
				}
				// Reserve a chunk of fetches for a peer. A nil can mean either that
				// no more headers are available, or that the peer is known not to
				// have them.
				request, progress, err := reserve(peer, capacity(peer))
				if err != nil {
					return err
				}
				if progress {
					progressed = true
				}
				if request == nil {
					continue
				}
				if request.From > 0 {
					peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From)
				} else {
					peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
				}
				// Fetch the chunk and make sure any errors return the hashes to the queue
				if fetchHook != nil {
					fetchHook(request.Headers)
				}
				if err := fetch(peer, request); err != nil {
					// Although we could try and make an attempt to fix this, this error really
					// means that we've double allocated a fetch task to a peer. If that is the
					// case, the internal state of the downloader and the queue is very wrong so
					// better hard crash and note the error instead of silently accumulating into
					// a much bigger issue.
					panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
				}
				running = true
			}
			// Make sure that we have peers available for fetching. If all peers have been tried
			// and all failed throw an error
			if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
				return errPeersUnavailable
			}
		}
	}
}
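// The 1-buffered update channel above is a coalescing trigger: any number of
// events collapse into at most one pending wake-up, so the scheduling work
// under `case <-update` runs once per burst rather than once per event. In
// isolation (generic sketch):
//
//	update := make(chan struct{}, 1)
//	notify := func() {
//		select {
//		case update <- struct{}{}:
//		default: // a wake-up is already pending, drop the duplicate
//		}
//	}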
// processHeaders takes batches of retrieved headers from an input channel and
// keeps processing and scheduling them into the header chain and downloader's
// queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
	// Keep a count of uncertain headers to roll back
	rollback := []*types.Header{}
	defer func() {
		if len(rollback) > 0 {
			// Flatten the headers and roll them back
			hashes := make([]common.Hash, len(rollback))
			for i, header := range rollback {
				hashes[i] = header.Hash()
			}
			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
			if d.mode != LightSync {
				lastFastBlock = d.blockchain.CurrentFastBlock().Number()
				lastBlock = d.blockchain.CurrentBlock().Number()
			}
			d.lightchain.Rollback(hashes)
			curFastBlock, curBlock := common.Big0, common.Big0
			if d.mode != LightSync {
				curFastBlock = d.blockchain.CurrentFastBlock().Number()
				curBlock = d.blockchain.CurrentBlock().Number()
			}
			log.Warn("Rolled back headers", "count", len(hashes),
				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
		}
	}()

	// Wait for batches of headers to process
	gotHeaders := false

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderProcessing

		case headers := <-d.headerProcCh:
			// Terminate header processing if we synced up
			if len(headers) == 0 {
				// Notify everyone that headers are fully processed
				for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
					select {
					case ch <- false:
					case <-d.cancelCh:
					}
				}
				// If no headers were retrieved at all, the peer violated its TD promise that it had a
				// better chain compared to ours. The only exception is if its promised blocks were
				// already imported by other means (e.g. fetcher):
				//
				// R <remote peer>, L <local node>: Both at block 10
				// R: Mine block 11, and propagate it to L
				// L: Queue block 11 for import
				// L: Notice that R's head and TD increased compared to ours, start sync
				// L: Import of block 11 finishes
				// L: Sync begins, and finds common ancestor at 11
				// L: Request new headers up from 11 (R's TD was higher, it must have something)
				// R: Nothing to give
				if d.mode != LightSync {
					head := d.blockchain.CurrentBlock()
					if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
						return errStallingPeer
					}
				}
				// If fast or light syncing, ensure promised headers are indeed delivered. This is
				// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
				// of delivering the post-pivot blocks that would flag the invalid content.
				//
				// This check cannot be executed "as is" for full imports, since blocks may still be
				// queued for processing when the header download completes. However, as long as the
				// peer gave us something useful, we're already happy/progressed (above check).
				if d.mode == FastSync || d.mode == LightSync {
					head := d.lightchain.CurrentHeader()
					if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
						return errStallingPeer
					}
				}
				// Disable any rollback and return
				rollback = nil
				return nil
			}
			// Otherwise split the chunk of headers into batches and process them
			gotHeaders = true

			for len(headers) > 0 {
				// Terminate if something failed in between processing chunks
				select {
				case <-d.cancelCh:
					return errCancelHeaderProcessing
				default:
				}
				// Select the next chunk of headers to import
				limit := maxHeadersProcess
				if limit > len(headers) {
					limit = len(headers)
				}
				chunk := headers[:limit]

				// In case of header only syncing, validate the chunk immediately
				if d.mode == FastSync || d.mode == LightSync {
					// Collect the yet unknown headers to mark them as uncertain
					unknown := make([]*types.Header, 0, len(headers))
					for _, header := range chunk {
						if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
							unknown = append(unknown, header)
						}
					}
					// If we're importing pure headers, verify based on their recentness
					frequency := fsHeaderCheckFrequency
					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
						frequency = 1
					}
					if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
						// If some headers were inserted, add them too to the rollback list
						if n > 0 {
							rollback = append(rollback, chunk[:n]...)
						}
						log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
						return errInvalidChain
					}
					// All verifications passed, store newly found uncertain headers
					rollback = append(rollback, unknown...)
					if len(rollback) > fsHeaderSafetyNet {
						rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
					}
				}
				// Unless we're doing light chains, schedule the headers for associated content retrieval
				if d.mode == FullSync || d.mode == FastSync {
					// If we've reached the allowed number of pending headers, stall a bit
					for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
						select {
						case <-d.cancelCh:
							return errCancelHeaderProcessing
						case <-time.After(time.Second):
						}
					}
					// Otherwise insert the headers for content retrieval
					inserts := d.queue.Schedule(chunk, origin)
					if len(inserts) != len(chunk) {
						log.Debug("Stale headers")
						return errBadPeer
					}
				}
				headers = headers[limit:]
				origin += uint64(limit)
			}

			// Update the highest block number we know if a higher one is found.
			d.syncStatsLock.Lock()
			if d.syncStatsChainHeight < origin {
				d.syncStatsChainHeight = origin - 1
			}
			d.syncStatsLock.Unlock()

			// Signal the content downloaders of the availability of new tasks
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- true:
				default:
				}
			}
		}
	}
}

// processFullSyncContent takes fetch results from the queue and imports them into the chain.
// processFullSyncContent takes fetch results from the queue and imports them into the chain.
func (d *Downloader) processFullSyncContent() error {
	for {
		results := d.queue.Results(true)
		if len(results) == 0 {
			return nil
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		if err := d.importBlockResults(results); err != nil {
			return err
		}
	}
}

// importBlockResults reassembles the downloaded fetch results into blocks and
// inserts them into the local chain, aborting on the first invalid block.
func (d *Downloader) importBlockResults(results []*fetchResult) error {
	// Check for any early termination requests
	if len(results) == 0 {
		return nil
	}
	select {
	case <-d.quitCh:
		return errCancelContentProcessing
	default:
	}
	// Retrieve a batch of results to import
	first, last := results[0].Header, results[len(results)-1].Header
	log.Debug("Inserting downloaded chain", "items", len(results),
		"firstnum", first.Number, "firsthash", first.Hash(),
		"lastnum", last.Number, "lasthash", last.Hash(),
	)
	blocks := make([]*types.Block, len(results))
	for i, result := range results {
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
	}
	if index, err := d.blockchain.InsertChain(blocks); err != nil {
		if index < len(results) {
			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
		} else {
			// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
			// when it needs to preprocess blocks to import a sidechain.
			// The importer will put together a new list of blocks to import, which is a superset
			// of the blocks delivered from the downloader, and the indexing will be off.
			log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
		}
		return errInvalidChain
	}
	return nil
}
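// importBlockResults above interleaves block reassembly with chain insertion.
// As a hedged illustration, the reassembly step in isolation looks like this
// (hypothetical helper, not part of the original file):
func exampleAssembleBlocks(results []*fetchResult) []*types.Block {
	blocks := make([]*types.Block, len(results))
	for i, result := range results {
		// Recombine each downloaded header with its transactions and uncles
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
	}
	return blocks
}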
// processFastSyncContent takes fetch results from the queue and writes them to the
// database. It also controls the synchronisation of state nodes of the pivot block.
func (d *Downloader) processFastSyncContent(latest *types.Header) error {
	// Start syncing state of the reported head block. This should get us most of
	// the state of the pivot block.
	stateSync := d.syncState(latest.Root)
	defer stateSync.Cancel()
	go func() {
		if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
			d.queue.Close() // wake up Results
		}
	}()
	// Figure out the ideal pivot block. Note, that this goalpost may move if the
	// sync takes long enough for the chain head to move significantly.
	pivot := uint64(0)
	if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) {
		pivot = height - uint64(fsMinFullBlocks)
	}
	// To cater for moving pivot points, track the pivot block and subsequently
	// accumulated download results separately.
	var (
		oldPivot *fetchResult   // Locked in pivot block, might change eventually
		oldTail  []*fetchResult // Downloaded content after the pivot
	)
	for {
		// Wait for the next batch of downloaded data to be available, and if the pivot
		// block became stale, move the goalpost
		results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
		if len(results) == 0 {
			// If pivot sync is done, stop
			if oldPivot == nil {
				return stateSync.Cancel()
			}
			// If sync failed, stop
			select {
			case <-d.cancelCh:
				return stateSync.Cancel()
			default:
			}
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		if oldPivot != nil {
			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
		}
		// Split around the pivot block and process the two sides via fast/full sync
		if atomic.LoadInt32(&d.committed) == 0 {
			latest = results[len(results)-1].Header
			if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
				log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
				pivot = height - uint64(fsMinFullBlocks)
			}
		}
		P, beforeP, afterP := splitAroundPivot(pivot, results)
		if err := d.commitFastSyncData(beforeP, stateSync); err != nil {
			return err
		}
		if P != nil {
			// If new pivot block found, cancel old state retrieval and restart
			if oldPivot != P {
				stateSync.Cancel()

				stateSync = d.syncState(P.Header.Root)
				defer stateSync.Cancel()
				go func() {
					if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
						d.queue.Close() // wake up Results
					}
				}()
				oldPivot = P
			}
			// Wait for completion, occasionally checking for pivot staleness
			select {
			case <-stateSync.done:
				if stateSync.err != nil {
					return stateSync.err
				}
				if err := d.commitPivotBlock(P); err != nil {
					return err
				}
				oldPivot = nil

			case <-time.After(time.Second):
				oldTail = afterP
				continue
			}
		}
		// Fast sync done, pivot commit done, full import
		if err := d.importBlockResults(afterP); err != nil {
			return err
		}
	}
}

// splitAroundPivot partitions a batch of fetch results into the pivot block
// itself, the results preceding it and the results following it.
func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
	for _, result := range results {
		num := result.Header.Number.Uint64()
		switch {
		case num < pivot:
			before = append(before, result)
		case num == pivot:
			p = result
		default:
			after = append(after, result)
		}
	}
	return p, before, after
}

// A worked example of the split above (added commentary, not in the original
// source): with pivot = 100 and results covering blocks 98..102,
// splitAroundPivot yields before = [98, 99], p = 100 and after = [101, 102].
// processFastSyncContent then fast-imports before via commitFastSyncData,
// commits p via commitPivotBlock once its state is synced, and fully
// executes after via importBlockResults.

// commitFastSyncData imports a batch of pre-pivot blocks along with their
// receipts, bailing out early if the accompanying state sync has failed.
func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
	// Check for any early termination requests
	if len(results) == 0 {
		return nil
	}
	select {
	case <-d.quitCh:
		return errCancelContentProcessing
	case <-stateSync.done:
		if err := stateSync.Wait(); err != nil {
			return err
		}
	default:
	}
	// Retrieve a batch of results to import
	first, last := results[0].Header, results[len(results)-1].Header
	log.Debug("Inserting fast-sync blocks", "items", len(results),
		"firstnum", first.Number, "firsthash", first.Hash(),
		"lastnum", last.Number, "lasthash", last.Hash(),
	)
	blocks := make([]*types.Block, len(results))
	receipts := make([]types.Receipts, len(results))
	for i, result := range results {
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
		receipts[i] = result.Receipts
	}
	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil {
		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
		return errInvalidChain
	}
	return nil
}

// commitPivotBlock writes the pivot block and its receipts to the database and
// promotes it to the new chain head, marking the fast sync state as committed.
func (d *Downloader) commitPivotBlock(result *fetchResult) error {
	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
	log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}); err != nil {
		return err
	}
	if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
		return err
	}
	atomic.StoreInt32(&d.committed, 1)
	return nil
}

// DeliverHeaders injects a new batch of block headers received from a remote
// node into the download schedule.
func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
	return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
}

// DeliverBodies injects a new batch of block bodies received from a remote node.
func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
	return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
}

// DeliverReceipts injects a new batch of receipts received from a remote node.
func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
	return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
}

// DeliverNodeData injects a new batch of node state data received from a remote node.
func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
	return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
}

// deliver injects a new batch of data received from a remote node.
func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
	// Update the delivery metrics for both good and failed deliveries
	inMeter.Mark(int64(packet.Items()))
	defer func() {
		if err != nil {
			dropMeter.Mark(int64(packet.Items()))
		}
	}()
	// Deliver or abort if the sync is canceled while queuing
	d.cancelLock.RLock()
	cancel := d.cancelCh
	d.cancelLock.RUnlock()
	if cancel == nil {
		return errNoSyncActive
	}
	select {
	case destCh <- packet:
		return nil
	case <-cancel:
		return errNoSyncActive
	}
}
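// The deliver helper above is a cancel-safe channel send: it either hands the
// packet to its sink or aborts once the sync is torn down, never blocking
// forever. A minimal standalone sketch of the same pattern with plain types
// (hypothetical names, illustration only):
func exampleCancelSafeSend(dest chan<- int, cancel <-chan struct{}, value int) error {
	select {
	case dest <- value:
		return nil // the consumer accepted the value
	case <-cancel:
		return errNoSyncActive // sync torn down while queuing
	}
}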
// qosTuner is the quality of service tuning loop that occasionally gathers the
// peer latency statistics and updates the estimated request round trip time.
func (d *Downloader) qosTuner() {
	for {
		// Retrieve the current median RTT and integrate into the previous target RTT
		rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
		atomic.StoreUint64(&d.rttEstimate, uint64(rtt))

		// A new RTT cycle passed, increase our confidence in the estimated RTT
		conf := atomic.LoadUint64(&d.rttConfidence)
		conf = conf + (1000000-conf)/2
		atomic.StoreUint64(&d.rttConfidence, conf)

		// Log the new QoS values and sleep until the next RTT
		log.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
		select {
		case <-d.quitCh:
			return
		case <-time.After(rtt):
		}
	}
}

// qosReduceConfidence is meant to be called when a new peer joins the downloader's
// peer set, needing to reduce the confidence we have in our QoS estimates.
func (d *Downloader) qosReduceConfidence() {
	// If we have a single peer, confidence is always 1
	peers := uint64(d.peers.Len())
	if peers == 0 {
		// Ensure peer connectivity races don't catch us off guard
		return
	}
	if peers == 1 {
		atomic.StoreUint64(&d.rttConfidence, 1000000)
		return
	}
	// If we have a ton of peers, don't drop confidence
	if peers >= uint64(qosConfidenceCap) {
		return
	}
	// Otherwise drop the confidence factor
	conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
	if float64(conf)/1000000 < rttMinConfidence {
		conf = uint64(rttMinConfidence * 1000000)
	}
	atomic.StoreUint64(&d.rttConfidence, conf)

	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
	log.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
}

// requestRTT returns the current target round trip time for a download request
// to complete in.
//
// Note, the returned RTT is 0.9 of the actually estimated RTT. The reason is that
// the downloader tries to adapt queries to the RTT, so multiple RTT values can
// be adapted to, but smaller ones are preferred (stabler download stream).
func (d *Downloader) requestRTT() time.Duration {
	return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
}

// requestTTL returns the current timeout allowance for a single download request
// to finish under.
func (d *Downloader) requestTTL() time.Duration {
	var (
		rtt  = time.Duration(atomic.LoadUint64(&d.rttEstimate))
		conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
	)
	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}
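// A worked example of the QoS arithmetic above (added commentary, not part of
// the original file): with rttEstimate = 500ms and rttConfidence = 500000
// (i.e. 0.5), requestRTT targets 450ms and requestTTL allows
// 3 * 500ms / 0.5 = 3s, well under the one-minute ttlLimit. Each qosTuner
// cycle then halves the distance between the confidence and 1.0:
// 500000 -> 750000 -> 875000 -> ...
//
// exampleTTL mirrors the requestTTL computation with explicit inputs, as a
// hypothetical, self-contained sketch (not used anywhere in the package):
func exampleTTL(rtt time.Duration, confidence float64) time.Duration {
	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/confidence)
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl // e.g. exampleTTL(500*time.Millisecond, 0.5) == 3*time.Second
}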