github.com/r8d8/go-ethereum@v5.5.2+incompatible/eth/downloader/downloader.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package downloader contains the manual full chain synchronisation. 18 package downloader 19 20 import ( 21 "errors" 22 "fmt" 23 "math/big" 24 "sync" 25 "sync/atomic" 26 "time" 27 28 "github.com/ethereumproject/go-ethereum/common" 29 "github.com/ethereumproject/go-ethereum/core" 30 "github.com/ethereumproject/go-ethereum/core/types" 31 "github.com/ethereumproject/go-ethereum/ethdb" 32 "github.com/ethereumproject/go-ethereum/event" 33 "github.com/ethereumproject/go-ethereum/logger" 34 "github.com/ethereumproject/go-ethereum/logger/glog" 35 "github.com/ethereumproject/go-ethereum/metrics" 36 ) 37 38 const ( 39 EpochDuration = 30000 // Duration between proof-of-work epochs 40 ) 41 42 var ( 43 MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request 44 MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request 45 MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request 46 MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly 47 MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request 48 MaxStateFetch = 384 // Amount of node state values to allow fetching per request 49 50 MaxForkAncestry uint64 = 3 * EpochDuration // Maximum chain reorganisation 51 rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests 52 rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests 53 rttMinConfidence = 0.1 // Worst confidence factor in our estimated RTT value 54 ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion 55 ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts 56 57 qosTuningPeers = 5 // Number of peers to tune based on (best peers) 58 qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence 59 qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value 60 61 maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) 62 maxHeadersProcess = 2048 // Number of header download results to import at once into the chain 63 maxResultsProcess = 2048 // Number of content download results to import at once into the chain 64 65 fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync 66 fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected 67 fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it 68 fsHeaderContCheck = 3 * time.Second // Time interval to check for
header continuations during state download 69 fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync 70 ) 71 72 var ( 73 errBusy = errors.New("busy") 74 errUnknownPeer = errors.New("peer is unknown or unhealthy") 75 errBadPeer = errors.New("action from bad peer ignored") 76 errStallingPeer = errors.New("peer is stalling") 77 errNoPeers = errors.New("no peers to keep download active") 78 errTimeout = errors.New("timeout") 79 errEmptyHeaderSet = errors.New("empty header set by peer") 80 errPeersUnavailable = errors.New("no peers available or all tried for download") 81 errInvalidAncestor = errors.New("retrieved ancestor is invalid") 82 errInvalidChain = errors.New("retrieved hash chain is invalid") 83 errInvalidBlock = errors.New("retrieved block is invalid") 84 errInvalidBody = errors.New("retrieved block body is invalid") 85 errInvalidReceipt = errors.New("retrieved receipt is invalid") 86 errCancelBlockFetch = errors.New("block fetch canceled (requested)") 87 errCancelHeaderFetch = errors.New("block header download canceled (requested)") 88 errCancelBodyFetch = errors.New("block body download canceled (requested)") 89 errCancelReceiptFetch = errors.New("receipt download canceled (requested)") 90 errCancelStateFetch = errors.New("state data download canceled (requested)") 91 errCancelHeaderProcessing = errors.New("header processing canceled (requested)") 92 errCancelContentProcessing = errors.New("content processing canceled (requested)") 93 errNoSyncActive = errors.New("no sync active") 94 errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)") 95 ) 96 97 func ErrWasRequested(e error) bool { 98 switch e { 99 case errCancelBlockFetch: 100 return true 101 case errCancelHeaderFetch: 102 return true 103 case errCancelBodyFetch: 104 return true 105 case errCancelReceiptFetch: 106 return true 107 case errCancelStateFetch: 108 return true 109 case errCancelHeaderProcessing: 110 return true 111 case errCancelContentProcessing: 112 return true 113 } 114 return false 115 } 116 117 // SyncMode represents the synchronisation mode of the downloader. 
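//
// A minimal, illustrative mode selection on the caller side (the cfg flag is a
// hypothetical name, not part of this package):
//
//	mode := downloader.FullSync
//	if cfg.FastSync { // e.g. driven by a user-facing fast-sync option
//		mode = downloader.FastSync
//	}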
118 type SyncMode int 119 120 const ( 121 FullSync SyncMode = iota // Synchronise the entire blockchain history from full blocks 122 FastSync // Quickly download the headers, full sync only at the chain head 123 LightSync // Download only the headers and terminate afterwards 124 ) 125 126 func (m SyncMode) String() string { 127 switch m { 128 case FullSync: 129 return "FULL" 130 case FastSync: 131 return "FAST" 132 default: 133 return "LIGHT" 134 } 135 return "" 136 } 137 138 type Downloader struct { 139 mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle) 140 mux *event.TypeMux // Event multiplexer to announce sync operation events 141 142 queue *queue // Scheduler for selecting the hashes to download 143 peers *peerSet // Set of active peers from which download can proceed 144 stateDB ethdb.Database 145 146 rttEstimate uint64 // Round trip time to target for download requests 147 rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops) 148 149 // Statistics 150 syncStatsChainOrigin uint64 // Origin block number where syncing started at 151 syncStatsChainHeight uint64 // Highest block number known when syncing started 152 syncStatsState stateSyncStats 153 syncStatsLock sync.RWMutex // Lock protecting the sync stats fields 154 155 lightchain LightChain 156 blockchain BlockChain 157 158 // Callbacks 159 dropPeer peerDropFn // Drops a peer for misbehaving 160 161 // Status 162 synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing 163 synchronising int32 164 committed int32 165 166 // Channels 167 headerCh chan dataPack // [eth/62] Channel receiving inbound block headers 168 bodyCh chan dataPack // [eth/62] Channel receiving inbound block bodies 169 receiptCh chan dataPack // [eth/63] Channel receiving inbound receipts 170 bodyWakeCh chan bool // [eth/62] Channel to signal the block body fetcher of new tasks 171 receiptWakeCh chan bool // [eth/63] Channel to signal the receipt fetcher of new tasks 172 headerProcCh chan []*types.Header // [eth/62] Channel to feed the header processor new tasks 173 174 // for stateFetcher 175 stateSyncStart chan *stateSync 176 trackStateReq chan *stateReq 177 stateCh chan dataPack // [eth/63] Channel receiving inbound node state data 178 179 // Cancellation and termination 180 cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) 181 cancelCh chan struct{} // Channel to cancel mid-flight syncs 182 cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers 183 cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited. 184 185 quitCh chan struct{} // Quit channel to signal termination 186 quitLock sync.RWMutex // Lock to prevent double closes 187 188 // Testing hooks 189 syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run 190 bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch 191 receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch 192 chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) 193 } 194 195 // LightChain encapsulates functions required to synchronise a light chain. 196 type LightChain interface { 197 // HasHeader verifies a header's presence in the local chain. 198 HasHeader(common.Hash) bool 199 200 // GetHeaderByHash retrieves a header from the local chain. 
201 GetHeaderByHash(common.Hash) *types.Header 202 203 // CurrentHeader retrieves the head header from the local chain. 204 CurrentHeader() *types.Header 205 206 // GetTd returns the total difficulty of a local block. 207 GetTd(common.Hash) *big.Int 208 209 // InsertHeaderChain inserts a batch of headers into the local chain. 210 InsertHeaderChain([]*types.Header, int) *core.HeaderChainInsertResult 211 212 // Rollback removes a few recently added elements from the local chain. 213 Rollback([]common.Hash) 214 } 215 216 // BlockChain encapsulates functions required to sync a (full or fast) blockchain. 217 type BlockChain interface { 218 LightChain 219 220 // HasBlock verifies block presence in the local chain. // NOTE(whilei): https://github.com/ethereum/go-ethereum/pull/16061 221 HasBlock(common.Hash) bool 222 223 // HasBlockAndState verifies block and associated state presence in the local chain. 224 HasBlockAndState(common.Hash) bool 225 226 // GetBlockByHash retrieves a block from the local chain. 227 GetBlockByHash(common.Hash) *types.Block 228 229 // CurrentBlock retrieves the head block from the local chain. 230 CurrentBlock() *types.Block 231 232 // CurrentFastBlock retrieves the head fast block from the local chain. 233 CurrentFastBlock() *types.Block 234 235 // FastSyncCommitHead directly commits the head block to a certain entity. 236 FastSyncCommitHead(common.Hash) error 237 238 // InsertChain inserts a batch of blocks into the local chain. 239 InsertChain(types.Blocks) *core.ChainInsertResult 240 241 // InsertReceiptChain inserts a batch of receipts into the local chain. 242 InsertReceiptChain(types.Blocks, []types.Receipts) *core.ReceiptChainInsertResult 243 } 244 245 // New creates a new downloader to fetch hashes and blocks from remote peers. 246 func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader { 247 if lightchain == nil { 248 lightchain = chain 249 } 250 251 dl := &Downloader{ 252 mode: mode, 253 stateDB: stateDb, 254 mux: mux, // inherited from protocolManager, which inherits from Ethereum 255 queue: newQueue(), 256 peers: newPeerSet(), 257 rttEstimate: uint64(rttMaxEstimate), 258 rttConfidence: uint64(1000000), 259 blockchain: chain, 260 lightchain: lightchain, 261 dropPeer: dropPeer, 262 headerCh: make(chan dataPack, 1), 263 bodyCh: make(chan dataPack, 1), 264 receiptCh: make(chan dataPack, 1), 265 bodyWakeCh: make(chan bool, 1), 266 receiptWakeCh: make(chan bool, 1), 267 headerProcCh: make(chan []*types.Header, 1), 268 quitCh: make(chan struct{}), 269 stateCh: make(chan dataPack), 270 stateSyncStart: make(chan *stateSync), 271 trackStateReq: make(chan *stateReq), 272 } 273 go dl.qosTuner() 274 go dl.stateFetcher() 275 return dl 276 } 277 278 func (d *Downloader) currentLocalChainHeight() (current uint64) { 279 current = d.lightchain.CurrentHeader().Number.Uint64() // "LightSync" 280 switch d.mode { 281 case FullSync: 282 current = d.blockchain.CurrentBlock().NumberU64() 283 case FastSync: 284 current = d.blockchain.CurrentFastBlock().NumberU64() 285 } 286 return 287 } 288 289 // Progress retrieves the synchronisation boundaries, specifically the origin 290 // block where synchronisation started at (may have failed/suspended); the block 291 // or header the sync is currently at; and the latest known block which the sync targets.
292 // 293 // In addition, during the state download phase of fast synchronisation the number 294 // of processed and the total number of known states are also returned. Otherwise 295 // these are zero. 296 func (d *Downloader) Progress() (uint64, uint64, uint64, uint64, uint64) { 297 // Lock the current stats and return the progress 298 d.syncStatsLock.RLock() 299 defer d.syncStatsLock.RUnlock() 300 301 return d.syncStatsChainOrigin, d.currentLocalChainHeight(), d.syncStatsChainHeight, d.syncStatsState.processed, d.syncStatsState.processed + d.syncStatsState.pending 302 } 303 304 func (d *Downloader) Qos() (rtt time.Duration, ttl time.Duration, conf float64) { 305 rtt = d.requestRTT() 306 ttl = d.requestTTL() 307 conf = float64(d.rttConfidence) / 1000000.0 308 return 309 } 310 311 func (d *Downloader) GetMode() SyncMode { 312 return d.mode 313 } 314 315 func (d *Downloader) GetPeers() *peerSet { 316 return d.peers 317 } 318 319 // Synchronising returns whether the downloader is currently retrieving blocks. 320 func (d *Downloader) Synchronising() bool { 321 return atomic.LoadInt32(&d.synchronising) > 0 322 } 323 324 // RegisterPeer injects a new download peer into the set of block source to be 325 // used for fetching hashes and blocks from. 326 func (d *Downloader) RegisterPeer(id string, version int, name string, currentHead currentHeadRetrievalFn, 327 getRelHeaders relativeHeaderFetcherFn, getAbsHeaders absoluteHeaderFetcherFn, getBlockBodies blockBodyFetcherFn, 328 getReceipts receiptFetcherFn, getNodeData stateFetcherFn) error { 329 330 var err error 331 defer func() { 332 if logger.MlogEnabled() { 333 mlogDownloaderRegisterPeer.AssignDetails( 334 id, 335 version, 336 err, 337 ).Send(mlogDownloader) 338 } 339 }() 340 341 glog.V(logger.Detail).Infoln("Registering peer", id) 342 err = d.peers.Register(newPeer(id, version, name, currentHead, getRelHeaders, getAbsHeaders, getBlockBodies, getReceipts, getNodeData)) 343 if err != nil { 344 glog.V(logger.Error).Errorf("Register failed, err: %v", err) 345 return err 346 } 347 d.qosReduceConfidence() 348 349 return nil 350 } 351 352 // UnregisterPeer remove a peer from the known list, preventing any action from 353 // the specified peer. An effort is also made to return any pending fetches into 354 // the queue. 355 func (d *Downloader) UnregisterPeer(id string) error { 356 357 var err error 358 defer func() { 359 if logger.MlogEnabled() { 360 mlogDownloaderUnregisterPeer.AssignDetails( 361 id, 362 err, 363 ).Send(mlogDownloader) 364 } 365 }() 366 367 // Unregister the peer from the active peer set and revoke any fetch tasks 368 glog.V(logger.Detail).Infoln("Unregistering peer", id) 369 err = d.peers.Unregister(id) 370 371 defer func() { 372 // If this peer was the master peer, abort sync immediately 373 d.cancelLock.RLock() 374 master := id == d.cancelPeer 375 d.cancelLock.RUnlock() 376 377 if master { 378 d.cancel() 379 } 380 }() 381 382 if err != nil { 383 glog.V(logger.Warn).Warnln("Unregister failed:", err) 384 return err 385 } 386 d.queue.Revoke(id) 387 388 return nil 389 } 390 391 // Synchronise tries to sync up our local block chain with a remote peer, both 392 // adding various sanity checks as well as wrapping it with various log entries. 
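//
// An illustrative call site (bestPeer and its accessors are hypothetical names;
// the real wiring lives in the protocol manager outside this file):
//
//	head, td := bestPeer.Head() // advertised head hash and total difficulty
//	err := d.Synchronise(bestPeer.ID(), head, td, downloader.FastSync)
//	if err != nil && !downloader.ErrWasRequested(err) {
//		glog.V(logger.Debug).Infof("sync with %s failed: %v", bestPeer.ID(), err)
//	}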
393 func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error { 394 err := d.synchronise(id, head, td, mode) 395 switch err { 396 case nil: 397 glog.V(logger.Core).Infof("Peer %s: sync complete", id) 398 case errBusy: 399 glog.V(logger.Debug).Warnln("sync busy") 400 case errTimeout, errBadPeer, errStallingPeer, 401 errEmptyHeaderSet, errPeersUnavailable, errTooOld, 402 errInvalidAncestor, errInvalidChain: 403 glog.V(logger.Core).Warnf("Peer %s: drop: %s", id, err) 404 d.dropPeer(id) 405 406 default: 407 glog.V(logger.Core).Warnf("Peer %s: sync: %s", id, err) 408 } 409 return err 410 } 411 412 // synchronise will select the peer and use it for synchronising. If an empty string is given 413 // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the 414 // checks fail an error will be returned. This method is synchronous 415 func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error { 416 // Mock out the synchronisation if testing 417 if d.synchroniseMock != nil { 418 return d.synchroniseMock(id, hash) 419 } 420 // Make sure only one goroutine is ever allowed past this point at once 421 if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { 422 return errBusy 423 } 424 defer atomic.StoreInt32(&d.synchronising, 0) 425 426 // Reset the queue, peer set, wake channels, and incoming channels to clean any internal leftover state 427 d.queue.Reset() 428 d.peers.Reset() 429 430 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 431 select { 432 case <-ch: 433 default: 434 } 435 } 436 for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} { 437 for empty := false; !empty; { 438 select { 439 case <-ch: 440 default: 441 empty = true 442 } 443 } 444 } 445 for empty := false; !empty; { 446 select { 447 case <-d.headerProcCh: 448 default: 449 empty = true 450 } 451 } 452 // Create cancel channel for aborting mid-flight and mark the master peer 453 d.cancelLock.Lock() 454 d.cancelCh = make(chan struct{}) 455 d.cancelPeer = id 456 d.cancelLock.Unlock() 457 458 defer d.Cancel() // No matter what, we can't leave the cancel channel open 459 460 // Set the requested sync mode, unless it's forbidden 461 d.mode = mode 462 463 // Retrieve the origin peer and initiate the downloading process 464 p := d.peers.Peer(id) 465 if p == nil { 466 return errUnknownPeer 467 } 468 return d.syncWithPeer(p, hash, td) 469 } 470 471 // syncWithPeer starts a block synchronization based on the hash chain from the 472 // specified peer and head hash. 
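//
// As a worked example of the fast sync pivot arithmetic used below (assuming the
// current fsMinFullBlocks = 64): with a remote head at height 1,000,000 the pivot
// becomes 999,936; if the common ancestor is found at or above that point, the
// download origin is pulled back to 999,935 so the pivot block itself is
// re-downloaded and fully processed rather than assumed present.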
473 func (d *Downloader) syncWithPeer(p *peer, hash common.Hash, td *big.Int) (err error) { 474 d.mux.Post(StartEvent{p, hash, td}) 475 defer func() { 476 // reset on error 477 if err != nil { 478 d.mux.Post(FailedEvent{p, err}) 479 } else { 480 d.mux.Post(DoneEvent{p, hash, td}) 481 } 482 }() 483 484 var pivot uint64 485 486 glog.V(logger.Debug).Infof("Synchronising with the network using: %s [eth/%d]", p.id, p.version) 487 if logger.MlogEnabled() { 488 mlogDownloaderStartSync.AssignDetails( 489 d.mode.String(), 490 p.id, 491 p.name, 492 p.version, 493 hash.Hex(), 494 td.Uint64(), 495 ).Send(mlogDownloader) 496 } 497 defer func(start time.Time) { 498 elapsed := time.Since(start) 499 glog.V(logger.Debug).Warnf("Synchronisation with [%v][eth/%d] terminated after %v", p, p.version, elapsed) 500 if logger.MlogEnabled() { 501 mlogDownloaderStopSync.AssignDetails( 502 d.mode.String(), 503 p.id, 504 p.name, 505 p.version, 506 hash.Hex(), 507 td.Uint64(), 508 pivot, 509 d.syncStatsChainOrigin, 510 d.syncStatsChainHeight, 511 elapsed, 512 err, 513 ).Send(mlogDownloader) 514 } 515 }(time.Now()) 516 517 if p.version < 62 { 518 glog.V(logger.Debug).Warnf("download: peer %q protocol %d too old", p.id, p.version) 519 return errTooOld 520 } 521 522 // Look up the sync boundaries: the common ancestor and the target block 523 latest, err := d.fetchHeight(p) 524 if err != nil { 525 return err 526 } 527 height := latest.Number.Uint64() 528 529 origin, err := d.findAncestor(p, height) 530 if err != nil { 531 return err 532 } 533 d.syncStatsLock.Lock() 534 if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { 535 d.syncStatsChainOrigin = origin 536 } 537 d.syncStatsChainHeight = height 538 d.syncStatsLock.Unlock() 539 540 // Ensure our origin point is below any fast sync pivot point 541 if d.mode == FastSync { 542 if height <= uint64(fsMinFullBlocks) { 543 origin = 0 544 } else { 545 pivot = height - uint64(fsMinFullBlocks) 546 if pivot <= origin { 547 origin = pivot - 1 548 } 549 } 550 } 551 d.committed = 1 552 if d.mode == FastSync && pivot != 0 { 553 d.committed = 0 554 } 555 // Initiate the sync using a concurrent header and content retrieval algorithm 556 d.queue.Prepare(origin+1, d.mode) 557 if d.syncInitHook != nil { 558 d.syncInitHook(origin, height) 559 } 560 561 fetchers := []func() error{ 562 func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved 563 func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync 564 func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync 565 func() error { return d.processHeaders(origin+1, pivot, td) }, 566 } 567 if d.mode == FastSync { 568 fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) }) 569 } else if d.mode == FullSync { 570 fetchers = append(fetchers, d.processFullSyncContent) 571 } 572 return d.spawnSync(fetchers) 573 } 574 575 // spawnSync runs d.process and all given fetcher functions to completion in 576 // separate goroutines, returning the first error that appears. 577 func (d *Downloader) spawnSync(fetchers []func() error) error { 578 errc := make(chan error, len(fetchers)) 579 d.cancelWg.Add(len(fetchers)) 580 for _, fn := range fetchers { 581 fn := fn 582 go func() { defer d.cancelWg.Done(); errc <- fn() }() 583 } 584 // Wait for the first error, then terminate the others. 
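// Note: errc is buffered to len(fetchers), so a fetcher can always post its
// result without blocking, even if this loop exits early on the first error.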
585 var err error 586 for i := 0; i < len(fetchers); i++ { 587 if i == len(fetchers)-1 { 588 // Close the queue when all fetchers have exited. 589 // This will cause the block processor to end when 590 // it has processed the queue. 591 d.queue.Close() 592 } 593 if err = <-errc; err != nil { 594 break 595 } 596 } 597 d.queue.Close() 598 d.Cancel() 599 return err 600 } 601 602 // cancel aborts all of the operations and resets the queue. However, cancel does 603 // not wait for the running download goroutines to finish. This method should be 604 // used when cancelling the downloads from inside the downloader. 605 func (d *Downloader) cancel() { 606 // Close the current cancel channel 607 d.cancelLock.Lock() 608 if d.cancelCh != nil { 609 select { 610 case <-d.cancelCh: 611 // Channel was already closed 612 default: 613 close(d.cancelCh) 614 } 615 } 616 d.cancelLock.Unlock() 617 } 618 619 // Cancel aborts all of the operations and waits for all download goroutines to 620 // finish before returning. 621 func (d *Downloader) Cancel() { 622 d.cancel() 623 d.cancelWg.Wait() 624 } 625 626 // Terminate interrupts the downloader, canceling all pending operations. 627 // The downloader cannot be reused after calling Terminate. 628 func (d *Downloader) Terminate() { 629 // Close the termination channel (make sure double close is allowed) 630 d.quitLock.Lock() 631 select { 632 case <-d.quitCh: 633 default: 634 close(d.quitCh) 635 } 636 d.quitLock.Unlock() 637 638 // Cancel any pending download requests 639 d.Cancel() 640 } 641 642 // fetchHeight retrieves the head header of the remote peer to aid in estimating 643 // the total time a pending synchronisation would take. 644 func (d *Downloader) fetchHeight(p *peer) (*types.Header, error) { 645 glog.V(logger.Debug).Infof("%v: retrieving remote chain height", p) 646 647 // Request the advertised remote head block and wait for the response 648 head, _ := p.currentHead() 649 go p.getRelHeaders(head, 1, 0, false) 650 651 // After waits for the duration to elapse and then sends the current time on the returned channel. 652 // It is equivalent to NewTimer(d).C. 653 // The underlying Timer is not recovered by the garbage collector until the timer fires. 654 // If efficiency is a concern, use NewTimer instead and call Timer.Stop if the timer is no longer needed. 655 ttl := d.requestTTL() 656 timer := time.NewTimer(ttl) 657 defer timer.Stop() 658 for { 659 select { 660 case <-d.cancelCh: 661 return nil, errCancelBlockFetch 662 663 case packet := <-d.headerCh: 664 // Discard anything not from the origin peer 665 if packet.PeerId() != p.id { 666 glog.V(logger.Debug).Infof("Received headers from incorrect peer(%s)", packet.PeerId()) 667 break 668 } 669 // Make sure the peer actually gave something valid 670 headers := packet.(*headerPack).headers 671 if len(headers) != 1 { 672 glog.V(logger.Debug).Infof("%v: invalid number of head headers: %d != 1", p, len(headers)) 673 return nil, errBadPeer 674 } 675 return headers[0], nil 676 677 case <-timer.C: 678 glog.V(logger.Debug).Infof("%v: head header timeout, ttl: %v", p, ttl) 679 return nil, errTimeout 680 681 case <-d.bodyCh: 682 case <-d.receiptCh: 683 // Out of bounds delivery, ignore 684 } 685 } 686 } 687 688 // findAncestor tries to locate the common ancestor link of the local chain and 689 // a remote peers blockchain. In the general case when our node was in sync and 690 // on the correct chain, checking the top N links should already get us a match. 
691 // In the rare scenario when we ended up on a long reorganisation (i.e. none of 692 // the head links match), we do a binary search to find the common ancestor. 693 func (d *Downloader) findAncestor(p *peer, height uint64) (uint64, error) { 694 glog.V(logger.Debug).Infof("%v: looking for common ancestor (remote height %d)", p, height) 695 // Figure out the valid ancestor range to prevent rewrite attacks 696 floor, ceil := int64(-1), d.currentLocalChainHeight() 697 698 if ceil >= uint64(MaxForkAncestry) { 699 floor = int64(ceil - uint64(MaxForkAncestry)) 700 } 701 // Request the topmost blocks to short circuit binary ancestor lookup 702 head := ceil 703 if head > height { 704 head = height 705 } 706 from := int64(head) - int64(MaxHeaderFetch) 707 if from < 0 { 708 from = 0 709 } 710 // Span out with 15 block gaps into the future to catch bad head reports 711 limit := 2 * MaxHeaderFetch / 16 712 count := 1 + int((int64(ceil)-from)/16) 713 if count > limit { 714 count = limit 715 } 716 go p.getAbsHeaders(uint64(from), count, 15, false) 717 718 // Wait for the remote response to the head fetch 719 number, hash := uint64(0), common.Hash{} 720 721 ttl := d.requestTTL() 722 timeout := time.After(ttl) 723 724 for finished := false; !finished; { 725 select { 726 case <-d.cancelCh: 727 return 0, errCancelHeaderFetch 728 729 case packet := <-d.headerCh: 730 // Discard anything not from the origin peer 731 if packet.PeerId() != p.id { 732 glog.V(logger.Debug).Warnln("Received headers from incorrect peer", "peer", packet.PeerId()) 733 break 734 } 735 // Make sure the peer actually gave something valid 736 headers := packet.(*headerPack).headers 737 if len(headers) == 0 { 738 glog.V(logger.Debug).Warnln("Empty head header set") 739 return 0, errEmptyHeaderSet 740 } 741 // Make sure the peer's reply conforms to the request 742 for i := 0; i < len(headers); i++ { 743 if number := headers[i].Number.Int64(); number != from+int64(i)*16 { 744 glog.V(logger.Debug).Warnln("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number) 745 return 0, errInvalidChain 746 } 747 } 748 // Check if a common ancestor was found 749 finished = true 750 for i := len(headers) - 1; i >= 0; i-- { 751 // Skip any headers that underflow/overflow our requested set 752 if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil { 753 continue 754 } 755 // Otherwise check if we already know the header or not 756 if (d.mode == FullSync && d.blockchain.HasBlockAndState(headers[i].Hash())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash())) { 757 number, hash = headers[i].Number.Uint64(), headers[i].Hash() 758 759 // If every header is known, even future ones, the peer straight out lied about its head 760 if number > height && i == limit-1 { 761 glog.V(logger.Debug).Warnln("Lied about chain head", "reported", height, "found", number) 762 return 0, errStallingPeer 763 } 764 break 765 } 766 } 767 768 case <-timeout: 769 glog.V(logger.Debug).Warnln("Waiting for head header timed out", "elapsed", ttl) 770 return 0, errTimeout 771 772 case <-d.bodyCh: 773 case <-d.receiptCh: 774 // Out of bounds delivery, ignore 775 } 776 } 777 // If the head fetch already found an ancestor, return 778 if !common.EmptyHash(hash) { 779 if int64(number) <= floor { 780 glog.V(logger.Debug).Warnln("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor) 781 return 0, errInvalidAncestor 782 } 783 glog.V(logger.Debug).Warnln("Found common ancestor", "number", number, 
"hash", hash) 784 return number, nil 785 } 786 // Ancestor not found, we need to binary search over our chain 787 start, end := uint64(0), head 788 if floor > 0 { 789 start = uint64(floor) 790 } 791 for start+1 < end { 792 // Split our chain interval in two, and request the hash to cross check 793 check := (start + end) / 2 794 795 ttl := d.requestTTL() 796 timeout := time.After(ttl) 797 798 go p.getAbsHeaders(uint64(check), 1, 0, false) 799 800 // Wait until a reply arrives to this request 801 for arrived := false; !arrived; { 802 select { 803 case <-d.cancelCh: 804 return 0, errCancelHeaderFetch 805 806 case packer := <-d.headerCh: 807 // Discard anything not from the origin peer 808 if packer.PeerId() != p.id { 809 glog.V(logger.Debug).Warnln("Received headers from incorrect peer", "peer", packer.PeerId()) 810 break 811 } 812 // Make sure the peer actually gave something valid 813 headers := packer.(*headerPack).headers 814 if len(headers) != 1 { 815 glog.V(logger.Debug).Warnln("Multiple headers for single request", "headers", len(headers)) 816 return 0, errBadPeer 817 } 818 arrived = true 819 820 // Modify the search interval based on the response 821 if (d.mode == FullSync && !d.blockchain.HasBlockAndState(headers[0].Hash())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash())) { 822 end = check 823 break 824 } 825 header := d.lightchain.GetHeaderByHash(headers[0].Hash()) // Independent of sync mode, header surely exists 826 if header.Number.Uint64() != check { 827 glog.V(logger.Debug).Warnln("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) 828 return 0, errBadPeer 829 } 830 start = check 831 832 case <-timeout: 833 glog.V(logger.Debug).Warnln("Waiting for search header timed out", "elapsed", ttl) 834 return 0, errTimeout 835 836 case <-d.bodyCh: 837 case <-d.receiptCh: 838 // Out of bounds delivery, ignore 839 } 840 } 841 } 842 // Ensure valid ancestry and return 843 if int64(start) <= floor { 844 glog.V(logger.Debug).Infoln("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor) 845 return 0, errInvalidAncestor 846 } 847 glog.V(logger.Debug).Infoln("Found common ancestor", "number", start, "hash", hash) 848 return start, nil 849 } 850 851 // fetchHeaders keeps retrieving headers concurrently from the number 852 // requested, until no more are returned, potentially throttling on the way. To 853 // facilitate concurrency but still protect against malicious nodes sending bad 854 // headers, we construct a header chain skeleton using the "origin" peer we are 855 // syncing with, and fill in the missing headers using anyone else. Headers from 856 // other peers are only accepted if they map cleanly to the skeleton. If no one 857 // can fill in the skeleton - not even the origin peer - it's assumed invalid and 858 // the origin is dropped. 
859 func (d *Downloader) fetchHeaders(p *peer, from uint64, pivot uint64) error { 860 glog.V(logger.Debug).Infoln("Directing header downloads", "origin", from) 861 defer glog.V(logger.Debug).Infoln("Header download terminated") 862 863 // Create a timeout timer, and the associated header fetcher 864 skeleton := true // Skeleton assembly phase or finishing up 865 request := time.Now() // time of the last skeleton fetch request 866 timeout := time.NewTimer(0) // timer to dump a non-responsive active peer 867 <-timeout.C // timeout channel should be initially empty 868 defer timeout.Stop() 869 870 var ttl time.Duration 871 getHeaders := func(from uint64) { 872 request = time.Now() 873 874 ttl = d.requestTTL() 875 timeout.Reset(ttl) 876 877 if skeleton { 878 glog.V(logger.Detail).Infof("Fetching skeleton headers, count=%v from=%v", MaxHeaderFetch, from) 879 go p.getAbsHeaders(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) 880 } else { 881 glog.V(logger.Detail).Infof("Fetching full headers, count=%v from=%v", MaxHeaderFetch, from) 882 go p.getAbsHeaders(from, MaxHeaderFetch, 0, false) 883 } 884 } 885 // Start pulling the header chain skeleton until all is done 886 getHeaders(from) 887 888 for { 889 select { 890 case <-d.cancelCh: 891 return errCancelHeaderFetch 892 893 case packet := <-d.headerCh: 894 // Make sure the active peer is giving us the skeleton headers 895 if packet.PeerId() != p.id { 896 glog.V(logger.Debug).Warnln("Received skeleton from incorrect peer", "peer", packet.PeerId()) 897 break 898 } 899 metrics.DLHeaderTimer.UpdateSince(request) 900 timeout.Stop() 901 902 // If the skeleton's finished, pull any remaining head headers directly from the origin 903 if packet.Items() == 0 && skeleton { 904 skeleton = false 905 getHeaders(from) 906 continue 907 } 908 // If no more headers are inbound, notify the content fetchers and return 909 if packet.Items() == 0 { 910 // Don't abort header fetches while the pivot is downloading 911 if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { 912 glog.V(logger.Warn).Warnln("No headers, waiting for pivot commit") 913 select { 914 case <-time.After(fsHeaderContCheck): 915 getHeaders(from) 916 continue 917 case <-d.cancelCh: 918 return errCancelHeaderFetch 919 } 920 } 921 // Pivot done (or not in fast sync) and no more headers, terminate the process 922 glog.V(logger.Warn).Warnln("No more headers available") 923 select { 924 case d.headerProcCh <- nil: 925 return nil 926 case <-d.cancelCh: 927 return errCancelHeaderFetch 928 } 929 } 930 headers := packet.(*headerPack).headers 931 932 // If we received a skeleton batch, resolve internals concurrently 933 if skeleton { 934 filled, proced, err := d.fillHeaderSkeleton(from, headers) 935 if err != nil { 936 glog.V(logger.Debug).Warnln("Skeleton chain invalid", "err", err) 937 return errInvalidChain 938 } 939 headers = filled[proced:] 940 from += uint64(proced) 941 } 942 // Insert all the new headers and fetch the next batch 943 if len(headers) > 0 { 944 glog.V(logger.Debug).Infoln("Scheduling new headers", "count", len(headers), "from", from) 945 select { 946 case d.headerProcCh <- headers: 947 case <-d.cancelCh: 948 return errCancelHeaderFetch 949 } 950 from += uint64(len(headers)) 951 } 952 getHeaders(from) 953 954 case <-timeout.C: 955 // Header retrieval timed out, consider the peer bad and drop 956 glog.V(logger.Debug).Warnln("Header request timed out", "elapsed", ttl) 957 metrics.DLHeaderTimeouts.Mark(1) 958 d.dropPeer(p.id) 959 960 // Finish the sync gracefully 
instead of dumping the gathered data though 961 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 962 select { 963 case ch <- false: 964 case <-d.cancelCh: 965 } 966 } 967 select { 968 case d.headerProcCh <- nil: 969 case <-d.cancelCh: 970 } 971 return errBadPeer 972 } 973 } 974 } 975 976 // fillHeaderSkeleton concurrently retrieves headers from all our available peers 977 // and maps them to the provided skeleton header chain. 978 // 979 // Any partial results from the beginning of the skeleton are (if possible) forwarded 980 // immediately to the header processor to keep the rest of the pipeline full even 981 // in the case of header stalls. 982 // 983 // The method returns the entire filled skeleton and also the number of headers 984 // already forwarded for processing. 985 func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) { 986 glog.V(logger.Debug).Infoln("Filling up skeleton", "from", from) 987 d.queue.ScheduleSkeleton(from, skeleton) 988 989 var ( 990 deliver = func(packet dataPack) (int, error) { 991 pack := packet.(*headerPack) 992 return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh) 993 } 994 expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) } 995 throttle = func() bool { return false } 996 reserve = func(p *peer, count int) (*fetchRequest, bool, error) { 997 return d.queue.ReserveHeaders(p, count), false, nil 998 } 999 fetch = func(p *peer, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) } 1000 capacity = func(p *peer) int { return p.HeaderCapacity(d.requestRTT()) } 1001 setIdle = func(p *peer, accepted int) { p.SetHeadersIdle(accepted) } 1002 ) 1003 err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire, 1004 d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve, 1005 nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers") 1006 1007 glog.V(logger.Debug).Infoln("Skeleton fill terminated", "err", err) 1008 1009 filled, proced := d.queue.RetrieveHeaders() 1010 return filled, proced, err 1011 } 1012 1013 // fetchBodies iteratively downloads the scheduled block bodies, taking any 1014 // available peers, reserving a chunk of blocks for each, waiting for delivery 1015 // and also periodically checking for timeouts.
1016 func (d *Downloader) fetchBodies(from uint64) error { 1017 glog.V(logger.Debug).Infoln("Downloading block bodies", "origin", from) 1018 1019 var ( 1020 deliver = func(packet dataPack) (int, error) { 1021 pack := packet.(*bodyPack) 1022 return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles) 1023 } 1024 expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) } 1025 fetch = func(p *peer, req *fetchRequest) error { return p.FetchBodies(req) } 1026 capacity = func(p *peer) int { return p.BlockCapacity(d.requestRTT()) } 1027 setIdle = func(p *peer, accepted int) { p.SetBodiesIdle(accepted) } 1028 ) 1029 err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, 1030 d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies, 1031 d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies") 1032 1033 glog.V(logger.Debug).Infoln("Block body download terminated", "err", err) 1034 return err 1035 } 1036 1037 // fetchReceipts iteratively downloads the scheduled block receipts, taking any 1038 // available peers, reserving a chunk of receipts for each, waiting for delivery 1039 // and also periodically checking for timeouts. 1040 func (d *Downloader) fetchReceipts(from uint64) error { 1041 glog.V(logger.Debug).Infoln("Downloading transaction receipts", "origin", from) 1042 1043 var ( 1044 deliver = func(packet dataPack) (int, error) { 1045 pack := packet.(*receiptPack) 1046 return d.queue.DeliverReceipts(pack.peerId, pack.receipts) 1047 } 1048 expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) } 1049 fetch = func(p *peer, req *fetchRequest) error { return p.FetchReceipts(req) } 1050 capacity = func(p *peer) int { return p.ReceiptCapacity(d.requestRTT()) } 1051 setIdle = func(p *peer, accepted int) { p.SetReceiptsIdle(accepted) } 1052 ) 1053 err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, 1054 d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts, 1055 d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts") 1056 1057 glog.V(logger.Debug).Infoln("Transaction receipt download terminated", "err", err) 1058 return err 1059 } 1060 1061 // fetchParts iteratively downloads scheduled block parts, taking any available 1062 // peers, reserving a chunk of fetch requests for each, waiting for delivery and 1063 // also periodically checking for timeouts. 1064 // 1065 // As the scheduling/timeout logic mostly is the same for all downloaded data 1066 // types, this method is used by each for data gathering and is instrumented with 1067 // various callbacks to handle the slight differences between processing them. 
1068 // 1069 // The instrumentation parameters: 1070 // - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer) 1071 // - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers) 1072 // - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`) 1073 // - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed) 1074 // - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping) 1075 // - pending: task callback for the number of requests still needing download (detect completion/non-completability) 1076 // - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish) 1077 // - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use) 1078 // - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions) 1079 // - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic) 1080 // - fetch: network callback to actually send a particular download request to a physical remote peer 1081 // - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer) 1082 // - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping) 1083 // - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks 1084 // - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping) 1085 // - kind: textual label of the type being downloaded to display in log mesages 1086 func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool, 1087 expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peer, int) (*fetchRequest, bool, error), 1088 fetchHook func([]*types.Header), fetch func(*peer, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peer) int, 1089 idle func() ([]*peer, int), setIdle func(*peer, int), kind string) error { 1090 1091 // Create a ticker to detect expired retrieval tasks 1092 ticker := time.NewTicker(100 * time.Millisecond) 1093 defer ticker.Stop() 1094 1095 update := make(chan struct{}, 1) 1096 1097 // Prepare the queue and fetch block parts until the block header fetcher's done 1098 finished := false 1099 for { 1100 select { 1101 case <-d.cancelCh: 1102 return errCancel 1103 1104 case packet := <-deliveryCh: 1105 // If the peer was previously banned and failed to deliver its pack 1106 // in a reasonable time frame, ignore its message. 1107 if peer := d.peers.Peer(packet.PeerId()); peer != nil { 1108 // Deliver the received chunk of data and check chain validity 1109 accepted, err := deliver(packet) 1110 if err == errInvalidChain { 1111 return err 1112 } 1113 // Unless a peer delivered something completely else than requested (usually 1114 // caused by a timed out request which came through in the end), set it to 1115 // idle. If the delivery's stale, the peer should have already been idled. 
1116 if err != errStaleDelivery { 1117 setIdle(peer, accepted) 1118 } 1119 // Issue a log to the user to see what's going on 1120 switch { 1121 case err == nil && packet.Items() == 0: 1122 glog.V(logger.Detail).Infoln("Requested data not delivered", "type", kind) 1123 case err == nil: 1124 glog.V(logger.Detail).Infoln("Delivered new batch of data", "type", kind, "count", packet.Stats()) 1125 default: 1126 glog.V(logger.Detail).Infoln("Failed to deliver retrieved data", "type", kind, "err", err) 1127 } 1128 } 1129 // Blocks assembled, try to update the progress 1130 select { 1131 case update <- struct{}{}: 1132 default: 1133 } 1134 1135 case cont := <-wakeCh: 1136 // The header fetcher sent a continuation flag, check if it's done 1137 if !cont { 1138 finished = true 1139 } 1140 // Headers arrive, try to update the progress 1141 select { 1142 case update <- struct{}{}: 1143 default: 1144 } 1145 1146 case <-ticker.C: 1147 // Sanity check update the progress 1148 select { 1149 case update <- struct{}{}: 1150 default: 1151 } 1152 1153 case <-update: 1154 // Short circuit if we lost all our peers 1155 if d.peers.Len() == 0 { 1156 return errNoPeers 1157 } 1158 // Check for fetch request timeouts and demote the responsible peers 1159 for pid, fails := range expire() { 1160 if peer := d.peers.Peer(pid); peer != nil { 1161 // If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps 1162 // ourselves. Only reset to minimal throughput, but don't drop just yet. If even the minimal request 1163 // times out, then sync-wise we need to get rid of the peer. 1164 // 1165 // The reason the minimum threshold is 2 is that the downloader tries to estimate the bandwidth 1166 // and latency of a peer separately, which requires pushing the measured capacity a bit and seeing 1167 // how response times react to it, so it always requests one more than the minimum (i.e. min 2). 1168 if fails > 2 { 1169 glog.V(logger.Detail).Infoln("Data delivery timed out", "type", kind) 1170 setIdle(peer, 0) 1171 } else { 1172 glog.V(logger.Detail).Infoln("Stalling delivery, dropping", "type", kind) 1173 d.dropPeer(pid) 1174 } 1175 } 1176 } 1177 // If there's nothing more to fetch, wait or terminate 1178 if pending() == 0 { 1179 if !inFlight() && finished { 1180 glog.V(logger.Detail).Infoln("Data fetching completed", "type", kind) 1181 return nil 1182 } 1183 break 1184 } 1185 // Send a download request to all idle peers, until throttled 1186 progressed, throttled, running := false, false, inFlight() 1187 idles, total := idle() 1188 1189 for _, peer := range idles { 1190 // Short circuit if throttling activated 1191 if throttle() { 1192 throttled = true 1193 break 1194 } 1195 // Short circuit if there is no more available task. 1196 if pending() == 0 { 1197 break 1198 } 1199 // Reserve a chunk of fetches for a peer. A nil can mean either that 1200 // no more headers are available, or that the peer is known not to 1201 // have them.
1202 request, progress, err := reserve(peer, capacity(peer)) 1203 if err != nil { 1204 return err 1205 } 1206 if progress { 1207 progressed = true 1208 } 1209 if request == nil { 1210 continue 1211 } 1212 if request.From > 0 { 1213 glog.V(logger.Detail).Infoln("Requesting new batch of data", "type", kind, "from", request.From) 1214 } else { 1215 glog.V(logger.Detail).Infoln("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number) 1216 } 1217 // Fetch the chunk and make sure any errors return the hashes to the queue 1218 if fetchHook != nil { 1219 fetchHook(request.Headers) 1220 } 1221 if err := fetch(peer, request); err != nil { 1222 // Although we could try and make an attempt to fix this, this error really 1223 // means that we've double allocated a fetch task to a peer. If that is the 1224 // case, the internal state of the downloader and the queue is very wrong so 1225 // better hard crash and note the error instead of silently accumulating into 1226 // a much bigger issue. 1227 panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind)) 1228 } 1229 running = true 1230 } 1231 // Make sure that we have peers available for fetching. If all peers have been tried 1232 // and all failed throw an error 1233 if !progressed && !throttled && !running && len(idles) == total && pending() > 0 { 1234 return errPeersUnavailable 1235 } 1236 } 1237 } 1238 } 1239 1240 // processHeaders takes batches of retrieved headers from an input channel and 1241 // keeps processing and scheduling them into the header chain and downloader's 1242 // queue until the stream ends or a failure occurs. 1243 func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error { 1244 // Keep a count of uncertain headers to roll back 1245 rollback := []*types.Header{} 1246 defer func() { 1247 if len(rollback) > 0 { 1248 // Flatten the headers and roll them back 1249 hashes := make([]common.Hash, len(rollback)) 1250 for i, header := range rollback { 1251 hashes[i] = header.Hash() 1252 } 1253 lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 1254 if d.mode != LightSync { 1255 lastFastBlock = d.blockchain.CurrentFastBlock().Number() 1256 lastBlock = d.blockchain.CurrentBlock().Number() 1257 } 1258 d.lightchain.Rollback(hashes) 1259 curFastBlock, curBlock := common.Big0, common.Big0 1260 if d.mode != LightSync { 1261 curFastBlock = d.blockchain.CurrentFastBlock().Number() 1262 curBlock = d.blockchain.CurrentBlock().Number() 1263 } 1264 glog.V(logger.Warn).Warnln("Rolled back headers", "count", len(hashes), 1265 "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), 1266 "fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), 1267 "block", fmt.Sprintf("%d->%d", lastBlock, curBlock)) 1268 } 1269 }() 1270 1271 // Wait for batches of headers to process 1272 gotHeaders := false 1273 1274 for { 1275 select { 1276 case <-d.cancelCh: 1277 return errCancelHeaderProcessing 1278 1279 case headers := <-d.headerProcCh: 1280 // Terminate header processing if we synced up 1281 if len(headers) == 0 { 1282 // Notify everyone that headers are fully processed 1283 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 1284 select { 1285 case ch <- false: 1286 case <-d.cancelCh: 1287 } 1288 } 1289 // If no headers were retrieved at all, the peer violated its TD promise that it had a 1290 // better chain compared to ours. 
The only exception is if its promised blocks were 1291 // already imported by other means (e.g. fecher): 1292 // 1293 // R <remote peer>, L <local node>: Both at block 10 1294 // R: Mine block 11, and propagate it to L 1295 // L: Queue block 11 for import 1296 // L: Notice that R's head and TD increased compared to ours, start sync 1297 // L: Import of block 11 finishes 1298 // L: Sync begins, and finds common ancestor at 11 1299 // L: Request new headers up from 11 (R's TD was higher, it must have something) 1300 // R: Nothing to give 1301 if d.mode != LightSync { 1302 head := d.blockchain.CurrentBlock() 1303 if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash())) > 0 { 1304 return errStallingPeer 1305 } 1306 } 1307 // If fast or light syncing, ensure promised headers are indeed delivered. This is 1308 // needed to detect scenarios where an attacker feeds a bad pivot and then bails out 1309 // of delivering the post-pivot blocks that would flag the invalid content. 1310 // 1311 // This check cannot be executed "as is" for full imports, since blocks may still be 1312 // queued for processing when the header download completes. However, as long as the 1313 // peer gave us something useful, we're already happy/progressed (above check). 1314 if d.mode == FastSync || d.mode == LightSync { 1315 head := d.lightchain.CurrentHeader() 1316 if td.Cmp(d.lightchain.GetTd(head.Hash())) > 0 { 1317 return errStallingPeer 1318 } 1319 } 1320 // Disable any rollback and return 1321 rollback = nil 1322 return nil 1323 } 1324 // Otherwise split the chunk of headers into batches and process them 1325 gotHeaders = true 1326 1327 for len(headers) > 0 { 1328 // Terminate if something failed in between processing chunks 1329 select { 1330 case <-d.cancelCh: 1331 return errCancelHeaderProcessing 1332 default: 1333 } 1334 // Select the next chunk of headers to import 1335 limit := maxHeadersProcess 1336 if limit > len(headers) { 1337 limit = len(headers) 1338 } 1339 chunk := headers[:limit] 1340 1341 // In case of header only syncing, validate the chunk immediately 1342 if d.mode == FastSync || d.mode == LightSync { 1343 // Collect the yet unknown headers to mark them as uncertain 1344 unknown := make([]*types.Header, 0, len(headers)) 1345 for _, header := range chunk { 1346 if !d.lightchain.HasHeader(header.Hash()) { 1347 unknown = append(unknown, header) 1348 } 1349 } 1350 // If we're importing pure headers, verify based on their recentness 1351 frequency := fsHeaderCheckFrequency 1352 if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { 1353 frequency = 1 1354 } 1355 res := d.lightchain.InsertHeaderChain(chunk, frequency) 1356 // TODO(whilei): again, send error to events 1357 if res.Error != nil { 1358 // If some headers were inserted, add them too to the rollback list 1359 if res.Index > 0 { 1360 rollback = append(rollback, chunk[:res.Index]...) 1361 } 1362 glog.V(logger.Debug).Infoln("Invalid header encountered", "number", chunk[res.Index].Number, "hash", chunk[res.Index].Hash(), "err", res.Error) 1363 return errInvalidChain 1364 } 1365 go d.mux.Post(InsertHeaderChainEvent{res.HeaderChainInsertEvent}) 1366 // All verifications passed, store newly found uncertain headers 1367 rollback = append(rollback, unknown...) 1368 if len(rollback) > fsHeaderSafetyNet { 1369 rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...) 
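// (The append into rollback[:0] above keeps only the most recent
// fsHeaderSafetyNet uncertain headers, reusing the existing backing array
// rather than allocating a new slice.)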
1370 } 1371 } 1372 // Unless we're doing light chains, schedule the headers for associated content retrieval 1373 if d.mode == FullSync || d.mode == FastSync { 1374 // If we've reached the allowed number of pending headers, stall a bit 1375 for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { 1376 select { 1377 case <-d.cancelCh: 1378 return errCancelHeaderProcessing 1379 case <-time.After(time.Second): 1380 } 1381 } 1382 // Otherwise insert the headers for content retrieval 1383 inserts := d.queue.Schedule(chunk, origin) 1384 if len(inserts) != len(chunk) { 1385 glog.V(logger.Debug).Infoln("Stale headers") 1386 return errBadPeer 1387 } 1388 } 1389 headers = headers[limit:] 1390 origin += uint64(limit) 1391 } 1392 1393 // Update the highest block number we know if a higher one is found. 1394 d.syncStatsLock.Lock() 1395 if d.syncStatsChainHeight < origin { 1396 d.syncStatsChainHeight = origin - 1 1397 } 1398 d.syncStatsLock.Unlock() 1399 1400 // Signal the content downloaders of the availablility of new tasks 1401 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 1402 select { 1403 case ch <- true: 1404 default: 1405 } 1406 } 1407 } 1408 } 1409 } 1410 1411 // processFullSyncContent takes fetch results from the queue and imports them into the chain. 1412 func (d *Downloader) processFullSyncContent() error { 1413 for { 1414 results := d.queue.Results(true) 1415 if len(results) == 0 { 1416 return nil 1417 } 1418 if d.chainInsertHook != nil { 1419 d.chainInsertHook(results) 1420 } 1421 if err := d.importBlockResults(results); err != nil { 1422 return err 1423 } 1424 } 1425 } 1426 1427 func (d *Downloader) importBlockResults(results []*fetchResult) error { 1428 // Check for any early termination requests 1429 if len(results) == 0 { 1430 return nil 1431 } 1432 select { 1433 case <-d.quitCh: 1434 return errCancelContentProcessing 1435 default: 1436 } 1437 // Retrieve the a batch of results to import 1438 first, last := results[0].Header, results[len(results)-1].Header 1439 glog.V(logger.Debug).Infoln("Inserting downloaded chain", "items", len(results), 1440 "firstnum", first.Number, "firsthash", first.Hash().Hex()[:9], 1441 "lastnum", last.Number, "lasthash", last.Hash().Hex()[:9], 1442 ) 1443 blocks := make([]*types.Block, len(results)) 1444 for i, result := range results { 1445 blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles) 1446 } 1447 1448 res := d.blockchain.InsertChain(blocks) 1449 if res.Error != nil { 1450 glog.V(logger.Debug).Infoln("Downloaded item processing failed", "number", results[res.Index].Header.Number, "hash", results[res.Index].Header.Hash(), "err", res.Error) 1451 return errInvalidChain 1452 } 1453 go d.mux.Post(InsertChainEvent{res.ChainInsertEvent}) 1454 return nil 1455 } 1456 1457 // processFastSyncContent takes fetch results from the queue and writes them to the 1458 // database. It also controls the synchronisation of state nodes of the pivot block. 1459 func (d *Downloader) processFastSyncContent(latest *types.Header) error { 1460 // Start syncing state of the reported head block. 1461 // This should get us most of the state of the pivot block. 1462 stateSync := d.syncState(latest.Root) 1463 defer stateSync.Cancel() 1464 go func() { 1465 if err := stateSync.Wait(); err != nil { 1466 d.queue.Close() // wake up WaitResults 1467 } 1468 }() 1469 // Figure out the ideal pivot block. 
1470	// sync takes long enough for the chain head to move significantly.
1471	pivot := uint64(0)
1472	if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) {
1473		pivot = height - uint64(fsMinFullBlocks)
1474	}
1475	// To cater for moving pivot points, track the pivot block and subsequently
1476	// accumulated download results separately.
1477	var (
1478		oldPivot *fetchResult   // Locked in pivot block, might change eventually
1479		oldTail  []*fetchResult // Downloaded content after the pivot
1480	)
1481	for {
1482		// Wait for the next batch of downloaded data to be available, and if the pivot
1483		// block became stale, move the goalpost
1484		results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
1485		if len(results) == 0 {
1486			// If pivot sync is done, stop
1487			if oldPivot == nil {
1488				return stateSync.Cancel()
1489			}
1490			// If sync failed, stop
1491			select {
1492			case <-d.cancelCh:
1493				return stateSync.Cancel()
1494			default:
1495			}
1496		}
1497		if d.chainInsertHook != nil {
1498			d.chainInsertHook(results)
1499		}
1500		if oldPivot != nil {
1501			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
1502		}
1503		// Split around the pivot block and process the two sides via fast/full sync
1504		if atomic.LoadInt32(&d.committed) == 0 {
1505			latest = results[len(results)-1].Header
1506			if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
1507				glog.V(logger.Warn).Warnln("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
1508				pivot = height - uint64(fsMinFullBlocks)
1509			}
1510		}
1511		P, beforeP, afterP := splitAroundPivot(pivot, results)
1512		if err := d.commitFastSyncData(beforeP, stateSync); err != nil {
1513			return err
1514		}
1515		if P != nil {
1516			// If new pivot block found, cancel old state retrieval and restart
1517			if oldPivot != P {
1518				stateSync.Cancel()
1519
1520				stateSync = d.syncState(P.Header.Root)
1521				defer stateSync.Cancel()
1522				go func() {
1523					if err := stateSync.Wait(); err != nil {
1524						d.queue.Close() // wake up WaitResults
1525					}
1526				}()
1527				oldPivot = P
1528			}
1529			// Wait for completion, occasionally checking for pivot staleness
1530			select {
1531			case <-stateSync.done:
1532				if stateSync.err != nil {
1533					return stateSync.err
1534				}
1535				if err := d.commitPivotBlock(P); err != nil {
1536					return err
1537				}
1538				oldPivot = nil
1539
1540			case <-time.After(time.Second):
1541				oldTail = afterP
1542				continue
1543			}
1544		}
1545		// Fast sync done, pivot commit done, full import
1546		if err := d.importBlockResults(afterP); err != nil {
1547			return err
1548		}
1549	}
1550 }
1551
1552 func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
1553	for _, result := range results {
1554		num := result.Header.Number.Uint64()
1555		switch {
1556		case num < pivot:
1557			before = append(before, result)
1558		case num == pivot:
1559			p = result
1560		default:
1561			after = append(after, result)
1562		}
1563	}
1564	return p, before, after
1565 }
1566
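// The pivot bookkeeping above reduces to two rules: the pivot trails the reported head
// by fsMinFullBlocks, and it is only moved once the head has advanced more than
// 2*fsMinFullBlocks past it (e.g. with fsMinFullBlocks = 64, a head of 10000 gives
// pivot 9936, which stays put until the head passes 10064). The sketch below restates
// that arithmetic; the function name and parameters are illustrative assumptions, not
// part of the original file.
func selectPivotExample(head, pivot uint64) uint64 {
	if pivot == 0 && head > uint64(fsMinFullBlocks) {
		// Initial pivot: trail the reported head by fsMinFullBlocks.
		return head - uint64(fsMinFullBlocks)
	}
	if head > pivot+2*uint64(fsMinFullBlocks) {
		// Stale pivot: the head moved far enough that keeping the old pivot would
		// leave too many post-pivot blocks to import fully, so trail the new head.
		return head - uint64(fsMinFullBlocks)
	}
	return pivot
}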
1567 func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
1568	// Check for any early termination requests
1569	if len(results) == 0 {
1570		return nil
1571	}
1572	select {
1573	case <-d.quitCh:
1574		return errCancelContentProcessing
1575	case <-stateSync.done:
1576		if err := stateSync.Wait(); err != nil {
1577			return err
1578		}
1579	default:
1580	}
1581	// Retrieve a batch of results to import
1582	first, last := results[0].Header, results[len(results)-1].Header
1583	glog.V(logger.Debug).Infoln("Inserting fast-sync blocks", "items", len(results),
1584		"firstnum", first.Number, "firsthash", first.Hash().Hex(),
1585		"lastnum", last.Number, "lasthash", last.Hash().Hex(),
1586	)
1587	blocks := make([]*types.Block, len(results))
1588	receipts := make([]types.Receipts, len(results))
1589	for i, result := range results {
1590		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
1591		receipts[i] = result.Receipts
1592	}
1593	res := d.blockchain.InsertReceiptChain(blocks, receipts)
1594	if res.Error != nil {
1595		glog.V(logger.Debug).Infoln("Downloaded item processing failed", "number", results[res.Index].Header.Number, "hash", results[res.Index].Header.Hash(), "err", res.Error)
1596		return errInvalidChain
1597	}
1598	// TODO(whilei): pass error in Receipt and Full chain events through
1599	go d.mux.Post(InsertReceiptChainEvent{ReceiptChainInsertEvent: res.ReceiptChainInsertEvent, Pivot: false})
1600	return nil
1601 }
1602
1603 func (d *Downloader) commitPivotBlock(result *fetchResult) error {
1604	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
1605	glog.V(logger.Debug).Infoln("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
1606	res := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts})
1607	if res.Error != nil {
1608		return res.Error
1609	}
1610	if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
1611		return err
1612	}
1613	atomic.StoreInt32(&d.committed, 1)
1614	// TODO(whilei): pass error in Receipt and Full chain events through
1615	go d.mux.Post(InsertReceiptChainEvent{ReceiptChainInsertEvent: res.ReceiptChainInsertEvent, Pivot: false})
1616	return nil
1617 }
1618
1619 // DeliverHeaders injects a new batch of block headers received from a remote
1620 // node into the download schedule.
1621 func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
1622	return d.deliver(id, d.headerCh, &headerPack{id, headers}, metrics.DLHeaders.Mark, metrics.DLHeaderDrops.Mark)
1623 }
1624
1625 // DeliverBodies injects a new batch of block bodies received from a remote node.
1626 func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
1627	return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, metrics.DLBodies.Mark, metrics.DLBodyDrops.Mark)
1628 }
1629
1630 // DeliverReceipts injects a new batch of receipts received from a remote node.
1631 func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
1632	return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, metrics.DLReceipts.Mark, metrics.DLReceiptDrops.Mark)
1633 }
1634
1635 // DeliverNodeData injects a new batch of node state data received from a remote node.
1636 func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
1637	return d.deliver(id, d.stateCh, &statePack{id, data}, metrics.DLStates.Mark, metrics.DLStateDrops.Mark)
1638 }
1639
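// The Deliver* methods above are the entry points a protocol handler calls when a
// response arrives from a peer; errNoSyncActive simply means no sync is currently
// interested in the data. A minimal sketch of such a call site follows; the function
// name and the surrounding handler context are assumptions for illustration only.
func deliverHeadersExample(d *Downloader, peerID string, headers []*types.Header) {
	// Hand a decoded batch of headers to the downloader for scheduling.
	if err := d.DeliverHeaders(peerID, headers); err != nil {
		glog.V(logger.Debug).Infof("header delivery from %s not accepted: %v", peerID, err)
	}
}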
1640 // deliver injects a new batch of data received from a remote node.
1641 func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, mark, markDrop func(int64)) (err error) {
1642	// Update the delivery metrics for both good and failed deliveries
1643	mark(int64(packet.Items()))
1644	defer func() {
1645		if err != nil {
1646			markDrop(int64(packet.Items()))
1647		}
1648	}()
1649	// Deliver or abort if the sync is canceled while queuing
1650	d.cancelLock.RLock()
1651	cancel := d.cancelCh
1652	d.cancelLock.RUnlock()
1653	if cancel == nil {
1654		return errNoSyncActive
1655	}
1656	select {
1657	case destCh <- packet:
1658		return nil
1659	case <-cancel:
1660		return errNoSyncActive
1661	}
1662 }
1663
1664 // qosTuner is the quality of service tuning loop that occasionally gathers the
1665 // peer latency statistics and updates the estimated request round trip time.
1666 func (d *Downloader) qosTuner() {
1667	for {
1668		// Retrieve the current median RTT and integrate into the previous target RTT
1669		rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
1670		atomic.StoreUint64(&d.rttEstimate, uint64(rtt))
1671
1672		// A new RTT cycle passed, increase our confidence in the estimated RTT
1673		conf := atomic.LoadUint64(&d.rttConfidence)
1674		conf = conf + (1000000-conf)/2
1675		atomic.StoreUint64(&d.rttConfidence, conf)
1676
1677		// Log the new QoS values and sleep until the next RTT
1678		ttl := d.requestTTL()
1679		if logger.MlogEnabled() {
1680			mlogDownloaderTuneQOS.AssignDetails(
1681				rtt,
1682				float64(conf)/1000000.0,
1683				ttl,
1684			).Send(mlogDownloader)
1685		}
1686		glog.V(logger.Debug).Infof("Quality of service: rtt %v, conf %.3f, ttl %v", rtt, float64(conf)/1000000.0, ttl)
1687		select {
1688		case <-d.quitCh:
1689			return
1690		case <-time.After(rtt):
1691		}
1692	}
1693 }
1694
1695 // qosReduceConfidence is meant to be called when a new peer joins the downloader's
1696 // peer set, needing to reduce the confidence we have in our QoS estimates.
1697 func (d *Downloader) qosReduceConfidence() {
1698	// If we have a single peer, confidence is always 1
1699	peers := uint64(d.peers.Len())
1700	if peers == 0 {
1701		// Ensure peer connectivity races don't catch us off guard
1702		return
1703	}
1704	if peers == 1 {
1705		atomic.StoreUint64(&d.rttConfidence, 1000000)
1706		return
1707	}
1708	// If we have a ton of peers, don't drop confidence
1709	if peers >= uint64(qosConfidenceCap) {
1710		return
1711	}
1712	// Otherwise drop the confidence factor
1713	conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
1714	if float64(conf)/1000000 < rttMinConfidence {
1715		conf = uint64(rttMinConfidence * 1000000)
1716	}
1717	atomic.StoreUint64(&d.rttConfidence, conf)
1718
1719	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
1720	glog.V(logger.Debug).Infoln("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
1721 }
1722
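// The tuning loop above is an exponential moving average: each cycle the RTT estimate
// moves qosTuningImpact (25%) of the way towards the current median peer RTT, while the
// confidence (stored in parts per million) closes half of its remaining distance to 1.
// A worked sketch with assumed starting values (not taken from the original file):
func qosTuneExample() (time.Duration, float64) {
	oldRTT := 10 * time.Second   // assumed previous estimate
	medianRTT := 2 * time.Second // assumed median RTT of the best peers

	// 0.75*10s + 0.25*2s = 8s after one cycle; repeated cycles converge on the median.
	newRTT := time.Duration((1-qosTuningImpact)*float64(oldRTT) + qosTuningImpact*float64(medianRTT))

	// 0.5 -> 0.75 -> 0.875 -> ... converging towards full confidence.
	conf := 0.5
	conf = conf + (1-conf)/2

	return newRTT, conf
}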
1723 // requestRTT returns the current target round trip time for a download request
1724 // to complete in.
1725 //
1726 // Note, the returned RTT is .9 of the actual estimated RTT. The reason is that
1727 // the downloader tries to adapt queries to the RTT, so multiple RTT values can
1728 // be adapted to, but smaller ones are preferred (more stable download stream).
1729 func (d *Downloader) requestRTT() time.Duration {
1730	return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
1731 }
1732
1733 // requestTTL returns the current timeout allowance for a single download request
1734 // to finish under.
1735 func (d *Downloader) requestTTL() time.Duration {
1736	var (
1737		rtt  = time.Duration(atomic.LoadUint64(&d.rttEstimate))
1738		conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
1739	)
1740	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
1741	if ttl > ttlLimit {
1742		ttl = ttlLimit
1743	}
1744	return ttl
1745 }
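// requestTTL above scales the RTT estimate by ttlScaling and divides by the confidence
// factor, so low confidence directly inflates the allowed timeout, hard-capped at
// ttlLimit. A worked sketch reusing the package-level values, with assumed inputs:
func requestTTLExample() time.Duration {
	rtt := 2 * time.Second // assumed RTT estimate
	conf := 0.5            // assumed confidence factor

	// 3 * 2s / 0.5 = 12s; with rtt = 20s the raw value would be 120s and get capped to 1m.
	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}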