github.com/halybang/go-ethereum@v1.0.5-0.20180325041310-3b262bc1367c/eth/downloader/downloader.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package downloader contains the manual full chain synchronisation.
package downloader

import (
	"crypto/rand"
	"errors"
	"fmt"
	"math"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	metrics "github.com/rcrowley/go-metrics"
	ethereum "github.com/wanchain/go-wanchain"
	"github.com/wanchain/go-wanchain/common"
	"github.com/wanchain/go-wanchain/core/types"
	"github.com/wanchain/go-wanchain/ethdb"
	"github.com/wanchain/go-wanchain/event"
	"github.com/wanchain/go-wanchain/log"
	"github.com/wanchain/go-wanchain/params"
)

var (
	MaxHashFetch    = 512 // Amount of hashes to be fetched per retrieval request
	MaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request
	MaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request
	MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
	MaxBodyFetch    = 128 // Amount of block bodies to be fetched per retrieval request
	MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
	MaxStateFetch   = 384 // Amount of node state values to allow fetching per request

	MaxForkAncestry  = 3 * params.EpochDuration // Maximum chain reorganisation
	rttMinEstimate   = 2 * time.Second          // Minimum round-trip time to target for download requests
	rttMaxEstimate   = 20 * time.Second         // Maximum round-trip time to target for download requests
	rttMinConfidence = 0.1                      // Worst confidence factor allowed in our estimated RTT value
	ttlScaling       = 3                        // Constant scaling factor for RTT -> TTL conversion
	ttlLimit         = time.Minute              // Maximum TTL allowance to prevent reaching crazy timeouts

	qosTuningPeers   = 5    // Number of peers to tune based on (best peers)
	qosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence
	qosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value

	maxQueuedHeaders  = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
	maxHeadersProcess = 2048      // Number of header download results to import at once into the chain
	maxResultsProcess = 2048      // Number of content download results to import at once into the chain

	fsHeaderCheckFrequency = 100  // Verification frequency of the downloaded headers during fast sync
	fsHeaderSafetyNet      = 2048 // Number of headers to discard in case a chain violation is detected
	fsHeaderForceVerify    = 24   // Number of headers to verify before and after the pivot to accept it
	fsPivotInterval        = 256  // Number of headers out of which to randomize the pivot point
	fsMinFullBlocks  = 64         // Number of blocks to retrieve fully even in fast sync
	fsCriticalTrials = uint32(32) // Number of times to retry in the critical section before bailing
)

var (
	errBusy                    = errors.New("busy")
	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
	errBadPeer                 = errors.New("action from bad peer ignored")
	errStallingPeer            = errors.New("peer is stalling")
	errNoPeers                 = errors.New("no peers to keep download active")
	errTimeout                 = errors.New("timeout")
	errEmptyHeaderSet          = errors.New("empty header set by peer")
	errPeersUnavailable        = errors.New("no peers available or all tried for download")
	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
	errInvalidChain            = errors.New("retrieved hash chain is invalid")
	errInvalidBlock            = errors.New("retrieved block is invalid")
	errInvalidBody             = errors.New("retrieved block body is invalid")
	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
	errCancelBlockFetch        = errors.New("block download canceled (requested)")
	errCancelHeaderFetch       = errors.New("block header download canceled (requested)")
	errCancelBodyFetch         = errors.New("block body download canceled (requested)")
	errCancelReceiptFetch      = errors.New("receipt download canceled (requested)")
	errCancelStateFetch        = errors.New("state data download canceled (requested)")
	errCancelHeaderProcessing  = errors.New("header processing canceled (requested)")
	errCancelContentProcessing = errors.New("content processing canceled (requested)")
	errNoSyncActive            = errors.New("no sync active")
	errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
)

type Downloader struct {
	mode SyncMode       // Synchronisation mode defining the strategy used (per sync cycle)
	mux  *event.TypeMux // Event multiplexer to announce sync operation events

	queue   *queue   // Scheduler for selecting the hashes to download
	peers   *peerSet // Set of active peers from which download can proceed
	stateDB ethdb.Database

	fsPivotLock  *types.Header // Pivot header on critical section entry (cannot change between retries)
	fsPivotFails uint32        // Number of subsequent fast sync failures in the critical section

	rttEstimate   uint64 // Round trip time to target for download requests
	rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)

	// Statistics
	syncStatsChainOrigin uint64 // Origin block number where syncing started at
	syncStatsChainHeight uint64 // Highest block number known when syncing started
	syncStatsState       stateSyncStats
	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields

	lightchain LightChain
	blockchain BlockChain

	// Callbacks
	dropPeer peerDropFn // Drops a peer for misbehaving

	// Status
	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
	synchronising   int32
	notified        int32

	// Channels
	headerCh      chan dataPack        // [eth/62] Channel receiving inbound block headers
	bodyCh        chan dataPack        // [eth/62] Channel receiving inbound block bodies
	receiptCh     chan dataPack        // [eth/63] Channel receiving inbound receipts
	bodyWakeCh    chan bool            // [eth/62] Channel to signal the block body fetcher of new tasks
	receiptWakeCh chan bool            // [eth/63] Channel to signal the receipt fetcher of new tasks
	headerProcCh  chan []*types.Header // [eth/62] Channel to feed the header processor new tasks

	// for stateFetcher
	stateSyncStart chan *stateSync
	trackStateReq  chan *stateReq
	stateCh        chan dataPack // [eth/63] Channel receiving inbound node state data

	// Cancellation and termination
	cancelPeer string        // Identifier of the peer currently being used as the master (cancel on drop)
	cancelCh   chan struct{} // Channel to cancel mid-flight syncs
	cancelLock sync.RWMutex  // Lock to protect the cancel channel and peer in delivers

	quitCh   chan struct{} // Quit channel to signal termination
	quitLock sync.RWMutex  // Lock to prevent double closes

	// Testing hooks
	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
}

// LightChain encapsulates functions required to synchronise a light chain.
type LightChain interface {
	// HasHeader verifies a header's presence in the local chain.
	HasHeader(h common.Hash, number uint64) bool

	// GetHeaderByHash retrieves a header from the local chain.
	GetHeaderByHash(common.Hash) *types.Header

	// CurrentHeader retrieves the head header from the local chain.
	CurrentHeader() *types.Header

	// GetTdByHash returns the total difficulty of a local block.
	GetTdByHash(common.Hash) *big.Int

	// InsertHeaderChain inserts a batch of headers into the local chain.
	InsertHeaderChain([]*types.Header, int) (int, error)

	// Rollback removes a few recently added elements from the local chain.
	Rollback([]common.Hash)
}

// BlockChain encapsulates functions required to sync a (full or fast) blockchain.
type BlockChain interface {
	LightChain

	// HasBlockAndState verifies block and associated states' presence in the local chain.
	HasBlockAndState(common.Hash) bool

	// GetBlockByHash retrieves a block from the local chain.
	GetBlockByHash(common.Hash) *types.Block

	// CurrentBlock retrieves the head block from the local chain.
	CurrentBlock() *types.Block

	// CurrentFastBlock retrieves the head fast block from the local chain.
	CurrentFastBlock() *types.Block

	// FastSyncCommitHead directly commits the head block to a certain entity.
	FastSyncCommitHead(common.Hash) error

	// InsertChain inserts a batch of blocks into the local chain.
	InsertChain(types.Blocks) (int, error)

	// InsertReceiptChain inserts a batch of receipts into the local chain.
	InsertReceiptChain(types.Blocks, []types.Receipts) (int, error)
}
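
// As a rough illustration of what the downloader expects from these interfaces,
// a test double for LightChain could be backed by a simple header map. This is
// a hedged sketch only; the fakeLightChain type and its fields are hypothetical
// and not part of this package:
//
//	type fakeLightChain struct {
//		headers map[common.Hash]*types.Header
//		head    *types.Header
//	}
//
//	func (c *fakeLightChain) HasHeader(h common.Hash, number uint64) bool {
//		_, ok := c.headers[h]
//		return ok
//	}
//	func (c *fakeLightChain) GetHeaderByHash(h common.Hash) *types.Header { return c.headers[h] }
//	func (c *fakeLightChain) CurrentHeader() *types.Header                { return c.head }
//	func (c *fakeLightChain) GetTdByHash(common.Hash) *big.Int            { return common.Big0 }
//	func (c *fakeLightChain) InsertHeaderChain(hs []*types.Header, _ int) (int, error) {
//		for _, h := range hs {
//			c.headers[h.Hash()] = h
//			c.head = h
//		}
//		return 0, nil
//	}
//	func (c *fakeLightChain) Rollback([]common.Hash) {}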

// New creates a new downloader to fetch hashes and blocks from remote peers.
func New(mode SyncMode, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
	if lightchain == nil {
		lightchain = chain
	}

	dl := &Downloader{
		mode:           mode,
		stateDB:        stateDb,
		mux:            mux,
		queue:          newQueue(),
		peers:          newPeerSet(),
		rttEstimate:    uint64(rttMaxEstimate),
		rttConfidence:  uint64(1000000),
		blockchain:     chain,
		lightchain:     lightchain,
		dropPeer:       dropPeer,
		headerCh:       make(chan dataPack, 1),
		bodyCh:         make(chan dataPack, 1),
		receiptCh:      make(chan dataPack, 1),
		bodyWakeCh:     make(chan bool, 1),
		receiptWakeCh:  make(chan bool, 1),
		headerProcCh:   make(chan []*types.Header, 1),
		quitCh:         make(chan struct{}),
		stateCh:        make(chan dataPack),
		stateSyncStart: make(chan *stateSync),
		trackStateReq:  make(chan *stateReq),
	}
	go dl.qosTuner()
	go dl.stateFetcher()
	return dl
}

// Progress retrieves the synchronisation boundaries, specifically the origin
// block where synchronisation started at (may have failed/suspended); the block
// or header sync is currently at; and the latest known block which the sync targets.
//
// In addition, during the state download phase of fast synchronisation the number
// of processed and the total number of known states are also returned. Otherwise
// these are zero.
func (d *Downloader) Progress() ethereum.SyncProgress {
	// Lock the current stats and return the progress
	d.syncStatsLock.RLock()
	defer d.syncStatsLock.RUnlock()

	current := uint64(0)
	switch d.mode {
	case FullSync:
		current = d.blockchain.CurrentBlock().NumberU64()
	case FastSync:
		current = d.blockchain.CurrentFastBlock().NumberU64()
	case LightSync:
		current = d.lightchain.CurrentHeader().Number.Uint64()
	}
	return ethereum.SyncProgress{
		StartingBlock: d.syncStatsChainOrigin,
		CurrentBlock:  current,
		HighestBlock:  d.syncStatsChainHeight,
		PulledStates:  d.syncStatsState.processed,
		KnownStates:   d.syncStatsState.processed + d.syncStatsState.pending,
	}
}

// Synchronising returns whether the downloader is currently retrieving blocks.
func (d *Downloader) Synchronising() bool {
	return atomic.LoadInt32(&d.synchronising) > 0
}

// RegisterPeer injects a new download peer into the set of block sources to be
// used for fetching hashes and blocks from.
func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
	logger := log.New("peer", id)
	logger.Trace("Registering sync peer")
	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
		logger.Error("Failed to register sync peer", "err", err)
		return err
	}
	d.qosReduceConfidence()

	return nil
}

// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error {
	return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
}
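
// Taken together, New, RegisterPeer and Progress are the wiring a protocol
// handler needs before it can drive a sync. A hedged sketch of that wiring
// (the stateDb, mux, chain, dropPeer, peerID and peerConn values are
// placeholders supplied by the caller, not defined in this file):
//
//	dl := New(FastSync, stateDb, mux, chain, nil, dropPeer)
//	if err := dl.RegisterPeer(peerID, 63, peerConn); err != nil {
//		log.Error("Peer registration failed", "err", err)
//	}
//	prog := dl.Progress()
//	log.Info("Sync progress", "current", prog.CurrentBlock, "highest", prog.HighestBlock)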

// UnregisterPeer removes a peer from the known list, preventing any action from
// the specified peer. An effort is also made to return any pending fetches into
// the queue.
func (d *Downloader) UnregisterPeer(id string) error {
	// Unregister the peer from the active peer set and revoke any fetch tasks
	logger := log.New("peer", id)
	logger.Trace("Unregistering sync peer")
	if err := d.peers.Unregister(id); err != nil {
		logger.Error("Failed to unregister sync peer", "err", err)
		return err
	}
	d.queue.Revoke(id)

	// If this peer was the master peer, abort sync immediately
	d.cancelLock.RLock()
	master := id == d.cancelPeer
	d.cancelLock.RUnlock()

	if master {
		d.Cancel()
	}
	return nil
}

// Synchronise tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries.
func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
	err := d.synchronise(id, head, td, mode)
	switch err {
	case nil:
	case errBusy:

	case errTimeout, errBadPeer, errStallingPeer,
		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
		errInvalidAncestor, errInvalidChain:
		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
		d.dropPeer(id)

	default:
		log.Warn("Synchronisation failed, retrying", "err", err)
	}
	return err
}

// synchronise will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronize if its TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous.
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
	// Mock out the synchronisation if testing
	if d.synchroniseMock != nil {
		return d.synchroniseMock(id, hash)
	}
	// Make sure only one goroutine is ever allowed past this point at once
	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&d.synchronising, 0)

	// Post a user notification of the sync (only once per session)
	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
		log.Info("Block synchronisation started")
	}
	// Reset the queue, peer set and wake channels to clean any internal leftover state
	d.queue.Reset()
	d.peers.Reset()

	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
		select {
		case <-ch:
		default:
		}
	}
	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
		for empty := false; !empty; {
			select {
			case <-ch:
			default:
				empty = true
			}
		}
	}
	for empty := false; !empty; {
		select {
		case <-d.headerProcCh:
		default:
			empty = true
		}
	}
	// Create cancel channel for aborting mid-flight and mark the master peer
	d.cancelLock.Lock()
	d.cancelCh = make(chan struct{})
	d.cancelPeer = id
	d.cancelLock.Unlock()

	defer d.Cancel() // No matter what, we can't leave the cancel channel open

	// Set the requested sync mode, unless it's forbidden
	d.mode = mode
	if d.mode == FastSync && atomic.LoadUint32(&d.fsPivotFails) >= fsCriticalTrials {
		d.mode = FullSync
	}
	// Retrieve the origin peer and initiate the downloading process
	p := d.peers.Peer(id)
	if p == nil {
		return errUnknownPeer
	}

	return d.syncWithPeer(p, hash, td)
}
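
// In practice the eth protocol manager invokes Synchronise from its sync loop
// whenever a peer advertises a higher total difficulty; the downloader only
// classifies the failure (drop vs. retry), the retry policy stays with the
// caller. A hedged sketch (bestPeer and its accessors are placeholders):
//
//	head, td := bestPeer.Head()
//	if err := dl.Synchronise(bestPeer.ID(), head, td, FastSync); err != nil {
//		log.Debug("Sync attempt failed, waiting for next cycle", "err", err)
//	}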

// syncWithPeer starts a block synchronization based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
	d.mux.Post(StartEvent{})
	defer func() {
		// reset on error
		if err != nil {
			d.mux.Post(FailedEvent{err})
		} else {
			d.mux.Post(DoneEvent{})
		}
	}()
	if p.version < 62 {
		return errTooOld
	}

	log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
	defer func(start time.Time) {
		log.Debug("Synchronisation terminated", "elapsed", time.Since(start))
	}(time.Now())

	// Look up the sync boundaries: the common ancestor and the target block
	latest, err := d.fetchHeight(p)
	if err != nil {
		return err
	}
	height := latest.Number.Uint64()

	origin, err := d.findAncestor(p, height)
	if err != nil {
		return err
	}
	d.syncStatsLock.Lock()
	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
		d.syncStatsChainOrigin = origin
	}
	d.syncStatsChainHeight = height
	d.syncStatsLock.Unlock()

	// Initiate the sync using a concurrent header and content retrieval algorithm
	pivot := uint64(0)
	switch d.mode {
	case LightSync:
		pivot = height
	case FastSync:
		// Calculate the new fast/slow sync pivot point
		if d.fsPivotLock == nil {
			pivotOffset, err := rand.Int(rand.Reader, big.NewInt(int64(fsPivotInterval)))
			if err != nil {
				panic(fmt.Sprintf("Failed to access crypto random source: %v", err))
			}
			if height > uint64(fsMinFullBlocks)+pivotOffset.Uint64() {
				pivot = height - uint64(fsMinFullBlocks) - pivotOffset.Uint64()
			}
		} else {
			// Pivot point locked in, use this and do not pick a new one!
			pivot = d.fsPivotLock.Number.Uint64()
		}
		// If the point is below the origin, move origin back to ensure state download
		if pivot < origin {
			if pivot > 0 {
				origin = pivot - 1
			} else {
				origin = 0
			}
		}
		log.Debug("Fast syncing until pivot block", "pivot", pivot)
	}
	d.queue.Prepare(origin+1, d.mode, pivot, latest)
	if d.syncInitHook != nil {
		d.syncInitHook(origin, height)
	}

	fetchers := []func() error{
		func() error { return d.fetchHeaders(p, origin+1) }, // Headers are always retrieved
		func() error { return d.fetchBodies(origin + 1) },   // Bodies are retrieved during normal and fast sync
		func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync
		func() error { return d.processHeaders(origin+1, td) },
	}
	if d.mode == FastSync {
		fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) })
	} else if d.mode == FullSync {
		fetchers = append(fetchers, d.processFullSyncContent)
	}
	err = d.spawnSync(fetchers)
	if err != nil && d.mode == FastSync && d.fsPivotLock != nil {
		// If sync failed in the critical section, bump the fail counter.
		atomic.AddUint32(&d.fsPivotFails, 1)
	}
	return err
}
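
// To make the pivot selection above concrete: with fsMinFullBlocks = 64 and
// fsPivotInterval = 256, a peer reporting height 1,000,000 yields a pivot in
// the range [999,681, 999,936], for example with a random offset of 100:
//
//	pivot = height - fsMinFullBlocks - pivotOffset
//	      = 1000000 - 64 - 100 = 999836
//
// so the final stretch of blocks is always imported fully even in fast sync,
// and the exact pivot stays unpredictable to a peer trying to pre-compute bad
// state for it.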

// spawnSync runs d.process and all given fetcher functions to completion in
// separate goroutines, returning the first error that appears.
func (d *Downloader) spawnSync(fetchers []func() error) error {
	var wg sync.WaitGroup
	errc := make(chan error, len(fetchers))
	wg.Add(len(fetchers))
	for _, fn := range fetchers {
		fn := fn
		go func() { defer wg.Done(); errc <- fn() }()
	}
	// Wait for the first error, then terminate the others.
	var err error
	for i := 0; i < len(fetchers); i++ {
		if i == len(fetchers)-1 {
			// Close the queue when all fetchers have exited.
			// This will cause the block processor to end when
			// it has processed the queue.
			d.queue.Close()
		}
		if err = <-errc; err != nil {
			break
		}
	}
	d.queue.Close()
	d.Cancel()
	wg.Wait()
	return err
}

// Cancel aborts all pending operations and resets the queue.
func (d *Downloader) Cancel() {
	// Close the current cancel channel
	d.cancelLock.Lock()
	if d.cancelCh != nil {
		select {
		case <-d.cancelCh:
			// Channel was already closed
		default:
			close(d.cancelCh)
		}
	}
	d.cancelLock.Unlock()
}

// Terminate interrupts the downloader, canceling all pending operations.
// The downloader cannot be reused after calling Terminate.
func (d *Downloader) Terminate() {
	// Close the termination channel (make sure double close is allowed)
	d.quitLock.Lock()
	select {
	case <-d.quitCh:
	default:
		close(d.quitCh)
	}
	d.quitLock.Unlock()

	// Cancel any pending download requests
	d.Cancel()
}

// fetchHeight retrieves the head header of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
	p.log.Debug("Retrieving remote chain height")

	// Request the advertised remote head block and wait for the response
	head, _ := p.peer.Head()
	go p.peer.RequestHeadersByHash(head, 1, 0, false)

	ttl := d.requestTTL()
	timeout := time.After(ttl)
	for {
		select {
		case <-d.cancelCh:
			return nil, errCancelBlockFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) != 1 {
				p.log.Debug("Multiple headers for single request", "headers", len(headers))
				return nil, errBadPeer
			}
			head := headers[0]
			p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash())
			return head, nil

		case <-timeout:
			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
			return nil, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
}
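
// fetchHeight shows the request pattern used throughout this file: fire the
// network request asynchronously, then select on the shared delivery channel,
// the cancel channel and a TTL timer. Stripped of logging, the idiom looks
// roughly like the sketch below (waitOneHeader is an illustrative name, not a
// method of this package):
//
//	func (d *Downloader) waitOneHeader(p *peerConnection, ttl time.Duration) (*types.Header, error) {
//		timeout := time.After(ttl)
//		for {
//			select {
//			case <-d.cancelCh:
//				return nil, errCancelHeaderFetch
//			case packet := <-d.headerCh:
//				if packet.PeerId() != p.id {
//					continue // not the peer we asked
//				}
//				if headers := packet.(*headerPack).headers; len(headers) == 1 {
//					return headers[0], nil
//				}
//				return nil, errBadPeer
//			case <-timeout:
//				return nil, errTimeout
//			}
//		}
//	}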

// findAncestor tries to locate the common ancestor link of the local chain and
// a remote peer's blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N links should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
	// Figure out the valid ancestor range to prevent rewrite attacks
	floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()

	p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height)
	if d.mode == FullSync {
		ceil = d.blockchain.CurrentBlock().NumberU64()
	} else if d.mode == FastSync {
		ceil = d.blockchain.CurrentFastBlock().NumberU64()
	}
	if ceil >= MaxForkAncestry {
		floor = int64(ceil - MaxForkAncestry)
	}
	// Request the topmost blocks to short circuit binary ancestor lookup
	head := ceil
	if head > height {
		head = height
	}
	from := int64(head) - int64(MaxHeaderFetch)
	if from < 0 {
		from = 0
	}
	// Span out with 15 block gaps into the future to catch bad head reports
	limit := 2 * MaxHeaderFetch / 16
	count := 1 + int((int64(ceil)-from)/16)
	if count > limit {
		count = limit
	}
	go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false)

	// Wait for the remote response to the head fetch
	number, hash := uint64(0), common.Hash{}

	ttl := d.requestTTL()
	timeout := time.After(ttl)

	for finished := false; !finished; {
		select {
		case <-d.cancelCh:
			return 0, errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) == 0 {
				p.log.Warn("Empty head header set")
				return 0, errEmptyHeaderSet
			}
			// Make sure the peer's reply conforms to the request
			for i := 0; i < len(headers); i++ {
				if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
					p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number)
					return 0, errInvalidChain
				}
			}
			// Check if a common ancestor was found
			finished = true
			for i := len(headers) - 1; i >= 0; i-- {
				// Skip any headers that underflow/overflow our requested set
				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
					continue
				}
				// Otherwise check if we already know the header or not
				if (d.mode == FullSync && d.blockchain.HasBlockAndState(headers[i].Hash())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) {
					number, hash = headers[i].Number.Uint64(), headers[i].Hash()

					// If every header is known, even future ones, the peer straight out lied about its head
					if number > height && i == limit-1 {
						p.log.Warn("Lied about chain head", "reported", height, "found", number)
						return 0, errStallingPeer
					}
					break
				}
			}

		case <-timeout:
			p.log.Debug("Waiting for head header timed out", "elapsed", ttl)
			return 0, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
	// If the head fetch already found an ancestor, return
	if !common.EmptyHash(hash) {
		if int64(number) <= floor {
			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
			return 0, errInvalidAncestor
		}
		p.log.Debug("Found common ancestor", "number", number, "hash", hash)
		return number, nil
	}
	// Ancestor not found, we need to binary search over our chain
	start, end := uint64(0), head
	if floor > 0 {
		start = uint64(floor)
	}
	for start+1 < end {
		// Split our chain interval in two, and request the hash to cross check
		check := (start + end) / 2

		ttl := d.requestTTL()
		timeout := time.After(ttl)

		go p.peer.RequestHeadersByNumber(uint64(check), 1, 0, false)

		// Wait until a reply arrives to this request
		for arrived := false; !arrived; {
			select {
			case <-d.cancelCh:
				return 0, errCancelHeaderFetch

			case packer := <-d.headerCh:
				// Discard anything not from the origin peer
				if packer.PeerId() != p.id {
					log.Debug("Received headers from incorrect peer", "peer", packer.PeerId())
					break
				}
				// Make sure the peer actually gave something valid
				headers := packer.(*headerPack).headers
				if len(headers) != 1 {
					p.log.Debug("Multiple headers for single request", "headers", len(headers))
					return 0, errBadPeer
				}
				arrived = true

				// Modify the search interval based on the response
				if (d.mode == FullSync && !d.blockchain.HasBlockAndState(headers[0].Hash())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) {
					end = check
					break
				}
				header := d.lightchain.GetHeaderByHash(headers[0].Hash()) // Independent of sync mode, header surely exists
				if header.Number.Uint64() != check {
					p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
					return 0, errBadPeer
				}
				start = check

			case <-timeout:
				p.log.Debug("Waiting for search header timed out", "elapsed", ttl)
				return 0, errTimeout

			case <-d.bodyCh:
			case <-d.receiptCh:
				// Out of bounds delivery, ignore
			}
		}
	}
	// Ensure valid ancestry and return
	if int64(start) <= floor {
		p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
		return 0, errInvalidAncestor
	}
	p.log.Debug("Found common ancestor", "number", start, "hash", hash)
	return start, nil
}
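
// A worked example of the head-fetch span above: with a local full chain at
// block 10,000 and a remote height of 12,000, head = 10,000 and
// from = 10,000 - MaxHeaderFetch = 9,808, so the initial request asks for
// count = 1 + (10,000-9,808)/16 = 13 headers at numbers 9808, 9824, ..., 10000
// (a skip of 15 means every 16th block). Only if none of those are known
// locally does the binary search run, narrowing [floor, head] by one request
// per iteration.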

// fetchHeaders keeps retrieving headers concurrently from the number
// requested, until no more are returned, potentially throttling on the way. To
// facilitate concurrency but still protect against malicious nodes sending bad
// headers, we construct a header chain skeleton using the "origin" peer we are
// syncing with, and fill in the missing headers using anyone else. Headers from
// other peers are only accepted if they map cleanly to the skeleton. If no one
// can fill in the skeleton - not even the origin peer - it's assumed invalid and
// the origin is dropped.
func (d *Downloader) fetchHeaders(p *peerConnection, from uint64) error {
	p.log.Debug("Directing header downloads", "origin", from)
	defer p.log.Debug("Header download terminated")

	// Create a timeout timer, and the associated header fetcher
	skeleton := true            // Skeleton assembly phase or finishing up
	request := time.Now()       // time of the last skeleton fetch request
	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
	<-timeout.C                 // timeout channel should be initially empty
	defer timeout.Stop()

	var ttl time.Duration
	getHeaders := func(from uint64) {
		request = time.Now()

		ttl = d.requestTTL()
		timeout.Reset(ttl)

		if skeleton {
			p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
		} else {
			p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
		}
	}
	// Start pulling the header chain skeleton until all is done
	getHeaders(from)

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Make sure the active peer is giving us the skeleton headers
			if packet.PeerId() != p.id {
				log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
				break
			}
			headerReqTimer.UpdateSince(request)
			timeout.Stop()

			// If the skeleton's finished, pull any remaining head headers directly from the origin
			if packet.Items() == 0 && skeleton {
				skeleton = false
				getHeaders(from)
				continue
			}
			// If no more headers are inbound, notify the content fetchers and return
			if packet.Items() == 0 {
				p.log.Debug("No more headers available")
				select {
				case d.headerProcCh <- nil:
					return nil
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}
			headers := packet.(*headerPack).headers

			// If we received a skeleton batch, resolve internals concurrently
			if skeleton {
				filled, proced, err := d.fillHeaderSkeleton(from, headers)
				if err != nil {
					p.log.Debug("Skeleton chain invalid", "err", err)
					return errInvalidChain
				}
				headers = filled[proced:]
				from += uint64(proced)
			}
			// Insert all the new headers and fetch the next batch
			if len(headers) > 0 {
				p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
				select {
				case d.headerProcCh <- headers:
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
				from += uint64(len(headers))
			}
			getHeaders(from)

		case <-timeout.C:
			// Header retrieval timed out, consider the peer bad and drop
			p.log.Debug("Header request timed out", "elapsed", ttl)
			headerTimeoutMeter.Mark(1)
			d.dropPeer(p.id)

			// Finish the sync gracefully instead of dumping the gathered data though
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- false:
				case <-d.cancelCh:
				}
			}
			select {
			case d.headerProcCh <- nil:
			case <-d.cancelCh:
			}
			return errBadPeer
		}
	}
}
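
// For scale: one skeleton round above asks for MaxSkeletonSize = 128 anchor
// headers spaced MaxHeaderFetch = 192 apart, i.e. at numbers
//
//	from+191, from+383, ..., from+128*192-1
//
// so each fully filled skeleton advances the header chain by 128 * 192 = 24,576
// headers, with the gaps between anchors filled concurrently by whichever other
// peers are idle (see fillHeaderSkeleton below).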

// fillHeaderSkeleton concurrently retrieves headers from all our available peers
// and maps them to the provided skeleton header chain.
//
// Any partial results from the beginning of the skeleton are (if possible) forwarded
// immediately to the header processor to keep the rest of the pipeline full even
// in the case of header stalls.
//
// The method returns the entire filled skeleton and also the number of headers
// already forwarded for processing.
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
	log.Debug("Filling up skeleton", "from", from)
	d.queue.ScheduleSkeleton(from, skeleton)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*headerPack)
			return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh)
		}
		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
		throttle = func() bool { return false }
		reserve  = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
			return d.queue.ReserveHeaders(p, count), false, nil
		}
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
		capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
	)
	err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")

	log.Debug("Skeleton fill terminated", "err", err)

	filled, proced := d.queue.RetrieveHeaders()
	return filled, proced, err
}

// fetchBodies iteratively downloads the scheduled block bodies, taking any
// available peers, reserving a chunk of blocks for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchBodies(from uint64) error {
	log.Debug("Downloading block bodies", "origin", from)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*bodyPack)
			return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles)
		}
		expire   = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
		capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
	)
	err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")

	log.Debug("Block body download terminated", "err", err)
	return err
}
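
// fetchBodies above and fetchReceipts below differ only in the closures they
// hand to fetchParts; the queue methods and peer capacity calls are the
// type-specific parts. In both cases the roles are:
//
//	deliver  - hand a received packet to the queue, returning how many items were accepted
//	expire   - time out in-flight requests and report the offending peers
//	fetch    - send the actual network request for a reserved batch
//	capacity - estimate how many items this peer can return within one target RTT
//	setIdle  - return the peer to the idle pool with its updated throughput estimate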

// fetchReceipts iteratively downloads the scheduled block receipts, taking any
// available peers, reserving a chunk of receipts for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchReceipts(from uint64) error {
	log.Debug("Downloading transaction receipts", "origin", from)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*receiptPack)
			return d.queue.DeliverReceipts(pack.peerId, pack.receipts)
		}
		expire   = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) }
		capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
	)
	err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")

	log.Debug("Transaction receipt download terminated", "err", err)
	return err
}

// fetchParts iteratively downloads scheduled block parts, taking any available
// peers, reserving a chunk of fetch requests for each, waiting for delivery and
// also periodically checking for timeouts.
//
// As the scheduling/timeout logic mostly is the same for all downloaded data
// types, this method is used by each for data gathering and is instrumented with
// various callbacks to handle the slight differences between processing them.
//
// The instrumentation parameters:
//  - errCancel:  error type to return if the fetch operation is cancelled (mostly makes logging nicer)
//  - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
//  - deliver:    processing callback to deliver data packets into type specific download queues (usually within `queue`)
//  - wakeCh:     notification channel for waking the fetcher when new tasks are available (or sync completed)
//  - expire:     task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
//  - pending:    task callback for the number of requests still needing download (detect completion/non-completability)
//  - inFlight:   task callback for the number of in-progress requests (wait for all active downloads to finish)
//  - throttle:   task callback to check if the processing queue is full and activate throttling (bound memory use)
//  - reserve:    task callback to reserve new download tasks to a particular peer (also signals partial completions)
//  - fetchHook:  tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
//  - fetch:      network callback to actually send a particular download request to a physical remote peer
//  - cancel:     task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
//  - capacity:   network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
//  - idle:       network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
//  - setIdle:    network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
//  - kind:       textual label of the type being downloaded to display in log messages
func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
	expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error),
	fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
	idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error {

	// Create a ticker to detect expired retrieval tasks
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	update := make(chan struct{}, 1)

	// Prepare the queue and fetch block parts until the block header fetcher's done
	finished := false
	for {
		select {
		case <-d.cancelCh:
			return errCancel

		case packet := <-deliveryCh:
			// If the peer was previously banned and failed to deliver its pack
			// in a reasonable time frame, ignore its message.
			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
				// Deliver the received chunk of data and check chain validity
				accepted, err := deliver(packet)
				if err == errInvalidChain {
					return err
				}
				// Unless a peer delivered something completely different from what was requested
				// (usually caused by a timed out request which came through in the end), set it to
				// idle. If the delivery's stale, the peer should have already been idled.
				if err != errStaleDelivery {
					setIdle(peer, accepted)
				}
				// Issue a log to the user to see what's going on
				switch {
				case err == nil && packet.Items() == 0:
					peer.log.Trace("Requested data not delivered", "type", kind)
				case err == nil:
					peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
				default:
					peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err)
				}
			}
			// Blocks assembled, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case cont := <-wakeCh:
			// The header fetcher sent a continuation flag, check if it's done
			if !cont {
				finished = true
			}
			// Headers arrive, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-ticker.C:
			// Sanity check update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-update:
			// Short circuit if we lost all our peers
			if d.peers.Len() == 0 {
				return errNoPeers
			}
			// Check for fetch request timeouts and demote the responsible peers
			for pid, fails := range expire() {
				if peer := d.peers.Peer(pid); peer != nil {
					// If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
					// ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal request
					// times out, the peer is stalling and we need to get rid of it.
					//
					// The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth
					// and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
					// how response times react to it, so it always requests one more than the minimum (i.e. min 2).
					if fails > 2 {
						peer.log.Trace("Data delivery timed out", "type", kind)
						setIdle(peer, 0)
					} else {
						peer.log.Debug("Stalling delivery, dropping", "type", kind)
						d.dropPeer(pid)
					}
				}
			}
			// If there's nothing more to fetch, wait or terminate
			if pending() == 0 {
				if !inFlight() && finished {
					log.Debug("Data fetching completed", "type", kind)
					return nil
				}
				break
			}
			// Send a download request to all idle peers, until throttled
			progressed, throttled, running := false, false, inFlight()
			idles, total := idle()

			for _, peer := range idles {
				// Short circuit if throttling activated
				if throttle() {
					throttled = true
					break
				}
				// Short circuit if there is no more available task.
				if pending() == 0 {
					break
				}
				// Reserve a chunk of fetches for a peer. A nil can mean either that
				// no more headers are available, or that the peer is known not to
				// have them.
				request, progress, err := reserve(peer, capacity(peer))
				if err != nil {
					return err
				}
				if progress {
					progressed = true
				}
				if request == nil {
					continue
				}
				if request.From > 0 {
					peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From)
				} else if len(request.Headers) > 0 {
					peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
				} else {
					peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Hashes))
				}
				// Fetch the chunk and make sure any errors return the hashes to the queue
				if fetchHook != nil {
					fetchHook(request.Headers)
				}
				if err := fetch(peer, request); err != nil {
					// Although we could try and make an attempt to fix this, this error really
					// means that we've double allocated a fetch task to a peer. If that is the
					// case, the internal state of the downloader and the queue is very wrong so
					// better hard crash and note the error instead of silently accumulating into
					// a much bigger issue.
					panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
				}
				running = true
			}
			// Make sure that we have peers available for fetching. If all peers have been tried
			// and all failed, throw an error
			if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
				return errPeersUnavailable
			}
		}
	}
}
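
// The expiration branch in fetchParts is easier to read with numbers: a peer
// whose request for, say, 8 bodies timed out shows up in expire() with
// fails = 8, has its throughput estimate reset via setIdle(peer, 0) and stays
// in the pool, while a peer that timed out even on the minimal 2-item probe
// (fails <= 2) is treated as stalling and dropped via d.dropPeer(pid).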

// processHeaders takes batches of retrieved headers from an input channel and
// keeps processing and scheduling them into the header chain and downloader's
// queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, td *big.Int) error {
	// Calculate the pivoting point for switching from fast to slow sync
	pivot := d.queue.FastSyncPivot()

	// Keep a count of uncertain headers to roll back
	rollback := []*types.Header{}
	defer func() {
		if len(rollback) > 0 {
			// Flatten the headers and roll them back
			hashes := make([]common.Hash, len(rollback))
			for i, header := range rollback {
				hashes[i] = header.Hash()
			}
			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
			if d.mode != LightSync {
				lastFastBlock = d.blockchain.CurrentFastBlock().Number()
				lastBlock = d.blockchain.CurrentBlock().Number()
			}
			d.lightchain.Rollback(hashes)
			curFastBlock, curBlock := common.Big0, common.Big0
			if d.mode != LightSync {
				curFastBlock = d.blockchain.CurrentFastBlock().Number()
				curBlock = d.blockchain.CurrentBlock().Number()
			}
			log.Warn("Rolled back headers", "count", len(hashes),
				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock))

			// If we're already past the pivot point, this could be an attack, tread carefully
			if rollback[len(rollback)-1].Number.Uint64() > pivot {
				// If we didn't ever fail, lock in the pivot header (must! not! change!)
				if atomic.LoadUint32(&d.fsPivotFails) == 0 {
					for _, header := range rollback {
						if header.Number.Uint64() == pivot {
							log.Warn("Fast-sync pivot locked in", "number", pivot, "hash", header.Hash())
							d.fsPivotLock = header
						}
					}
				}
			}
		}
	}()

	// Wait for batches of headers to process
	gotHeaders := false

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderProcessing

		case headers := <-d.headerProcCh:
			// Terminate header processing if we synced up
			if len(headers) == 0 {
				// Notify everyone that headers are fully processed
				for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
					select {
					case ch <- false:
					case <-d.cancelCh:
					}
				}
				// If no headers were retrieved at all, the peer violated its TD promise that it had a
				// better chain compared to ours. The only exception is if its promised blocks were
				// already imported by other means (e.g. fetcher):
				//
				// R <remote peer>, L <local node>: Both at block 10
				// R: Mine block 11, and propagate it to L
				// L: Queue block 11 for import
				// L: Notice that R's head and TD increased compared to ours, start sync
				// L: Import of block 11 finishes
				// L: Sync begins, and finds common ancestor at 11
				// L: Request new headers up from 11 (R's TD was higher, it must have something)
				// R: Nothing to give
				if d.mode != LightSync {
					if !gotHeaders && td.Cmp(d.blockchain.GetTdByHash(d.blockchain.CurrentBlock().Hash())) > 0 {
						return errStallingPeer
					}
				}
				// If fast or light syncing, ensure promised headers are indeed delivered. This is
				// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
				// of delivering the post-pivot blocks that would flag the invalid content.
				//
				// This check cannot be executed "as is" for full imports, since blocks may still be
				// queued for processing when the header download completes. However, as long as the
				// peer gave us something useful, we're already happy/progressed (above check).
				if d.mode == FastSync || d.mode == LightSync {
					if td.Cmp(d.lightchain.GetTdByHash(d.lightchain.CurrentHeader().Hash())) > 0 {
						return errStallingPeer
					}
				}
				// Disable any rollback and return
				rollback = nil
				return nil
			}
			// Otherwise split the chunk of headers into batches and process them
			gotHeaders = true

			for len(headers) > 0 {
				// Terminate if something failed in between processing chunks
				select {
				case <-d.cancelCh:
					return errCancelHeaderProcessing
				default:
				}
				// Select the next chunk of headers to import
				limit := maxHeadersProcess
				if limit > len(headers) {
					limit = len(headers)
				}
				chunk := headers[:limit]

				// In case of header only syncing, validate the chunk immediately
				if d.mode == FastSync || d.mode == LightSync {
					// Collect the yet unknown headers to mark them as uncertain
					unknown := make([]*types.Header, 0, len(headers))
					for _, header := range chunk {
						if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
							unknown = append(unknown, header)
						}
					}
					// If we're importing pure headers, verify based on their recentness
					frequency := fsHeaderCheckFrequency
					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
						frequency = 1
					}
					if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
						// If some headers were inserted, add them too to the rollback list
						if n > 0 {
							rollback = append(rollback, chunk[:n]...)
						}
						log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
						return errInvalidChain
					}
					// All verifications passed, store newly found uncertain headers
					rollback = append(rollback, unknown...)
					if len(rollback) > fsHeaderSafetyNet {
						rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
					}
				}
				// If we're fast syncing and just pulled in the pivot, make sure it's the one locked in
				if d.mode == FastSync && d.fsPivotLock != nil && chunk[0].Number.Uint64() <= pivot && chunk[len(chunk)-1].Number.Uint64() >= pivot {
					if pivot := chunk[int(pivot-chunk[0].Number.Uint64())]; pivot.Hash() != d.fsPivotLock.Hash() {
						log.Warn("Pivot doesn't match locked in one", "remoteNumber", pivot.Number, "remoteHash", pivot.Hash(), "localNumber", d.fsPivotLock.Number, "localHash", d.fsPivotLock.Hash())
						return errInvalidChain
					}
				}
				// Unless we're doing light chains, schedule the headers for associated content retrieval
				if d.mode == FullSync || d.mode == FastSync {
					// If we've reached the allowed number of pending headers, stall a bit
					for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
						select {
						case <-d.cancelCh:
							return errCancelHeaderProcessing
						case <-time.After(time.Second):
						}
					}
					// Otherwise insert the headers for content retrieval
					inserts := d.queue.Schedule(chunk, origin)
					if len(inserts) != len(chunk) {
						log.Debug("Stale headers")
						return errBadPeer
					}
				}
				headers = headers[limit:]
				origin += uint64(limit)
			}
			// Signal the content downloaders of the availability of new tasks
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- true:
				default:
				}
			}
		}
	}
}

// processFullSyncContent takes fetch results from the queue and imports them into the chain.
func (d *Downloader) processFullSyncContent() error {
	for {
		results := d.queue.WaitResults()
		if len(results) == 0 {
			return nil
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		if err := d.importBlockResults(results); err != nil {
			return err
		}
	}
}

func (d *Downloader) importBlockResults(results []*fetchResult) error {
	for len(results) != 0 {
		// Check for any termination requests. This makes clean shutdown faster.
		select {
		case <-d.quitCh:
			return errCancelContentProcessing
		default:
		}
		// Retrieve the next batch of results to import
		items := int(math.Min(float64(len(results)), float64(maxResultsProcess)))
		first, last := results[0].Header, results[items-1].Header
		log.Debug("Inserting downloaded chain", "items", len(results),
			"firstnum", first.Number, "firsthash", first.Hash(),
			"lastnum", last.Number, "lasthash", last.Hash(),
		)
		blocks := make([]*types.Block, items)
		for i, result := range results[:items] {
			blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
		}
		if index, err := d.blockchain.InsertChain(blocks); err != nil {
			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
			return errInvalidChain
		}
		// Shift the results to the next batch
		results = results[items:]
	}
	return nil
}
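
// importBlockResults slices its input into batches of at most
// maxResultsProcess = 2048 blocks, so e.g. 5000 queued results become three
// InsertChain calls of 2048, 2048 and 904 blocks, with a quit check between
// batches so a shutdown never waits for the whole backlog:
//
//	items := int(math.Min(float64(len(results)), float64(maxResultsProcess)))
//	blocks := make([]*types.Block, items) // only the current batch is materialised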

// processFastSyncContent takes fetch results from the queue and writes them to the
// database. It also controls the synchronisation of state nodes of the pivot block.
func (d *Downloader) processFastSyncContent(latest *types.Header) error {
	// Start syncing state of the reported head block.
	// This should get us most of the state of the pivot block.
	stateSync := d.syncState(latest.Root)
	defer stateSync.Cancel()
	go func() {
		if err := stateSync.Wait(); err != nil {
			d.queue.Close() // wake up WaitResults
		}
	}()

	pivot := d.queue.FastSyncPivot()
	for {
		results := d.queue.WaitResults()
		if len(results) == 0 {
			return stateSync.Cancel()
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		P, beforeP, afterP := splitAroundPivot(pivot, results)
		if err := d.commitFastSyncData(beforeP, stateSync); err != nil {
			return err
		}
		if P != nil {
			stateSync.Cancel()
			if err := d.commitPivotBlock(P); err != nil {
				return err
			}
		}
		if err := d.importBlockResults(afterP); err != nil {
			return err
		}
	}
}

func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
	for _, result := range results {
		num := result.Header.Number.Uint64()
		switch {
		case num < pivot:
			before = append(before, result)
		case num == pivot:
			p = result
		default:
			after = append(after, result)
		}
	}
	return p, before, after
}

func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
	for len(results) != 0 {
		// Check for any termination requests.
		select {
		case <-d.quitCh:
			return errCancelContentProcessing
		case <-stateSync.done:
			if err := stateSync.Wait(); err != nil {
				return err
			}
		default:
		}
		// Retrieve the next batch of results to import
		items := int(math.Min(float64(len(results)), float64(maxResultsProcess)))
		first, last := results[0].Header, results[items-1].Header
		log.Debug("Inserting fast-sync blocks", "items", len(results),
			"firstnum", first.Number, "firsthash", first.Hash(),
			"lastnum", last.Number, "lasthash", last.Hash(),
		)
		blocks := make([]*types.Block, items)
		receipts := make([]types.Receipts, items)
		for i, result := range results[:items] {
			blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
			receipts[i] = result.Receipts
		}
		if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil {
			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
			return errInvalidChain
		}
		// Shift the results to the next batch
		results = results[items:]
	}
	return nil
}

func (d *Downloader) commitPivotBlock(result *fetchResult) error {
	b := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
	// Sync the pivot block state. This should complete reasonably quickly because
	// we've already synced up to the reported head block state earlier.
	if err := d.syncState(b.Root()).Wait(); err != nil {
		return err
	}
	log.Debug("Committing fast sync pivot as new head", "number", b.Number(), "hash", b.Hash())
	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{b}, []types.Receipts{result.Receipts}); err != nil {
		return err
	}
	return d.blockchain.FastSyncCommitHead(b.Hash())
}
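
// splitAroundPivot, commitFastSyncData and commitPivotBlock together implement
// the fast-sync handoff: results below the pivot are committed with receipts
// only, the pivot block waits for its state download, and everything after it
// goes through full block processing. As a toy example (r8..r11 are placeholder
// fetch results for headers 8..11):
//
//	P, before, after := splitAroundPivot(10, []*fetchResult{r8, r9, r10, r11})
//	// P = r10, before = [r8 r9], after = [r11]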

// DeliverHeaders injects a new batch of block headers received from a remote
// node into the download schedule.
func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
	return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
}

// DeliverBodies injects a new batch of block bodies received from a remote node.
func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
	return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
}

// DeliverReceipts injects a new batch of receipts received from a remote node.
func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
	return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
}

// DeliverNodeData injects a new batch of node state data received from a remote node.
func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
	return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
}

// deliver injects a new batch of data received from a remote node.
func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
	// Update the delivery metrics for both good and failed deliveries
	inMeter.Mark(int64(packet.Items()))
	defer func() {
		if err != nil {
			dropMeter.Mark(int64(packet.Items()))
		}
	}()
	// Deliver or abort if the sync is canceled while queuing
	d.cancelLock.RLock()
	cancel := d.cancelCh
	d.cancelLock.RUnlock()
	if cancel == nil {
		return errNoSyncActive
	}
	select {
	case destCh <- packet:
		return nil
	case <-cancel:
		return errNoSyncActive
	}
}

// qosTuner is the quality of service tuning loop that occasionally gathers the
// peer latency statistics and updates the estimated request round trip time.
func (d *Downloader) qosTuner() {
	for {
		// Retrieve the current median RTT and integrate into the previous target RTT
		rtt := time.Duration(float64(1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
		atomic.StoreUint64(&d.rttEstimate, uint64(rtt))

		// A new RTT cycle passed, increase our confidence in the estimated RTT
		conf := atomic.LoadUint64(&d.rttConfidence)
		conf = conf + (1000000-conf)/2
		atomic.StoreUint64(&d.rttConfidence, conf)

		// Log the new QoS values and sleep until the next RTT
		log.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
		select {
		case <-d.quitCh:
			return
		case <-time.After(rtt):
		}
	}
}
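
// A numeric example of one qosTuner round with qosTuningImpact = 0.25: an old
// estimate of 12s and a median peer RTT of 4s blend into
//
//	rtt = 0.75*12s + 0.25*4s = 10s
//
// while the confidence moves halfway towards certainty each round
// (0.5 -> 0.75 -> 0.875 -> ...), so the TTL computed by requestTTL shrinks as
// both values settle.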

// qosReduceConfidence is meant to be called when a new peer joins the downloader's
// peer set, needing to reduce the confidence we have in our QoS estimates.
func (d *Downloader) qosReduceConfidence() {
	// If we have a single peer, confidence is always 1
	peers := uint64(d.peers.Len())
	if peers == 0 {
		// Ensure peer connectivity races don't catch us off guard
		return
	}
	if peers == 1 {
		atomic.StoreUint64(&d.rttConfidence, 1000000)
		return
	}
	// If we have a ton of peers, don't drop confidence
	if peers >= uint64(qosConfidenceCap) {
		return
	}
	// Otherwise drop the confidence factor
	conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
	if float64(conf)/1000000 < rttMinConfidence {
		conf = uint64(rttMinConfidence * 1000000)
	}
	atomic.StoreUint64(&d.rttConfidence, conf)

	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
	log.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
}

// requestRTT returns the current target round trip time for a download request
// to complete in.
//
// Note, the returned RTT is 0.9 of the actually estimated RTT. The reason is that
// the downloader tries to adapt queries to the RTT, so multiple RTT values can
// be adapted to, but smaller ones are preferred (stabler download stream).
func (d *Downloader) requestRTT() time.Duration {
	return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
}

// requestTTL returns the current timeout allowance for a single download request
// to finish under.
func (d *Downloader) requestTTL() time.Duration {
	var (
		rtt  = time.Duration(atomic.LoadUint64(&d.rttEstimate))
		conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
	)
	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}
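
// As a worked example of the TTL formula above: with an estimated RTT of 6s
// and a confidence of 0.75, requestTTL returns
//
//	ttl = ttlScaling * rtt / conf = 3 * 6s / 0.75 = 24s
//
// whereas early in a session, with confidence near rttMinConfidence = 0.1, the
// same RTT would yield 180s and be clamped to the one minute ttlLimit.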