// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package downloader contains the manual full chain synchronisation.
package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	"github.com/bcskill/bcschain/v3"
	"github.com/bcskill/bcschain/v3/common"
	"github.com/bcskill/bcschain/v3/core"
	"github.com/bcskill/bcschain/v3/core/rawdb"
	"github.com/bcskill/bcschain/v3/core/types"
	"github.com/bcskill/bcschain/v3/log"
	"github.com/bcskill/bcschain/v3/metrics"
	"github.com/bcskill/bcschain/v3/params"
)

var (
	MaxHashFetch    = 512 // Amount of hashes to be fetched per retrieval request
	MaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request
	MaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request
	MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
	MaxBodyFetch    = 128 // Amount of block bodies to be fetched per retrieval request
	MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request
	MaxStateFetch   = 384 // Amount of node state values to allow fetching per request

	MaxForkAncestry  = 3 * params.EpochDuration // Maximum chain reorganisation
	rttMinEstimate   = 2 * time.Second          // Minimum round-trip time to target for download requests
	rttMaxEstimate   = 20 * time.Second         // Maximum round-trip time to target for download requests
	rttMinConfidence = 0.1                      // Worst confidence factor in our estimated RTT value
	ttlScaling       = 3                        // Constant scaling factor for RTT -> TTL conversion
	ttlLimit         = time.Minute              // Maximum TTL allowance to prevent reaching crazy timeouts

	qosTuningPeers   = 5    // Number of peers to tune based on (best peers)
	qosConfidenceCap = 10   // Number of peers above which not to modify RTT confidence
	qosTuningImpact  = 0.25 // Impact that a new tuning target has on the previous value

	maxQueuedHeaders  = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection)
	maxHeadersProcess = 2048      // Number of header download results to import at once into the chain
	maxResultsProcess = 2048      // Number of content download results to import at once into the chain

	reorgProtThreshold   = 48 // Threshold number of recent blocks to disable mini reorg protection
	reorgProtHeaderDelay = 2  // Number of headers to delay delivering to cover mini reorgs

	fsHeaderCheckFrequency = 100             // Verification frequency of the downloaded headers during fast sync
	fsHeaderSafetyNet      = 2048            // Number of headers to discard in case a chain violation is detected
	fsHeaderForceVerify    = 24              // Number of headers to verify before and after the pivot to accept it
	fsHeaderContCheck      = 3 * time.Second // Time interval to check for header continuations during state download
	fsMinFullBlocks        = 64              // Number of blocks to retrieve fully even in fast sync
)
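// Illustrative arithmetic for the QoS knobs above: the RTT estimate is an exponential
// moving average maintained by qosTuner, newRTT = (1-qosTuningImpact)*oldRTT +
// qosTuningImpact*medianRTT, so with qosTuningImpact = 0.25, an old estimate of 2s and
// a measured peer median of 1s, the new estimate becomes 0.75*2s + 0.25*1s = 1.75s.
// Request timeouts are in turn derived from the RTT estimate via ttlScaling and are
// capped at ttlLimit.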
var (
	errBusy                    = errors.New("busy")
	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
	errBadPeer                 = errors.New("action from bad peer ignored")
	errStallingPeer            = errors.New("peer is stalling")
	errNoPeers                 = errors.New("no peers to keep download active")
	errTimeout                 = errors.New("timeout")
	errEmptyHeaderSet          = errors.New("empty header set by peer")
	errPeersUnavailable        = errors.New("no peers available or all tried for download")
	errInvalidAncestor         = errors.New("retrieved ancestor is invalid")
	errInvalidChain            = errors.New("retrieved hash chain is invalid")
	errInvalidBlock            = errors.New("retrieved block is invalid")
	errInvalidBody             = errors.New("retrieved block body is invalid")
	errInvalidReceipt          = errors.New("retrieved receipt is invalid")
	errCancelBlockFetch        = errors.New("block download canceled (requested)")
	errCancelHeaderFetch       = errors.New("block header download canceled (requested)")
	errCancelBodyFetch         = errors.New("block body download canceled (requested)")
	errCancelReceiptFetch      = errors.New("receipt download canceled (requested)")
	errCancelStateFetch        = errors.New("state data download canceled (requested)")
	errCancelHeaderProcessing  = errors.New("header processing canceled (requested)")
	errCancelContentProcessing = errors.New("content processing canceled (requested)")
	errNoSyncActive            = errors.New("no sync active")
	errTooOld                  = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)")
)

type Downloader struct {
	mode SyncMode            // Synchronisation mode defining the strategy used (per sync cycle)
	mux  *core.InterfaceFeed // Event multiplexer to announce sync operation events

	genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT)
	queue   *queue   // Scheduler for selecting the hashes to download
	peers   *peerSet // Set of active peers from which download can proceed
	stateDB common.Database

	rttEstimate   uint64 // Round trip time to target for download requests
	rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops)

	// Statistics
	syncStatsChainOrigin uint64 // Origin block number where syncing started at
	syncStatsChainHeight uint64 // Highest block number known when syncing started
	syncStatsState       stateSyncStats
	syncStatsLock        sync.RWMutex // Lock protecting the sync stats fields

	lightchain LightChain
	blockchain BlockChain

	// Callbacks
	dropPeer peerDropFn // Drops a peer for misbehaving

	// Status
	synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing
	synchronising   int32
	notified        int32
	committed       int32

	// Channels
	headerCh      chan dataPack        // [eth/62] Channel receiving inbound block headers
	bodyCh        chan dataPack        // [eth/62] Channel receiving inbound block bodies
	receiptCh     chan dataPack        // [eth/63] Channel receiving inbound receipts
	bodyWakeCh    chan bool            // [eth/62] Channel to signal the block body fetcher of new tasks
	receiptWakeCh chan bool            // [eth/63] Channel to signal the receipt fetcher of new tasks
	headerProcCh  chan []*types.Header // [eth/62] Channel to feed the header processor new tasks

	// for stateFetcher
	stateSyncStart chan *stateSync
	trackStateReq  chan *stateReq
	stateCh        chan dataPack // [eth/63] Channel receiving inbound node state data

	// Cancellation and termination
	cancelPeer string         // Identifier of the peer currently being used as the master (cancel on drop)
	cancelCh   chan struct{}  // Channel to cancel mid-flight syncs
	cancelLock sync.RWMutex   // Lock to protect the cancel channel and peer in delivers
	cancelWg   sync.WaitGroup // Make sure all fetcher goroutines have exited.

	quitCh   chan struct{} // Quit channel to signal termination
	quitLock sync.RWMutex  // Lock to prevent double closes

	// Testing hooks
	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
}

// LightChain encapsulates functions required to synchronise a light chain.
type LightChain interface {
	// HasHeader verifies a header's presence in the local chain.
	HasHeader(common.Hash, uint64) bool

	// GetHeaderByHash retrieves a header from the local chain.
	GetHeaderByHash(common.Hash) *types.Header

	// CurrentHeader retrieves the head header from the local chain.
	CurrentHeader() *types.Header

	// GetTd returns the total difficulty of a local block.
	GetTd(common.Hash, uint64) *big.Int

	// InsertHeaderChain inserts a batch of headers into the local chain.
	InsertHeaderChain([]*types.Header, int) (int, error)

	// Rollback removes a few recently added elements from the local chain.
	Rollback([]common.Hash)
}

// BlockChain encapsulates functions required to sync a (full or fast) blockchain.
type BlockChain interface {
	LightChain

	// HasBlock verifies a block's presence in the local chain.
	HasBlock(common.Hash, uint64) bool

	// GetBlockByHash retrieves a block from the local chain.
	GetBlockByHash(common.Hash) *types.Block

	// CurrentBlock retrieves the head block from the local chain.
	CurrentBlock() *types.Block

	// CurrentFastBlock retrieves the head fast block from the local chain.
	CurrentFastBlock() *types.Block

	// FastSyncCommitHead directly commits the head block to a certain entity.
	FastSyncCommitHead(common.Hash) error

	// InsertChain inserts a batch of blocks into the local chain.
	InsertChain(types.Blocks) (int, error)

	// InsertReceiptChain inserts a batch of receipts into the local chain.
	InsertReceiptChain(types.Blocks, []types.Receipts) (int, error)
}

// New creates a new downloader to fetch hashes and blocks from remote peers.
func New(mode SyncMode, stateDb common.Database, mux *core.InterfaceFeed, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader {
	if lightchain == nil {
		lightchain = chain
	}

	dl := &Downloader{
		mode:           mode,
		stateDB:        stateDb,
		mux:            mux,
		queue:          newQueue(),
		peers:          newPeerSet(),
		rttEstimate:    uint64(rttMaxEstimate),
		rttConfidence:  uint64(1000000),
		blockchain:     chain,
		lightchain:     lightchain,
		dropPeer:       dropPeer,
		headerCh:       make(chan dataPack, 1),
		bodyCh:         make(chan dataPack, 1),
		receiptCh:      make(chan dataPack, 1),
		bodyWakeCh:     make(chan bool, 1),
		receiptWakeCh:  make(chan bool, 1),
		headerProcCh:   make(chan []*types.Header, 1),
		quitCh:         make(chan struct{}),
		stateCh:        make(chan dataPack),
		stateSyncStart: make(chan *stateSync),
		syncStatsState: stateSyncStats{
			processed: rawdb.ReadFastTrieProgress(stateDb.GlobalTable()),
		},
		trackStateReq: make(chan *stateReq),
	}
	go dl.qosTuner()
	go dl.stateFetcher()
	return dl
}

// Progress retrieves the synchronisation boundaries, specifically the origin
// block where synchronisation started at (may have failed/suspended); the block
// or header sync is currently at; and the latest known block which the sync targets.
//
// In addition, during the state download phase of fast synchronisation the number
// of processed and the total number of known states are also returned. Otherwise
// these are zero.
func (d *Downloader) Progress() gochain.SyncProgress {
	// Lock the current stats and return the progress
	d.syncStatsLock.RLock()
	defer d.syncStatsLock.RUnlock()

	current := uint64(0)
	switch d.mode {
	case FullSync:
		current = d.blockchain.CurrentBlock().NumberU64()
	case FastSync:
		current = d.blockchain.CurrentFastBlock().NumberU64()
	case LightSync:
		current = d.lightchain.CurrentHeader().Number.Uint64()
	}
	return gochain.SyncProgress{
		StartingBlock: d.syncStatsChainOrigin,
		CurrentBlock:  current,
		HighestBlock:  d.syncStatsChainHeight,
		PulledStates:  d.syncStatsState.processed,
		KnownStates:   d.syncStatsState.processed + d.syncStatsState.pending,
	}
}
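// A minimal usage sketch (illustrative only; the variable names are made up): callers
// can turn the reported boundaries into a rough completion percentage.
//
//	p := dl.Progress()
//	if p.HighestBlock > p.StartingBlock {
//		done := float64(p.CurrentBlock-p.StartingBlock) / float64(p.HighestBlock-p.StartingBlock)
//		log.Info("Sync progress", "complete", fmt.Sprintf("%.1f%%", 100*done))
//	}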
// Synchronising returns whether the downloader is currently retrieving blocks.
func (d *Downloader) Synchronising() bool {
	return atomic.LoadInt32(&d.synchronising) > 0
}

// RegisterPeer injects a new download peer into the set of block sources to be
// used for fetching hashes and blocks from.
func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error {
	logger := log.New("peer", id)
	logger.Info("Registering sync peer")
	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
		logger.Error("Failed to register sync peer", "err", err)
		return err
	}
	d.qosReduceConfidence()

	return nil
}

// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error {
	return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
}

// UnregisterPeer removes a peer from the known list, preventing any action from
// the specified peer. An effort is also made to return any pending fetches into
// the queue.
func (d *Downloader) UnregisterPeer(id string) error {
	// Unregister the peer from the active peer set and revoke any fetch tasks
	logger := log.New("peer", id)
	logger.Warn("Unregistering sync peer")
	if err := d.peers.Unregister(id); err != nil {
		logger.Error("Failed to unregister sync peer", "err", err)
		return err
	}
	d.queue.Revoke(id)

	// If this peer was the master peer, abort sync immediately
	d.cancelLock.RLock()
	master := id == d.cancelPeer
	d.cancelLock.RUnlock()

	if master {
		d.cancel()
	}
	return nil
}

// Synchronise tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries.
func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error {
	err := d.synchronise(id, head, td, mode)
	switch err {
	case nil:
	case errBusy:
		log.Debug("Synchronisation already in progress")
	case errTimeout, errBadPeer, errStallingPeer,
		errEmptyHeaderSet, errPeersUnavailable, errTooOld,
		errInvalidAncestor, errInvalidChain:
		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
		if d.dropPeer == nil {
			// The dropPeer method is nil when `--copydb` is used for a local copy.
			// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
			log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
		} else {
			d.dropPeer(id)
		}
	default:
		log.Warn("Synchronisation failed, retrying", "peer", id, "err", err)
	}
	return err
}

// synchronise will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronize if its TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous.
func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error {
	// Mock out the synchronisation if testing
	if d.synchroniseMock != nil {
		return d.synchroniseMock(id, hash)
	}
	// Make sure only one goroutine is ever allowed past this point at once
	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&d.synchronising, 0)

	// Post a user notification of the sync (only once per session)
	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
		log.Info("Block synchronisation started")
	}
	// Reset the queue, peer set and wake channels to clean any internal leftover state
	d.queue.Reset()
	d.peers.Reset()

	for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
		select {
		case <-ch:
		default:
		}
	}
	for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} {
		for empty := false; !empty; {
			select {
			case <-ch:
			default:
				empty = true
			}
		}
	}
	for empty := false; !empty; {
		select {
		case <-d.headerProcCh:
		default:
			empty = true
		}
	}
	// Create cancel channel for aborting mid-flight and mark the master peer
	d.cancelLock.Lock()
	d.cancelCh = make(chan struct{})
	d.cancelPeer = id
	d.cancelLock.Unlock()

	defer d.Cancel() // No matter what, we can't leave the cancel channel open

	// Set the requested sync mode, unless it's forbidden
	d.mode = mode

	// Retrieve the origin peer and initiate the downloading process
	p := d.peers.Peer(id)
	if p == nil {
		return errUnknownPeer
	}
	return d.syncWithPeer(p, hash, td)
}
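// Note on fast sync pivots (illustrative numbers): syncWithPeer below places the pivot
// fsMinFullBlocks (64) blocks below the reported remote head, e.g. a remote head at
// 1_000_000 yields a pivot of 999_936. If the discovered common ancestor sits at or
// above that pivot, the download origin is pulled back to pivot-1 so the pivot block
// itself is always among the scheduled results.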
// syncWithPeer starts a block synchronization based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) {
	d.mux.Send(StartEvent{})
	defer func() {
		// reset on error
		if err != nil {
			d.mux.Send(FailedEvent{err})
		} else {
			d.mux.Send(DoneEvent{})
		}
	}()
	if p.version < 62 {
		return errTooOld
	}

	log.Info("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode)
	defer func(start time.Time) {
		log.Info("Synchronisation terminated", "peer", p.id, "elapsed", time.Since(start), "err", err)
	}(time.Now())

	// Look up the sync boundaries: the common ancestor and the target block
	latest, err := d.fetchHeight(p)
	if err != nil {
		return err
	}
	height := latest.Number.Uint64()

	origin, err := d.findAncestor(p, height)
	if err != nil {
		return err
	}
	d.syncStatsLock.Lock()
	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
		d.syncStatsChainOrigin = origin
	}
	d.syncStatsChainHeight = height
	d.syncStatsLock.Unlock()

	// Ensure our origin point is below any fast sync pivot point
	pivot := uint64(0)
	if d.mode == FastSync {
		if height <= uint64(fsMinFullBlocks) {
			origin = 0
		} else {
			pivot = height - uint64(fsMinFullBlocks)
			if pivot <= origin {
				origin = pivot - 1
			}
		}
	}
	d.committed = 1
	if d.mode == FastSync && pivot != 0 {
		d.committed = 0
	}
	// Initiate the sync using a concurrent header and content retrieval algorithm
	d.queue.Prepare(origin+1, d.mode)
	if d.syncInitHook != nil {
		d.syncInitHook(origin, height)
	}

	fetchers := []func() error{
		func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved
		func() error { return d.fetchBodies(origin + 1) },          // Bodies are retrieved during normal and fast sync
		func() error { return d.fetchReceipts(origin + 1) },        // Receipts are retrieved during fast sync
		func() error { return d.processHeaders(origin+1, pivot, td) },
	}
	if d.mode == FastSync {
		fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) })
	} else if d.mode == FullSync {
		fetchers = append(fetchers, d.processFullSyncContent)
	}
	return d.spawnSync(fetchers)
}

// spawnSync runs d.process and all given fetcher functions to completion in
// separate goroutines, returning the first error that appears.
func (d *Downloader) spawnSync(fetchers []func() error) error {
	errc := make(chan error, len(fetchers))
	d.cancelWg.Add(len(fetchers))
	for _, fn := range fetchers {
		fn := fn
		go func() { defer d.cancelWg.Done(); errc <- fn() }()
	}
	// Wait for the first error, then terminate the others.
	var err error
	for i := 0; i < len(fetchers); i++ {
		if i == len(fetchers)-1 {
			// Close the queue when all fetchers have exited.
			// This will cause the block processor to end when
			// it has processed the queue.
			d.queue.Close()
		}
		if err = <-errc; err != nil {
			break
		}
	}
	d.queue.Close()
	d.Cancel()
	return err
}

// cancel aborts all of the operations and resets the queue. However, cancel does
// not wait for the running download goroutines to finish. This method should be
// used when cancelling the downloads from inside the downloader.
func (d *Downloader) cancel() {
	// Close the current cancel channel
	d.cancelLock.Lock()
	if d.cancelCh != nil {
		select {
		case <-d.cancelCh:
			// Channel was already closed
		default:
			close(d.cancelCh)
		}
	}
	d.cancelLock.Unlock()
}

// Cancel aborts all of the operations and waits for all download goroutines to
// finish before returning.
func (d *Downloader) Cancel() {
	d.cancel()
	d.cancelWg.Wait()
}

// Terminate interrupts the downloader, canceling all pending operations.
// The downloader cannot be reused after calling Terminate.
func (d *Downloader) Terminate() {
	// Close the termination channel (make sure double close is allowed)
	d.quitLock.Lock()
	select {
	case <-d.quitCh:
	default:
		close(d.quitCh)
	}
	d.quitLock.Unlock()

	// Cancel any pending download requests
	d.Cancel()
}

// fetchHeight retrieves the head header of the remote peer to aid in estimating
// the total time a pending synchronisation would take.
func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) {
	p.log.Debug("Retrieving remote chain height")

	// Request the advertised remote head block and wait for the response
	head, _ := p.peer.Head()
	go p.peer.RequestHeadersByHash(head, 1, 0, false)

	ttl := d.requestTTL()
	timeout := time.After(ttl)
	for {
		select {
		case <-d.cancelCh:
			return nil, errCancelBlockFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) != 1 {
				p.log.Debug("Multiple headers for single request", "headers", len(headers))
				return nil, errBadPeer
			}
			head := headers[0]
			p.log.Info("Remote head header identified", "number", head.Number, "hash", head.Hash())
			return head, nil

		case <-timeout:
			p.log.Warn("Waiting for head header timed out", "elapsed", ttl)
			return nil, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
}
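// A worked example of the ancestor probe in findAncestor below (illustrative numbers
// only): with MaxHeaderFetch = 192, a local full-sync head at 10_000 and a remote head
// reported at 9_900, the probe starts at from = 9_900-192 = 9_708 and asks for
// 1+(10_000-9_708)/16 = 19 headers spaced 16 blocks apart (capped at 2*192/16 = 24),
// i.e. heights 9_708, 9_724, ..., 9_996. The request deliberately spans past the
// remote's claimed head so obviously bad head reports can be detected.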
// findAncestor tries to locate the common ancestor link of the local chain and
// a remote peer's blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N links should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) {
	// Figure out the valid ancestor range to prevent rewrite attacks
	floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64()

	if d.mode == FullSync {
		ceil = d.blockchain.CurrentBlock().NumberU64()
	} else if d.mode == FastSync {
		ceil = d.blockchain.CurrentFastBlock().NumberU64()
	}
	if ceil >= MaxForkAncestry {
		floor = int64(ceil - MaxForkAncestry)
	}
	p.log.Info("Looking for common ancestor", "local", ceil, "remote", height)

	// Request the topmost blocks to short circuit binary ancestor lookup
	head := ceil
	if head > height {
		head = height
	}
	from := int64(head) - int64(MaxHeaderFetch)
	if from < 0 {
		from = 0
	}
	// Span out with 15 block gaps into the future to catch bad head reports
	limit := 2 * MaxHeaderFetch / 16
	count := 1 + int((int64(ceil)-from)/16)
	if count > limit {
		count = limit
	}
	go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false)

	// Wait for the remote response to the head fetch
	number, hash := uint64(0), common.Hash{}

	ttl := d.requestTTL()
	timeout := time.After(ttl)

	for finished := false; !finished; {
		select {
		case <-d.cancelCh:
			return 0, errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Discard anything not from the origin peer
			if packet.PeerId() != p.id {
				log.Debug("Received headers from incorrect peer", "peer", packet.PeerId())
				break
			}
			// Make sure the peer actually gave something valid
			headers := packet.(*headerPack).headers
			if len(headers) == 0 {
				p.log.Warn("Empty head header set")
				return 0, errEmptyHeaderSet
			}
			// Make sure the peer's reply conforms to the request
			for i := 0; i < len(headers); i++ {
				if number := headers[i].Number.Int64(); number != from+int64(i)*16 {
					p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number)
					return 0, errInvalidChain
				}
			}
			// Check if a common ancestor was found
			finished = true
			for i := len(headers) - 1; i >= 0; i-- {
				// Skip any headers that underflow/overflow our requested set
				if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil {
					continue
				}
				// Otherwise check if we already know the header or not
				h := headers[i].Hash()
				n := headers[i].Number.Uint64()
				if (d.mode == FullSync && d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && d.lightchain.HasHeader(h, n)) {
					number, hash = n, h

					// If every header is known, even future ones, the peer straight out lied about its head
					if number > height && i == limit-1 {
						p.log.Warn("Lied about chain head", "reported", height, "found", number)
						return 0, errStallingPeer
					}
					break
				}
			}

		case <-timeout:
			p.log.Warn("Waiting for head header timed out", "elapsed", ttl)
			return 0, errTimeout

		case <-d.bodyCh:
		case <-d.receiptCh:
			// Out of bounds delivery, ignore
		}
	}
	// If the head fetch already found an ancestor, return
	if hash != (common.Hash{}) {
		if int64(number) <= floor {
			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
			return 0, errInvalidAncestor
		}
		p.log.Info("Found common ancestor", "number", number, "hash", hash)
		return number, nil
	}
	// Ancestor not found, we need to binary search over our chain
	start, end := uint64(0), head
	if floor > 0 {
		start = uint64(floor)
	}
	for start+1 < end {
		// Split our chain interval in two, and request the hash to cross check
		check := (start + end) / 2

		ttl := d.requestTTL()
		timeout := time.After(ttl)

		go p.peer.RequestHeadersByNumber(check, 1, 0, false)

		// Wait until a reply arrives to this request
		for arrived := false; !arrived; {
			select {
			case <-d.cancelCh:
				return 0, errCancelHeaderFetch

			case packer := <-d.headerCh:
				// Discard anything not from the origin peer
				if packer.PeerId() != p.id {
					log.Debug("Received headers from incorrect peer", "peer", packer.PeerId())
					break
				}
				// Make sure the peer actually gave something valid
				headers := packer.(*headerPack).headers
				if len(headers) != 1 {
					p.log.Warn("Multiple headers for single request", "headers", len(headers))
					return 0, errBadPeer
				}
				arrived = true

				// Modify the search interval based on the response
				h := headers[0].Hash()
				n := headers[0].Number.Uint64()
				if (d.mode == FullSync && !d.blockchain.HasBlock(h, n)) || (d.mode != FullSync && !d.lightchain.HasHeader(h, n)) {
					end = check
					break
				}
				header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
				if header.Number.Uint64() != check {
					p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
					return 0, errBadPeer
				}
				start = check

			case <-timeout:
				p.log.Warn("Waiting for search header timed out", "elapsed", ttl)
				return 0, errTimeout

			case <-d.bodyCh:
			case <-d.receiptCh:
				// Out of bounds delivery, ignore
			}
		}
	}
	// Ensure valid ancestry and return
	if int64(start) <= floor {
		p.log.Info("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
		return 0, errInvalidAncestor
	}
	p.log.Info("Found common ancestor", "number", start, "hash", hash)
	return start, nil
}
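// The header retrieval below proceeds in skeleton strides: the origin peer is asked
// for every MaxHeaderFetch-th header only (up to MaxSkeletonSize of those "joints"),
// and fillHeaderSkeleton has the remaining peers fill the gaps in between concurrently.
// Illustrative numbers: with MaxHeaderFetch = 192 and MaxSkeletonSize = 128, a single
// skeleton round can cover up to 128*192 = 24,576 headers.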
// fetchHeaders keeps retrieving headers concurrently from the number
// requested, until no more are returned, potentially throttling on the way. To
// facilitate concurrency but still protect against malicious nodes sending bad
// headers, we construct a header chain skeleton using the "origin" peer we are
// syncing with, and fill in the missing headers using anyone else. Headers from
// other peers are only accepted if they map cleanly to the skeleton. If no one
// can fill in the skeleton - not even the origin peer - it's assumed invalid and
// the origin is dropped.
func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64) error {
	p.log.Info("Directing header downloads", "origin", from)
	defer p.log.Info("Header download terminated")

	// Create a timeout timer, and the associated header fetcher
	skeleton := true            // Skeleton assembly phase or finishing up
	request := time.Now()       // time of the last skeleton fetch request
	timeout := time.NewTimer(0) // timer to dump a non-responsive active peer
	<-timeout.C                 // timeout channel should be initially empty
	defer timeout.Stop()

	var ttl time.Duration
	getHeaders := func(from uint64) {
		request = time.Now()

		ttl = d.requestTTL()
		timeout.Reset(ttl)

		if skeleton {
			p.log.Info("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)
		} else {
			p.log.Info("Fetching full headers", "count", MaxHeaderFetch, "from", from)
			go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false)
		}
	}
	// Start pulling the header chain skeleton until all is done
	getHeaders(from)

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderFetch

		case packet := <-d.headerCh:
			// Make sure the active peer is giving us the skeleton headers
			if packet.PeerId() != p.id {
				log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId())
				break
			}
			headerReqTimer.UpdateSince(request)
			timeout.Stop()

			// If the skeleton's finished, pull any remaining head headers directly from the origin
			if packet.Items() == 0 && skeleton {
				skeleton = false
				getHeaders(from)
				continue
			}
			// If no more headers are inbound, notify the content fetchers and return
			if packet.Items() == 0 {
				// Don't abort header fetches while the pivot is downloading
				if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
					p.log.Debug("No headers, waiting for pivot commit")
					select {
					case <-time.After(fsHeaderContCheck):
						getHeaders(from)
						continue
					case <-d.cancelCh:
						return errCancelHeaderFetch
					}
				}
				// Pivot done (or not in fast sync) and no more headers, terminate the process
				p.log.Debug("No more headers available")
				select {
				case d.headerProcCh <- nil:
					return nil
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}
			headers := packet.(*headerPack).headers

			// If we received a skeleton batch, resolve internals concurrently
			if skeleton {
				filled, proced, err := d.fillHeaderSkeleton(from, headers)
				if err != nil {
					p.log.Warn("Skeleton chain invalid", "err", err)
					return errInvalidChain
				}
				headers = filled[proced:]
				from += uint64(proced)
			} else {
				// If we're closing in on the chain head, but haven't yet reached it, delay
				// the last few headers so mini reorgs on the head don't cause invalid hash
				// chain errors.
				if n := len(headers); n > 0 {
					// Retrieve the current head we're at
					head := uint64(0)
					if d.mode == LightSync {
						head = d.lightchain.CurrentHeader().Number.Uint64()
					} else {
						head = d.blockchain.CurrentFastBlock().NumberU64()
						if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
							head = full
						}
					}
					// If the head is way older than this batch, delay the last few headers
					if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
						delay := reorgProtHeaderDelay
						if delay > n {
							delay = n
						}
						headers = headers[:n-delay]
					}
				}
			}
			// Insert all the new headers and fetch the next batch
			if len(headers) > 0 {
				p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
				select {
				case d.headerProcCh <- headers:
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
				from += uint64(len(headers))
				getHeaders(from)
			} else {
				// No headers delivered, or all of them being delayed, sleep a bit and retry
				p.log.Trace("All headers delayed, waiting")
				select {
				case <-time.After(fsHeaderContCheck):
					getHeaders(from)
					continue
				case <-d.cancelCh:
					return errCancelHeaderFetch
				}
			}

		case <-timeout.C:
			if d.dropPeer == nil {
				// The dropPeer method is nil when `--copydb` is used for a local copy.
				// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
				p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id)
				break
			}
			// Header retrieval timed out, consider the peer bad and drop
			p.log.Warn("Header request timed out", "elapsed", ttl)
			headerTimeoutMeter.Mark(1)
			d.dropPeer(p.id)

			// Finish the sync gracefully instead of dumping the gathered data though
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- false:
				case <-d.cancelCh:
				}
			}
			select {
			case d.headerProcCh <- nil:
			case <-d.cancelCh:
			}
			return errBadPeer
		}
	}
}

// fillHeaderSkeleton concurrently retrieves headers from all our available peers
// and maps them to the provided skeleton header chain.
//
// Any partial results from the beginning of the skeleton are (if possible) forwarded
// immediately to the header processor to keep the rest of the pipeline full even
// in the case of header stalls.
//
// The method returns the entire filled skeleton and also the number of headers
// already forwarded for processing.
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) {
	log.Info("Filling up skeleton", "from", from)
	d.queue.ScheduleSkeleton(from, skeleton)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*headerPack)
			return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh)
		}
		expire   = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) }
		throttle = func() bool { return false }
		reserve  = func(p *peerConnection, count int) (*fetchRequest, bool, error) {
			return d.queue.ReserveHeaders(p, count), false, nil
		}
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) }
		capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) }
	)
	err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire,
		d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve,
		nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers")

	log.Info("Skeleton fill terminated", "err", err)

	filled, proced := d.queue.RetrieveHeaders()
	return filled, proced, err
}

// fetchBodies iteratively downloads the scheduled block bodies, taking any
// available peers, reserving a chunk of blocks for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchBodies(from uint64) error {
	log.Info("Downloading block bodies", "origin", from)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*bodyPack)
			return d.queue.DeliverBodies(pack.peerId, pack.transactions)
		}
		expire   = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) }
		fetch    = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) }
		capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) }
	)
	err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire,
		d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies,
		d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies")

	log.Info("Block body download terminated", "err", err)
	return err
}

// fetchReceipts iteratively downloads the scheduled block receipts, taking any
// available peers, reserving a chunk of receipts for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchReceipts(from uint64) error {
	log.Info("Downloading transaction receipts", "origin", from)

	var (
		deliver = func(packet dataPack) (int, error) {
			pack := packet.(*receiptPack)
			return d.queue.DeliverReceipts(pack.peerId, pack.receipts)
		}
		expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) }
		fetch  = func(p *peerConnection, req *fetchRequest) error {
			return p.FetchReceipts(req)
		}
		capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) }
		setIdle  = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) }
	)
	err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire,
		d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts,
		d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts")
	log.Info("Transaction receipt download terminated", "err", err)
	return err
}

// fetchParts iteratively downloads scheduled block parts, taking any available
// peers, reserving a chunk of fetch requests for each, waiting for delivery and
// also periodically checking for timeouts.
//
// As the scheduling/timeout logic mostly is the same for all downloaded data
// types, this method is used by each for data gathering and is instrumented with
// various callbacks to handle the slight differences between processing them.
//
// The instrumentation parameters:
//  - errCancel:  error type to return if the fetch operation is cancelled (mostly makes logging nicer)
//  - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers)
//  - deliver:    processing callback to deliver data packets into type specific download queues (usually within `queue`)
//  - wakeCh:     notification channel for waking the fetcher when new tasks are available (or sync completed)
//  - expire:     task callback method to abort requests that took too long and return the faulty peers (traffic shaping)
//  - pending:    task callback for the number of requests still needing download (detect completion/non-completability)
//  - inFlight:   task callback for the number of in-progress requests (wait for all active downloads to finish)
//  - throttle:   task callback to check if the processing queue is full and activate throttling (bound memory use)
//  - reserve:    task callback to reserve new download tasks to a particular peer (also signals partial completions)
//  - fetchHook:  tester callback to notify of new tasks being initiated (allows testing the scheduling logic)
//  - fetch:      network callback to actually send a particular download request to a physical remote peer
//  - cancel:     task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer)
//  - capacity:   network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping)
//  - idle:       network callback to retrieve the currently (type specific) idle peers that can be assigned tasks
//  - setIdle:    network callback to set a peer back to idle and update its estimated capacity (traffic shaping)
//  - kind:       textual label of the type being downloaded to display in log messages
func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool,
	expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error),
	fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int,
	idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error {

	// Create a ticker to detect expired retrieval tasks
	ticker := time.NewTicker(100 * time.Millisecond)
	defer ticker.Stop()

	update := make(chan struct{}, 1)

	// Prepare the queue and fetch block parts until the block header fetcher's done
	finished := false
	for {
		select {
		case <-d.cancelCh:
			return errCancel

		case packet := <-deliveryCh:
			// If the peer was previously banned and failed to deliver its pack
			// in a reasonable time frame, ignore its message.
			if peer := d.peers.Peer(packet.PeerId()); peer != nil {
				// Deliver the received chunk of data and check chain validity
				accepted, err := deliver(packet)
				if err == errInvalidChain {
					return err
				}
				// Unless a peer delivered something completely different from what was requested
				// (usually caused by a timed out request which came through in the end), set it to
				// idle. If the delivery's stale, the peer should have already been idled.
				if err != errStaleDelivery {
					setIdle(peer, accepted)
				}
				// Issue a log to the user to see what's going on
				switch {
				case err == nil && packet.Items() == 0:
					peer.log.Warn("Requested data not delivered", "type", kind)
				case err == nil:
					peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats())
				default:
					peer.log.Warn("Failed to deliver retrieved data", "type", kind, "err", err)
				}
			}
			// Blocks assembled, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case cont := <-wakeCh:
			// The header fetcher sent a continuation flag, check if it's done
			if !cont {
				finished = true
			}
			// Headers arrive, try to update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-ticker.C:
			// Sanity check update the progress
			select {
			case update <- struct{}{}:
			default:
			}

		case <-update:
			// Short circuit if we lost all our peers
			if d.peers.Len() == 0 {
				return errNoPeers
			}
			// Check for fetch request timeouts and demote the responsible peers
			for pid, fails := range expire() {
				if peer := d.peers.Peer(pid); peer != nil {
					// If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps
					// ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal rate
					// times out, then sync-wise we need to get rid of the peer.
					//
					// The reason the minimum threshold is 2 is that the downloader tries to estimate the bandwidth
					// and latency of a peer separately, which requires pushing the measured capacity a bit and seeing
					// how response times react to it, so it always requests one more than the minimum (i.e. min 2).
					if fails > 2 {
						peer.log.Warn("Data delivery timed out", "type", kind)
						setIdle(peer, 0)
					} else {
						peer.log.Warn("Stalling delivery, dropping", "type", kind)
						if d.dropPeer == nil {
							// The dropPeer method is nil when `--copydb` is used for a local copy.
							// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
							peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid)
						} else {
							d.dropPeer(pid)
						}
					}
				}
			}
			// If there's nothing more to fetch, wait or terminate
			if pending() == 0 {
				if !inFlight() && finished {
					log.Debug("Data fetching completed", "type", kind)
					return nil
				}
				break
			}
			// Send a download request to all idle peers, until throttled
			progressed, throttled, running := false, false, inFlight()
			idles, total := idle()

			for _, peer := range idles {
				// Short circuit if throttling activated
				if throttle() {
					throttled = true
					break
				}
				// Short circuit if there is no more available task.
				if pending() == 0 {
					break
				}
				// Reserve a chunk of fetches for a peer. A nil can mean either that
				// no more headers are available, or that the peer is known not to
				// have them.
				request, progress, err := reserve(peer, capacity(peer))
				if err != nil {
					return err
				}
				if progress {
					progressed = true
				}
				if request == nil {
					continue
				}
				if request.From > 0 {
					peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From)
				} else {
					peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number)
				}
				// Fetch the chunk and make sure any errors return the hashes to the queue
				if fetchHook != nil {
					fetchHook(request.Headers)
				}
				if err := fetch(peer, request); err != nil {
					// Although we could try and make an attempt to fix this, this error really
					// means that we've double allocated a fetch task to a peer. If that is the
					// case, the internal state of the downloader and the queue is very wrong so
					// better hard crash and note the error instead of silently accumulating into
					// a much bigger issue.
					panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind))
				}
				running = true
			}
			// Make sure that we have peers available for fetching. If all peers have been tried
			// and all failed throw an error
			if !progressed && !throttled && !running && len(idles) == total && pending() > 0 {
				return errPeersUnavailable
			}
		}
	}
}
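// processHeaders below keeps a rollback list of headers that were inserted but whose
// associated content has not yet been verified. The list is capped at fsHeaderSafetyNet
// (2048) entries; if a chain violation is detected later, those uncertain headers are
// unwound again via lightchain.Rollback (the deferred handler logs how far the header,
// fast and full chains moved back).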
// processHeaders takes batches of retrieved headers from an input channel and
// keeps processing and scheduling them into the header chain and downloader's
// queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error {
	// Keep a count of uncertain headers to roll back
	rollback := []*types.Header{}
	defer func() {
		if len(rollback) > 0 {
			// Flatten the headers and roll them back
			hashes := make([]common.Hash, len(rollback))
			for i, header := range rollback {
				hashes[i] = header.Hash()
			}
			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
			if d.mode != LightSync {
				lastFastBlock = d.blockchain.CurrentFastBlock().Number()
				lastBlock = d.blockchain.CurrentBlock().Number()
			}
			d.lightchain.Rollback(hashes)
			curFastBlock, curBlock := common.Big0, common.Big0
			if d.mode != LightSync {
				curFastBlock = d.blockchain.CurrentFastBlock().Number()
				curBlock = d.blockchain.CurrentBlock().Number()
			}
			log.Warn("Rolled back headers", "count", len(hashes),
				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
				"fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock))
		}
	}()

	// Wait for batches of headers to process
	gotHeaders := false

	for {
		select {
		case <-d.cancelCh:
			return errCancelHeaderProcessing

		case headers := <-d.headerProcCh:
			// Terminate header processing if we synced up
			if len(headers) == 0 {
				// Notify everyone that headers are fully processed
				for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
					select {
					case ch <- false:
					case <-d.cancelCh:
					}
				}
				// If no headers were retrieved at all, the peer violated its TD promise that it had a
				// better chain compared to ours. The only exception is if its promised blocks were
				// already imported by other means (e.g. fetcher):
				//
				// R <remote peer>, L <local node>: Both at block 10
				// R: Mine block 11, and propagate it to L
				// L: Queue block 11 for import
				// L: Notice that R's head and TD increased compared to ours, start sync
				// L: Import of block 11 finishes
				// L: Sync begins, and finds common ancestor at 11
				// L: Request new headers up from 11 (R's TD was higher, it must have something)
				// R: Nothing to give
				if d.mode != LightSync {
					head := d.blockchain.CurrentBlock()
					if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 {
						return errStallingPeer
					}
				}
				// If fast or light syncing, ensure promised headers are indeed delivered. This is
				// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
				// of delivering the post-pivot blocks that would flag the invalid content.
				//
				// This check cannot be executed "as is" for full imports, since blocks may still be
				// queued for processing when the header download completes. However, as long as the
				// peer gave us something useful, we're already happy/progressed (above check).
				if d.mode == FastSync || d.mode == LightSync {
					head := d.lightchain.CurrentHeader()
					if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
						return errStallingPeer
					}
				}
				// Disable any rollback and return
				rollback = nil
				return nil
			}
			// Otherwise split the chunk of headers into batches and process them
			gotHeaders = true

			for len(headers) > 0 {
				// Terminate if something failed in between processing chunks
				select {
				case <-d.cancelCh:
					return errCancelHeaderProcessing
				default:
				}
				// Select the next chunk of headers to import
				limit := maxHeadersProcess
				if limit > len(headers) {
					limit = len(headers)
				}
				chunk := headers[:limit]

				// In case of header only syncing, validate the chunk immediately
				if d.mode == FastSync || d.mode == LightSync {
					// Collect the yet unknown headers to mark them as uncertain
					unknown := make([]*types.Header, 0, len(headers))
					for _, header := range chunk {
						if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) {
							unknown = append(unknown, header)
						}
					}
					// If we're importing pure headers, verify based on their recentness
					frequency := fsHeaderCheckFrequency
					if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
						frequency = 1
					}
					if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil {
						// If some headers were inserted, add them too to the rollback list
						if n > 0 {
							rollback = append(rollback, chunk[:n]...)
						}
						log.Error("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err)
						return errInvalidChain
					}
					// All verifications passed, store newly found uncertain headers
					rollback = append(rollback, unknown...)
					if len(rollback) > fsHeaderSafetyNet {
						rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
					}
				}
				// Unless we're doing light chains, schedule the headers for associated content retrieval
				if d.mode == FullSync || d.mode == FastSync {
					// If we've reached the allowed number of pending headers, stall a bit
					for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
						select {
						case <-d.cancelCh:
							return errCancelHeaderProcessing
						case <-time.After(time.Second):
						}
					}
					// Otherwise insert the headers for content retrieval
					inserts := d.queue.Schedule(chunk, origin)
					if len(inserts) != len(chunk) {
						log.Debug("Stale headers")
						return errBadPeer
					}
				}
				headers = headers[limit:]
				origin += uint64(limit)
			}

			// Update the highest block number we know if a higher one is found.
			d.syncStatsLock.Lock()
			if d.syncStatsChainHeight < origin {
				d.syncStatsChainHeight = origin - 1
			}
			d.syncStatsLock.Unlock()

			// Signal the content downloaders of the availability of new tasks
			for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} {
				select {
				case ch <- true:
				default:
				}
			}
		}
	}
}

// processFullSyncContent takes fetch results from the queue and imports them into the chain.
func (d *Downloader) processFullSyncContent() error {
	for {
		results := d.queue.Results(true)
		if len(results) == 0 {
			return nil
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		if err := d.importBlockResults(results); err != nil {
			return err
		}
	}
}

func (d *Downloader) importBlockResults(results []*fetchResult) error {
	// Check for any early termination requests
	if len(results) == 0 {
		return nil
	}
	select {
	case <-d.quitCh:
		return errCancelContentProcessing
	default:
	}
	// Retrieve a batch of results to import
	first, last := results[0].Header, results[len(results)-1].Header
	log.Info("Inserting downloaded chain", "items", len(results),
		"firstnum", first.Number, "firsthash", first.Hash(),
		"lastnum", last.Number, "lasthash", last.Hash(),
	)
	blocks := make([]*types.Block, len(results))
	for i, result := range results {
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
	}
	if index, err := d.blockchain.InsertChain(blocks); err != nil {
		log.Info("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
		return errInvalidChain
	}
	return nil
}
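// An overview of processFastSyncContent below (descriptive only): bodies and receipts
// up to the pivot are committed directly via InsertReceiptChain while the pivot block's
// state trie is fetched by the state syncer; once the pivot is committed (d.committed
// flips to 1), everything after it is handed to importBlockResults for a regular full
// import. If the chain head moves far enough while state sync is running, the pivot
// itself is moved forward and the state sync restarted at the new root.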

// processFastSyncContent takes fetch results from the queue and writes them to the
// database. It also controls the synchronisation of state nodes of the pivot block.
func (d *Downloader) processFastSyncContent(latest *types.Header) error {
	// Start syncing state of the reported head block. This should get us most of
	// the state of the pivot block.
	stateSync := d.syncState(latest.Root)
	defer stateSync.Cancel()
	done := make(chan struct{})
	go func() {
		if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
			d.queue.Close() // wake up WaitResults
		}
		close(done)
	}()
	// Figure out the ideal pivot block. Note that this goalpost may move if the
	// sync takes long enough for the chain head to move significantly.
	pivot := uint64(0)
	if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) {
		pivot = height - uint64(fsMinFullBlocks)
	}
	// To cater for moving pivot points, track the pivot block and subsequently
	// accumulated download results separately.
	var (
		oldPivot *fetchResult   // Locked in pivot block, might change eventually
		oldTail  []*fetchResult // Downloaded content after the pivot
	)
	for {
		// Wait for the next batch of downloaded data to be available, and if the pivot
		// block became stale, move the goalpost
		results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
		if len(results) == 0 {
			// If pivot sync is done, stop
			if oldPivot == nil {
				return stateSync.Cancel()
			}
			// If sync failed, stop
			select {
			case <-d.cancelCh:
				return stateSync.Cancel()
			default:
			}
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		if oldPivot != nil {
			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
		}
		// Split around the pivot block and process the two sides via fast/full sync
		if atomic.LoadInt32(&d.committed) == 0 {
			latest = results[len(results)-1].Header
			if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) {
				log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks))
				pivot = height - uint64(fsMinFullBlocks)
			}
		}
		P, beforeP, afterP := splitAroundPivot(pivot, results)
		if err := d.commitFastSyncData(beforeP, stateSync); err != nil {
			return err
		}
		if P != nil {
			// If new pivot block found, cancel old state retrieval and restart
			if oldPivot != P {
				stateSync.Cancel()
				<-done // Wait for cancel to be received.
				stateSync = d.syncState(P.Header.Root)
				defer stateSync.Cancel()
				go func() {
					if err := stateSync.Wait(); err != nil && err != errCancelStateFetch {
						d.queue.Close() // wake up WaitResults
					}
				}()
				oldPivot = P
			}
			// Wait for completion, occasionally checking for pivot staleness
			select {
			case <-stateSync.done:
				if stateSync.err != nil {
					return stateSync.err
				}
				if err := d.commitPivotBlock(P); err != nil {
					return err
				}
				oldPivot = nil

			case <-time.After(time.Second):
				oldTail = afterP
				continue
			}
		}
		// Fast sync done, pivot commit done, full import
		if err := d.importBlockResults(afterP); err != nil {
			return err
		}
	}
}

func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
	for _, result := range results {
		num := result.Header.Number.Uint64()
		switch {
		case num < pivot:
			before = append(before, result)
		case num == pivot:
			p = result
		default:
			after = append(after, result)
		}
	}
	return p, before, after
}

func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
	// Check for any early termination requests
	if len(results) == 0 {
		return nil
	}
	select {
	case <-d.quitCh:
		return errCancelContentProcessing
	case <-stateSync.done:
		if err := stateSync.Wait(); err != nil {
			return err
		}
	default:
	}
	// Retrieve a batch of results to import
	first, last := results[0].Header, results[len(results)-1].Header
	log.Info("Inserting fast-sync blocks", "items", len(results),
		"firstnum", first.Number, "firsthash", first.Hash(),
		"lastnum", last.Number, "lasthash", last.Hash(),
	)
	blocks := make([]*types.Block, len(results))
	receipts := make([]types.Receipts, len(results))
	for i, result := range results {
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
		receipts[i] = result.Receipts
	}
	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil {
		log.Error("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
		return errInvalidChain
	}
	return nil
}
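
As a quick usage illustration of splitAroundPivot above: the partition rule applied to bare block numbers. splitNumbers is an illustrative stand-in, not part of this package; "before" is what commitFastSyncData receives, "after" is what importBlockResults receives, and the pivot itself is committed by commitPivotBlock.

package main

import "fmt"

// splitNumbers reproduces the partitioning of splitAroundPivot on plain
// block numbers instead of fetch results.
func splitNumbers(pivot uint64, nums []uint64) (p uint64, before, after []uint64) {
	for _, num := range nums {
		switch {
		case num < pivot:
			before = append(before, num)
		case num == pivot:
			p = num
		default:
			after = append(after, num)
		}
	}
	return p, before, after
}

func main() {
	p, before, after := splitNumbers(100, []uint64{97, 98, 99, 100, 101, 102})
	fmt.Println(p, before, after) // 100 [97 98 99] [101 102]
}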

func (d *Downloader) commitPivotBlock(result *fetchResult) error {
	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
	log.Info("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}); err != nil {
		return err
	}
	if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
		return err
	}
	atomic.StoreInt32(&d.committed, 1)
	return nil
}

// DeliverHeaders injects a new batch of block headers received from a remote
// node into the download schedule.
func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
	return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
}

// DeliverBodies injects a new batch of block bodies received from a remote node.
func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction) (err error) {
	return d.deliver(id, d.bodyCh, &bodyPack{id, transactions}, bodyInMeter, bodyDropMeter)
}

// DeliverReceipts injects a new batch of receipts received from a remote node.
func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
	return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
}

// DeliverNodeData injects a new batch of node state data received from a remote node.
func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
	return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
}

// deliver injects a new batch of data received from a remote node.
func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
	// Update the delivery metrics for both good and failed deliveries
	inMeter.Mark(int64(packet.Items()))
	defer func() {
		if err != nil {
			dropMeter.Mark(int64(packet.Items()))
		}
	}()
	// Deliver or abort if the sync is canceled while queuing
	d.cancelLock.RLock()
	cancel := d.cancelCh
	d.cancelLock.RUnlock()
	if cancel == nil {
		return errNoSyncActive
	}
	select {
	case destCh <- packet:
		return nil
	case <-cancel:
		return errNoSyncActive
	}
}

// qosTuner is the quality of service tuning loop that occasionally gathers the
// peer latency statistics and updates the estimated request round trip time.
func (d *Downloader) qosTuner() {
	for {
		// Retrieve the current median RTT and integrate into the previous target RTT
		rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
		atomic.StoreUint64(&d.rttEstimate, uint64(rtt))

		// A new RTT cycle passed, increase our confidence in the estimated RTT
		conf := atomic.LoadUint64(&d.rttConfidence)
		conf = conf + (1000000-conf)/2
		atomic.StoreUint64(&d.rttConfidence, conf)

		// Log the new QoS values and sleep until the next RTT
		log.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
		select {
		case <-d.quitCh:
			return
		case <-time.After(rtt):
		}
	}
}
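
The two update rules inside qosTuner are easier to see with concrete numbers. The standalone sketch below uses an old RTT estimate of 10s and a freshly measured median of 2s, both invented for the example; qosTuningImpact (0.25) and the 1,000,000 confidence scale are the package constants.

package main

import (
	"fmt"
	"time"
)

func main() {
	const qosTuningImpact = 0.25

	oldRTT := 10 * time.Second
	medianRTT := 2 * time.Second

	// Exponentially weighted move of the estimate towards the measured median RTT.
	newRTT := time.Duration((1-qosTuningImpact)*float64(oldRTT) + qosTuningImpact*float64(medianRTT))
	fmt.Println(newRTT) // 8s

	// Each completed RTT cycle halves the distance between the current
	// confidence and full confidence (1,000,000 == 1.0).
	conf := uint64(500000)
	conf = conf + (1000000-conf)/2
	fmt.Println(float64(conf) / 1000000.0) // 0.75
}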

// qosReduceConfidence is meant to be called when a new peer joins the downloader's
// peer set, needing to reduce the confidence we have in our QoS estimates.
func (d *Downloader) qosReduceConfidence() {
	// If we have a single peer, confidence is always 1
	peers := uint64(d.peers.Len())
	if peers == 0 {
		// Ensure peer connectivity races don't catch us off guard
		return
	}
	if peers == 1 {
		atomic.StoreUint64(&d.rttConfidence, 1000000)
		return
	}
	// If we have a ton of peers, don't drop confidence
	if peers >= uint64(qosConfidenceCap) {
		return
	}
	// Otherwise drop the confidence factor
	conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
	if float64(conf)/1000000 < rttMinConfidence {
		conf = uint64(rttMinConfidence * 1000000)
	}
	atomic.StoreUint64(&d.rttConfidence, conf)

	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
	log.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
}

// requestRTT returns the current target round trip time for a download request
// to complete in.
//
// Note that the returned RTT is 0.9 of the actual estimated RTT. The reason is that
// the downloader tries to adapt queries to the RTT, so multiple RTT values can
// be adapted to, but smaller ones are preferred (stabler download stream).
func (d *Downloader) requestRTT() time.Duration {
	return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
}

// requestTTL returns the current timeout allowance for a single download request
// to finish under.
func (d *Downloader) requestTTL() time.Duration {
	var (
		rtt  = time.Duration(atomic.LoadUint64(&d.rttEstimate))
		conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
	)
	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}
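
A worked example of the requestTTL formula above, TTL = ttlScaling * RTT / confidence, capped at ttlLimit. ttlScaling (3) and ttlLimit (one minute) are the package constants; the RTT and confidence inputs are invented for the illustration.

package main

import (
	"fmt"
	"time"
)

func main() {
	const (
		ttlScaling = 3
		ttlLimit   = time.Minute
	)
	// ttl applies the same arithmetic as requestTTL to explicit inputs.
	ttl := func(rtt time.Duration, conf float64) time.Duration {
		t := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
		if t > ttlLimit {
			t = ttlLimit
		}
		return t
	}
	fmt.Println(ttl(5*time.Second, 1.0)) // 15s  — fully confident estimate
	fmt.Println(ttl(5*time.Second, 0.5)) // 30s  — low confidence widens the timeout
	fmt.Println(ttl(5*time.Second, 0.1)) // 1m0s — would be 150s, capped by ttlLimit
}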