github.com/intfoundation/intchain@v0.0.0-20220727031208-4316ad31ca73/intprotocol/downloader/downloader.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package downloader contains the manual full chain synchronisation. 18 package downloader 19 20 import ( 21 "errors" 22 "fmt" 23 "github.com/intfoundation/intchain" 24 "github.com/intfoundation/intchain/common" 25 "github.com/intfoundation/intchain/core/rawdb" 26 "github.com/intfoundation/intchain/core/types" 27 "github.com/intfoundation/intchain/event" 28 "github.com/intfoundation/intchain/intdb" 29 "github.com/intfoundation/intchain/log" 30 "github.com/intfoundation/intchain/metrics" 31 "github.com/intfoundation/intchain/params" 32 "math/big" 33 "sync" 34 "sync/atomic" 35 "time" 36 ) 37 38 var ( 39 MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request 40 MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request 41 MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request 42 MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly 43 MaxBodyFetch = 128 // Amount of block bodies to be fetched per retrieval request 44 MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request 45 MaxStateFetch = 384 // Amount of node state values to allow fetching per request 46 47 MaxForkAncestry = 3 * params.EpochDuration // Maximum chain reorganisation 48 rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests 49 rttMaxEstimate = 20 * time.Second // Maximum rount-trip time to target for download requests 50 rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value 51 ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion 52 ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts 53 54 qosTuningPeers = 5 // Number of peers to tune based on (best peers) 55 qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence 56 qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value 57 58 maxQueuedHeaders = 32 * 1024 // [intprotocol/62] Maximum number of headers to queue for import (DOS protection) 59 maxHeadersProcess = 2048 // Number of header download results to import at once into the chain 60 maxResultsProcess = 2048 // Number of content download results to import at once into the chain 61 62 fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync 63 fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected 64 fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it 65 fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download 66 fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync 67 ) 68 69 var ( 70 errBusy = errors.New("busy") 71 errUnknownPeer = errors.New("peer is unknown or unhealthy") 72 errBadPeer = errors.New("action from bad peer ignored") 73 errStallingPeer = errors.New("peer is stalling") 74 errNoPeers = errors.New("no peers to keep download active") 75 errTimeout = errors.New("timeout") 76 errEmptyHeaderSet = errors.New("empty header set by peer") 77 errPeersUnavailable = errors.New("no peers available or all tried for download") 78 errInvalidAncestor = errors.New("retrieved ancestor is invalid") 79 errInvalidChain = errors.New("retrieved hash chain is invalid") 80 errInvalidBlock = errors.New("retrieved block is invalid") 81 errInvalidBody = errors.New("retrieved block body is invalid") 82 errInvalidReceipt = errors.New("retrieved receipt is invalid") 83 errCancelBlockFetch = errors.New("block download canceled (requested)") 84 errCancelHeaderFetch = errors.New("block header download canceled (requested)") 85 errCancelBodyFetch = errors.New("block body download canceled (requested)") 86 errCancelReceiptFetch = errors.New("receipt download canceled (requested)") 87 errCancelStateFetch = errors.New("state data download canceled (requested)") 88 errCancelHeaderProcessing = errors.New("header processing canceled (requested)") 89 errCancelContentProcessing = errors.New("content processing canceled (requested)") 90 errNoSyncActive = errors.New("no sync active") 91 errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)") 92 ) 93 94 type Downloader struct { 95 mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle) 96 mux *event.TypeMux // Event multiplexer to announce sync operation events 97 98 queue *queue // Scheduler for selecting the hashes to download 99 peers *peerSet // Set of active peers from which download can proceed 100 stateDB intdb.Database 101 102 rttEstimate uint64 // Round trip time to target for download requests 103 rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops) 104 105 // Statistics 106 syncStatsChainOrigin uint64 // Origin block number where syncing started at 107 syncStatsChainHeight uint64 // Highest block number known when syncing started 108 syncStatsState stateSyncStats 109 syncStatsLock sync.RWMutex // Lock protecting the sync stats fields 110 111 lightchain LightChain 112 blockchain BlockChain 113 114 // Callbacks 115 dropPeer peerDropFn // Drops a peer for misbehaving 116 117 // Status 118 synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing 119 synchronising int32 120 notified int32 121 committed int32 122 123 // Channels 124 headerCh chan dataPack // [intprotocol/62] Channel receiving inbound block headers 125 bodyCh chan dataPack // [intprotocol/62] Channel receiving inbound block bodies 126 receiptCh chan dataPack // [intprotocol/63] Channel receiving inbound receipts 127 bodyWakeCh chan bool // [intprotocol/62] Channel to signal the block body fetcher of new tasks 128 receiptWakeCh chan bool // [intprotocol/63] Channel to signal the receipt fetcher of new tasks 129 headerProcCh chan []*types.Header // [intprotocol/62] Channel to feed the header processor new tasks 130 131 // for stateFetcher 132 stateSyncStart chan *stateSync 133 trackStateReq chan *stateReq 134 stateCh chan dataPack // [intprotocol/63] Channel receiving inbound node state data 135 136 // Cancellation and termination 137 cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) 138 cancelCh chan struct{} // Channel to cancel mid-flight syncs 139 cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers 140 141 quitCh chan struct{} // Quit channel to signal termination 142 quitLock sync.RWMutex // Lock to prevent double closes 143 144 // Testing hooks 145 syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run 146 bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch 147 receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch 148 chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) 149 150 logger log.Logger 151 } 152 153 // LightChain encapsulates functions required to synchronise a light chain. 154 type LightChain interface { 155 // HasHeader verifies a header's presence in the local chain. 156 HasHeader(common.Hash, uint64) bool 157 158 // GetHeaderByHash retrieves a header from the local chain. 159 GetHeaderByHash(common.Hash) *types.Header 160 161 // CurrentHeader retrieves the head header from the local chain. 162 CurrentHeader() *types.Header 163 164 // GetTd returns the total difficulty of a local block. 165 GetTd(common.Hash, uint64) *big.Int 166 167 // InsertHeaderChain inserts a batch of headers into the local chain. 168 InsertHeaderChain([]*types.Header, int) (int, error) 169 170 // Rollback removes a few recently added elements from the local chain. 171 Rollback([]common.Hash) 172 } 173 174 // BlockChain encapsulates functions required to sync a (full or fast) blockchain. 175 type BlockChain interface { 176 LightChain 177 178 // HasBlock verifies a block's presence in the local chain. 179 HasBlock(common.Hash, uint64) bool 180 181 // GetBlockByHash retrieves a block from the local chain. 182 GetBlockByHash(common.Hash) *types.Block 183 184 // CurrentBlock retrieves the head block from the local chain. 185 CurrentBlock() *types.Block 186 187 // CurrentFastBlock retrieves the head fast block from the local chain. 188 CurrentFastBlock() *types.Block 189 190 // FastSyncCommitHead directly commits the head block to a certain entity. 191 FastSyncCommitHead(common.Hash) error 192 193 // InsertChain inserts a batch of blocks into the local chain. 194 InsertChain(types.Blocks) (int, error) 195 196 // InsertReceiptChain inserts a batch of receipts into the local chain. 197 InsertReceiptChain(types.Blocks, []types.Receipts) (int, error) 198 } 199 200 // New creates a new downloader to fetch hashes and blocks from remote peers. 201 func New(mode SyncMode, stateDb intdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, logger log.Logger) *Downloader { 202 if lightchain == nil { 203 lightchain = chain 204 } 205 206 dl := &Downloader{ 207 mode: mode, 208 stateDB: stateDb, 209 mux: mux, 210 queue: newQueue(), 211 peers: newPeerSet(), 212 rttEstimate: uint64(rttMaxEstimate), 213 rttConfidence: uint64(1000000), 214 blockchain: chain, 215 lightchain: lightchain, 216 dropPeer: dropPeer, 217 headerCh: make(chan dataPack, 1), 218 bodyCh: make(chan dataPack, 1), 219 receiptCh: make(chan dataPack, 1), 220 bodyWakeCh: make(chan bool, 1), 221 receiptWakeCh: make(chan bool, 1), 222 headerProcCh: make(chan []*types.Header, 1), 223 quitCh: make(chan struct{}), 224 stateCh: make(chan dataPack), 225 stateSyncStart: make(chan *stateSync), 226 syncStatsState: stateSyncStats{ 227 processed: rawdb.ReadFastTrieProgress(stateDb), 228 }, 229 trackStateReq: make(chan *stateReq), 230 231 logger: logger, 232 } 233 go dl.qosTuner() 234 go dl.stateFetcher() 235 return dl 236 } 237 238 // Progress retrieves the synchronisation boundaries, specifically the origin 239 // block where synchronisation started at (may have failed/suspended); the block 240 // or header sync is currently at; and the latest known block which the sync targets. 241 // 242 // In addition, during the state download phase of fast synchronisation the number 243 // of processed and the total number of known states are also returned. Otherwise 244 // these are zero. 245 func (d *Downloader) Progress() intchain.SyncProgress { 246 // Lock the current stats and return the progress 247 d.syncStatsLock.RLock() 248 defer d.syncStatsLock.RUnlock() 249 250 current := uint64(0) 251 switch d.mode { 252 case FullSync: 253 current = d.blockchain.CurrentBlock().NumberU64() 254 case FastSync: 255 current = d.blockchain.CurrentFastBlock().NumberU64() 256 } 257 return intchain.SyncProgress{ 258 StartingBlock: d.syncStatsChainOrigin, 259 CurrentBlock: current, 260 HighestBlock: d.syncStatsChainHeight, 261 PulledStates: d.syncStatsState.processed, 262 KnownStates: d.syncStatsState.processed + d.syncStatsState.pending, 263 } 264 } 265 266 // Synchronising returns whether the downloader is currently retrieving blocks. 267 func (d *Downloader) Synchronising() bool { 268 return atomic.LoadInt32(&d.synchronising) > 0 269 } 270 271 // RegisterPeer injects a new download peer into the set of block source to be 272 // used for fetching hashes and blocks from. 273 func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error { 274 logger := d.logger.New("peer", id) 275 logger.Trace("Registering sync peer") 276 if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil { 277 logger.Error("Failed to register sync peer", "err", err) 278 return err 279 } 280 d.qosReduceConfidence() 281 282 return nil 283 } 284 285 // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer. 286 func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error { 287 return d.RegisterPeer(id, version, &lightPeerWrapper{peer}) 288 } 289 290 // UnregisterPeer remove a peer from the known list, preventing any action from 291 // the specified peer. An effort is also made to return any pending fetches into 292 // the queue. 293 func (d *Downloader) UnregisterPeer(id string) error { 294 // Unregister the peer from the active peer set and revoke any fetch tasks 295 logger := d.logger.New("peer", id) 296 logger.Trace("Unregistering sync peer") 297 if err := d.peers.Unregister(id); err != nil { 298 logger.Error("Failed to unregister sync peer", "err", err) 299 return err 300 } 301 d.queue.Revoke(id) 302 303 // If this peer was the master peer, abort sync immediately 304 d.cancelLock.RLock() 305 master := id == d.cancelPeer 306 d.cancelLock.RUnlock() 307 308 if master { 309 d.Cancel() 310 } 311 return nil 312 } 313 314 // Synchronise tries to sync up our local block chain with a remote peer, both 315 // adding various sanity checks as well as wrapping it with various log entries. 316 func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error { 317 err := d.synchronise(id, head, td, mode) 318 switch err { 319 case nil: 320 case errBusy: 321 322 case errTimeout, errBadPeer, errStallingPeer, 323 errEmptyHeaderSet, errPeersUnavailable, errTooOld, 324 errInvalidAncestor, errInvalidChain: 325 d.logger.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) 326 if d.dropPeer == nil { 327 // The dropPeer method is nil when `--copydb` is used for a local copy. 328 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 329 d.logger.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id) 330 } else { 331 d.dropPeer(id) 332 } 333 default: 334 d.logger.Warn("Synchronisation failed, retrying", "err", err) 335 } 336 return err 337 } 338 339 // synchronise will select the peer and use it for synchronising. If an empty string is given 340 // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the 341 // checks fail an error will be returned. This method is synchronous 342 func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error { 343 // Mock out the synchronisation if testing 344 if d.synchroniseMock != nil { 345 return d.synchroniseMock(id, hash) 346 } 347 // Make sure only one goroutine is ever allowed past this point at once 348 if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { 349 return errBusy 350 } 351 defer atomic.StoreInt32(&d.synchronising, 0) 352 353 // Post a user notification of the sync (only once per session) 354 if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { 355 d.logger.Info("Block synchronisation started") 356 } 357 // Reset the queue, peer set and wake channels to clean any internal leftover state 358 d.queue.Reset() 359 d.peers.Reset() 360 361 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 362 select { 363 case <-ch: 364 default: 365 } 366 } 367 for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} { 368 for empty := false; !empty; { 369 select { 370 case <-ch: 371 default: 372 empty = true 373 } 374 } 375 } 376 for empty := false; !empty; { 377 select { 378 case <-d.headerProcCh: 379 default: 380 empty = true 381 } 382 } 383 // Create cancel channel for aborting mid-flight and mark the master peer 384 d.cancelLock.Lock() 385 d.cancelCh = make(chan struct{}) 386 d.cancelPeer = id 387 d.cancelLock.Unlock() 388 389 defer d.Cancel() // No matter what, we can't leave the cancel channel open 390 391 // Set the requested sync mode, unless it's forbidden 392 d.mode = mode 393 394 // Retrieve the origin peer and initiate the downloading process 395 p := d.peers.Peer(id) 396 if p == nil { 397 return errUnknownPeer 398 } 399 return d.syncWithPeer(p, hash, td) 400 } 401 402 // syncWithPeer starts a block synchronization based on the hash chain from the 403 // specified peer and head hash. 404 func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) { 405 d.mux.Post(StartEvent{}) 406 defer func() { 407 // reset on error 408 if err != nil { 409 d.mux.Post(FailedEvent{err}) 410 } else { 411 d.mux.Post(DoneEvent{}) 412 } 413 }() 414 if p.version < 62 { 415 return errTooOld 416 } 417 418 d.logger.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode) 419 defer func(start time.Time) { 420 d.logger.Debug("Synchronisation terminated", "elapsed", time.Since(start)) 421 }(time.Now()) 422 423 // Look up the sync boundaries: the common ancestor and the target block 424 latest, err := d.fetchHeight(p) 425 if err != nil { 426 return err 427 } 428 height := latest.Number.Uint64() 429 430 origin, err := d.findAncestor(p, height) 431 if err != nil { 432 return err 433 } 434 d.syncStatsLock.Lock() 435 if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { 436 d.syncStatsChainOrigin = origin 437 } 438 d.syncStatsChainHeight = height 439 d.syncStatsLock.Unlock() 440 441 // Ensure our origin point is below any fast sync pivot point 442 pivot := uint64(0) 443 if d.mode == FastSync { 444 if height <= uint64(fsMinFullBlocks) { 445 origin = 0 446 } else { 447 pivot = height - uint64(fsMinFullBlocks) 448 if pivot <= origin { 449 origin = pivot - 1 450 } 451 } 452 } 453 d.committed = 1 454 if d.mode == FastSync && pivot != 0 { 455 d.committed = 0 456 } 457 // Initiate the sync using a concurrent header and content retrieval algorithm 458 d.queue.Prepare(origin+1, d.mode) 459 if d.syncInitHook != nil { 460 d.syncInitHook(origin, height) 461 } 462 463 fetchers := []func() error{ 464 func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved 465 func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync 466 func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync 467 func() error { return d.processHeaders(origin+1, pivot, td) }, 468 } 469 if d.mode == FastSync { 470 fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) }) 471 } else if d.mode == FullSync { 472 fetchers = append(fetchers, d.processFullSyncContent) 473 } 474 return d.spawnSync(fetchers) 475 } 476 477 // spawnSync runs d.process and all given fetcher functions to completion in 478 // separate goroutines, returning the first error that appears. 479 func (d *Downloader) spawnSync(fetchers []func() error) error { 480 var wg sync.WaitGroup 481 errc := make(chan error, len(fetchers)) 482 wg.Add(len(fetchers)) 483 for _, fn := range fetchers { 484 fn := fn 485 go func() { defer wg.Done(); errc <- fn() }() 486 } 487 // Wait for the first error, then terminate the others. 488 var err error 489 for i := 0; i < len(fetchers); i++ { 490 if i == len(fetchers)-1 { 491 // Close the queue when all fetchers have exited. 492 // This will cause the block processor to end when 493 // it has processed the queue. 494 d.queue.Close() 495 } 496 if err = <-errc; err != nil { 497 break 498 } 499 } 500 d.queue.Close() 501 d.Cancel() 502 wg.Wait() 503 return err 504 } 505 506 // Cancel cancels all of the operations and resets the queue. It returns true 507 // if the cancel operation was completed. 508 func (d *Downloader) Cancel() { 509 // Close the current cancel channel 510 d.cancelLock.Lock() 511 if d.cancelCh != nil { 512 select { 513 case <-d.cancelCh: 514 // Channel was already closed 515 default: 516 close(d.cancelCh) 517 } 518 } 519 d.cancelLock.Unlock() 520 } 521 522 // Terminate interrupts the downloader, canceling all pending operations. 523 // The downloader cannot be reused after calling Terminate. 524 func (d *Downloader) Terminate() { 525 // Close the termination channel (make sure double close is allowed) 526 d.quitLock.Lock() 527 select { 528 case <-d.quitCh: 529 default: 530 close(d.quitCh) 531 } 532 d.quitLock.Unlock() 533 534 // Cancel any pending download requests 535 d.Cancel() 536 } 537 538 // fetchHeight retrieves the head header of the remote peer to aid in estimating 539 // the total time a pending synchronisation would take. 540 func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) { 541 p.log.Debug("Retrieving remote chain height") 542 543 // Request the advertised remote head block and wait for the response 544 head, _ := p.peer.Head() 545 go p.peer.RequestHeadersByHash(head, 1, 0, false) 546 547 ttl := d.requestTTL() 548 timeout := time.After(ttl) 549 for { 550 select { 551 case <-d.cancelCh: 552 return nil, errCancelBlockFetch 553 554 case packet := <-d.headerCh: 555 // Discard anything not from the origin peer 556 if packet.PeerId() != p.id { 557 d.logger.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) 558 break 559 } 560 // Make sure the peer actually gave something valid 561 headers := packet.(*headerPack).headers 562 if len(headers) != 1 { 563 p.log.Debug("Multiple headers for single request", "headers", len(headers)) 564 return nil, errBadPeer 565 } 566 head := headers[0] 567 p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash()) 568 return head, nil 569 570 case <-timeout: 571 p.log.Debug("Waiting for head header timed out", "elapsed", ttl) 572 return nil, errTimeout 573 574 case <-d.bodyCh: 575 case <-d.receiptCh: 576 // Out of bounds delivery, ignore 577 } 578 } 579 } 580 581 // findAncestor tries to locate the common ancestor link of the local chain and 582 // a remote peers blockchain. In the general case when our node was in sync and 583 // on the correct chain, checking the top N links should already get us a match. 584 // In the rare scenario when we ended up on a long reorganisation (i.e. none of 585 // the head links match), we do a binary search to find the common ancestor. 586 func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) { 587 // Figure out the valid ancestor range to prevent rewrite attacks 588 floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64() 589 590 if d.mode == FullSync { 591 ceil = d.blockchain.CurrentBlock().NumberU64() 592 } else if d.mode == FastSync { 593 ceil = d.blockchain.CurrentFastBlock().NumberU64() 594 } 595 if ceil >= MaxForkAncestry { 596 floor = int64(ceil - MaxForkAncestry) 597 } 598 p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height) 599 600 // Request the topmost blocks to short circuit binary ancestor lookup 601 head := ceil 602 if head > height { 603 head = height 604 } 605 from := int64(head) - int64(MaxHeaderFetch) 606 if from < 0 { 607 from = 0 608 } 609 // Span out with 15 block gaps into the future to catch bad head reports 610 limit := 2 * MaxHeaderFetch / 16 611 count := 1 + int((int64(ceil)-from)/16) 612 if count > limit { 613 count = limit 614 } 615 go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false) 616 617 // Wait for the remote response to the head fetch 618 number, hash := uint64(0), common.Hash{} 619 620 ttl := d.requestTTL() 621 timeout := time.After(ttl) 622 623 for finished := false; !finished; { 624 select { 625 case <-d.cancelCh: 626 return 0, errCancelHeaderFetch 627 628 case packet := <-d.headerCh: 629 // Discard anything not from the origin peer 630 if packet.PeerId() != p.id { 631 d.logger.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) 632 break 633 } 634 // Make sure the peer actually gave something valid 635 headers := packet.(*headerPack).headers 636 if len(headers) == 0 { 637 p.log.Warn("Empty head header set") 638 return 0, errEmptyHeaderSet 639 } 640 // Make sure the peer's reply conforms to the request 641 for i := 0; i < len(headers); i++ { 642 if number := headers[i].Number.Int64(); number != from+int64(i)*16 { 643 p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number) 644 return 0, errInvalidChain 645 } 646 } 647 // Check if a common ancestor was found 648 finished = true 649 for i := len(headers) - 1; i >= 0; i-- { 650 // Skip any headers that underflow/overflow our requested set 651 if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil { 652 continue 653 } 654 // Otherwise check if we already know the header or not 655 if (d.mode == FullSync && d.blockchain.HasBlock(headers[i].Hash(), headers[i].Number.Uint64())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) { 656 number, hash = headers[i].Number.Uint64(), headers[i].Hash() 657 658 // If every header is known, even future ones, the peer straight out lied about its head 659 if number > height && i == limit-1 { 660 p.log.Warn("Lied about chain head", "reported", height, "found", number) 661 return 0, errStallingPeer 662 } 663 break 664 } 665 } 666 667 case <-timeout: 668 p.log.Debug("Waiting for head header timed out", "elapsed", ttl) 669 return 0, errTimeout 670 671 case <-d.bodyCh: 672 case <-d.receiptCh: 673 // Out of bounds delivery, ignore 674 } 675 } 676 // If the head fetch already found an ancestor, return 677 if !common.EmptyHash(hash) { 678 if int64(number) <= floor { 679 p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor) 680 return 0, errInvalidAncestor 681 } 682 p.log.Debug("Found common ancestor", "number", number, "hash", hash) 683 return number, nil 684 } 685 // Ancestor not found, we need to binary search over our chain 686 start, end := uint64(0), head 687 if floor > 0 { 688 start = uint64(floor) 689 } 690 for start+1 < end { 691 // Split our chain interval in two, and request the hash to cross check 692 check := (start + end) / 2 693 694 ttl := d.requestTTL() 695 timeout := time.After(ttl) 696 697 go p.peer.RequestHeadersByNumber(check, 1, 0, false) 698 699 // Wait until a reply arrives to this request 700 for arrived := false; !arrived; { 701 select { 702 case <-d.cancelCh: 703 return 0, errCancelHeaderFetch 704 705 case packer := <-d.headerCh: 706 // Discard anything not from the origin peer 707 if packer.PeerId() != p.id { 708 d.logger.Debug("Received headers from incorrect peer", "peer", packer.PeerId()) 709 break 710 } 711 // Make sure the peer actually gave something valid 712 headers := packer.(*headerPack).headers 713 if len(headers) != 1 { 714 p.log.Debug("Multiple headers for single request", "headers", len(headers)) 715 return 0, errBadPeer 716 } 717 arrived = true 718 719 // Modify the search interval based on the response 720 if (d.mode == FullSync && !d.blockchain.HasBlock(headers[0].Hash(), headers[0].Number.Uint64())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) { 721 end = check 722 break 723 } 724 header := d.lightchain.GetHeaderByHash(headers[0].Hash()) // Independent of sync mode, header surely exists 725 if header.Number.Uint64() != check { 726 p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) 727 return 0, errBadPeer 728 } 729 start = check 730 731 case <-timeout: 732 p.log.Debug("Waiting for search header timed out", "elapsed", ttl) 733 return 0, errTimeout 734 735 case <-d.bodyCh: 736 case <-d.receiptCh: 737 // Out of bounds delivery, ignore 738 } 739 } 740 } 741 // Ensure valid ancestry and return 742 if int64(start) <= floor { 743 p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor) 744 return 0, errInvalidAncestor 745 } 746 p.log.Debug("Found common ancestor", "number", start, "hash", hash) 747 return start, nil 748 } 749 750 // fetchHeaders keeps retrieving headers concurrently from the number 751 // requested, until no more are returned, potentially throttling on the way. To 752 // facilitate concurrency but still protect against malicious nodes sending bad 753 // headers, we construct a header chain skeleton using the "origin" peer we are 754 // syncing with, and fill in the missing headers using anyone else. Headers from 755 // other peers are only accepted if they map cleanly to the skeleton. If no one 756 // can fill in the skeleton - not even the origin peer - it's assumed invalid and 757 // the origin is dropped. 758 func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64) error { 759 p.log.Debug("Directing header downloads", "origin", from) 760 defer p.log.Debug("Header download terminated") 761 762 // Create a timeout timer, and the associated header fetcher 763 skeleton := true // Skeleton assembly phase or finishing up 764 request := time.Now() // time of the last skeleton fetch request 765 timeout := time.NewTimer(0) // timer to dump a non-responsive active peer 766 <-timeout.C // timeout channel should be initially empty 767 defer timeout.Stop() 768 769 var ttl time.Duration 770 getHeaders := func(from uint64) { 771 request = time.Now() 772 773 ttl = d.requestTTL() 774 timeout.Reset(ttl) 775 776 if skeleton { 777 p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) 778 go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) 779 } else { 780 p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) 781 go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false) 782 } 783 } 784 // Start pulling the header chain skeleton until all is done 785 getHeaders(from) 786 787 for { 788 select { 789 case <-d.cancelCh: 790 return errCancelHeaderFetch 791 792 case packet := <-d.headerCh: 793 // Make sure the active peer is giving us the skeleton headers 794 if packet.PeerId() != p.id { 795 d.logger.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId()) 796 break 797 } 798 headerReqTimer.UpdateSince(request) 799 timeout.Stop() 800 801 // If the skeleton's finished, pull any remaining head headers directly from the origin 802 if packet.Items() == 0 && skeleton { 803 skeleton = false 804 getHeaders(from) 805 continue 806 } 807 // If no more headers are inbound, notify the content fetchers and return 808 if packet.Items() == 0 { 809 // Don't abort header fetches while the pivot is downloading 810 if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { 811 p.log.Debug("No headers, waiting for pivot commit") 812 select { 813 case <-time.After(fsHeaderContCheck): 814 getHeaders(from) 815 continue 816 case <-d.cancelCh: 817 return errCancelHeaderFetch 818 } 819 } 820 // Pivot done (or not in fast sync) and no more headers, terminate the process 821 p.log.Debug("No more headers available") 822 select { 823 case d.headerProcCh <- nil: 824 return nil 825 case <-d.cancelCh: 826 return errCancelHeaderFetch 827 } 828 } 829 headers := packet.(*headerPack).headers 830 831 // If we received a skeleton batch, resolve internals concurrently 832 if skeleton { 833 filled, proced, err := d.fillHeaderSkeleton(from, headers) 834 if err != nil { 835 p.log.Debug("Skeleton chain invalid", "err", err) 836 return errInvalidChain 837 } 838 headers = filled[proced:] 839 from += uint64(proced) 840 } 841 // Insert all the new headers and fetch the next batch 842 if len(headers) > 0 { 843 p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) 844 select { 845 case d.headerProcCh <- headers: 846 case <-d.cancelCh: 847 return errCancelHeaderFetch 848 } 849 from += uint64(len(headers)) 850 } 851 getHeaders(from) 852 853 case <-timeout.C: 854 if d.dropPeer == nil { 855 // The dropPeer method is nil when `--copydb` is used for a local copy. 856 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 857 p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id) 858 break 859 } 860 // Header retrieval timed out, consider the peer bad and drop 861 p.log.Debug("Header request timed out", "elapsed", ttl) 862 headerTimeoutMeter.Mark(1) 863 d.dropPeer(p.id) 864 865 // Finish the sync gracefully instead of dumping the gathered data though 866 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 867 select { 868 case ch <- false: 869 case <-d.cancelCh: 870 } 871 } 872 select { 873 case d.headerProcCh <- nil: 874 case <-d.cancelCh: 875 } 876 return errBadPeer 877 } 878 } 879 } 880 881 // fillHeaderSkeleton concurrently retrieves headers from all our available peers 882 // and maps them to the provided skeleton header chain. 883 // 884 // Any partial results from the beginning of the skeleton is (if possible) forwarded 885 // immediately to the header processor to keep the rest of the pipeline full even 886 // in the case of header stalls. 887 // 888 // The method returs the entire filled skeleton and also the number of headers 889 // already forwarded for processing. 890 func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) { 891 d.logger.Debug("Filling up skeleton", "from", from) 892 d.queue.ScheduleSkeleton(from, skeleton) 893 894 var ( 895 deliver = func(packet dataPack) (int, error) { 896 pack := packet.(*headerPack) 897 return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh) 898 } 899 expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) } 900 throttle = func() bool { return false } 901 reserve = func(p *peerConnection, count int) (*fetchRequest, bool, error) { 902 return d.queue.ReserveHeaders(p, count), false, nil 903 } 904 fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) } 905 capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) } 906 setIdle = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) } 907 ) 908 err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire, 909 d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve, 910 nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers") 911 912 d.logger.Debug("Skeleton fill terminated", "err", err) 913 914 filled, proced := d.queue.RetrieveHeaders() 915 return filled, proced, err 916 } 917 918 // fetchBodies iteratively downloads the scheduled block bodies, taking any 919 // available peers, reserving a chunk of blocks for each, waiting for delivery 920 // and also periodically checking for timeouts. 921 func (d *Downloader) fetchBodies(from uint64) error { 922 d.logger.Debug("Downloading block bodies", "origin", from) 923 924 var ( 925 deliver = func(packet dataPack) (int, error) { 926 pack := packet.(*bodyPack) 927 return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles) 928 } 929 expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) } 930 fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) } 931 capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) } 932 setIdle = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) } 933 ) 934 err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, 935 d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies, 936 d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies") 937 938 d.logger.Debug("Block body download terminated", "err", err) 939 return err 940 } 941 942 // fetchReceipts iteratively downloads the scheduled block receipts, taking any 943 // available peers, reserving a chunk of receipts for each, waiting for delivery 944 // and also periodically checking for timeouts. 945 func (d *Downloader) fetchReceipts(from uint64) error { 946 d.logger.Debug("Downloading transaction receipts", "origin", from) 947 948 var ( 949 deliver = func(packet dataPack) (int, error) { 950 pack := packet.(*receiptPack) 951 return d.queue.DeliverReceipts(pack.peerId, pack.receipts) 952 } 953 expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) } 954 fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) } 955 capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) } 956 setIdle = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) } 957 ) 958 err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, 959 d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts, 960 d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts") 961 962 d.logger.Debug("Transaction receipt download terminated", "err", err) 963 return err 964 } 965 966 // fetchParts iteratively downloads scheduled block parts, taking any available 967 // peers, reserving a chunk of fetch requests for each, waiting for delivery and 968 // also periodically checking for timeouts. 969 // 970 // As the scheduling/timeout logic mostly is the same for all downloaded data 971 // types, this method is used by each for data gathering and is instrumented with 972 // various callbacks to handle the slight differences between processing them. 973 // 974 // The instrumentation parameters: 975 // - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer) 976 // - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers) 977 // - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`) 978 // - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed) 979 // - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping) 980 // - pending: task callback for the number of requests still needing download (detect completion/non-completability) 981 // - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish) 982 // - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use) 983 // - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions) 984 // - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic) 985 // - fetch: network callback to actually send a particular download request to a physical remote peer 986 // - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer) 987 // - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping) 988 // - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks 989 // - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping) 990 // - kind: textual label of the type being downloaded to display in log mesages 991 func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool, 992 expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error), 993 fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int, 994 idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error { 995 996 // Create a ticker to detect expired retrieval tasks 997 ticker := time.NewTicker(100 * time.Millisecond) 998 defer ticker.Stop() 999 1000 update := make(chan struct{}, 1) 1001 1002 // Prepare the queue and fetch block parts until the block header fetcher's done 1003 finished := false 1004 for { 1005 select { 1006 case <-d.cancelCh: 1007 return errCancel 1008 1009 case packet := <-deliveryCh: 1010 // If the peer was previously banned and failed to deliver its pack 1011 // in a reasonable time frame, ignore its message. 1012 if peer := d.peers.Peer(packet.PeerId()); peer != nil { 1013 // Deliver the received chunk of data and check chain validity 1014 accepted, err := deliver(packet) 1015 if err == errInvalidChain { 1016 return err 1017 } 1018 // Unless a peer delivered something completely else than requested (usually 1019 // caused by a timed out request which came through in the end), set it to 1020 // idle. If the delivery's stale, the peer should have already been idled. 1021 if err != errStaleDelivery { 1022 setIdle(peer, accepted) 1023 } 1024 // Issue a log to the user to see what's going on 1025 switch { 1026 case err == nil && packet.Items() == 0: 1027 peer.log.Trace("Requested data not delivered", "type", kind) 1028 case err == nil: 1029 peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats()) 1030 default: 1031 peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err) 1032 } 1033 } 1034 // Blocks assembled, try to update the progress 1035 select { 1036 case update <- struct{}{}: 1037 default: 1038 } 1039 1040 case cont := <-wakeCh: 1041 // The header fetcher sent a continuation flag, check if it's done 1042 if !cont { 1043 finished = true 1044 } 1045 // Headers arrive, try to update the progress 1046 select { 1047 case update <- struct{}{}: 1048 default: 1049 } 1050 1051 case <-ticker.C: 1052 // Sanity check update the progress 1053 select { 1054 case update <- struct{}{}: 1055 default: 1056 } 1057 1058 case <-update: 1059 // Short circuit if we lost all our peers 1060 if d.peers.Len() == 0 { 1061 return errNoPeers 1062 } 1063 // Check for fetch request timeouts and demote the responsible peers 1064 for pid, fails := range expire() { 1065 if peer := d.peers.Peer(pid); peer != nil { 1066 // If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps 1067 // ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal times 1068 // out that sync wise we need to get rid of the peer. 1069 // 1070 // The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth 1071 // and latency of a peer separately, which requires pushing the measures capacity a bit and seeing 1072 // how response times reacts, to it always requests one more than the minimum (i.e. min 2). 1073 if fails > 2 { 1074 peer.log.Trace("Data delivery timed out", "type", kind) 1075 setIdle(peer, 0) 1076 } else { 1077 peer.log.Debug("Stalling delivery, dropping", "type", kind) 1078 if d.dropPeer == nil { 1079 // The dropPeer method is nil when `--copydb` is used for a local copy. 1080 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 1081 peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid) 1082 } else { 1083 d.dropPeer(pid) 1084 } 1085 } 1086 } 1087 } 1088 // If there's nothing more to fetch, wait or terminate 1089 if pending() == 0 { 1090 if !inFlight() && finished { 1091 d.logger.Debug("Data fetching completed", "type", kind) 1092 return nil 1093 } 1094 break 1095 } 1096 // Send a download request to all idle peers, until throttled 1097 progressed, throttled, running := false, false, inFlight() 1098 idles, total := idle() 1099 1100 for _, peer := range idles { 1101 // Short circuit if throttling activated 1102 if throttle() { 1103 throttled = true 1104 break 1105 } 1106 // Short circuit if there is no more available task. 1107 if pending() == 0 { 1108 break 1109 } 1110 // Reserve a chunk of fetches for a peer. A nil can mean either that 1111 // no more headers are available, or that the peer is known not to 1112 // have them. 1113 request, progress, err := reserve(peer, capacity(peer)) 1114 if err != nil { 1115 return err 1116 } 1117 if progress { 1118 progressed = true 1119 } 1120 if request == nil { 1121 continue 1122 } 1123 if request.From > 0 { 1124 peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From) 1125 } else { 1126 peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number) 1127 } 1128 // Fetch the chunk and make sure any errors return the hashes to the queue 1129 if fetchHook != nil { 1130 fetchHook(request.Headers) 1131 } 1132 if err := fetch(peer, request); err != nil { 1133 // Although we could try and make an attempt to fix this, this error really 1134 // means that we've double allocated a fetch task to a peer. If that is the 1135 // case, the internal state of the downloader and the queue is very wrong so 1136 // better hard crash and note the error instead of silently accumulating into 1137 // a much bigger issue. 1138 panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind)) 1139 } 1140 running = true 1141 } 1142 // Make sure that we have peers available for fetching. If all peers have been tried 1143 // and all failed throw an error 1144 if !progressed && !throttled && !running && len(idles) == total && pending() > 0 { 1145 return errPeersUnavailable 1146 } 1147 } 1148 } 1149 } 1150 1151 // processHeaders takes batches of retrieved headers from an input channel and 1152 // keeps processing and scheduling them into the header chain and downloader's 1153 // queue until the stream ends or a failure occurs. 1154 func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error { 1155 // Keep a count of uncertain headers to roll back 1156 rollback := []*types.Header{} 1157 defer func() { 1158 if len(rollback) > 0 { 1159 // Flatten the headers and roll them back 1160 hashes := make([]common.Hash, len(rollback)) 1161 for i, header := range rollback { 1162 hashes[i] = header.Hash() 1163 } 1164 lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 1165 lastFastBlock = d.blockchain.CurrentFastBlock().Number() 1166 lastBlock = d.blockchain.CurrentBlock().Number() 1167 d.lightchain.Rollback(hashes) 1168 curFastBlock, curBlock := common.Big0, common.Big0 1169 curFastBlock = d.blockchain.CurrentFastBlock().Number() 1170 curBlock = d.blockchain.CurrentBlock().Number() 1171 d.logger.Warn("Rolled back headers", "count", len(hashes), 1172 "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), 1173 "fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), 1174 "block", fmt.Sprintf("%d->%d", lastBlock, curBlock)) 1175 } 1176 }() 1177 1178 // Wait for batches of headers to process 1179 gotHeaders := false 1180 1181 for { 1182 select { 1183 case <-d.cancelCh: 1184 return errCancelHeaderProcessing 1185 1186 case headers := <-d.headerProcCh: 1187 // Terminate header processing if we synced up 1188 if len(headers) == 0 { 1189 // Notify everyone that headers are fully processed 1190 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 1191 select { 1192 case ch <- false: 1193 case <-d.cancelCh: 1194 } 1195 } 1196 // If no headers were retrieved at all, the peer violated its TD promise that it had a 1197 // better chain compared to ours. The only exception is if its promised blocks were 1198 // already imported by other means (e.g. fecher): 1199 // 1200 // R <remote peer>, L <local node>: Both at block 10 1201 // R: Mine block 11, and propagate it to L 1202 // L: Queue block 11 for import 1203 // L: Notice that R's head and TD increased compared to ours, start sync 1204 // L: Import of block 11 finishes 1205 // L: Sync begins, and finds common ancestor at 11 1206 // L: Request new headers up from 11 (R's TD was higher, it must have something) 1207 // R: Nothing to give 1208 head := d.blockchain.CurrentBlock() 1209 if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 { 1210 return errStallingPeer 1211 } 1212 // If fast or light syncing, ensure promised headers are indeed delivered. This is 1213 // needed to detect scenarios where an attacker feeds a bad pivot and then bails out 1214 // of delivering the post-pivot blocks that would flag the invalid content. 1215 // 1216 // This check cannot be executed "as is" for full imports, since blocks may still be 1217 // queued for processing when the header download completes. However, as long as the 1218 // peer gave us something useful, we're already happy/progressed (above check). 1219 if d.mode == FastSync { 1220 head := d.lightchain.CurrentHeader() 1221 if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { 1222 return errStallingPeer 1223 } 1224 } 1225 // Disable any rollback and return 1226 rollback = nil 1227 return nil 1228 } 1229 // Otherwise split the chunk of headers into batches and process them 1230 gotHeaders = true 1231 1232 for len(headers) > 0 { 1233 // Terminate if something failed in between processing chunks 1234 select { 1235 case <-d.cancelCh: 1236 return errCancelHeaderProcessing 1237 default: 1238 } 1239 // Select the next chunk of headers to import 1240 limit := maxHeadersProcess 1241 if limit > len(headers) { 1242 limit = len(headers) 1243 } 1244 chunk := headers[:limit] 1245 1246 // In case of header only syncing, validate the chunk immediately 1247 if d.mode == FastSync { 1248 // Collect the yet unknown headers to mark them as uncertain 1249 unknown := make([]*types.Header, 0, len(headers)) 1250 for _, header := range chunk { 1251 if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) { 1252 unknown = append(unknown, header) 1253 } 1254 } 1255 // If we're importing pure headers, verify based on their recentness 1256 frequency := fsHeaderCheckFrequency 1257 if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { 1258 frequency = 1 1259 } 1260 if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil { 1261 // If some headers were inserted, add them too to the rollback list 1262 if n > 0 { 1263 rollback = append(rollback, chunk[:n]...) 1264 } 1265 d.logger.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err) 1266 return errInvalidChain 1267 } 1268 // All verifications passed, store newly found uncertain headers 1269 rollback = append(rollback, unknown...) 1270 if len(rollback) > fsHeaderSafetyNet { 1271 rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...) 1272 } 1273 } 1274 // Unless we're doing light chains, schedule the headers for associated content retrieval 1275 if d.mode == FullSync || d.mode == FastSync { 1276 // If we've reached the allowed number of pending headers, stall a bit 1277 for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { 1278 select { 1279 case <-d.cancelCh: 1280 return errCancelHeaderProcessing 1281 case <-time.After(time.Second): 1282 } 1283 } 1284 // Otherwise insert the headers for content retrieval 1285 inserts := d.queue.Schedule(chunk, origin) 1286 if len(inserts) != len(chunk) { 1287 d.logger.Debug("Stale headers") 1288 return errBadPeer 1289 } 1290 } 1291 headers = headers[limit:] 1292 origin += uint64(limit) 1293 } 1294 1295 // Update the highest block number we know if a higher one is found. 1296 d.syncStatsLock.Lock() 1297 if d.syncStatsChainHeight < origin { 1298 d.syncStatsChainHeight = origin - 1 1299 } 1300 d.syncStatsLock.Unlock() 1301 1302 // Signal the content downloaders of the availablility of new tasks 1303 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 1304 select { 1305 case ch <- true: 1306 default: 1307 } 1308 } 1309 } 1310 } 1311 } 1312 1313 // processFullSyncContent takes fetch results from the queue and imports them into the chain. 1314 func (d *Downloader) processFullSyncContent() error { 1315 for { 1316 results := d.queue.Results(true) 1317 if len(results) == 0 { 1318 return nil 1319 } 1320 if d.chainInsertHook != nil { 1321 d.chainInsertHook(results) 1322 } 1323 if err := d.importBlockResults(results); err != nil { 1324 return err 1325 } 1326 } 1327 } 1328 1329 func (d *Downloader) importBlockResults(results []*fetchResult) error { 1330 // Check for any early termination requests 1331 if len(results) == 0 { 1332 return nil 1333 } 1334 select { 1335 case <-d.quitCh: 1336 return errCancelContentProcessing 1337 default: 1338 } 1339 // Retrieve the a batch of results to import 1340 first, last := results[0].Header, results[len(results)-1].Header 1341 d.logger.Debug("Inserting downloaded chain", "items", len(results), 1342 "firstnum", first.Number, "firsthash", first.Hash(), 1343 "lastnum", last.Number, "lasthash", last.Hash(), 1344 ) 1345 blocks := make([]*types.Block, len(results)) 1346 for i, result := range results { 1347 blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles) 1348 } 1349 if index, err := d.blockchain.InsertChain(blocks); err != nil { 1350 d.logger.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) 1351 return errInvalidChain 1352 } 1353 return nil 1354 } 1355 1356 // processFastSyncContent takes fetch results from the queue and writes them to the 1357 // database. It also controls the synchronisation of state nodes of the pivot block. 1358 func (d *Downloader) processFastSyncContent(latest *types.Header) error { 1359 // Start syncing state of the reported head block. This should get us most of 1360 // the state of the pivot block. 1361 stateSync := d.syncState(latest.Root) 1362 defer stateSync.Cancel() 1363 go func() { 1364 if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { 1365 d.queue.Close() // wake up WaitResults 1366 } 1367 }() 1368 // Figure out the ideal pivot block. Note, that this goalpost may move if the 1369 // sync takes long enough for the chain head to move significantly. 1370 pivot := uint64(0) 1371 if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) { 1372 pivot = height - uint64(fsMinFullBlocks) 1373 } 1374 // To cater for moving pivot points, track the pivot block and subsequently 1375 // accumulated download results separatey. 1376 var ( 1377 oldPivot *fetchResult // Locked in pivot block, might change eventually 1378 oldTail []*fetchResult // Downloaded content after the pivot 1379 ) 1380 for { 1381 // Wait for the next batch of downloaded data to be available, and if the pivot 1382 // block became stale, move the goalpost 1383 results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness 1384 if len(results) == 0 { 1385 // If pivot sync is done, stop 1386 if oldPivot == nil { 1387 return stateSync.Cancel() 1388 } 1389 // If sync failed, stop 1390 select { 1391 case <-d.cancelCh: 1392 return stateSync.Cancel() 1393 default: 1394 } 1395 } 1396 if d.chainInsertHook != nil { 1397 d.chainInsertHook(results) 1398 } 1399 if oldPivot != nil { 1400 results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) 1401 } 1402 // Split around the pivot block and process the two sides via fast/full sync 1403 if atomic.LoadInt32(&d.committed) == 0 { 1404 latest = results[len(results)-1].Header 1405 if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) { 1406 d.logger.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks)) 1407 pivot = height - uint64(fsMinFullBlocks) 1408 } 1409 } 1410 P, beforeP, afterP := splitAroundPivot(pivot, results) 1411 if err := d.commitFastSyncData(beforeP, stateSync); err != nil { 1412 return err 1413 } 1414 if P != nil { 1415 // If new pivot block found, cancel old state retrieval and restart 1416 if oldPivot != P { 1417 stateSync.Cancel() 1418 1419 stateSync = d.syncState(P.Header.Root) 1420 defer stateSync.Cancel() 1421 go func() { 1422 if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { 1423 d.queue.Close() // wake up WaitResults 1424 } 1425 }() 1426 oldPivot = P 1427 } 1428 // Wait for completion, occasionally checking for pivot staleness 1429 select { 1430 case <-stateSync.done: 1431 if stateSync.err != nil { 1432 return stateSync.err 1433 } 1434 if err := d.commitPivotBlock(P); err != nil { 1435 return err 1436 } 1437 oldPivot = nil 1438 1439 case <-time.After(time.Second): 1440 oldTail = afterP 1441 continue 1442 } 1443 } 1444 // Fast sync done, pivot commit done, full import 1445 if err := d.importBlockResults(afterP); err != nil { 1446 return err 1447 } 1448 } 1449 } 1450 1451 func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) { 1452 for _, result := range results { 1453 num := result.Header.Number.Uint64() 1454 switch { 1455 case num < pivot: 1456 before = append(before, result) 1457 case num == pivot: 1458 p = result 1459 default: 1460 after = append(after, result) 1461 } 1462 } 1463 return p, before, after 1464 } 1465 1466 func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error { 1467 // Check for any early termination requests 1468 if len(results) == 0 { 1469 return nil 1470 } 1471 select { 1472 case <-d.quitCh: 1473 return errCancelContentProcessing 1474 case <-stateSync.done: 1475 if err := stateSync.Wait(); err != nil { 1476 return err 1477 } 1478 default: 1479 } 1480 // Retrieve the a batch of results to import 1481 first, last := results[0].Header, results[len(results)-1].Header 1482 d.logger.Debug("Inserting fast-sync blocks", "items", len(results), 1483 "firstnum", first.Number, "firsthash", first.Hash(), 1484 "lastnumn", last.Number, "lasthash", last.Hash(), 1485 ) 1486 blocks := make([]*types.Block, len(results)) 1487 receipts := make([]types.Receipts, len(results)) 1488 for i, result := range results { 1489 blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles) 1490 receipts[i] = result.Receipts 1491 } 1492 if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil { 1493 d.logger.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) 1494 return errInvalidChain 1495 } 1496 return nil 1497 } 1498 1499 func (d *Downloader) commitPivotBlock(result *fetchResult) error { 1500 block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles) 1501 d.logger.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash()) 1502 if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}); err != nil { 1503 return err 1504 } 1505 if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil { 1506 return err 1507 } 1508 atomic.StoreInt32(&d.committed, 1) 1509 return nil 1510 } 1511 1512 // DeliverHeaders injects a new batch of block headers received from a remote 1513 // node into the download schedule. 1514 func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) { 1515 return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter) 1516 } 1517 1518 // DeliverBodies injects a new batch of block bodies received from a remote node. 1519 func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) { 1520 return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter) 1521 } 1522 1523 // DeliverReceipts injects a new batch of receipts received from a remote node. 1524 func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) { 1525 return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter) 1526 } 1527 1528 // DeliverNodeData injects a new batch of node state data received from a remote node. 1529 func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) { 1530 return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter) 1531 } 1532 1533 // deliver injects a new batch of data received from a remote node. 1534 func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) { 1535 // Update the delivery metrics for both good and failed deliveries 1536 inMeter.Mark(int64(packet.Items())) 1537 defer func() { 1538 if err != nil { 1539 dropMeter.Mark(int64(packet.Items())) 1540 } 1541 }() 1542 // Deliver or abort if the sync is canceled while queuing 1543 d.cancelLock.RLock() 1544 cancel := d.cancelCh 1545 d.cancelLock.RUnlock() 1546 if cancel == nil { 1547 return errNoSyncActive 1548 } 1549 select { 1550 case destCh <- packet: 1551 return nil 1552 case <-cancel: 1553 return errNoSyncActive 1554 } 1555 } 1556 1557 // qosTuner is the quality of service tuning loop that occasionally gathers the 1558 // peer latency statistics and updates the estimated request round trip time. 1559 func (d *Downloader) qosTuner() { 1560 for { 1561 // Retrieve the current median RTT and integrate into the previoust target RTT 1562 rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT())) 1563 atomic.StoreUint64(&d.rttEstimate, uint64(rtt)) 1564 1565 // A new RTT cycle passed, increase our confidence in the estimated RTT 1566 conf := atomic.LoadUint64(&d.rttConfidence) 1567 conf = conf + (1000000-conf)/2 1568 atomic.StoreUint64(&d.rttConfidence, conf) 1569 1570 // Log the new QoS values and sleep until the next RTT 1571 d.logger.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL()) 1572 select { 1573 case <-d.quitCh: 1574 return 1575 case <-time.After(rtt): 1576 } 1577 } 1578 } 1579 1580 // qosReduceConfidence is meant to be called when a new peer joins the downloader's 1581 // peer set, needing to reduce the confidence we have in out QoS estimates. 1582 func (d *Downloader) qosReduceConfidence() { 1583 // If we have a single peer, confidence is always 1 1584 peers := uint64(d.peers.Len()) 1585 if peers == 0 { 1586 // Ensure peer connectivity races don't catch us off guard 1587 return 1588 } 1589 if peers == 1 { 1590 atomic.StoreUint64(&d.rttConfidence, 1000000) 1591 return 1592 } 1593 // If we have a ton of peers, don't drop confidence) 1594 if peers >= uint64(qosConfidenceCap) { 1595 return 1596 } 1597 // Otherwise drop the confidence factor 1598 conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers 1599 if float64(conf)/1000000 < rttMinConfidence { 1600 conf = uint64(rttMinConfidence * 1000000) 1601 } 1602 atomic.StoreUint64(&d.rttConfidence, conf) 1603 1604 rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate)) 1605 d.logger.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL()) 1606 } 1607 1608 // requestRTT returns the current target round trip time for a download request 1609 // to complete in. 1610 // 1611 // Note, the returned RTT is .9 of the actually estimated RTT. The reason is that 1612 // the downloader tries to adapt queries to the RTT, so multiple RTT values can 1613 // be adapted to, but smaller ones are preffered (stabler download stream). 1614 func (d *Downloader) requestRTT() time.Duration { 1615 return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10 1616 } 1617 1618 // requestTTL returns the current timeout allowance for a single download request 1619 // to finish under. 1620 func (d *Downloader) requestTTL() time.Duration { 1621 var ( 1622 rtt = time.Duration(atomic.LoadUint64(&d.rttEstimate)) 1623 conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0 1624 ) 1625 ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf) 1626 if ttl > ttlLimit { 1627 ttl = ttlLimit 1628 } 1629 return ttl 1630 }