github.com/Gessiux/neatchain@v1.3.1/neatptc/downloader/downloader.go 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package downloader contains the manual full chain synchronisation. 18 package downloader 19 20 import ( 21 "errors" 22 "fmt" 23 "math/big" 24 "sync" 25 "sync/atomic" 26 "time" 27 28 "github.com/Gessiux/neatchain" 29 "github.com/Gessiux/neatchain/chain/core/rawdb" 30 "github.com/Gessiux/neatchain/chain/core/types" 31 "github.com/Gessiux/neatchain/chain/log" 32 "github.com/Gessiux/neatchain/neatdb" 33 "github.com/Gessiux/neatchain/params" 34 "github.com/Gessiux/neatchain/utilities/common" 35 "github.com/Gessiux/neatchain/utilities/event" 36 "github.com/Gessiux/neatchain/utilities/metrics" 37 ) 38 39 var ( 40 MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request 41 MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request 42 MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request 43 MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly 44 MaxBodyFetch = 128 // Amount of block bodies to be fetched per retrieval request 45 MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request 46 MaxStateFetch = 384 // Amount of node state values to allow fetching per request 47 48 MaxForkAncestry = 3 * params.EpochDuration // Maximum chain reorganisation 49 rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests 50 rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests 51 rttMinConfidence = 0.1 // Worst confidence factor in our estimated RTT value 52 ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion 53 ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts 54 55 qosTuningPeers = 5 // Number of peers to tune based on (best peers) 56 qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence 57 qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value 58 59 maxQueuedHeaders = 32 * 1024 // [neatptc/62] Maximum number of headers to queue for import (DOS protection) 60 maxHeadersProcess = 2048 // Number of header download results to import at once into the chain 61 maxResultsProcess = 2048 // Number of content download results to import at once into the chain 62 63 fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync 64 fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected 65 fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it 66 fsHeaderContCheck = 3 * time.Second // Time interval to check for header
continuations during state download 67 fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync 68 ) 69 70 var ( 71 errBusy = errors.New("busy") 72 errUnknownPeer = errors.New("peer is unknown or unhealthy") 73 errBadPeer = errors.New("action from bad peer ignored") 74 errStallingPeer = errors.New("peer is stalling") 75 errNoPeers = errors.New("no peers to keep download active") 76 errTimeout = errors.New("timeout") 77 errEmptyHeaderSet = errors.New("empty header set by peer") 78 errPeersUnavailable = errors.New("no peers available or all tried for download") 79 errInvalidAncestor = errors.New("retrieved ancestor is invalid") 80 errInvalidChain = errors.New("retrieved hash chain is invalid") 81 errInvalidBlock = errors.New("retrieved block is invalid") 82 errInvalidBody = errors.New("retrieved block body is invalid") 83 errInvalidReceipt = errors.New("retrieved receipt is invalid") 84 errCancelBlockFetch = errors.New("block download canceled (requested)") 85 errCancelHeaderFetch = errors.New("block header download canceled (requested)") 86 errCancelBodyFetch = errors.New("block body download canceled (requested)") 87 errCancelReceiptFetch = errors.New("receipt download canceled (requested)") 88 errCancelStateFetch = errors.New("state data download canceled (requested)") 89 errCancelHeaderProcessing = errors.New("header processing canceled (requested)") 90 errCancelContentProcessing = errors.New("content processing canceled (requested)") 91 errNoSyncActive = errors.New("no sync active") 92 errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)") 93 ) 94 95 type Downloader struct { 96 mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle) 97 mux *event.TypeMux // Event multiplexer to announce sync operation events 98 99 queue *queue // Scheduler for selecting the hashes to download 100 peers *peerSet // Set of active peers from which download can proceed 101 stateDB neatdb.Database 102 103 rttEstimate uint64 // Round trip time to target for download requests 104 rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops) 105 106 // Statistics 107 syncStatsChainOrigin uint64 // Origin block number where syncing started at 108 syncStatsChainHeight uint64 // Highest block number known when syncing started 109 syncStatsState stateSyncStats 110 syncStatsLock sync.RWMutex // Lock protecting the sync stats fields 111 112 lightchain LightChain 113 blockchain BlockChain 114 115 // Callbacks 116 dropPeer peerDropFn // Drops a peer for misbehaving 117 118 // Status 119 synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing 120 synchronising int32 121 notified int32 122 committed int32 123 124 // Channels 125 headerCh chan dataPack // [neatptc/62] Channel receiving inbound block headers 126 bodyCh chan dataPack // [neatptc/62] Channel receiving inbound block bodies 127 receiptCh chan dataPack // [neatptc/63] Channel receiving inbound receipts 128 bodyWakeCh chan bool // [neatptc/62] Channel to signal the block body fetcher of new tasks 129 receiptWakeCh chan bool // [neatptc/63] Channel to signal the receipt fetcher of new tasks 130 headerProcCh chan []*types.Header // [neatptc/62] Channel to feed the header processor new tasks 131 132 // for stateFetcher 133 stateSyncStart chan *stateSync 134 trackStateReq chan *stateReq 135 stateCh chan dataPack // [neatptc/63] Channel receiving inbound node state data 136 137 // Cancellation and 
termination 138 cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) 139 cancelCh chan struct{} // Channel to cancel mid-flight syncs 140 cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers 141 142 quitCh chan struct{} // Quit channel to signal termination 143 quitLock sync.RWMutex // Lock to prevent double closes 144 145 // Testing hooks 146 syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run 147 bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch 148 receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch 149 chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) 150 151 logger log.Logger 152 } 153 154 // LightChain encapsulates functions required to synchronise a light chain. 155 type LightChain interface { 156 // HasHeader verifies a header's presence in the local chain. 157 HasHeader(common.Hash, uint64) bool 158 159 // GetHeaderByHash retrieves a header from the local chain. 160 GetHeaderByHash(common.Hash) *types.Header 161 162 // CurrentHeader retrieves the head header from the local chain. 163 CurrentHeader() *types.Header 164 165 // GetTd returns the total difficulty of a local block. 166 GetTd(common.Hash, uint64) *big.Int 167 168 // InsertHeaderChain inserts a batch of headers into the local chain. 169 InsertHeaderChain([]*types.Header, int) (int, error) 170 171 // Rollback removes a few recently added elements from the local chain. 172 Rollback([]common.Hash) 173 } 174 175 // BlockChain encapsulates functions required to sync a (full or fast) blockchain. 176 type BlockChain interface { 177 LightChain 178 179 // HasBlock verifies a block's presence in the local chain. 180 HasBlock(common.Hash, uint64) bool 181 182 // GetBlockByHash retrieves a block from the local chain. 183 GetBlockByHash(common.Hash) *types.Block 184 185 // CurrentBlock retrieves the head block from the local chain. 186 CurrentBlock() *types.Block 187 188 // CurrentFastBlock retrieves the head fast block from the local chain. 189 CurrentFastBlock() *types.Block 190 191 // FastSyncCommitHead directly commits the head block to a certain entity. 192 FastSyncCommitHead(common.Hash) error 193 194 // InsertChain inserts a batch of blocks into the local chain. 195 InsertChain(types.Blocks) (int, error) 196 197 // InsertReceiptChain inserts a batch of receipts into the local chain. 198 InsertReceiptChain(types.Blocks, []types.Receipts) (int, error) 199 } 200 201 // New creates a new downloader to fetch hashes and blocks from remote peers. 
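// A minimal wiring sketch for the constructor defined below (an assumption about
// typical call sites, not code taken from this repository): the caller supplies the
// sync mode, a state database, an event mux, the chain to import into, an optional
// light chain, a peer-drop callback and a logger.
//
//	dl := New(FullSync, stateDb, eventMux, blockchain, nil, dropPeer, logger)
//	defer dl.Terminate()
//
// Passing a nil LightChain is allowed; the constructor falls back to the full
// BlockChain in that case.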
202 func New(mode SyncMode, stateDb neatdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, logger log.Logger) *Downloader { 203 if lightchain == nil { 204 lightchain = chain 205 } 206 207 dl := &Downloader{ 208 mode: mode, 209 stateDB: stateDb, 210 mux: mux, 211 queue: newQueue(), 212 peers: newPeerSet(), 213 rttEstimate: uint64(rttMaxEstimate), 214 rttConfidence: uint64(1000000), 215 blockchain: chain, 216 lightchain: lightchain, 217 dropPeer: dropPeer, 218 headerCh: make(chan dataPack, 1), 219 bodyCh: make(chan dataPack, 1), 220 receiptCh: make(chan dataPack, 1), 221 bodyWakeCh: make(chan bool, 1), 222 receiptWakeCh: make(chan bool, 1), 223 headerProcCh: make(chan []*types.Header, 1), 224 quitCh: make(chan struct{}), 225 stateCh: make(chan dataPack), 226 stateSyncStart: make(chan *stateSync), 227 syncStatsState: stateSyncStats{ 228 processed: rawdb.ReadFastTrieProgress(stateDb), 229 }, 230 trackStateReq: make(chan *stateReq), 231 232 logger: logger, 233 } 234 go dl.qosTuner() 235 go dl.stateFetcher() 236 return dl 237 } 238 239 // Progress retrieves the synchronisation boundaries, specifically the origin 240 // block where synchronisation started at (may have failed/suspended); the block 241 // or header sync is currently at; and the latest known block which the sync targets. 242 // 243 // In addition, during the state download phase of fast synchronisation the number 244 // of processed and the total number of known states are also returned. Otherwise 245 // these are zero. 246 func (d *Downloader) Progress() neatchain.SyncProgress { 247 // Lock the current stats and return the progress 248 d.syncStatsLock.RLock() 249 defer d.syncStatsLock.RUnlock() 250 251 current := uint64(0) 252 switch d.mode { 253 case FullSync: 254 current = d.blockchain.CurrentBlock().NumberU64() 255 case FastSync: 256 current = d.blockchain.CurrentFastBlock().NumberU64() 257 } 258 return neatchain.SyncProgress{ 259 StartingBlock: d.syncStatsChainOrigin, 260 CurrentBlock: current, 261 HighestBlock: d.syncStatsChainHeight, 262 PulledStates: d.syncStatsState.processed, 263 KnownStates: d.syncStatsState.processed + d.syncStatsState.pending, 264 } 265 } 266 267 // Synchronising returns whether the downloader is currently retrieving blocks. 268 func (d *Downloader) Synchronising() bool { 269 return atomic.LoadInt32(&d.synchronising) > 0 270 } 271 272 // RegisterPeer injects a new download peer into the set of block source to be 273 // used for fetching hashes and blocks from. 274 func (d *Downloader) RegisterPeer(id string, version int, peer Peer) error { 275 logger := d.logger.New("peer", id) 276 logger.Trace("Registering sync peer") 277 if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil { 278 logger.Error("Failed to register sync peer", "err", err) 279 return err 280 } 281 d.qosReduceConfidence() 282 283 return nil 284 } 285 286 // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer. 287 func (d *Downloader) RegisterLightPeer(id string, version int, peer LightPeer) error { 288 return d.RegisterPeer(id, version, &lightPeerWrapper{peer}) 289 } 290 291 // UnregisterPeer remove a peer from the known list, preventing any action from 292 // the specified peer. An effort is also made to return any pending fetches into 293 // the queue. 
294 func (d *Downloader) UnregisterPeer(id string) error { 295 // Unregister the peer from the active peer set and revoke any fetch tasks 296 logger := d.logger.New("peer", id) 297 logger.Trace("Unregistering sync peer") 298 if err := d.peers.Unregister(id); err != nil { 299 logger.Error("Failed to unregister sync peer", "err", err) 300 return err 301 } 302 d.queue.Revoke(id) 303 304 // If this peer was the master peer, abort sync immediately 305 d.cancelLock.RLock() 306 master := id == d.cancelPeer 307 d.cancelLock.RUnlock() 308 309 if master { 310 d.Cancel() 311 } 312 return nil 313 } 314 315 // Synchronise tries to sync up our local block chain with a remote peer, both 316 // adding various sanity checks as well as wrapping it with various log entries. 317 func (d *Downloader) Synchronise(id string, head common.Hash, td *big.Int, mode SyncMode) error { 318 err := d.synchronise(id, head, td, mode) 319 switch err { 320 case nil: 321 case errBusy: 322 323 case errTimeout, errBadPeer, errStallingPeer, 324 errEmptyHeaderSet, errPeersUnavailable, errTooOld, 325 errInvalidAncestor, errInvalidChain: 326 d.logger.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) 327 if d.dropPeer == nil { 328 // The dropPeer method is nil when `--copydb` is used for a local copy. 329 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 330 d.logger.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id) 331 } else { 332 d.dropPeer(id) 333 } 334 default: 335 d.logger.Warn("Synchronisation failed, retrying", "err", err) 336 } 337 return err 338 } 339 340 // synchronise will select the peer and use it for synchronising. If an empty string is given 341 // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the 342 // checks fail an error will be returned. 
This method is synchronous 343 func (d *Downloader) synchronise(id string, hash common.Hash, td *big.Int, mode SyncMode) error { 344 // Mock out the synchronisation if testing 345 if d.synchroniseMock != nil { 346 return d.synchroniseMock(id, hash) 347 } 348 // Make sure only one goroutine is ever allowed past this point at once 349 if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { 350 return errBusy 351 } 352 defer atomic.StoreInt32(&d.synchronising, 0) 353 354 // Post a user notification of the sync (only once per session) 355 if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { 356 d.logger.Info("Block synchronisation started") 357 } 358 // Reset the queue, peer set and wake channels to clean any internal leftover state 359 d.queue.Reset() 360 d.peers.Reset() 361 362 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 363 select { 364 case <-ch: 365 default: 366 } 367 } 368 for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} { 369 for empty := false; !empty; { 370 select { 371 case <-ch: 372 default: 373 empty = true 374 } 375 } 376 } 377 for empty := false; !empty; { 378 select { 379 case <-d.headerProcCh: 380 default: 381 empty = true 382 } 383 } 384 // Create cancel channel for aborting mid-flight and mark the master peer 385 d.cancelLock.Lock() 386 d.cancelCh = make(chan struct{}) 387 d.cancelPeer = id 388 d.cancelLock.Unlock() 389 390 defer d.Cancel() // No matter what, we can't leave the cancel channel open 391 392 // Set the requested sync mode, unless it's forbidden 393 d.mode = mode 394 395 // Retrieve the origin peer and initiate the downloading process 396 p := d.peers.Peer(id) 397 if p == nil { 398 return errUnknownPeer 399 } 400 return d.syncWithPeer(p, hash, td) 401 } 402 403 // syncWithPeer starts a block synchronization based on the hash chain from the 404 // specified peer and head hash. 
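// In outline (a reading of the function below, not additional behaviour): the sync
// fetches the remote head (fetchHeight), locates the common ancestor (findAncestor),
// derives a fast sync pivot where applicable, and then runs the header, body and
// receipt fetchers plus the header processor concurrently via spawnSync, returning
// the first error any of them produces.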
405 func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) { 406 d.mux.Post(StartEvent{}) 407 defer func() { 408 // reset on error 409 if err != nil { 410 d.mux.Post(FailedEvent{err}) 411 } else { 412 d.mux.Post(DoneEvent{}) 413 } 414 }() 415 if p.version < 62 { 416 return errTooOld 417 } 418 419 d.logger.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", d.mode) 420 defer func(start time.Time) { 421 d.logger.Debug("Synchronisation terminated", "elapsed", time.Since(start)) 422 }(time.Now()) 423 424 // Look up the sync boundaries: the common ancestor and the target block 425 latest, err := d.fetchHeight(p) 426 if err != nil { 427 return err 428 } 429 height := latest.Number.Uint64() 430 431 origin, err := d.findAncestor(p, height) 432 if err != nil { 433 return err 434 } 435 d.syncStatsLock.Lock() 436 if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { 437 d.syncStatsChainOrigin = origin 438 } 439 d.syncStatsChainHeight = height 440 d.syncStatsLock.Unlock() 441 442 // Ensure our origin point is below any fast sync pivot point 443 pivot := uint64(0) 444 if d.mode == FastSync { 445 if height <= uint64(fsMinFullBlocks) { 446 origin = 0 447 } else { 448 pivot = height - uint64(fsMinFullBlocks) 449 if pivot <= origin { 450 origin = pivot - 1 451 } 452 } 453 } 454 d.committed = 1 455 if d.mode == FastSync && pivot != 0 { 456 d.committed = 0 457 } 458 // Initiate the sync using a concurrent header and content retrieval algorithm 459 d.queue.Prepare(origin+1, d.mode) 460 if d.syncInitHook != nil { 461 d.syncInitHook(origin, height) 462 } 463 464 fetchers := []func() error{ 465 func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved 466 func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync 467 func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync 468 func() error { return d.processHeaders(origin+1, pivot, td) }, 469 } 470 if d.mode == FastSync { 471 fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) }) 472 } else if d.mode == FullSync { 473 fetchers = append(fetchers, d.processFullSyncContent) 474 } 475 return d.spawnSync(fetchers) 476 } 477 478 // spawnSync runs d.process and all given fetcher functions to completion in 479 // separate goroutines, returning the first error that appears. 480 func (d *Downloader) spawnSync(fetchers []func() error) error { 481 var wg sync.WaitGroup 482 errc := make(chan error, len(fetchers)) 483 wg.Add(len(fetchers)) 484 for _, fn := range fetchers { 485 fn := fn 486 go func() { defer wg.Done(); errc <- fn() }() 487 } 488 // Wait for the first error, then terminate the others. 489 var err error 490 for i := 0; i < len(fetchers); i++ { 491 if i == len(fetchers)-1 { 492 // Close the queue when all fetchers have exited. 493 // This will cause the block processor to end when 494 // it has processed the queue. 495 d.queue.Close() 496 } 497 if err = <-errc; err != nil { 498 break 499 } 500 } 501 d.queue.Close() 502 d.Cancel() 503 wg.Wait() 504 return err 505 } 506 507 // Cancel cancels all of the operations and resets the queue. It returns true 508 // if the cancel operation was completed. 
509 func (d *Downloader) Cancel() { 510 // Close the current cancel channel 511 d.cancelLock.Lock() 512 if d.cancelCh != nil { 513 select { 514 case <-d.cancelCh: 515 // Channel was already closed 516 default: 517 close(d.cancelCh) 518 } 519 } 520 d.cancelLock.Unlock() 521 } 522 523 // Terminate interrupts the downloader, canceling all pending operations. 524 // The downloader cannot be reused after calling Terminate. 525 func (d *Downloader) Terminate() { 526 // Close the termination channel (make sure double close is allowed) 527 d.quitLock.Lock() 528 select { 529 case <-d.quitCh: 530 default: 531 close(d.quitCh) 532 } 533 d.quitLock.Unlock() 534 535 // Cancel any pending download requests 536 d.Cancel() 537 } 538 539 // fetchHeight retrieves the head header of the remote peer to aid in estimating 540 // the total time a pending synchronisation would take. 541 func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) { 542 p.log.Debug("Retrieving remote chain height") 543 544 // Request the advertised remote head block and wait for the response 545 head, _ := p.peer.Head() 546 go p.peer.RequestHeadersByHash(head, 1, 0, false) 547 548 ttl := d.requestTTL() 549 timeout := time.After(ttl) 550 for { 551 select { 552 case <-d.cancelCh: 553 return nil, errCancelBlockFetch 554 555 case packet := <-d.headerCh: 556 // Discard anything not from the origin peer 557 if packet.PeerId() != p.id { 558 d.logger.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) 559 break 560 } 561 // Make sure the peer actually gave something valid 562 headers := packet.(*headerPack).headers 563 if len(headers) != 1 { 564 p.log.Debug("Multiple headers for single request", "headers", len(headers)) 565 return nil, errBadPeer 566 } 567 head := headers[0] 568 p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash()) 569 return head, nil 570 571 case <-timeout: 572 p.log.Debug("Waiting for head header timed out", "elapsed", ttl) 573 return nil, errTimeout 574 575 case <-d.bodyCh: 576 case <-d.receiptCh: 577 // Out of bounds delivery, ignore 578 } 579 } 580 } 581 582 // findAncestor tries to locate the common ancestor link of the local chain and 583 // a remote peers blockchain. In the general case when our node was in sync and 584 // on the correct chain, checking the top N links should already get us a match. 585 // In the rare scenario when we ended up on a long reorganisation (i.e. none of 586 // the head links match), we do a binary search to find the common ancestor. 
587 func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) { 588 // Figure out the valid ancestor range to prevent rewrite attacks 589 floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64() 590 591 if d.mode == FullSync { 592 ceil = d.blockchain.CurrentBlock().NumberU64() 593 } else if d.mode == FastSync { 594 ceil = d.blockchain.CurrentFastBlock().NumberU64() 595 } 596 if ceil >= MaxForkAncestry { 597 floor = int64(ceil - MaxForkAncestry) 598 } 599 p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height) 600 601 // Request the topmost blocks to short circuit binary ancestor lookup 602 head := ceil 603 if head > height { 604 head = height 605 } 606 from := int64(head) - int64(MaxHeaderFetch) 607 if from < 0 { 608 from = 0 609 } 610 // Span out with 15 block gaps into the future to catch bad head reports 611 limit := 2 * MaxHeaderFetch / 16 612 count := 1 + int((int64(ceil)-from)/16) 613 if count > limit { 614 count = limit 615 } 616 go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false) 617 618 // Wait for the remote response to the head fetch 619 number, hash := uint64(0), common.Hash{} 620 621 ttl := d.requestTTL() 622 timeout := time.After(ttl) 623 624 for finished := false; !finished; { 625 select { 626 case <-d.cancelCh: 627 return 0, errCancelHeaderFetch 628 629 case packet := <-d.headerCh: 630 // Discard anything not from the origin peer 631 if packet.PeerId() != p.id { 632 d.logger.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) 633 break 634 } 635 // Make sure the peer actually gave something valid 636 headers := packet.(*headerPack).headers 637 if len(headers) == 0 { 638 p.log.Warn("Empty head header set") 639 return 0, errEmptyHeaderSet 640 } 641 // Make sure the peer's reply conforms to the request 642 for i := 0; i < len(headers); i++ { 643 if number := headers[i].Number.Int64(); number != from+int64(i)*16 { 644 p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number) 645 return 0, errInvalidChain 646 } 647 } 648 // Check if a common ancestor was found 649 finished = true 650 for i := len(headers) - 1; i >= 0; i-- { 651 // Skip any headers that underflow/overflow our requested set 652 if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil { 653 continue 654 } 655 // Otherwise check if we already know the header or not 656 if (d.mode == FullSync && d.blockchain.HasBlock(headers[i].Hash(), headers[i].Number.Uint64())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) { 657 number, hash = headers[i].Number.Uint64(), headers[i].Hash() 658 659 // If every header is known, even future ones, the peer straight out lied about its head 660 if number > height && i == limit-1 { 661 p.log.Warn("Lied about chain head", "reported", height, "found", number) 662 return 0, errStallingPeer 663 } 664 break 665 } 666 } 667 668 case <-timeout: 669 p.log.Debug("Waiting for head header timed out", "elapsed", ttl) 670 return 0, errTimeout 671 672 case <-d.bodyCh: 673 case <-d.receiptCh: 674 // Out of bounds delivery, ignore 675 } 676 } 677 // If the head fetch already found an ancestor, return 678 if !common.EmptyHash(hash) { 679 if int64(number) <= floor { 680 p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor) 681 return 0, errInvalidAncestor 682 } 683 p.log.Debug("Found common ancestor", "number", number, "hash", hash) 684 return number, nil 685 } 
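// A worked note on the probe spacing used above: with MaxHeaderFetch = 192 the scan
// starts 192 blocks below the lower of the two heads and requests headers 16 blocks
// apart (15-block gaps), capped at 2*192/16 = 24 probes, which lets it reach past the
// remote's reported head to catch bad head reports. If none of those headers are
// known locally, the binary search below narrows the [floor, head] interval by
// requesting a single header at the midpoint each round.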
686 // Ancestor not found, we need to binary search over our chain 687 start, end := uint64(0), head 688 if floor > 0 { 689 start = uint64(floor) 690 } 691 for start+1 < end { 692 // Split our chain interval in two, and request the hash to cross check 693 check := (start + end) / 2 694 695 ttl := d.requestTTL() 696 timeout := time.After(ttl) 697 698 go p.peer.RequestHeadersByNumber(check, 1, 0, false) 699 700 // Wait until a reply arrives to this request 701 for arrived := false; !arrived; { 702 select { 703 case <-d.cancelCh: 704 return 0, errCancelHeaderFetch 705 706 case packer := <-d.headerCh: 707 // Discard anything not from the origin peer 708 if packer.PeerId() != p.id { 709 d.logger.Debug("Received headers from incorrect peer", "peer", packer.PeerId()) 710 break 711 } 712 // Make sure the peer actually gave something valid 713 headers := packer.(*headerPack).headers 714 if len(headers) != 1 { 715 p.log.Debug("Multiple headers for single request", "headers", len(headers)) 716 return 0, errBadPeer 717 } 718 arrived = true 719 720 // Modify the search interval based on the response 721 if (d.mode == FullSync && !d.blockchain.HasBlock(headers[0].Hash(), headers[0].Number.Uint64())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) { 722 end = check 723 break 724 } 725 header := d.lightchain.GetHeaderByHash(headers[0].Hash()) // Independent of sync mode, header surely exists 726 if header.Number.Uint64() != check { 727 p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) 728 return 0, errBadPeer 729 } 730 start = check 731 732 case <-timeout: 733 p.log.Debug("Waiting for search header timed out", "elapsed", ttl) 734 return 0, errTimeout 735 736 case <-d.bodyCh: 737 case <-d.receiptCh: 738 // Out of bounds delivery, ignore 739 } 740 } 741 } 742 // Ensure valid ancestry and return 743 if int64(start) <= floor { 744 p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor) 745 return 0, errInvalidAncestor 746 } 747 p.log.Debug("Found common ancestor", "number", start, "hash", hash) 748 return start, nil 749 } 750 751 // fetchHeaders keeps retrieving headers concurrently from the number 752 // requested, until no more are returned, potentially throttling on the way. To 753 // facilitate concurrency but still protect against malicious nodes sending bad 754 // headers, we construct a header chain skeleton using the "origin" peer we are 755 // syncing with, and fill in the missing headers using anyone else. Headers from 756 // other peers are only accepted if they map cleanly to the skeleton. If no one 757 // can fill in the skeleton - not even the origin peer - it's assumed invalid and 758 // the origin is dropped. 
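// A small numeric illustration of the skeleton scheme, derived from the constants
// above rather than new behaviour: with MaxHeaderFetch = 192 and MaxSkeletonSize = 128,
// a skeleton request starting at `from` asks for headers from+191, from+383, ...
// (every 192nd header, at most 128 of them), pinning a span of up to 128*192 = 24576
// headers whose gaps are then filled concurrently by fillHeaderSkeleton using any
// idle peers.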
759 func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64) error { 760 p.log.Debug("Directing header downloads", "origin", from) 761 defer p.log.Debug("Header download terminated") 762 763 // Create a timeout timer, and the associated header fetcher 764 skeleton := true // Skeleton assembly phase or finishing up 765 request := time.Now() // time of the last skeleton fetch request 766 timeout := time.NewTimer(0) // timer to dump a non-responsive active peer 767 <-timeout.C // timeout channel should be initially empty 768 defer timeout.Stop() 769 770 var ttl time.Duration 771 getHeaders := func(from uint64) { 772 request = time.Now() 773 774 ttl = d.requestTTL() 775 timeout.Reset(ttl) 776 777 if skeleton { 778 p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) 779 go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) 780 } else { 781 p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) 782 go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false) 783 } 784 } 785 // Start pulling the header chain skeleton until all is done 786 getHeaders(from) 787 788 for { 789 select { 790 case <-d.cancelCh: 791 return errCancelHeaderFetch 792 793 case packet := <-d.headerCh: 794 // Make sure the active peer is giving us the skeleton headers 795 if packet.PeerId() != p.id { 796 d.logger.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId()) 797 break 798 } 799 headerReqTimer.UpdateSince(request) 800 timeout.Stop() 801 802 // If the skeleton's finished, pull any remaining head headers directly from the origin 803 if packet.Items() == 0 && skeleton { 804 skeleton = false 805 getHeaders(from) 806 continue 807 } 808 // If no more headers are inbound, notify the content fetchers and return 809 if packet.Items() == 0 { 810 // Don't abort header fetches while the pivot is downloading 811 if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { 812 p.log.Debug("No headers, waiting for pivot commit") 813 select { 814 case <-time.After(fsHeaderContCheck): 815 getHeaders(from) 816 continue 817 case <-d.cancelCh: 818 return errCancelHeaderFetch 819 } 820 } 821 // Pivot done (or not in fast sync) and no more headers, terminate the process 822 p.log.Debug("No more headers available") 823 select { 824 case d.headerProcCh <- nil: 825 return nil 826 case <-d.cancelCh: 827 return errCancelHeaderFetch 828 } 829 } 830 headers := packet.(*headerPack).headers 831 832 // If we received a skeleton batch, resolve internals concurrently 833 if skeleton { 834 filled, proced, err := d.fillHeaderSkeleton(from, headers) 835 if err != nil { 836 p.log.Debug("Skeleton chain invalid", "err", err) 837 return errInvalidChain 838 } 839 headers = filled[proced:] 840 from += uint64(proced) 841 } 842 // Insert all the new headers and fetch the next batch 843 if len(headers) > 0 { 844 p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) 845 select { 846 case d.headerProcCh <- headers: 847 case <-d.cancelCh: 848 return errCancelHeaderFetch 849 } 850 from += uint64(len(headers)) 851 } 852 getHeaders(from) 853 854 case <-timeout.C: 855 if d.dropPeer == nil { 856 // The dropPeer method is nil when `--copydb` is used for a local copy. 857 // Timeouts can occur if e.g. 
compaction hits at the wrong time, and can be ignored 858 p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id) 859 break 860 } 861 // Header retrieval timed out, consider the peer bad and drop 862 p.log.Debug("Header request timed out", "elapsed", ttl) 863 headerTimeoutMeter.Mark(1) 864 d.dropPeer(p.id) 865 866 // Finish the sync gracefully instead of dumping the gathered data though 867 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 868 select { 869 case ch <- false: 870 case <-d.cancelCh: 871 } 872 } 873 select { 874 case d.headerProcCh <- nil: 875 case <-d.cancelCh: 876 } 877 return errBadPeer 878 } 879 } 880 } 881 882 // fillHeaderSkeleton concurrently retrieves headers from all our available peers 883 // and maps them to the provided skeleton header chain. 884 // 885 // Any partial results from the beginning of the skeleton is (if possible) forwarded 886 // immediately to the header processor to keep the rest of the pipeline full even 887 // in the case of header stalls. 888 // 889 // The method returs the entire filled skeleton and also the number of headers 890 // already forwarded for processing. 891 func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) { 892 d.logger.Debug("Filling up skeleton", "from", from) 893 d.queue.ScheduleSkeleton(from, skeleton) 894 895 var ( 896 deliver = func(packet dataPack) (int, error) { 897 pack := packet.(*headerPack) 898 return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh) 899 } 900 expire = func() map[string]int { return d.queue.ExpireHeaders(d.requestTTL()) } 901 throttle = func() bool { return false } 902 reserve = func(p *peerConnection, count int) (*fetchRequest, bool, error) { 903 return d.queue.ReserveHeaders(p, count), false, nil 904 } 905 fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) } 906 capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) } 907 setIdle = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) } 908 ) 909 err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire, 910 d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve, 911 nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers") 912 913 d.logger.Debug("Skeleton fill terminated", "err", err) 914 915 filled, proced := d.queue.RetrieveHeaders() 916 return filled, proced, err 917 } 918 919 // fetchBodies iteratively downloads the scheduled block bodies, taking any 920 // available peers, reserving a chunk of blocks for each, waiting for delivery 921 // and also periodically checking for timeouts. 
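// fetchBodies, fetchReceipts and fillHeaderSkeleton are thin wrappers around
// fetchParts below: each supplies type specific deliver, expire, reserve, fetch,
// capacity and setIdle callbacks, while the scheduling, throttling and timeout
// handling is shared.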
922 func (d *Downloader) fetchBodies(from uint64) error { 923 d.logger.Debug("Downloading block bodies", "origin", from) 924 925 var ( 926 deliver = func(packet dataPack) (int, error) { 927 pack := packet.(*bodyPack) 928 return d.queue.DeliverBodies(pack.peerId, pack.transactions, pack.uncles) 929 } 930 expire = func() map[string]int { return d.queue.ExpireBodies(d.requestTTL()) } 931 fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) } 932 capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) } 933 setIdle = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) } 934 ) 935 err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, 936 d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies, 937 d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies") 938 939 d.logger.Debug("Block body download terminated", "err", err) 940 return err 941 } 942 943 // fetchReceipts iteratively downloads the scheduled block receipts, taking any 944 // available peers, reserving a chunk of receipts for each, waiting for delivery 945 // and also periodically checking for timeouts. 946 func (d *Downloader) fetchReceipts(from uint64) error { 947 d.logger.Debug("Downloading transaction receipts", "origin", from) 948 949 var ( 950 deliver = func(packet dataPack) (int, error) { 951 pack := packet.(*receiptPack) 952 return d.queue.DeliverReceipts(pack.peerId, pack.receipts) 953 } 954 expire = func() map[string]int { return d.queue.ExpireReceipts(d.requestTTL()) } 955 fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) } 956 capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) } 957 setIdle = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) } 958 ) 959 err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, 960 d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts, 961 d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts") 962 963 d.logger.Debug("Transaction receipt download terminated", "err", err) 964 return err 965 } 966 967 // fetchParts iteratively downloads scheduled block parts, taking any available 968 // peers, reserving a chunk of fetch requests for each, waiting for delivery and 969 // also periodically checking for timeouts. 970 // 971 // As the scheduling/timeout logic mostly is the same for all downloaded data 972 // types, this method is used by each for data gathering and is instrumented with 973 // various callbacks to handle the slight differences between processing them. 
974 // 975 // The instrumentation parameters: 976 // - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer) 977 // - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers) 978 // - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`) 979 // - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed) 980 // - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping) 981 // - pending: task callback for the number of requests still needing download (detect completion/non-completability) 982 // - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish) 983 // - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use) 984 // - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions) 985 // - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic) 986 // - fetch: network callback to actually send a particular download request to a physical remote peer 987 // - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer) 988 // - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping) 989 // - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks 990 // - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping) 991 // - kind: textual label of the type being downloaded to display in log mesages 992 func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool, 993 expire func() map[string]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error), 994 fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int, 995 idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error { 996 997 // Create a ticker to detect expired retrieval tasks 998 ticker := time.NewTicker(100 * time.Millisecond) 999 defer ticker.Stop() 1000 1001 update := make(chan struct{}, 1) 1002 1003 // Prepare the queue and fetch block parts until the block header fetcher's done 1004 finished := false 1005 for { 1006 select { 1007 case <-d.cancelCh: 1008 return errCancel 1009 1010 case packet := <-deliveryCh: 1011 // If the peer was previously banned and failed to deliver its pack 1012 // in a reasonable time frame, ignore its message. 1013 if peer := d.peers.Peer(packet.PeerId()); peer != nil { 1014 // Deliver the received chunk of data and check chain validity 1015 accepted, err := deliver(packet) 1016 if err == errInvalidChain { 1017 return err 1018 } 1019 // Unless a peer delivered something completely else than requested (usually 1020 // caused by a timed out request which came through in the end), set it to 1021 // idle. If the delivery's stale, the peer should have already been idled. 
1022 if err != errStaleDelivery { 1023 setIdle(peer, accepted) 1024 } 1025 // Issue a log to the user to see what's going on 1026 switch { 1027 case err == nil && packet.Items() == 0: 1028 peer.log.Trace("Requested data not delivered", "type", kind) 1029 case err == nil: 1030 peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats()) 1031 default: 1032 peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err) 1033 } 1034 } 1035 // Blocks assembled, try to update the progress 1036 select { 1037 case update <- struct{}{}: 1038 default: 1039 } 1040 1041 case cont := <-wakeCh: 1042 // The header fetcher sent a continuation flag, check if it's done 1043 if !cont { 1044 finished = true 1045 } 1046 // Headers arrive, try to update the progress 1047 select { 1048 case update <- struct{}{}: 1049 default: 1050 } 1051 1052 case <-ticker.C: 1053 // Sanity check update the progress 1054 select { 1055 case update <- struct{}{}: 1056 default: 1057 } 1058 1059 case <-update: 1060 // Short circuit if we lost all our peers 1061 if d.peers.Len() == 0 { 1062 return errNoPeers 1063 } 1064 // Check for fetch request timeouts and demote the responsible peers 1065 for pid, fails := range expire() { 1066 if peer := d.peers.Peer(pid); peer != nil { 1067 // If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps 1068 // ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal times 1069 // out that sync wise we need to get rid of the peer. 1070 // 1071 // The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth 1072 // and latency of a peer separately, which requires pushing the measures capacity a bit and seeing 1073 // how response times reacts, to it always requests one more than the minimum (i.e. min 2). 1074 if fails > 2 { 1075 peer.log.Trace("Data delivery timed out", "type", kind) 1076 setIdle(peer, 0) 1077 } else { 1078 peer.log.Debug("Stalling delivery, dropping", "type", kind) 1079 if d.dropPeer == nil { 1080 // The dropPeer method is nil when `--copydb` is used for a local copy. 1081 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 1082 peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid) 1083 } else { 1084 d.dropPeer(pid) 1085 } 1086 } 1087 } 1088 } 1089 // If there's nothing more to fetch, wait or terminate 1090 if pending() == 0 { 1091 if !inFlight() && finished { 1092 d.logger.Debug("Data fetching completed", "type", kind) 1093 return nil 1094 } 1095 break 1096 } 1097 // Send a download request to all idle peers, until throttled 1098 progressed, throttled, running := false, false, inFlight() 1099 idles, total := idle() 1100 1101 for _, peer := range idles { 1102 // Short circuit if throttling activated 1103 if throttle() { 1104 throttled = true 1105 break 1106 } 1107 // Short circuit if there is no more available task. 1108 if pending() == 0 { 1109 break 1110 } 1111 // Reserve a chunk of fetches for a peer. A nil can mean either that 1112 // no more headers are available, or that the peer is known not to 1113 // have them. 
1114 request, progress, err := reserve(peer, capacity(peer)) 1115 if err != nil { 1116 return err 1117 } 1118 if progress { 1119 progressed = true 1120 } 1121 if request == nil { 1122 continue 1123 } 1124 if request.From > 0 { 1125 peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From) 1126 } else { 1127 peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number) 1128 } 1129 // Fetch the chunk and make sure any errors return the hashes to the queue 1130 if fetchHook != nil { 1131 fetchHook(request.Headers) 1132 } 1133 if err := fetch(peer, request); err != nil { 1134 // Although we could try and make an attempt to fix this, this error really 1135 // means that we've double allocated a fetch task to a peer. If that is the 1136 // case, the internal state of the downloader and the queue is very wrong so 1137 // better hard crash and note the error instead of silently accumulating into 1138 // a much bigger issue. 1139 panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind)) 1140 } 1141 running = true 1142 } 1143 // Make sure that we have peers available for fetching. If all peers have been tried 1144 // and all failed throw an error 1145 if !progressed && !throttled && !running && len(idles) == total && pending() > 0 { 1146 return errPeersUnavailable 1147 } 1148 } 1149 } 1150 } 1151 1152 // processHeaders takes batches of retrieved headers from an input channel and 1153 // keeps processing and scheduling them into the header chain and downloader's 1154 // queue until the stream ends or a failure occurs. 1155 func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error { 1156 // Keep a count of uncertain headers to roll back 1157 rollback := []*types.Header{} 1158 defer func() { 1159 if len(rollback) > 0 { 1160 // Flatten the headers and roll them back 1161 hashes := make([]common.Hash, len(rollback)) 1162 for i, header := range rollback { 1163 hashes[i] = header.Hash() 1164 } 1165 lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 1166 lastFastBlock = d.blockchain.CurrentFastBlock().Number() 1167 lastBlock = d.blockchain.CurrentBlock().Number() 1168 d.lightchain.Rollback(hashes) 1169 curFastBlock, curBlock := common.Big0, common.Big0 1170 curFastBlock = d.blockchain.CurrentFastBlock().Number() 1171 curBlock = d.blockchain.CurrentBlock().Number() 1172 d.logger.Warn("Rolled back headers", "count", len(hashes), 1173 "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), 1174 "fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), 1175 "block", fmt.Sprintf("%d->%d", lastBlock, curBlock)) 1176 } 1177 }() 1178 1179 // Wait for batches of headers to process 1180 gotHeaders := false 1181 1182 for { 1183 select { 1184 case <-d.cancelCh: 1185 return errCancelHeaderProcessing 1186 1187 case headers := <-d.headerProcCh: 1188 // Terminate header processing if we synced up 1189 if len(headers) == 0 { 1190 // Notify everyone that headers are fully processed 1191 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 1192 select { 1193 case ch <- false: 1194 case <-d.cancelCh: 1195 } 1196 } 1197 // If no headers were retrieved at all, the peer violated its TD promise that it had a 1198 // better chain compared to ours. The only exception is if its promised blocks were 1199 // already imported by other means (e.g. 
fecher): 1200 // 1201 // R <remote peer>, L <local node>: Both at block 10 1202 // R: Mine block 11, and propagate it to L 1203 // L: Queue block 11 for import 1204 // L: Notice that R's head and TD increased compared to ours, start sync 1205 // L: Import of block 11 finishes 1206 // L: Sync begins, and finds common ancestor at 11 1207 // L: Request new headers up from 11 (R's TD was higher, it must have something) 1208 // R: Nothing to give 1209 head := d.blockchain.CurrentBlock() 1210 if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 { 1211 return errStallingPeer 1212 } 1213 // If fast or light syncing, ensure promised headers are indeed delivered. This is 1214 // needed to detect scenarios where an attacker feeds a bad pivot and then bails out 1215 // of delivering the post-pivot blocks that would flag the invalid content. 1216 // 1217 // This check cannot be executed "as is" for full imports, since blocks may still be 1218 // queued for processing when the header download completes. However, as long as the 1219 // peer gave us something useful, we're already happy/progressed (above check). 1220 if d.mode == FastSync { 1221 head := d.lightchain.CurrentHeader() 1222 if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { 1223 return errStallingPeer 1224 } 1225 } 1226 // Disable any rollback and return 1227 rollback = nil 1228 return nil 1229 } 1230 // Otherwise split the chunk of headers into batches and process them 1231 gotHeaders = true 1232 1233 for len(headers) > 0 { 1234 // Terminate if something failed in between processing chunks 1235 select { 1236 case <-d.cancelCh: 1237 return errCancelHeaderProcessing 1238 default: 1239 } 1240 // Select the next chunk of headers to import 1241 limit := maxHeadersProcess 1242 if limit > len(headers) { 1243 limit = len(headers) 1244 } 1245 chunk := headers[:limit] 1246 1247 // In case of header only syncing, validate the chunk immediately 1248 if d.mode == FastSync { 1249 // Collect the yet unknown headers to mark them as uncertain 1250 unknown := make([]*types.Header, 0, len(headers)) 1251 for _, header := range chunk { 1252 if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) { 1253 unknown = append(unknown, header) 1254 } 1255 } 1256 // If we're importing pure headers, verify based on their recentness 1257 frequency := fsHeaderCheckFrequency 1258 if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { 1259 frequency = 1 1260 } 1261 if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil { 1262 // If some headers were inserted, add them too to the rollback list 1263 if n > 0 { 1264 rollback = append(rollback, chunk[:n]...) 1265 } 1266 d.logger.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err) 1267 return errInvalidChain 1268 } 1269 // All verifications passed, store newly found uncertain headers 1270 rollback = append(rollback, unknown...) 1271 if len(rollback) > fsHeaderSafetyNet { 1272 rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...) 
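// Only the most recent fsHeaderSafetyNet (2048) uncertain headers are kept on the
// rollback list; if a chain violation is detected later, only these newest headers
// are unwound, older ones are no longer rolled back.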
1273 } 1274 } 1275 // Unless we're doing light chains, schedule the headers for associated content retrieval 1276 if d.mode == FullSync || d.mode == FastSync { 1277 // If we've reached the allowed number of pending headers, stall a bit 1278 for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { 1279 select { 1280 case <-d.cancelCh: 1281 return errCancelHeaderProcessing 1282 case <-time.After(time.Second): 1283 } 1284 } 1285 // Otherwise insert the headers for content retrieval 1286 inserts := d.queue.Schedule(chunk, origin) 1287 if len(inserts) != len(chunk) { 1288 d.logger.Debug("Stale headers") 1289 return errBadPeer 1290 } 1291 } 1292 headers = headers[limit:] 1293 origin += uint64(limit) 1294 } 1295 1296 // Update the highest block number we know if a higher one is found. 1297 d.syncStatsLock.Lock() 1298 if d.syncStatsChainHeight < origin { 1299 d.syncStatsChainHeight = origin - 1 1300 } 1301 d.syncStatsLock.Unlock() 1302 1303 // Signal the content downloaders of the availablility of new tasks 1304 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 1305 select { 1306 case ch <- true: 1307 default: 1308 } 1309 } 1310 } 1311 } 1312 } 1313 1314 // processFullSyncContent takes fetch results from the queue and imports them into the chain. 1315 func (d *Downloader) processFullSyncContent() error { 1316 for { 1317 results := d.queue.Results(true) 1318 if len(results) == 0 { 1319 return nil 1320 } 1321 if d.chainInsertHook != nil { 1322 d.chainInsertHook(results) 1323 } 1324 if err := d.importBlockResults(results); err != nil { 1325 return err 1326 } 1327 } 1328 } 1329 1330 func (d *Downloader) importBlockResults(results []*fetchResult) error { 1331 // Check for any early termination requests 1332 if len(results) == 0 { 1333 return nil 1334 } 1335 select { 1336 case <-d.quitCh: 1337 return errCancelContentProcessing 1338 default: 1339 } 1340 // Retrieve the a batch of results to import 1341 first, last := results[0].Header, results[len(results)-1].Header 1342 d.logger.Debug("Inserting downloaded chain", "items", len(results), 1343 "firstnum", first.Number, "firsthash", first.Hash(), 1344 "lastnum", last.Number, "lasthash", last.Hash(), 1345 ) 1346 blocks := make([]*types.Block, len(results)) 1347 for i, result := range results { 1348 blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles) 1349 } 1350 if index, err := d.blockchain.InsertChain(blocks); err != nil { 1351 d.logger.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) 1352 return errInvalidChain 1353 } 1354 return nil 1355 } 1356 1357 // processFastSyncContent takes fetch results from the queue and writes them to the 1358 // database. It also controls the synchronisation of state nodes of the pivot block. 1359 func (d *Downloader) processFastSyncContent(latest *types.Header) error { 1360 // Start syncing state of the reported head block. This should get us most of 1361 // the state of the pivot block. 1362 stateSync := d.syncState(latest.Root) 1363 defer stateSync.Cancel() 1364 go func() { 1365 if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { 1366 d.queue.Close() // wake up WaitResults 1367 } 1368 }() 1369 // Figure out the ideal pivot block. Note, that this goalpost may move if the 1370 // sync takes long enough for the chain head to move significantly. 
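// For example, if the reported head is block 100000 and fsMinFullBlocks is 64, the
// pivot starts at 99936: blocks below it are imported with their bodies and receipts
// only, the state of the pivot block is downloaded, and everything after it is fully
// executed. The pivot is re-derived below if the head moves more than
// 2*fsMinFullBlocks past it before the state download commits.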
1371 pivot := uint64(0) 1372 if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) { 1373 pivot = height - uint64(fsMinFullBlocks) 1374 } 1375 // To cater for moving pivot points, track the pivot block and subsequently 1376 // accumulated download results separatey. 1377 var ( 1378 oldPivot *fetchResult // Locked in pivot block, might change eventually 1379 oldTail []*fetchResult // Downloaded content after the pivot 1380 ) 1381 for { 1382 // Wait for the next batch of downloaded data to be available, and if the pivot 1383 // block became stale, move the goalpost 1384 results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness 1385 if len(results) == 0 { 1386 // If pivot sync is done, stop 1387 if oldPivot == nil { 1388 return stateSync.Cancel() 1389 } 1390 // If sync failed, stop 1391 select { 1392 case <-d.cancelCh: 1393 return stateSync.Cancel() 1394 default: 1395 } 1396 } 1397 if d.chainInsertHook != nil { 1398 d.chainInsertHook(results) 1399 } 1400 if oldPivot != nil { 1401 results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) 1402 } 1403 // Split around the pivot block and process the two sides via fast/full sync 1404 if atomic.LoadInt32(&d.committed) == 0 { 1405 latest = results[len(results)-1].Header 1406 if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) { 1407 d.logger.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks)) 1408 pivot = height - uint64(fsMinFullBlocks) 1409 } 1410 } 1411 P, beforeP, afterP := splitAroundPivot(pivot, results) 1412 if err := d.commitFastSyncData(beforeP, stateSync); err != nil { 1413 return err 1414 } 1415 if P != nil { 1416 // If new pivot block found, cancel old state retrieval and restart 1417 if oldPivot != P { 1418 stateSync.Cancel() 1419 1420 stateSync = d.syncState(P.Header.Root) 1421 defer stateSync.Cancel() 1422 go func() { 1423 if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { 1424 d.queue.Close() // wake up WaitResults 1425 } 1426 }() 1427 oldPivot = P 1428 } 1429 // Wait for completion, occasionally checking for pivot staleness 1430 select { 1431 case <-stateSync.done: 1432 if stateSync.err != nil { 1433 return stateSync.err 1434 } 1435 if err := d.commitPivotBlock(P); err != nil { 1436 return err 1437 } 1438 oldPivot = nil 1439 1440 case <-time.After(time.Second): 1441 oldTail = afterP 1442 continue 1443 } 1444 } 1445 // Fast sync done, pivot commit done, full import 1446 if err := d.importBlockResults(afterP); err != nil { 1447 return err 1448 } 1449 } 1450 } 1451 1452 func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) { 1453 for _, result := range results { 1454 num := result.Header.Number.Uint64() 1455 switch { 1456 case num < pivot: 1457 before = append(before, result) 1458 case num == pivot: 1459 p = result 1460 default: 1461 after = append(after, result) 1462 } 1463 } 1464 return p, before, after 1465 } 1466 1467 func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error { 1468 // Check for any early termination requests 1469 if len(results) == 0 { 1470 return nil 1471 } 1472 select { 1473 case <-d.quitCh: 1474 return errCancelContentProcessing 1475 case <-stateSync.done: 1476 if err := stateSync.Wait(); err != nil { 1477 return err 1478 } 1479 default: 1480 } 1481 // Retrieve the a batch of results to import 1482 first, last := results[0].Header, results[len(results)-1].Header 1483 
func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error {
	// Check for any early termination requests
	if len(results) == 0 {
		return nil
	}
	select {
	case <-d.quitCh:
		return errCancelContentProcessing
	case <-stateSync.done:
		if err := stateSync.Wait(); err != nil {
			return err
		}
	default:
	}
	// Retrieve a batch of results to import
	first, last := results[0].Header, results[len(results)-1].Header
	d.logger.Debug("Inserting fast-sync blocks", "items", len(results),
		"firstnum", first.Number, "firsthash", first.Hash(),
		"lastnum", last.Number, "lasthash", last.Hash(),
	)
	blocks := make([]*types.Block, len(results))
	receipts := make([]types.Receipts, len(results))
	for i, result := range results {
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
		receipts[i] = result.Receipts
	}
	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil {
		d.logger.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
		return errInvalidChain
	}
	return nil
}

func (d *Downloader) commitPivotBlock(result *fetchResult) error {
	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
	d.logger.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash())
	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}); err != nil {
		return err
	}
	if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil {
		return err
	}
	atomic.StoreInt32(&d.committed, 1)
	return nil
}

// DeliverHeaders injects a new batch of block headers received from a remote
// node into the download schedule.
func (d *Downloader) DeliverHeaders(id string, headers []*types.Header) (err error) {
	return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter)
}

// DeliverBodies injects a new batch of block bodies received from a remote node.
func (d *Downloader) DeliverBodies(id string, transactions [][]*types.Transaction, uncles [][]*types.Header) (err error) {
	return d.deliver(id, d.bodyCh, &bodyPack{id, transactions, uncles}, bodyInMeter, bodyDropMeter)
}

// DeliverReceipts injects a new batch of receipts received from a remote node.
func (d *Downloader) DeliverReceipts(id string, receipts [][]*types.Receipt) (err error) {
	return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter)
}

// DeliverNodeData injects a new batch of node state data received from a remote node.
func (d *Downloader) DeliverNodeData(id string, data [][]byte) (err error) {
	return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter)
}

// deliver injects a new batch of data received from a remote node.
func (d *Downloader) deliver(id string, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) {
	// Update the delivery metrics for both good and failed deliveries
	inMeter.Mark(int64(packet.Items()))
	defer func() {
		if err != nil {
			dropMeter.Mark(int64(packet.Items()))
		}
	}()
	// Deliver or abort if the sync is canceled while queuing
	d.cancelLock.RLock()
	cancel := d.cancelCh
	d.cancelLock.RUnlock()
	if cancel == nil {
		return errNoSyncActive
	}
	select {
	case destCh <- packet:
		return nil
	case <-cancel:
		return errNoSyncActive
	}
}
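
// A minimal sketch of how the Deliver* methods above are meant to be driven by
// the protocol handler: when a peer's response message is decoded, its payload
// is handed to the downloader under that peer's id. The handler context below
// (handleBlockBodies, peerID, txs, uncles) is hypothetical and only shows the
// call shape; a delivery error usually just means no matching request is
// active any more.
func handleBlockBodies(d *Downloader, peerID string, txs [][]*types.Transaction, uncles [][]*types.Header) {
	if err := d.DeliverBodies(peerID, txs, uncles); err != nil {
		log.Debug("Failed to deliver block bodies", "peer", peerID, "err", err)
	}
}
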
// qosTuner is the quality of service tuning loop that occasionally gathers the
// peer latency statistics and updates the estimated request round trip time.
func (d *Downloader) qosTuner() {
	for {
		// Retrieve the current median RTT and integrate into the previous target RTT
		rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
		atomic.StoreUint64(&d.rttEstimate, uint64(rtt))

		// A new RTT cycle passed, increase our confidence in the estimated RTT
		conf := atomic.LoadUint64(&d.rttConfidence)
		conf = conf + (1000000-conf)/2
		atomic.StoreUint64(&d.rttConfidence, conf)

		// Log the new QoS values and sleep until the next RTT
		d.logger.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
		select {
		case <-d.quitCh:
			return
		case <-time.After(rtt):
		}
	}
}

// qosReduceConfidence is meant to be called when a new peer joins the downloader's
// peer set, needing to reduce the confidence we have in our QoS estimates.
func (d *Downloader) qosReduceConfidence() {
	// If we have a single peer, confidence is always 1
	peers := uint64(d.peers.Len())
	if peers == 0 {
		// Ensure peer connectivity races don't catch us off guard
		return
	}
	if peers == 1 {
		atomic.StoreUint64(&d.rttConfidence, 1000000)
		return
	}
	// If we have a ton of peers, don't drop confidence
	if peers >= uint64(qosConfidenceCap) {
		return
	}
	// Otherwise drop the confidence factor
	conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
	if float64(conf)/1000000 < rttMinConfidence {
		conf = uint64(rttMinConfidence * 1000000)
	}
	atomic.StoreUint64(&d.rttConfidence, conf)

	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
	d.logger.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
}

// requestRTT returns the current target round trip time for a download request
// to complete in.
//
// Note that the returned RTT is 0.9 of the actual estimated RTT. The reason is that
// the downloader tries to adapt queries to the RTT, so multiple RTT values can
// be adapted to, but smaller ones are preferred (stabler download stream).
func (d *Downloader) requestRTT() time.Duration {
	return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
}

// requestTTL returns the current timeout allowance for a single download request
// to finish under.
func (d *Downloader) requestTTL() time.Duration {
	var (
		rtt  = time.Duration(atomic.LoadUint64(&d.rttEstimate))
		conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
	)
	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}
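
// Worked example of the TTL formula above, using this file's ttlScaling (3) and
// ttlLimit (one minute); the RTT and confidence figures are made up:
//
//	rtt = 10s, confidence = 1.0 -> ttl = 3 * 10s / 1.0 = 30s
//	rtt = 10s, confidence = 0.4 -> ttl = 3 * 10s / 0.4 = 75s, clamped to 1m
//
// The same arithmetic as a standalone helper (hypothetical, not part of the
// downloader API), mirroring requestTTL without the atomics:
func ttlFor(rtt time.Duration, confidence float64) time.Duration {
	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/confidence)
	if ttl > ttlLimit {
		ttl = ttlLimit
	}
	return ttl
}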