github.com/n1ghtfa1l/go-vnt@v0.6.4-alpha.6/vnt/downloader/downloader.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package downloader contains the manual full chain synchronisation. 18 package downloader 19 20 import ( 21 "errors" 22 "fmt" 23 "math/big" 24 "sync" 25 "sync/atomic" 26 "time" 27 28 hubble "github.com/vntchain/go-vnt" 29 "github.com/vntchain/go-vnt/common" 30 "github.com/vntchain/go-vnt/core/rawdb" 31 "github.com/vntchain/go-vnt/core/types" 32 "github.com/vntchain/go-vnt/event" 33 "github.com/vntchain/go-vnt/log" 34 "github.com/vntchain/go-vnt/metrics" 35 "github.com/vntchain/go-vnt/params" 36 "github.com/vntchain/go-vnt/vntdb" 37 38 libp2p "github.com/libp2p/go-libp2p-peer" 39 ) 40 41 var ( 42 MaxHashFetch = 512 // Amount of hashes to be fetched per retrieval request 43 MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request 44 MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request 45 MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly 46 MaxBodyFetch = 128 // Amount of block bodies to be fetched per retrieval request 47 MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request 48 MaxStateFetch = 384 // Amount of node state values to allow fetching per request 49 50 MaxForkAncestry = 3 * params.EpochDuration // Maximum chain reorganisation 51 rttMinEstimate = 2 * time.Second // Minimum round-trip time to target for download requests 52 rttMaxEstimate = 20 * time.Second // Maximum round-trip time to target for download requests 53 rttMinConfidence = 0.1 // Worse confidence factor in our estimated RTT value 54 ttlScaling = 3 // Constant scaling factor for RTT -> TTL conversion 55 ttlLimit = time.Minute // Maximum TTL allowance to prevent reaching crazy timeouts 56 57 qosTuningPeers = 5 // Number of peers to tune based on (best peers) 58 qosConfidenceCap = 10 // Number of peers above which not to modify RTT confidence 59 qosTuningImpact = 0.25 // Impact that a new tuning target has on the previous value 60 61 maxQueuedHeaders = 32 * 1024 // [vnt/62] Maximum number of headers to queue for import (DOS protection) 62 maxHeadersProcess = 2048 // Number of header download results to import at once into the chain 63 maxResultsProcess = 2048 // Number of content download results to import at once into the chain 64 65 fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during fast sync 66 fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected 67 fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it 68 fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations 
during state download 69 fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in fast sync 70 ) 71 72 var ( 73 errBusy = errors.New("busy") 74 errUnknownPeer = errors.New("peer is unknown or unhealthy") 75 errBadPeer = errors.New("action from bad peer ignored") 76 errStallingPeer = errors.New("peer is stalling") 77 errNoPeers = errors.New("no peers to keep download active") 78 errTimeout = errors.New("timeout") 79 errEmptyHeaderSet = errors.New("empty header set by peer") 80 errPeersUnavailable = errors.New("no peers available or all tried for download") 81 errInvalidAncestor = errors.New("retrieved ancestor is invalid") 82 errInvalidChain = errors.New("retrieved hash chain is invalid") 83 errInvalidBlock = errors.New("retrieved block is invalid") 84 errInvalidBody = errors.New("retrieved block body is invalid") 85 errInvalidReceipt = errors.New("retrieved receipt is invalid") 86 errCancelBlockFetch = errors.New("block download canceled (requested)") 87 errCancelHeaderFetch = errors.New("block header download canceled (requested)") 88 errCancelBodyFetch = errors.New("block body download canceled (requested)") 89 errCancelReceiptFetch = errors.New("receipt download canceled (requested)") 90 errCancelStateFetch = errors.New("state data download canceled (requested)") 91 errCancelHeaderProcessing = errors.New("header processing canceled (requested)") 92 errCancelContentProcessing = errors.New("content processing canceled (requested)") 93 errNoSyncActive = errors.New("no sync active") 94 errTooOld = errors.New("peer doesn't speak recent enough protocol version (need version >= 62)") 95 ) 96 97 type Downloader struct { 98 mode SyncMode // Synchronisation mode defining the strategy used (per sync cycle) 99 mux *event.TypeMux // Event multiplexer to announce sync operation events 100 101 queue *queue // Scheduler for selecting the hashes to download 102 peers *peerSet // Set of active peers from which download can proceed 103 stateDB vntdb.Database 104 105 rttEstimate uint64 // Round trip time to target for download requests 106 rttConfidence uint64 // Confidence in the estimated RTT (unit: millionths to allow atomic ops) 107 108 // Statistics 109 syncStatsChainOrigin uint64 // Origin block number where syncing started at 110 syncStatsChainHeight uint64 // Highest block number known when syncing started 111 syncStatsState stateSyncStats 112 syncStatsLock sync.RWMutex // Lock protecting the sync stats fields 113 114 lightchain LightChain 115 blockchain BlockChain 116 117 // Callbacks 118 dropPeer peerDropFn // Drops a peer for misbehaving 119 120 // Status 121 synchroniseMock func(id libp2p.ID, hash common.Hash) error // Replacement for synchronise during testing 122 synchronising int32 123 notified int32 124 committed int32 125 126 // Channels 127 headerCh chan dataPack // [vnt/62] Channel receiving inbound block headers 128 bodyCh chan dataPack // [vnt/62] Channel receiving inbound block bodies 129 receiptCh chan dataPack // [vnt/63] Channel receiving inbound receipts 130 bodyWakeCh chan bool // [vnt/62] Channel to signal the block body fetcher of new tasks 131 receiptWakeCh chan bool // [vnt/63] Channel to signal the receipt fetcher of new tasks 132 headerProcCh chan []*types.Header // [vnt/62] Channel to feed the header processor new tasks 133 134 // for stateFetcher 135 stateSyncStart chan *stateSync 136 trackStateReq chan *stateReq 137 stateCh chan dataPack // [vnt/63] Channel receiving inbound node state data 138 139 // Cancellation and termination 140 cancelPeer libp2p.ID // 
Identifier of the peer currently being used as the master (cancel on drop) 141 cancelCh chan struct{} // Channel to cancel mid-flight syncs 142 cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers 143 cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited. 144 145 quitCh chan struct{} // Quit channel to signal termination 146 quitLock sync.RWMutex // Lock to prevent double closes 147 148 // Testing hooks 149 syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run 150 bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch 151 receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch 152 chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) 153 } 154 155 // LightChain encapsulates functions required to synchronise a light chain. 156 type LightChain interface { 157 // HasHeader verifies a header's presence in the local chain. 158 HasHeader(common.Hash, uint64) bool 159 160 // GetHeaderByHash retrieves a header from the local chain. 161 GetHeaderByHash(common.Hash) *types.Header 162 163 // CurrentHeader retrieves the head header from the local chain. 164 CurrentHeader() *types.Header 165 166 // GetTd returns the total difficulty of a local block. 167 GetTd(common.Hash, uint64) *big.Int 168 169 // InsertHeaderChain inserts a batch of headers into the local chain. 170 InsertHeaderChain([]*types.Header, int) (int, error) 171 172 // Rollback removes a few recently added elements from the local chain. 173 Rollback([]common.Hash) 174 } 175 176 // BlockChain encapsulates functions required to sync a (full or fast) blockchain. 177 type BlockChain interface { 178 LightChain 179 180 // HasBlock verifies a block's presence in the local chain. 181 HasBlock(common.Hash, uint64) bool 182 183 // GetBlockByHash retrieves a block from the local chain. 184 GetBlockByHash(common.Hash) *types.Block 185 186 // CurrentBlock retrieves the head block from the local chain. 187 CurrentBlock() *types.Block 188 189 // CurrentFastBlock retrieves the head fast block from the local chain. 190 CurrentFastBlock() *types.Block 191 192 // FastSyncCommitHead directly commits the head block to a certain entity. 193 FastSyncCommitHead(common.Hash) error 194 195 // InsertChain inserts a batch of blocks into the local chain. 196 InsertChain(types.Blocks) (int, error) 197 198 // InsertReceiptChain inserts a batch of receipts into the local chain. 199 InsertReceiptChain(types.Blocks, []types.Receipts) (int, error) 200 } 201 202 // New creates a new downloader to fetch hashes and blocks from remote peers. 
203 func New(mode SyncMode, stateDb vntdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn) *Downloader { 204 if lightchain == nil { 205 lightchain = chain 206 } 207 208 dl := &Downloader{ 209 mode: mode, 210 stateDB: stateDb, 211 mux: mux, 212 queue: newQueue(), 213 peers: newPeerSet(), 214 rttEstimate: uint64(rttMaxEstimate), 215 rttConfidence: uint64(1000000), 216 blockchain: chain, 217 lightchain: lightchain, 218 dropPeer: dropPeer, 219 headerCh: make(chan dataPack, 1), 220 bodyCh: make(chan dataPack, 1), 221 receiptCh: make(chan dataPack, 1), 222 bodyWakeCh: make(chan bool, 1), 223 receiptWakeCh: make(chan bool, 1), 224 headerProcCh: make(chan []*types.Header, 1), 225 quitCh: make(chan struct{}), 226 stateCh: make(chan dataPack), 227 stateSyncStart: make(chan *stateSync), 228 syncStatsState: stateSyncStats{ 229 processed: rawdb.ReadFastTrieProgress(stateDb), 230 }, 231 trackStateReq: make(chan *stateReq), 232 } 233 go dl.qosTuner() 234 go dl.stateFetcher() 235 return dl 236 } 237 238 // Progress retrieves the synchronisation boundaries, specifically the origin 239 // block where synchronisation started at (may have failed/suspended); the block 240 // or header sync is currently at; and the latest known block which the sync targets. 241 // 242 // In addition, during the state download phase of fast synchronisation the number 243 // of processed and the total number of known states are also returned. Otherwise 244 // these are zero. 245 func (d *Downloader) Progress() hubble.SyncProgress { 246 // Lock the current stats and return the progress 247 d.syncStatsLock.RLock() 248 defer d.syncStatsLock.RUnlock() 249 250 current := uint64(0) 251 switch d.mode { 252 case FullSync: 253 current = d.blockchain.CurrentBlock().NumberU64() 254 case FastSync: 255 current = d.blockchain.CurrentFastBlock().NumberU64() 256 case LightSync: 257 current = d.lightchain.CurrentHeader().Number.Uint64() 258 } 259 return hubble.SyncProgress{ 260 StartingBlock: d.syncStatsChainOrigin, 261 CurrentBlock: current, 262 HighestBlock: d.syncStatsChainHeight, 263 PulledStates: d.syncStatsState.processed, 264 KnownStates: d.syncStatsState.processed + d.syncStatsState.pending, 265 } 266 } 267 268 // Synchronising returns whether the downloader is currently retrieving blocks. 269 func (d *Downloader) Synchronising() bool { 270 return atomic.LoadInt32(&d.synchronising) > 0 271 } 272 273 // RegisterPeer injects a new download peer into the set of block source to be 274 // used for fetching hashes and blocks from. 275 func (d *Downloader) RegisterPeer(id libp2p.ID, version int, peer Peer) error { 276 logger := log.New("peer", id) 277 logger.Trace("Registering sync peer") 278 if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil { 279 logger.Error("Failed to register sync peer", "err", err) 280 return err 281 } 282 d.qosReduceConfidence() 283 284 return nil 285 } 286 287 // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer. 288 func (d *Downloader) RegisterLightPeer(id libp2p.ID, version int, peer LightPeer) error { 289 return d.RegisterPeer(id, version, &lightPeerWrapper{peer}) 290 } 291 292 // UnregisterPeer remove a peer from the known list, preventing any action from 293 // the specified peer. An effort is also made to return any pending fetches into 294 // the queue. 
295 func (d *Downloader) UnregisterPeer(id libp2p.ID) error { 296 // Unregister the peer from the active peer set and revoke any fetch tasks 297 logger := log.New("peer", id) 298 logger.Trace("Unregistering sync peer") 299 if err := d.peers.Unregister(id); err != nil { 300 logger.Error("Failed to unregister sync peer", "err", err) 301 return err 302 } 303 d.queue.Revoke(id) 304 305 // If this peer was the master peer, abort sync immediately 306 d.cancelLock.RLock() 307 master := id == d.cancelPeer 308 d.cancelLock.RUnlock() 309 310 if master { 311 d.cancel() 312 } 313 return nil 314 } 315 316 // Synchronise tries to sync up our local block chain with a remote peer, both 317 // adding various sanity checks as well as wrapping it with various log entries. 318 func (d *Downloader) Synchronise(id libp2p.ID, head common.Hash, td *big.Int, mode SyncMode) error { 319 err := d.synchronise(id, head, td, mode) 320 switch err { 321 case nil: 322 case errBusy: 323 324 case errTimeout, errBadPeer, errStallingPeer, 325 errEmptyHeaderSet, errPeersUnavailable, errTooOld, 326 errInvalidAncestor, errInvalidChain: 327 log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) 328 if d.dropPeer == nil { 329 // The dropPeer method is nil when `--copydb` is used for a local copy. 330 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 331 log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id) 332 } else { 333 d.dropPeer(id) 334 } 335 default: 336 log.Warn("Synchronisation failed, retrying", "err", err) 337 } 338 return err 339 } 340 341 // synchronise will select the peer and use it for synchronising. If an empty string is given 342 // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the 343 // checks fail an error will be returned. 
This method is synchronous 344 func (d *Downloader) synchronise(id libp2p.ID, hash common.Hash, td *big.Int, mode SyncMode) error { 345 // Mock out the synchronisation if testing 346 if d.synchroniseMock != nil { 347 return d.synchroniseMock(id, hash) 348 } 349 // Make sure only one goroutine is ever allowed past this point at once 350 if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { 351 return errBusy 352 } 353 defer atomic.StoreInt32(&d.synchronising, 0) 354 355 // Post a user notification of the sync (only once per session) 356 if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { 357 log.Info("Block synchronisation started") 358 } 359 // Reset the queue, peer set and wake channels to clean any internal leftover state 360 d.queue.Reset() 361 d.peers.Reset() 362 363 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 364 select { 365 case <-ch: 366 default: 367 } 368 } 369 for _, ch := range []chan dataPack{d.headerCh, d.bodyCh, d.receiptCh} { 370 for empty := false; !empty; { 371 select { 372 case <-ch: 373 default: 374 empty = true 375 } 376 } 377 } 378 for empty := false; !empty; { 379 select { 380 case <-d.headerProcCh: 381 default: 382 empty = true 383 } 384 } 385 // Create cancel channel for aborting mid-flight and mark the master peer 386 d.cancelLock.Lock() 387 d.cancelCh = make(chan struct{}) 388 d.cancelPeer = id 389 d.cancelLock.Unlock() 390 391 defer d.Cancel() // No matter what, we can't leave the cancel channel open 392 393 // Set the requested sync mode, unless it's forbidden 394 d.mode = mode 395 396 // Retrieve the origin peer and initiate the downloading process 397 p := d.peers.Peer(id) 398 if p == nil { 399 return errUnknownPeer 400 } 401 return d.syncWithPeer(p, hash, td) 402 } 403 404 // syncWithPeer starts a block synchronization based on the hash chain from the 405 // specified peer and head hash. 
406 func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td *big.Int) (err error) { 407 d.mux.Post(StartEvent{}) 408 defer func() { 409 // reset on error 410 if err != nil { 411 d.mux.Post(FailedEvent{err}) 412 } else { 413 d.mux.Post(DoneEvent{}) 414 } 415 }() 416 if p.version < 62 { 417 return errTooOld 418 } 419 420 log.Debug("Synchronising with the network", "peer", p.id, "vnt", p.version, "head", hash, "td", td, "mode", d.mode) 421 defer func(start time.Time) { 422 log.Debug("Synchronisation terminated", "elapsed", time.Since(start)) 423 }(time.Now()) 424 425 // Look up the sync boundaries: the common ancestor and the target block 426 latest, err := d.fetchHeight(p) 427 if err != nil { 428 return err 429 } 430 height := latest.Number.Uint64() 431 432 origin, err := d.findAncestor(p, height) 433 if err != nil { 434 return err 435 } 436 d.syncStatsLock.Lock() 437 if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { 438 d.syncStatsChainOrigin = origin 439 } 440 d.syncStatsChainHeight = height 441 d.syncStatsLock.Unlock() 442 443 // Ensure our origin point is below any fast sync pivot point 444 pivot := uint64(0) 445 if d.mode == FastSync { 446 if height <= uint64(fsMinFullBlocks) { 447 origin = 0 448 } else { 449 pivot = height - uint64(fsMinFullBlocks) 450 if pivot <= origin { 451 origin = pivot - 1 452 } 453 } 454 } 455 d.committed = 1 456 if d.mode == FastSync && pivot != 0 { 457 d.committed = 0 458 } 459 // Initiate the sync using a concurrent header and content retrieval algorithm 460 d.queue.Prepare(origin+1, d.mode) 461 if d.syncInitHook != nil { 462 d.syncInitHook(origin, height) 463 } 464 465 fetchers := []func() error{ 466 func() error { return d.fetchHeaders(p, origin+1, pivot) }, // Headers are always retrieved 467 func() error { return d.fetchBodies(origin + 1) }, // Bodies are retrieved during normal and fast sync 468 func() error { return d.fetchReceipts(origin + 1) }, // Receipts are retrieved during fast sync 469 func() error { return d.processHeaders(origin+1, pivot, td) }, 470 } 471 if d.mode == FastSync { 472 fetchers = append(fetchers, func() error { return d.processFastSyncContent(latest) }) 473 } else if d.mode == FullSync { 474 fetchers = append(fetchers, d.processFullSyncContent) 475 } 476 return d.spawnSync(fetchers) 477 } 478 479 // spawnSync runs d.process and all given fetcher functions to completion in 480 // separate goroutines, returning the first error that appears. 481 func (d *Downloader) spawnSync(fetchers []func() error) error { 482 errc := make(chan error, len(fetchers)) 483 d.cancelWg.Add(len(fetchers)) 484 for _, fn := range fetchers { 485 fn := fn 486 go func() { defer d.cancelWg.Done(); errc <- fn() }() 487 } 488 // Wait for the first error, then terminate the others. 489 var err error 490 for i := 0; i < len(fetchers); i++ { 491 if i == len(fetchers)-1 { 492 // Close the queue when all fetchers have exited. 493 // This will cause the block processor to end when 494 // it has processed the queue. 495 d.queue.Close() 496 } 497 if err = <-errc; err != nil { 498 break 499 } 500 } 501 d.queue.Close() 502 d.Cancel() 503 return err 504 } 505 506 // cancel aborts all of the operations and resets the queue. However, cancel does 507 // not wait for the running download goroutines to finish. This method should be 508 // used when cancelling the downloads from inside the downloader. 
509 func (d *Downloader) cancel() { 510 // Close the current cancel channel 511 d.cancelLock.Lock() 512 if d.cancelCh != nil { 513 select { 514 case <-d.cancelCh: 515 // Channel was already closed 516 default: 517 close(d.cancelCh) 518 } 519 } 520 d.cancelLock.Unlock() 521 } 522 523 // Cancel aborts all of the operations and waits for all download goroutines to 524 // finish before returning. 525 func (d *Downloader) Cancel() { 526 d.cancel() 527 d.cancelWg.Wait() 528 } 529 530 // Terminate interrupts the downloader, canceling all pending operations. 531 // The downloader cannot be reused after calling Terminate. 532 func (d *Downloader) Terminate() { 533 // Close the termination channel (make sure double close is allowed) 534 d.quitLock.Lock() 535 select { 536 case <-d.quitCh: 537 default: 538 close(d.quitCh) 539 } 540 d.quitLock.Unlock() 541 542 // Cancel any pending download requests 543 d.Cancel() 544 } 545 546 // fetchHeight retrieves the head header of the remote peer to aid in estimating 547 // the total time a pending synchronisation would take. 548 func (d *Downloader) fetchHeight(p *peerConnection) (*types.Header, error) { 549 p.log.Debug("Retrieving remote chain height") 550 551 // Request the advertised remote head block and wait for the response 552 head, _ := p.peer.Head() 553 go p.peer.RequestHeadersByHash(head, 1, 0, false) 554 555 ttl := d.requestTTL() 556 timeout := time.After(ttl) 557 for { 558 select { 559 case <-d.cancelCh: 560 return nil, errCancelBlockFetch 561 562 case packet := <-d.headerCh: 563 // Discard anything not from the origin peer 564 if packet.PeerId() != p.id { 565 log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) 566 break 567 } 568 // Make sure the peer actually gave something valid 569 headers := packet.(*headerPack).headers 570 if len(headers) != 1 { 571 p.log.Debug("Multiple headers for single request", "headers", len(headers)) 572 return nil, errBadPeer 573 } 574 head := headers[0] 575 p.log.Debug("Remote head header identified", "number", head.Number, "hash", head.Hash()) 576 return head, nil 577 578 case <-timeout: 579 p.log.Debug("Waiting for head header timed out", "elapsed", ttl) 580 return nil, errTimeout 581 582 case <-d.bodyCh: 583 case <-d.receiptCh: 584 // Out of bounds delivery, ignore 585 } 586 } 587 } 588 589 // findAncestor tries to locate the common ancestor link of the local chain and 590 // a remote peers blockchain. In the general case when our node was in sync and 591 // on the correct chain, checking the top N links should already get us a match. 592 // In the rare scenario when we ended up on a long reorganisation (i.e. none of 593 // the head links match), we do a binary search to find the common ancestor. 
594 func (d *Downloader) findAncestor(p *peerConnection, height uint64) (uint64, error) { 595 // Figure out the valid ancestor range to prevent rewrite attacks 596 floor, ceil := int64(-1), d.lightchain.CurrentHeader().Number.Uint64() 597 598 if d.mode == FullSync { 599 ceil = d.blockchain.CurrentBlock().NumberU64() 600 } else if d.mode == FastSync { 601 ceil = d.blockchain.CurrentFastBlock().NumberU64() 602 } 603 if ceil >= MaxForkAncestry { 604 floor = int64(ceil - MaxForkAncestry) 605 } 606 p.log.Debug("Looking for common ancestor", "local", ceil, "remote", height) 607 608 // Request the topmost blocks to short circuit binary ancestor lookup 609 head := ceil 610 if head > height { 611 head = height 612 } 613 from := int64(head) - int64(MaxHeaderFetch) 614 if from < 0 { 615 from = 0 616 } 617 // Span out with 15 block gaps into the future to catch bad head reports 618 limit := 2 * MaxHeaderFetch / 16 619 count := 1 + int((int64(ceil)-from)/16) 620 if count > limit { 621 count = limit 622 } 623 go p.peer.RequestHeadersByNumber(uint64(from), count, 15, false) 624 625 // Wait for the remote response to the head fetch 626 number, hash := uint64(0), common.Hash{} 627 628 ttl := d.requestTTL() 629 timeout := time.After(ttl) 630 631 for finished := false; !finished; { 632 select { 633 case <-d.cancelCh: 634 return 0, errCancelHeaderFetch 635 636 case packet := <-d.headerCh: 637 // Discard anything not from the origin peer 638 if packet.PeerId() != p.id { 639 log.Debug("Received headers from incorrect peer", "peer", packet.PeerId()) 640 break 641 } 642 // Make sure the peer actually gave something valid 643 headers := packet.(*headerPack).headers 644 if len(headers) == 0 { 645 p.log.Warn("Empty head header set") 646 return 0, errEmptyHeaderSet 647 } 648 // Make sure the peer's reply conforms to the request 649 for i := 0; i < len(headers); i++ { 650 if number := headers[i].Number.Int64(); number != from+int64(i)*16 { 651 p.log.Warn("Head headers broke chain ordering", "index", i, "requested", from+int64(i)*16, "received", number) 652 return 0, errInvalidChain 653 } 654 } 655 // Check if a common ancestor was found 656 finished = true 657 for i := len(headers) - 1; i >= 0; i-- { 658 // Skip any headers that underflow/overflow our requested set 659 if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > ceil { 660 continue 661 } 662 // Otherwise check if we already know the header or not 663 if (d.mode == FullSync && d.blockchain.HasBlock(headers[i].Hash(), headers[i].Number.Uint64())) || (d.mode != FullSync && d.lightchain.HasHeader(headers[i].Hash(), headers[i].Number.Uint64())) { 664 number, hash = headers[i].Number.Uint64(), headers[i].Hash() 665 666 // If every header is known, even future ones, the peer straight out lied about its head 667 if number > height && i == limit-1 { 668 p.log.Warn("Lied about chain head", "reported", height, "found", number) 669 return 0, errStallingPeer 670 } 671 break 672 } 673 } 674 675 case <-timeout: 676 p.log.Debug("Waiting for head header timed out", "elapsed", ttl) 677 return 0, errTimeout 678 679 case <-d.bodyCh: 680 case <-d.receiptCh: 681 // Out of bounds delivery, ignore 682 } 683 } 684 // If the head fetch already found an ancestor, return 685 if hash != (common.Hash{}) { 686 if int64(number) <= floor { 687 p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor) 688 return 0, errInvalidAncestor 689 } 690 p.log.Debug("Found common ancestor", "number", number, "hash", hash) 691 return number, nil 692 } 693 
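// None of the sampled headers (every 16th block across roughly the last MaxHeaderFetch blocks) matched the local chain, so the fork point is older than that window.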
// Ancestor not found, we need to binary search over our chain 694 start, end := uint64(0), head 695 if floor > 0 { 696 start = uint64(floor) 697 } 698 for start+1 < end { 699 // Split our chain interval in two, and request the hash to cross check 700 check := (start + end) / 2 701 702 ttl := d.requestTTL() 703 timeout := time.After(ttl) 704 705 go p.peer.RequestHeadersByNumber(check, 1, 0, false) 706 707 // Wait until a reply arrives to this request 708 for arrived := false; !arrived; { 709 select { 710 case <-d.cancelCh: 711 return 0, errCancelHeaderFetch 712 713 case packer := <-d.headerCh: 714 // Discard anything not from the origin peer 715 if packer.PeerId() != p.id { 716 log.Debug("Received headers from incorrect peer", "peer", packer.PeerId()) 717 break 718 } 719 // Make sure the peer actually gave something valid 720 headers := packer.(*headerPack).headers 721 if len(headers) != 1 { 722 p.log.Debug("Multiple headers for single request", "headers", len(headers)) 723 return 0, errBadPeer 724 } 725 arrived = true 726 727 // Modify the search interval based on the response 728 if (d.mode == FullSync && !d.blockchain.HasBlock(headers[0].Hash(), headers[0].Number.Uint64())) || (d.mode != FullSync && !d.lightchain.HasHeader(headers[0].Hash(), headers[0].Number.Uint64())) { 729 end = check 730 break 731 } 732 header := d.lightchain.GetHeaderByHash(headers[0].Hash()) // Independent of sync mode, header surely exists 733 if header.Number.Uint64() != check { 734 p.log.Debug("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) 735 return 0, errBadPeer 736 } 737 start = check 738 739 case <-timeout: 740 p.log.Debug("Waiting for search header timed out", "elapsed", ttl) 741 return 0, errTimeout 742 743 case <-d.bodyCh: 744 case <-d.receiptCh: 745 // Out of bounds delivery, ignore 746 } 747 } 748 } 749 // Ensure valid ancestry and return 750 if int64(start) <= floor { 751 p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor) 752 return 0, errInvalidAncestor 753 } 754 p.log.Debug("Found common ancestor", "number", start, "hash", hash) 755 return start, nil 756 } 757 758 // fetchHeaders keeps retrieving headers concurrently from the number 759 // requested, until no more are returned, potentially throttling on the way. To 760 // facilitate concurrency but still protect against malicious nodes sending bad 761 // headers, we construct a header chain skeleton using the "origin" peer we are 762 // syncing with, and fill in the missing headers using anyone else. Headers from 763 // other peers are only accepted if they map cleanly to the skeleton. If no one 764 // can fill in the skeleton - not even the origin peer - it's assumed invalid and 765 // the origin is dropped. 
766 func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, pivot uint64) error { 767 p.log.Debug("Directing header downloads", "origin", from) 768 defer p.log.Debug("Header download terminated") 769 770 // Create a timeout timer, and the associated header fetcher 771 skeleton := true // Skeleton assembly phase or finishing up 772 request := time.Now() // time of the last skeleton fetch request 773 timeout := time.NewTimer(0) // timer to dump a non-responsive active peer 774 <-timeout.C // timeout channel should be initially empty 775 defer timeout.Stop() 776 777 var ttl time.Duration 778 getHeaders := func(from uint64) { 779 request = time.Now() 780 781 ttl = d.requestTTL() 782 timeout.Reset(ttl) 783 784 if skeleton { 785 p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) 786 go p.peer.RequestHeadersByNumber(from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) 787 } else { 788 p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) 789 go p.peer.RequestHeadersByNumber(from, MaxHeaderFetch, 0, false) 790 } 791 } 792 // Start pulling the header chain skeleton until all is done 793 getHeaders(from) 794 795 for { 796 select { 797 case <-d.cancelCh: 798 return errCancelHeaderFetch 799 800 case packet := <-d.headerCh: 801 // Make sure the active peer is giving us the skeleton headers 802 if packet.PeerId() != p.id { 803 log.Debug("Received skeleton from incorrect peer", "peer", packet.PeerId()) 804 break 805 } 806 headerReqTimer.UpdateSince(request) 807 timeout.Stop() 808 809 // If the skeleton's finished, pull any remaining head headers directly from the origin 810 if packet.Items() == 0 && skeleton { 811 skeleton = false 812 getHeaders(from) 813 continue 814 } 815 // If no more headers are inbound, notify the content fetchers and return 816 if packet.Items() == 0 { 817 // Don't abort header fetches while the pivot is downloading 818 if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { 819 p.log.Debug("No headers, waiting for pivot commit") 820 select { 821 case <-time.After(fsHeaderContCheck): 822 getHeaders(from) 823 continue 824 case <-d.cancelCh: 825 return errCancelHeaderFetch 826 } 827 } 828 // Pivot done (or not in fast sync) and no more headers, terminate the process 829 p.log.Debug("No more headers available") 830 select { 831 case d.headerProcCh <- nil: 832 return nil 833 case <-d.cancelCh: 834 return errCancelHeaderFetch 835 } 836 } 837 headers := packet.(*headerPack).headers 838 839 // If we received a skeleton batch, resolve internals concurrently 840 if skeleton { 841 filled, proced, err := d.fillHeaderSkeleton(from, headers) 842 if err != nil { 843 p.log.Debug("Skeleton chain invalid", "err", err) 844 return errInvalidChain 845 } 846 headers = filled[proced:] 847 from += uint64(proced) 848 } 849 // Insert all the new headers and fetch the next batch 850 if len(headers) > 0 { 851 p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) 852 select { 853 case d.headerProcCh <- headers: 854 case <-d.cancelCh: 855 return errCancelHeaderFetch 856 } 857 from += uint64(len(headers)) 858 } 859 getHeaders(from) 860 861 case <-timeout.C: 862 if d.dropPeer == nil { 863 // The dropPeer method is nil when `--copydb` is used for a local copy. 864 // Timeouts can occur if e.g. 
compaction hits at the wrong time, and can be ignored 865 p.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", p.id) 866 break 867 } 868 // Header retrieval timed out, consider the peer bad and drop 869 p.log.Debug("Header request timed out", "elapsed", ttl) 870 headerTimeoutMeter.Mark(1) 871 d.dropPeer(p.id) 872 873 // Finish the sync gracefully instead of dumping the gathered data though 874 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 875 select { 876 case ch <- false: 877 case <-d.cancelCh: 878 } 879 } 880 select { 881 case d.headerProcCh <- nil: 882 case <-d.cancelCh: 883 } 884 return errBadPeer 885 } 886 } 887 } 888 889 // fillHeaderSkeleton concurrently retrieves headers from all our available peers 890 // and maps them to the provided skeleton header chain. 891 // 892 // Any partial results from the beginning of the skeleton is (if possible) forwarded 893 // immediately to the header processor to keep the rest of the pipeline full even 894 // in the case of header stalls. 895 // 896 // The method returns the entire filled skeleton and also the number of headers 897 // already forwarded for processing. 898 func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, int, error) { 899 log.Debug("Filling up skeleton", "from", from) 900 d.queue.ScheduleSkeleton(from, skeleton) 901 902 var ( 903 deliver = func(packet dataPack) (int, error) { 904 pack := packet.(*headerPack) 905 return d.queue.DeliverHeaders(pack.peerId, pack.headers, d.headerProcCh) 906 } 907 expire = func() map[libp2p.ID]int { return d.queue.ExpireHeaders(d.requestTTL()) } 908 throttle = func() bool { return false } 909 reserve = func(p *peerConnection, count int) (*fetchRequest, bool, error) { 910 return d.queue.ReserveHeaders(p, count), false, nil 911 } 912 fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchHeaders(req.From, MaxHeaderFetch) } 913 capacity = func(p *peerConnection) int { return p.HeaderCapacity(d.requestRTT()) } 914 setIdle = func(p *peerConnection, accepted int) { p.SetHeadersIdle(accepted) } 915 ) 916 err := d.fetchParts(errCancelHeaderFetch, d.headerCh, deliver, d.queue.headerContCh, expire, 917 d.queue.PendingHeaders, d.queue.InFlightHeaders, throttle, reserve, 918 nil, fetch, d.queue.CancelHeaders, capacity, d.peers.HeaderIdlePeers, setIdle, "headers") 919 920 log.Debug("Skeleton fill terminated", "err", err) 921 922 filled, proced := d.queue.RetrieveHeaders() 923 return filled, proced, err 924 } 925 926 // fetchBodies iteratively downloads the scheduled block bodies, taking any 927 // available peers, reserving a chunk of blocks for each, waiting for delivery 928 // and also periodically checking for timeouts. 
929 func (d *Downloader) fetchBodies(from uint64) error { 930 log.Debug("Downloading block bodies", "origin", from) 931 932 var ( 933 deliver = func(packet dataPack) (int, error) { 934 pack := packet.(*bodyPack) 935 return d.queue.DeliverBodies(pack.peerId, pack.transactions) 936 } 937 expire = func() map[libp2p.ID]int { return d.queue.ExpireBodies(d.requestTTL()) } 938 fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchBodies(req) } 939 capacity = func(p *peerConnection) int { return p.BlockCapacity(d.requestRTT()) } 940 setIdle = func(p *peerConnection, accepted int) { p.SetBodiesIdle(accepted) } 941 ) 942 err := d.fetchParts(errCancelBodyFetch, d.bodyCh, deliver, d.bodyWakeCh, expire, 943 d.queue.PendingBlocks, d.queue.InFlightBlocks, d.queue.ShouldThrottleBlocks, d.queue.ReserveBodies, 944 d.bodyFetchHook, fetch, d.queue.CancelBodies, capacity, d.peers.BodyIdlePeers, setIdle, "bodies") 945 946 log.Debug("Block body download terminated", "err", err) 947 return err 948 } 949 950 // fetchReceipts iteratively downloads the scheduled block receipts, taking any 951 // available peers, reserving a chunk of receipts for each, waiting for delivery 952 // and also periodically checking for timeouts. 953 func (d *Downloader) fetchReceipts(from uint64) error { 954 log.Debug("Downloading transaction receipts", "origin", from) 955 956 var ( 957 deliver = func(packet dataPack) (int, error) { 958 pack := packet.(*receiptPack) 959 return d.queue.DeliverReceipts(pack.peerId, pack.receipts) 960 } 961 expire = func() map[libp2p.ID]int { return d.queue.ExpireReceipts(d.requestTTL()) } 962 fetch = func(p *peerConnection, req *fetchRequest) error { return p.FetchReceipts(req) } 963 capacity = func(p *peerConnection) int { return p.ReceiptCapacity(d.requestRTT()) } 964 setIdle = func(p *peerConnection, accepted int) { p.SetReceiptsIdle(accepted) } 965 ) 966 err := d.fetchParts(errCancelReceiptFetch, d.receiptCh, deliver, d.receiptWakeCh, expire, 967 d.queue.PendingReceipts, d.queue.InFlightReceipts, d.queue.ShouldThrottleReceipts, d.queue.ReserveReceipts, 968 d.receiptFetchHook, fetch, d.queue.CancelReceipts, capacity, d.peers.ReceiptIdlePeers, setIdle, "receipts") 969 970 log.Debug("Transaction receipt download terminated", "err", err) 971 return err 972 } 973 974 // fetchParts iteratively downloads scheduled block parts, taking any available 975 // peers, reserving a chunk of fetch requests for each, waiting for delivery and 976 // also periodically checking for timeouts. 977 // 978 // As the scheduling/timeout logic mostly is the same for all downloaded data 979 // types, this method is used by each for data gathering and is instrumented with 980 // various callbacks to handle the slight differences between processing them. 
981 // 982 // The instrumentation parameters: 983 // - errCancel: error type to return if the fetch operation is cancelled (mostly makes logging nicer) 984 // - deliveryCh: channel from which to retrieve downloaded data packets (merged from all concurrent peers) 985 // - deliver: processing callback to deliver data packets into type specific download queues (usually within `queue`) 986 // - wakeCh: notification channel for waking the fetcher when new tasks are available (or sync completed) 987 // - expire: task callback method to abort requests that took too long and return the faulty peers (traffic shaping) 988 // - pending: task callback for the number of requests still needing download (detect completion/non-completability) 989 // - inFlight: task callback for the number of in-progress requests (wait for all active downloads to finish) 990 // - throttle: task callback to check if the processing queue is full and activate throttling (bound memory use) 991 // - reserve: task callback to reserve new download tasks to a particular peer (also signals partial completions) 992 // - fetchHook: tester callback to notify of new tasks being initiated (allows testing the scheduling logic) 993 // - fetch: network callback to actually send a particular download request to a physical remote peer 994 // - cancel: task callback to abort an in-flight download request and allow rescheduling it (in case of lost peer) 995 // - capacity: network callback to retrieve the estimated type-specific bandwidth capacity of a peer (traffic shaping) 996 // - idle: network callback to retrieve the currently (type specific) idle peers that can be assigned tasks 997 // - setIdle: network callback to set a peer back to idle and update its estimated capacity (traffic shaping) 998 // - kind: textual label of the type being downloaded to display in log messages 999 func (d *Downloader) fetchParts(errCancel error, deliveryCh chan dataPack, deliver func(dataPack) (int, error), wakeCh chan bool, 1000 expire func() map[libp2p.ID]int, pending func() int, inFlight func() bool, throttle func() bool, reserve func(*peerConnection, int) (*fetchRequest, bool, error), 1001 fetchHook func([]*types.Header), fetch func(*peerConnection, *fetchRequest) error, cancel func(*fetchRequest), capacity func(*peerConnection) int, 1002 idle func() ([]*peerConnection, int), setIdle func(*peerConnection, int), kind string) error { 1003 1004 // Create a ticker to detect expired retrieval tasks 1005 ticker := time.NewTicker(100 * time.Millisecond) 1006 defer ticker.Stop() 1007 1008 update := make(chan struct{}, 1) 1009 1010 // Prepare the queue and fetch block parts until the block header fetcher's done 1011 finished := false 1012 for { 1013 select { 1014 case <-d.cancelCh: 1015 return errCancel 1016 1017 case packet := <-deliveryCh: 1018 // If the peer was previously banned and failed to deliver its pack 1019 // in a reasonable time frame, ignore its message. 1020 if peer := d.peers.Peer(packet.PeerId()); peer != nil { 1021 // Deliver the received chunk of data and check chain validity 1022 accepted, err := deliver(packet) 1023 if err == errInvalidChain { 1024 return err 1025 } 1026 // Unless a peer delivered something completely different from what was requested (usually 1027 // caused by a timed out request which came through in the end), set it to 1028 // idle. If the delivery's stale, the peer should have already been idled.
1029 if err != errStaleDelivery { 1030 setIdle(peer, accepted) 1031 } 1032 // Issue a log to the user to see what's going on 1033 switch { 1034 case err == nil && packet.Items() == 0: 1035 peer.log.Trace("Requested data not delivered", "type", kind) 1036 case err == nil: 1037 peer.log.Trace("Delivered new batch of data", "type", kind, "count", packet.Stats()) 1038 default: 1039 peer.log.Trace("Failed to deliver retrieved data", "type", kind, "err", err) 1040 } 1041 } 1042 // Blocks assembled, try to update the progress 1043 select { 1044 case update <- struct{}{}: 1045 default: 1046 } 1047 1048 case cont := <-wakeCh: 1049 // The header fetcher sent a continuation flag, check if it's done 1050 if !cont { 1051 finished = true 1052 } 1053 // Headers arrive, try to update the progress 1054 select { 1055 case update <- struct{}{}: 1056 default: 1057 } 1058 1059 case <-ticker.C: 1060 // Sanity check update the progress 1061 select { 1062 case update <- struct{}{}: 1063 default: 1064 } 1065 1066 case <-update: 1067 // Short circuit if we lost all our peers 1068 if d.peers.Len() == 0 { 1069 return errNoPeers 1070 } 1071 // Check for fetch request timeouts and demote the responsible peers 1072 for pid, fails := range expire() { 1073 if peer := d.peers.Peer(pid); peer != nil { 1074 // If a lot of retrieval elements expired, we might have overestimated the remote peer or perhaps 1075 // ourselves. Only reset to minimal throughput but don't drop just yet. If even the minimal times 1076 // out that sync wise we need to get rid of the peer. 1077 // 1078 // The reason the minimum threshold is 2 is because the downloader tries to estimate the bandwidth 1079 // and latency of a peer separately, which requires pushing the measures capacity a bit and seeing 1080 // how response times reacts, to it always requests one more than the minimum (i.e. min 2). 1081 if fails > 2 { 1082 peer.log.Trace("Data delivery timed out", "type", kind) 1083 setIdle(peer, 0) 1084 } else { 1085 peer.log.Debug("Stalling delivery, dropping", "type", kind) 1086 if d.dropPeer == nil { 1087 // The dropPeer method is nil when `--copydb` is used for a local copy. 1088 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 1089 peer.log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", pid) 1090 } else { 1091 d.dropPeer(pid) 1092 } 1093 } 1094 } 1095 } 1096 // If there's nothing more to fetch, wait or terminate 1097 if pending() == 0 { 1098 if !inFlight() && finished { 1099 log.Debug("Data fetching completed", "type", kind) 1100 return nil 1101 } 1102 break 1103 } 1104 // Send a download request to all idle peers, until throttled 1105 progressed, throttled, running := false, false, inFlight() 1106 idles, total := idle() 1107 1108 for _, peer := range idles { 1109 // Short circuit if throttling activated 1110 if throttle() { 1111 throttled = true 1112 break 1113 } 1114 // Short circuit if there is no more available task. 1115 if pending() == 0 { 1116 break 1117 } 1118 // Reserve a chunk of fetches for a peer. A nil can mean either that 1119 // no more headers are available, or that the peer is known not to 1120 // have them. 
1121 request, progress, err := reserve(peer, capacity(peer)) 1122 if err != nil { 1123 return err 1124 } 1125 if progress { 1126 progressed = true 1127 } 1128 if request == nil { 1129 continue 1130 } 1131 if request.From > 0 { 1132 peer.log.Trace("Requesting new batch of data", "type", kind, "from", request.From) 1133 } else { 1134 peer.log.Trace("Requesting new batch of data", "type", kind, "count", len(request.Headers), "from", request.Headers[0].Number) 1135 } 1136 // Fetch the chunk and make sure any errors return the hashes to the queue 1137 if fetchHook != nil { 1138 fetchHook(request.Headers) 1139 } 1140 if err := fetch(peer, request); err != nil { 1141 // Although we could try and make an attempt to fix this, this error really 1142 // means that we've double allocated a fetch task to a peer. If that is the 1143 // case, the internal state of the downloader and the queue is very wrong so 1144 // better hard crash and note the error instead of silently accumulating into 1145 // a much bigger issue. 1146 panic(fmt.Sprintf("%v: %s fetch assignment failed", peer, kind)) 1147 } 1148 running = true 1149 } 1150 // Make sure that we have peers available for fetching. If all peers have been tried 1151 // and all failed throw an error 1152 if !progressed && !throttled && !running && len(idles) == total && pending() > 0 { 1153 return errPeersUnavailable 1154 } 1155 } 1156 } 1157 } 1158 1159 // processHeaders takes batches of retrieved headers from an input channel and 1160 // keeps processing and scheduling them into the header chain and downloader's 1161 // queue until the stream ends or a failure occurs. 1162 func (d *Downloader) processHeaders(origin uint64, pivot uint64, td *big.Int) error { 1163 // Keep a count of uncertain headers to roll back 1164 rollback := []*types.Header{} 1165 defer func() { 1166 if len(rollback) > 0 { 1167 // Flatten the headers and roll them back 1168 hashes := make([]common.Hash, len(rollback)) 1169 for i, header := range rollback { 1170 hashes[i] = header.Hash() 1171 } 1172 lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 1173 if d.mode != LightSync { 1174 lastFastBlock = d.blockchain.CurrentFastBlock().Number() 1175 lastBlock = d.blockchain.CurrentBlock().Number() 1176 } 1177 d.lightchain.Rollback(hashes) 1178 curFastBlock, curBlock := common.Big0, common.Big0 1179 if d.mode != LightSync { 1180 curFastBlock = d.blockchain.CurrentFastBlock().Number() 1181 curBlock = d.blockchain.CurrentBlock().Number() 1182 } 1183 log.Warn("Rolled back headers", "count", len(hashes), 1184 "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), 1185 "fast", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), 1186 "block", fmt.Sprintf("%d->%d", lastBlock, curBlock)) 1187 } 1188 }() 1189 1190 // Wait for batches of headers to process 1191 gotHeaders := false 1192 1193 for { 1194 select { 1195 case <-d.cancelCh: 1196 return errCancelHeaderProcessing 1197 1198 case headers := <-d.headerProcCh: 1199 // Terminate header processing if we synced up 1200 if len(headers) == 0 { 1201 // Notify everyone that headers are fully processed 1202 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 1203 select { 1204 case ch <- false: 1205 case <-d.cancelCh: 1206 } 1207 } 1208 // If no headers were retrieved at all, the peer violated its TD promise that it had a 1209 // better chain compared to ours. The only exception is if its promised blocks were 1210 // already imported by other means (e.g. 
fetcher): 1211 // 1212 // R <remote peer>, L <local node>: Both at block 10 1213 // R: Produced block 11, and propagates it to L 1214 // L: Queue block 11 for import 1215 // L: Notice that R's head and TD increased compared to ours, start sync 1216 // L: Import of block 11 finishes 1217 // L: Sync begins, and finds common ancestor at 11 1218 // L: Request new headers up from 11 (R's TD was higher, it must have something) 1219 // R: Nothing to give 1220 if d.mode != LightSync { 1221 head := d.blockchain.CurrentBlock() 1222 if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 { 1223 return errStallingPeer 1224 } 1225 } 1226 // If fast or light syncing, ensure promised headers are indeed delivered. This is 1227 // needed to detect scenarios where an attacker feeds a bad pivot and then bails out 1228 // of delivering the post-pivot blocks that would flag the invalid content. 1229 // 1230 // This check cannot be executed "as is" for full imports, since blocks may still be 1231 // queued for processing when the header download completes. However, as long as the 1232 // peer gave us something useful, we're already happy/progressed (above check). 1233 if d.mode == FastSync || d.mode == LightSync { 1234 head := d.lightchain.CurrentHeader() 1235 if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { 1236 return errStallingPeer 1237 } 1238 } 1239 // Disable any rollback and return 1240 rollback = nil 1241 return nil 1242 } 1243 // Otherwise split the chunk of headers into batches and process them 1244 gotHeaders = true 1245 1246 for len(headers) > 0 { 1247 // Terminate if something failed in between processing chunks 1248 select { 1249 case <-d.cancelCh: 1250 return errCancelHeaderProcessing 1251 default: 1252 } 1253 // Select the next chunk of headers to import 1254 limit := maxHeadersProcess 1255 if limit > len(headers) { 1256 limit = len(headers) 1257 } 1258 chunk := headers[:limit] 1259 1260 // In case of header only syncing, validate the chunk immediately 1261 if d.mode == FastSync || d.mode == LightSync { 1262 // Collect the yet unknown headers to mark them as uncertain 1263 unknown := make([]*types.Header, 0, len(headers)) 1264 for _, header := range chunk { 1265 if !d.lightchain.HasHeader(header.Hash(), header.Number.Uint64()) { 1266 unknown = append(unknown, header) 1267 } 1268 } 1269 // If we're importing pure headers, verify based on their recentness 1270 frequency := fsHeaderCheckFrequency 1271 if chunk[len(chunk)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { 1272 frequency = 1 1273 } 1274 if n, err := d.lightchain.InsertHeaderChain(chunk, frequency); err != nil { 1275 // If some headers were inserted, add them to the rollback list too 1276 if n > 0 { 1277 rollback = append(rollback, chunk[:n]...) 1278 } 1279 log.Debug("Invalid header encountered", "number", chunk[n].Number, "hash", chunk[n].Hash(), "err", err) 1280 return errInvalidChain 1281 } 1282 // All verifications passed, store newly found uncertain headers 1283 rollback = append(rollback, unknown...) 1284 if len(rollback) > fsHeaderSafetyNet { 1285 rollback = append(rollback[:0], rollback[len(rollback)-fsHeaderSafetyNet:]...)
1286 } 1287 } 1288 // Unless we're doing light chains, schedule the headers for associated content retrieval 1289 if d.mode == FullSync || d.mode == FastSync { 1290 // If we've reached the allowed number of pending headers, stall a bit 1291 for d.queue.PendingBlocks() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { 1292 select { 1293 case <-d.cancelCh: 1294 return errCancelHeaderProcessing 1295 case <-time.After(time.Second): 1296 } 1297 } 1298 // Otherwise insert the headers for content retrieval 1299 inserts := d.queue.Schedule(chunk, origin) 1300 if len(inserts) != len(chunk) { 1301 log.Debug("Stale headers") 1302 return errBadPeer 1303 } 1304 } 1305 headers = headers[limit:] 1306 origin += uint64(limit) 1307 } 1308 1309 // Update the highest block number we know if a higher one is found. 1310 d.syncStatsLock.Lock() 1311 if d.syncStatsChainHeight < origin { 1312 d.syncStatsChainHeight = origin - 1 1313 } 1314 d.syncStatsLock.Unlock() 1315 1316 // Signal the content downloaders of the availability of new tasks 1317 for _, ch := range []chan bool{d.bodyWakeCh, d.receiptWakeCh} { 1318 select { 1319 case ch <- true: 1320 default: 1321 } 1322 } 1323 } 1324 } 1325 } 1326 1327 // processFullSyncContent takes fetch results from the queue and imports them into the chain. 1328 func (d *Downloader) processFullSyncContent() error { 1329 for { 1330 results := d.queue.Results(true) 1331 if len(results) == 0 { 1332 return nil 1333 } 1334 if d.chainInsertHook != nil { 1335 d.chainInsertHook(results) 1336 } 1337 if err := d.importBlockResults(results); err != nil { 1338 return err 1339 } 1340 } 1341 } 1342 1343 func (d *Downloader) importBlockResults(results []*fetchResult) error { 1344 // Check for any early termination requests 1345 if len(results) == 0 { 1346 return nil 1347 } 1348 select { 1349 case <-d.quitCh: 1350 return errCancelContentProcessing 1351 default: 1352 } 1353 // Retrieve a batch of results to import 1354 first, last := results[0].Header, results[len(results)-1].Header 1355 log.Debug("Inserting downloaded chain", "items", len(results), 1356 "firstnum", first.Number, "firsthash", first.Hash(), 1357 "lastnum", last.Number, "lasthash", last.Hash(), 1358 ) 1359 blocks := make([]*types.Block, len(results)) 1360 for i, result := range results { 1361 blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions) 1362 } 1363 1364 if index, err := d.blockchain.InsertChain(blocks); err != nil { 1365 log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) 1366 return errInvalidChain 1367 } 1368 return nil 1369 } 1370 1371 // processFastSyncContent takes fetch results from the queue and writes them to the 1372 // database. It also controls the synchronisation of state nodes of the pivot block. 1373 func (d *Downloader) processFastSyncContent(latest *types.Header) error { 1374 // Start syncing state of the reported head block. This should get us most of 1375 // the state of the pivot block. 1376 stateSync := d.syncState(latest.Root) 1377 defer stateSync.Cancel() 1378 go func() { 1379 if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { 1380 d.queue.Close() // wake up WaitResults 1381 } 1382 }() 1383 // Figure out the ideal pivot block. Note that this goalpost may move if the 1384 // sync takes long enough for the chain head to move significantly.
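// For example, with fsMinFullBlocks = 64 and a remote head at block 10,000 the pivot sits at block 9,936: everything before it is committed via commitFastSyncData, the pivot itself via commitPivotBlock once its state has been synced, and the trailing 64 blocks are fully executed through importBlockResults.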
1385 pivot := uint64(0) 1386 if height := latest.Number.Uint64(); height > uint64(fsMinFullBlocks) { 1387 pivot = height - uint64(fsMinFullBlocks) 1388 } 1389 // To cater for moving pivot points, track the pivot block and subsequently 1390 // accumulated download results separately. 1391 var ( 1392 oldPivot *fetchResult // Locked in pivot block, might change eventually 1393 oldTail []*fetchResult // Downloaded content after the pivot 1394 ) 1395 for { 1396 // Wait for the next batch of downloaded data to be available, and if the pivot 1397 // block became stale, move the goalpost 1398 results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness 1399 if len(results) == 0 { 1400 // If pivot sync is done, stop 1401 if oldPivot == nil { 1402 return stateSync.Cancel() 1403 } 1404 // If sync failed, stop 1405 select { 1406 case <-d.cancelCh: 1407 return stateSync.Cancel() 1408 default: 1409 } 1410 } 1411 if d.chainInsertHook != nil { 1412 d.chainInsertHook(results) 1413 } 1414 if oldPivot != nil { 1415 results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) 1416 } 1417 // Split around the pivot block and process the two sides via fast/full sync 1418 if atomic.LoadInt32(&d.committed) == 0 { 1419 latest = results[len(results)-1].Header 1420 if height := latest.Number.Uint64(); height > pivot+2*uint64(fsMinFullBlocks) { 1421 log.Warn("Pivot became stale, moving", "old", pivot, "new", height-uint64(fsMinFullBlocks)) 1422 pivot = height - uint64(fsMinFullBlocks) 1423 } 1424 } 1425 P, beforeP, afterP := splitAroundPivot(pivot, results) 1426 if err := d.commitFastSyncData(beforeP, stateSync); err != nil { 1427 return err 1428 } 1429 if P != nil { 1430 // If new pivot block found, cancel old state retrieval and restart 1431 if oldPivot != P { 1432 stateSync.Cancel() 1433 1434 stateSync = d.syncState(P.Header.Root) 1435 defer stateSync.Cancel() 1436 go func() { 1437 if err := stateSync.Wait(); err != nil && err != errCancelStateFetch { 1438 d.queue.Close() // wake up WaitResults 1439 } 1440 }() 1441 oldPivot = P 1442 } 1443 // Wait for completion, occasionally checking for pivot staleness 1444 select { 1445 case <-stateSync.done: 1446 if stateSync.err != nil { 1447 return stateSync.err 1448 } 1449 if err := d.commitPivotBlock(P); err != nil { 1450 return err 1451 } 1452 oldPivot = nil 1453 1454 case <-time.After(time.Second): 1455 oldTail = afterP 1456 continue 1457 } 1458 } 1459 // Fast sync done, pivot commit done, full import 1460 if err := d.importBlockResults(afterP); err != nil { 1461 return err 1462 } 1463 } 1464 } 1465 1466 func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) { 1467 for _, result := range results { 1468 num := result.Header.Number.Uint64() 1469 switch { 1470 case num < pivot: 1471 before = append(before, result) 1472 case num == pivot: 1473 p = result 1474 default: 1475 after = append(after, result) 1476 } 1477 } 1478 return p, before, after 1479 } 1480 1481 func (d *Downloader) commitFastSyncData(results []*fetchResult, stateSync *stateSync) error { 1482 // Check for any early termination requests 1483 if len(results) == 0 { 1484 return nil 1485 } 1486 select { 1487 case <-d.quitCh: 1488 return errCancelContentProcessing 1489 case <-stateSync.done: 1490 if err := stateSync.Wait(); err != nil { 1491 return err 1492 } 1493 default: 1494 } 1495 // Retrieve the a batch of results to import 1496 first, last := results[0].Header, results[len(results)-1].Header 1497 
log.Debug("Inserting fast-sync blocks", "items", len(results), 1498 "firstnum", first.Number, "firsthash", first.Hash(), 1499 "lastnumn", last.Number, "lasthash", last.Hash(), 1500 ) 1501 blocks := make([]*types.Block, len(results)) 1502 receipts := make([]types.Receipts, len(results)) 1503 for i, result := range results { 1504 blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions) 1505 receipts[i] = result.Receipts 1506 } 1507 if index, err := d.blockchain.InsertReceiptChain(blocks, receipts); err != nil { 1508 log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) 1509 return errInvalidChain 1510 } 1511 return nil 1512 } 1513 1514 func (d *Downloader) commitPivotBlock(result *fetchResult) error { 1515 block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions) 1516 log.Debug("Committing fast sync pivot as new head", "number", block.Number(), "hash", block.Hash()) 1517 if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}); err != nil { 1518 return err 1519 } 1520 if err := d.blockchain.FastSyncCommitHead(block.Hash()); err != nil { 1521 return err 1522 } 1523 atomic.StoreInt32(&d.committed, 1) 1524 return nil 1525 } 1526 1527 // DeliverHeaders injects a new batch of block headers received from a remote 1528 // node into the download schedule. 1529 func (d *Downloader) DeliverHeaders(id libp2p.ID, headers []*types.Header) (err error) { 1530 return d.deliver(id, d.headerCh, &headerPack{id, headers}, headerInMeter, headerDropMeter) 1531 } 1532 1533 // DeliverBodies injects a new batch of block bodies received from a remote node. 1534 func (d *Downloader) DeliverBodies(id libp2p.ID, transactions [][]*types.Transaction) (err error) { 1535 return d.deliver(id, d.bodyCh, &bodyPack{id, transactions}, bodyInMeter, bodyDropMeter) 1536 } 1537 1538 // DeliverReceipts injects a new batch of receipts received from a remote node. 1539 func (d *Downloader) DeliverReceipts(id libp2p.ID, receipts [][]*types.Receipt) (err error) { 1540 return d.deliver(id, d.receiptCh, &receiptPack{id, receipts}, receiptInMeter, receiptDropMeter) 1541 } 1542 1543 // DeliverNodeData injects a new batch of node state data received from a remote node. 1544 func (d *Downloader) DeliverNodeData(id libp2p.ID, data [][]byte) (err error) { 1545 return d.deliver(id, d.stateCh, &statePack{id, data}, stateInMeter, stateDropMeter) 1546 } 1547 1548 // deliver injects a new batch of data received from a remote node. 1549 func (d *Downloader) deliver(id libp2p.ID, destCh chan dataPack, packet dataPack, inMeter, dropMeter metrics.Meter) (err error) { 1550 // Update the delivery metrics for both good and failed deliveries 1551 inMeter.Mark(int64(packet.Items())) 1552 defer func() { 1553 if err != nil { 1554 dropMeter.Mark(int64(packet.Items())) 1555 } 1556 }() 1557 // Deliver or abort if the sync is canceled while queuing 1558 d.cancelLock.RLock() 1559 cancel := d.cancelCh 1560 d.cancelLock.RUnlock() 1561 if cancel == nil { 1562 return errNoSyncActive 1563 } 1564 select { 1565 case destCh <- packet: 1566 return nil 1567 case <-cancel: 1568 return errNoSyncActive 1569 } 1570 } 1571 1572 // qosTuner is the quality of service tuning loop that occasionally gathers the 1573 // peer latency statistics and updates the estimated request round trip time. 
1571 
1572 // qosTuner is the quality of service tuning loop that occasionally gathers the
1573 // peer latency statistics and updates the estimated request round trip time.
1574 func (d *Downloader) qosTuner() {
1575 	for {
1576 		// Retrieve the current median RTT and integrate into the previous target RTT
1577 		rtt := time.Duration((1-qosTuningImpact)*float64(atomic.LoadUint64(&d.rttEstimate)) + qosTuningImpact*float64(d.peers.medianRTT()))
1578 		atomic.StoreUint64(&d.rttEstimate, uint64(rtt))
1579 
1580 		// A new RTT cycle passed, increase our confidence in the estimated RTT
1581 		conf := atomic.LoadUint64(&d.rttConfidence)
1582 		conf = conf + (1000000-conf)/2
1583 		atomic.StoreUint64(&d.rttConfidence, conf)
1584 
1585 		// Log the new QoS values and sleep until the next RTT
1586 		log.Debug("Recalculated downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
1587 		select {
1588 		case <-d.quitCh:
1589 			return
1590 		case <-time.After(rtt):
1591 		}
1592 	}
1593 }
1594 
1595 // qosReduceConfidence is meant to be called when a new peer joins the downloader's
1596 // peer set, to reduce the confidence we have in our QoS estimates.
1597 func (d *Downloader) qosReduceConfidence() {
1598 	// If we have a single peer, confidence is always 1
1599 	peers := uint64(d.peers.Len())
1600 	if peers == 0 {
1601 		// Ensure peer connectivity races don't catch us off guard
1602 		return
1603 	}
1604 	if peers == 1 {
1605 		atomic.StoreUint64(&d.rttConfidence, 1000000)
1606 		return
1607 	}
1608 	// If we have a ton of peers, don't drop confidence
1609 	if peers >= uint64(qosConfidenceCap) {
1610 		return
1611 	}
1612 	// Otherwise drop the confidence factor
1613 	conf := atomic.LoadUint64(&d.rttConfidence) * (peers - 1) / peers
1614 	if float64(conf)/1000000 < rttMinConfidence {
1615 		conf = uint64(rttMinConfidence * 1000000)
1616 	}
1617 	atomic.StoreUint64(&d.rttConfidence, conf)
1618 
1619 	rtt := time.Duration(atomic.LoadUint64(&d.rttEstimate))
1620 	log.Debug("Relaxed downloader QoS values", "rtt", rtt, "confidence", float64(conf)/1000000.0, "ttl", d.requestTTL())
1621 }
1622 
1623 // requestRTT returns the current target round trip time for a download request
1624 // to complete in.
1625 //
1626 // Note that the returned RTT is 0.9 of the actual estimated RTT. The reason is that
1627 // the downloader tries to adapt queries to the RTT, so multiple RTT values can
1628 // be adapted to, but smaller ones are preferred (a more stable download stream).
1629 func (d *Downloader) requestRTT() time.Duration {
1630 	return time.Duration(atomic.LoadUint64(&d.rttEstimate)) * 9 / 10
1631 }
1632 
1633 // requestTTL returns the current timeout allowance for a single download request
1634 // to finish under.
1635 func (d *Downloader) requestTTL() time.Duration {
1636 	var (
1637 		rtt  = time.Duration(atomic.LoadUint64(&d.rttEstimate))
1638 		conf = float64(atomic.LoadUint64(&d.rttConfidence)) / 1000000.0
1639 	)
1640 	ttl := time.Duration(ttlScaling) * time.Duration(float64(rtt)/conf)
1641 	if ttl > ttlLimit {
1642 		ttl = ttlLimit
1643 	}
1644 	return ttl
1645 }
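
// A worked example of the two helpers above (the figures are made up): with an
// RTT estimate of 6s and a confidence of 0.5, requestRTT targets 9/10 * 6s = 5.4s
// per request, while requestTTL allows ttlScaling * 6s / 0.5 = 3 * 12s = 36s
// before a request times out. Only if the raw value exceeded ttlLimit (one
// minute) would it be capped there.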