github.com/calmw/ethereum@v0.1.1/eth/downloader/downloader.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package downloader contains the manual full chain synchronisation. 18 package downloader 19 20 import ( 21 "errors" 22 "fmt" 23 "math/big" 24 "sync" 25 "sync/atomic" 26 "time" 27 28 "github.com/calmw/ethereum" 29 "github.com/calmw/ethereum/common" 30 "github.com/calmw/ethereum/core/rawdb" 31 "github.com/calmw/ethereum/core/state/snapshot" 32 "github.com/calmw/ethereum/core/types" 33 "github.com/calmw/ethereum/eth/protocols/snap" 34 "github.com/calmw/ethereum/ethdb" 35 "github.com/calmw/ethereum/event" 36 "github.com/calmw/ethereum/log" 37 "github.com/calmw/ethereum/params" 38 "github.com/calmw/ethereum/trie" 39 ) 40 41 var ( 42 MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request 43 MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request 44 MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly 45 MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request 46 47 maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) 48 maxHeadersProcess = 2048 // Number of header download results to import at once into the chain 49 maxResultsProcess = 2048 // Number of content download results to import at once into the chain 50 fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) 51 lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) 52 53 reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection 54 reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs 55 56 fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected 57 fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download 58 fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in snap sync 59 ) 60 61 var ( 62 errBusy = errors.New("busy") 63 errUnknownPeer = errors.New("peer is unknown or unhealthy") 64 errBadPeer = errors.New("action from bad peer ignored") 65 errStallingPeer = errors.New("peer is stalling") 66 errUnsyncedPeer = errors.New("unsynced peer") 67 errNoPeers = errors.New("no peers to keep download active") 68 errTimeout = errors.New("timeout") 69 errEmptyHeaderSet = errors.New("empty header set by peer") 70 errPeersUnavailable = errors.New("no peers available or all tried for download") 71 errInvalidAncestor = errors.New("retrieved ancestor is invalid") 72 errInvalidChain = errors.New("retrieved hash chain is invalid") 73 errInvalidBody = errors.New("retrieved block body is invalid") 74 errInvalidReceipt = errors.New("retrieved receipt is invalid") 75 errCancelStateFetch = errors.New("state data download canceled (requested)") 76 errCancelContentProcessing = errors.New("content processing canceled (requested)") 77 errCanceled = errors.New("syncing canceled (requested)") 78 errTooOld = errors.New("peer's protocol version too old") 79 errNoAncestorFound = errors.New("no common ancestor found") 80 errNoPivotHeader = errors.New("pivot header is not found") 81 ErrMergeTransition = errors.New("legacy sync reached the merge") 82 ) 83 84 // peerDropFn is a callback type for dropping a peer detected as malicious. 85 type peerDropFn func(id string) 86 87 // badBlockFn is a callback for the async beacon sync to notify the caller that 88 // the origin header requested to sync to, produced a chain with a bad block. 89 type badBlockFn func(invalid *types.Header, origin *types.Header) 90 91 // headerTask is a set of downloaded headers to queue along with their precomputed 92 // hashes to avoid constant rehashing. 93 type headerTask struct { 94 headers []*types.Header 95 hashes []common.Hash 96 } 97 98 type Downloader struct { 99 mode atomic.Uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode 100 mux *event.TypeMux // Event multiplexer to announce sync operation events 101 102 genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT) 103 queue *queue // Scheduler for selecting the hashes to download 104 peers *peerSet // Set of active peers from which download can proceed 105 106 stateDB ethdb.Database // Database to state sync into (and deduplicate via) 107 108 // Statistics 109 syncStatsChainOrigin uint64 // Origin block number where syncing started at 110 syncStatsChainHeight uint64 // Highest block number known when syncing started 111 syncStatsLock sync.RWMutex // Lock protecting the sync stats fields 112 113 lightchain LightChain 114 blockchain BlockChain 115 116 // Callbacks 117 dropPeer peerDropFn // Drops a peer for misbehaving 118 badBlock badBlockFn // Reports a block as rejected by the chain 119 120 // Status 121 synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing 122 synchronising atomic.Bool 123 notified atomic.Bool 124 committed atomic.Bool 125 ancientLimit uint64 // The maximum block number which can be regarded as ancient data. 126 127 // Channels 128 headerProcCh chan *headerTask // Channel to feed the header processor new tasks 129 130 // Skeleton sync 131 skeleton *skeleton // Header skeleton to backfill the chain with (eth2 mode) 132 133 // State sync 134 pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root 135 pivotLock sync.RWMutex // Lock protecting pivot header reads from updates 136 137 SnapSyncer *snap.Syncer // TODO(karalabe): make private! hack for now 138 stateSyncStart chan *stateSync 139 140 // Cancellation and termination 141 cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) 142 cancelCh chan struct{} // Channel to cancel mid-flight syncs 143 cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers 144 cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited. 145 146 quitCh chan struct{} // Quit channel to signal termination 147 quitLock sync.Mutex // Lock to prevent double closes 148 149 // Testing hooks 150 syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run 151 bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch 152 receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch 153 chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) 154 155 // Progress reporting metrics 156 syncStartBlock uint64 // Head snap block when Geth was started 157 syncStartTime time.Time // Time instance when chain sync started 158 syncLogTime time.Time // Time instance when status was last reported 159 } 160 161 // LightChain encapsulates functions required to synchronise a light chain. 162 type LightChain interface { 163 // HasHeader verifies a header's presence in the local chain. 164 HasHeader(common.Hash, uint64) bool 165 166 // GetHeaderByHash retrieves a header from the local chain. 167 GetHeaderByHash(common.Hash) *types.Header 168 169 // CurrentHeader retrieves the head header from the local chain. 170 CurrentHeader() *types.Header 171 172 // GetTd returns the total difficulty of a local block. 173 GetTd(common.Hash, uint64) *big.Int 174 175 // InsertHeaderChain inserts a batch of headers into the local chain. 176 InsertHeaderChain([]*types.Header) (int, error) 177 178 // SetHead rewinds the local chain to a new head. 179 SetHead(uint64) error 180 } 181 182 // BlockChain encapsulates functions required to sync a (full or snap) blockchain. 183 type BlockChain interface { 184 LightChain 185 186 // HasBlock verifies a block's presence in the local chain. 187 HasBlock(common.Hash, uint64) bool 188 189 // HasFastBlock verifies a snap block's presence in the local chain. 190 HasFastBlock(common.Hash, uint64) bool 191 192 // GetBlockByHash retrieves a block from the local chain. 193 GetBlockByHash(common.Hash) *types.Block 194 195 // CurrentBlock retrieves the head block from the local chain. 196 CurrentBlock() *types.Header 197 198 // CurrentSnapBlock retrieves the head snap block from the local chain. 199 CurrentSnapBlock() *types.Header 200 201 // SnapSyncCommitHead directly commits the head block to a certain entity. 202 SnapSyncCommitHead(common.Hash) error 203 204 // InsertChain inserts a batch of blocks into the local chain. 205 InsertChain(types.Blocks) (int, error) 206 207 // InsertReceiptChain inserts a batch of receipts into the local chain. 208 InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error) 209 210 // Snapshots returns the blockchain snapshot tree to paused it during sync. 211 Snapshots() *snapshot.Tree 212 213 // TrieDB retrieves the low level trie database used for interacting 214 // with trie nodes. 215 TrieDB() *trie.Database 216 } 217 218 // New creates a new downloader to fetch hashes and blocks from remote peers. 219 func New(stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader { 220 if lightchain == nil { 221 lightchain = chain 222 } 223 dl := &Downloader{ 224 stateDB: stateDb, 225 mux: mux, 226 queue: newQueue(blockCacheMaxItems, blockCacheInitialItems), 227 peers: newPeerSet(), 228 blockchain: chain, 229 lightchain: lightchain, 230 dropPeer: dropPeer, 231 headerProcCh: make(chan *headerTask, 1), 232 quitCh: make(chan struct{}), 233 SnapSyncer: snap.NewSyncer(stateDb, chain.TrieDB().Scheme()), 234 stateSyncStart: make(chan *stateSync), 235 syncStartBlock: chain.CurrentSnapBlock().Number.Uint64(), 236 } 237 // Create the post-merge skeleton syncer and start the process 238 dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success)) 239 240 go dl.stateFetcher() 241 return dl 242 } 243 244 // Progress retrieves the synchronisation boundaries, specifically the origin 245 // block where synchronisation started at (may have failed/suspended); the block 246 // or header sync is currently at; and the latest known block which the sync targets. 247 // 248 // In addition, during the state download phase of snap synchronisation the number 249 // of processed and the total number of known states are also returned. Otherwise 250 // these are zero. 251 func (d *Downloader) Progress() ethereum.SyncProgress { 252 // Lock the current stats and return the progress 253 d.syncStatsLock.RLock() 254 defer d.syncStatsLock.RUnlock() 255 256 current := uint64(0) 257 mode := d.getMode() 258 switch { 259 case d.blockchain != nil && mode == FullSync: 260 current = d.blockchain.CurrentBlock().Number.Uint64() 261 case d.blockchain != nil && mode == SnapSync: 262 current = d.blockchain.CurrentSnapBlock().Number.Uint64() 263 case d.lightchain != nil: 264 current = d.lightchain.CurrentHeader().Number.Uint64() 265 default: 266 log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode) 267 } 268 progress, pending := d.SnapSyncer.Progress() 269 270 return ethereum.SyncProgress{ 271 StartingBlock: d.syncStatsChainOrigin, 272 CurrentBlock: current, 273 HighestBlock: d.syncStatsChainHeight, 274 SyncedAccounts: progress.AccountSynced, 275 SyncedAccountBytes: uint64(progress.AccountBytes), 276 SyncedBytecodes: progress.BytecodeSynced, 277 SyncedBytecodeBytes: uint64(progress.BytecodeBytes), 278 SyncedStorage: progress.StorageSynced, 279 SyncedStorageBytes: uint64(progress.StorageBytes), 280 HealedTrienodes: progress.TrienodeHealSynced, 281 HealedTrienodeBytes: uint64(progress.TrienodeHealBytes), 282 HealedBytecodes: progress.BytecodeHealSynced, 283 HealedBytecodeBytes: uint64(progress.BytecodeHealBytes), 284 HealingTrienodes: pending.TrienodeHeal, 285 HealingBytecode: pending.BytecodeHeal, 286 } 287 } 288 289 // Synchronising returns whether the downloader is currently retrieving blocks. 290 func (d *Downloader) Synchronising() bool { 291 return d.synchronising.Load() 292 } 293 294 // RegisterPeer injects a new download peer into the set of block source to be 295 // used for fetching hashes and blocks from. 296 func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { 297 var logger log.Logger 298 if len(id) < 16 { 299 // Tests use short IDs, don't choke on them 300 logger = log.New("peer", id) 301 } else { 302 logger = log.New("peer", id[:8]) 303 } 304 logger.Trace("Registering sync peer") 305 if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil { 306 logger.Error("Failed to register sync peer", "err", err) 307 return err 308 } 309 return nil 310 } 311 312 // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer. 313 func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error { 314 return d.RegisterPeer(id, version, &lightPeerWrapper{peer}) 315 } 316 317 // UnregisterPeer remove a peer from the known list, preventing any action from 318 // the specified peer. An effort is also made to return any pending fetches into 319 // the queue. 320 func (d *Downloader) UnregisterPeer(id string) error { 321 // Unregister the peer from the active peer set and revoke any fetch tasks 322 var logger log.Logger 323 if len(id) < 16 { 324 // Tests use short IDs, don't choke on them 325 logger = log.New("peer", id) 326 } else { 327 logger = log.New("peer", id[:8]) 328 } 329 logger.Trace("Unregistering sync peer") 330 if err := d.peers.Unregister(id); err != nil { 331 logger.Error("Failed to unregister sync peer", "err", err) 332 return err 333 } 334 d.queue.Revoke(id) 335 336 return nil 337 } 338 339 // LegacySync tries to sync up our local block chain with a remote peer, both 340 // adding various sanity checks as well as wrapping it with various log entries. 341 func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, mode SyncMode) error { 342 err := d.synchronise(id, head, td, ttd, mode, false, nil) 343 344 switch err { 345 case nil, errBusy, errCanceled: 346 return err 347 } 348 if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) || 349 errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) || 350 errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) { 351 log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) 352 if d.dropPeer == nil { 353 // The dropPeer method is nil when `--copydb` is used for a local copy. 354 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 355 log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id) 356 } else { 357 d.dropPeer(id) 358 } 359 return err 360 } 361 if errors.Is(err, ErrMergeTransition) { 362 return err // This is an expected fault, don't keep printing it in a spin-loop 363 } 364 log.Warn("Synchronisation failed, retrying", "err", err) 365 return err 366 } 367 368 // synchronise will select the peer and use it for synchronising. If an empty string is given 369 // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the 370 // checks fail an error will be returned. This method is synchronous 371 func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, mode SyncMode, beaconMode bool, beaconPing chan struct{}) error { 372 // The beacon header syncer is async. It will start this synchronization and 373 // will continue doing other tasks. However, if synchronization needs to be 374 // cancelled, the syncer needs to know if we reached the startup point (and 375 // inited the cancel channel) or not yet. Make sure that we'll signal even in 376 // case of a failure. 377 if beaconPing != nil { 378 defer func() { 379 select { 380 case <-beaconPing: // already notified 381 default: 382 close(beaconPing) // weird exit condition, notify that it's safe to cancel (the nothing) 383 } 384 }() 385 } 386 // Mock out the synchronisation if testing 387 if d.synchroniseMock != nil { 388 return d.synchroniseMock(id, hash) 389 } 390 // Make sure only one goroutine is ever allowed past this point at once 391 if !d.synchronising.CompareAndSwap(false, true) { 392 return errBusy 393 } 394 defer d.synchronising.Store(false) 395 396 // Post a user notification of the sync (only once per session) 397 if d.notified.CompareAndSwap(false, true) { 398 log.Info("Block synchronisation started") 399 } 400 if mode == SnapSync { 401 // Snap sync uses the snapshot namespace to store potentially flakey data until 402 // sync completely heals and finishes. Pause snapshot maintenance in the mean- 403 // time to prevent access. 404 if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests 405 snapshots.Disable() 406 } 407 } 408 // Reset the queue, peer set and wake channels to clean any internal leftover state 409 d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems) 410 d.peers.Reset() 411 412 for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { 413 select { 414 case <-ch: 415 default: 416 } 417 } 418 for empty := false; !empty; { 419 select { 420 case <-d.headerProcCh: 421 default: 422 empty = true 423 } 424 } 425 // Create cancel channel for aborting mid-flight and mark the master peer 426 d.cancelLock.Lock() 427 d.cancelCh = make(chan struct{}) 428 d.cancelPeer = id 429 d.cancelLock.Unlock() 430 431 defer d.Cancel() // No matter what, we can't leave the cancel channel open 432 433 // Atomically set the requested sync mode 434 d.mode.Store(uint32(mode)) 435 436 // Retrieve the origin peer and initiate the downloading process 437 var p *peerConnection 438 if !beaconMode { // Beacon mode doesn't need a peer to sync from 439 p = d.peers.Peer(id) 440 if p == nil { 441 return errUnknownPeer 442 } 443 } 444 if beaconPing != nil { 445 close(beaconPing) 446 } 447 return d.syncWithPeer(p, hash, td, ttd, beaconMode) 448 } 449 450 func (d *Downloader) getMode() SyncMode { 451 return SyncMode(d.mode.Load()) 452 } 453 454 // syncWithPeer starts a block synchronization based on the hash chain from the 455 // specified peer and head hash. 456 func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *big.Int, beaconMode bool) (err error) { 457 d.mux.Post(StartEvent{}) 458 defer func() { 459 // reset on error 460 if err != nil { 461 d.mux.Post(FailedEvent{err}) 462 } else { 463 latest := d.lightchain.CurrentHeader() 464 d.mux.Post(DoneEvent{latest}) 465 } 466 }() 467 mode := d.getMode() 468 469 if !beaconMode { 470 log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode) 471 } else { 472 log.Debug("Backfilling with the network", "mode", mode) 473 } 474 defer func(start time.Time) { 475 log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start))) 476 }(time.Now()) 477 478 // Look up the sync boundaries: the common ancestor and the target block 479 var latest, pivot, final *types.Header 480 if !beaconMode { 481 // In legacy mode, use the master peer to retrieve the headers from 482 latest, pivot, err = d.fetchHead(p) 483 if err != nil { 484 return err 485 } 486 } else { 487 // In beacon mode, use the skeleton chain to retrieve the headers from 488 latest, _, final, err = d.skeleton.Bounds() 489 if err != nil { 490 return err 491 } 492 if latest.Number.Uint64() > uint64(fsMinFullBlocks) { 493 number := latest.Number.Uint64() - uint64(fsMinFullBlocks) 494 495 // Retrieve the pivot header from the skeleton chain segment but 496 // fallback to local chain if it's not found in skeleton space. 497 if pivot = d.skeleton.Header(number); pivot == nil { 498 _, oldest, _, _ := d.skeleton.Bounds() // error is already checked 499 if number < oldest.Number.Uint64() { 500 count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks 501 headers := d.readHeaderRange(oldest, count) 502 if len(headers) == count { 503 pivot = headers[len(headers)-1] 504 log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number) 505 } 506 } 507 } 508 // Print an error log and return directly in case the pivot header 509 // is still not found. It means the skeleton chain is not linked 510 // correctly with local chain. 511 if pivot == nil { 512 log.Error("Pivot header is not found", "number", number) 513 return errNoPivotHeader 514 } 515 } 516 } 517 // If no pivot block was returned, the head is below the min full block 518 // threshold (i.e. new chain). In that case we won't really snap sync 519 // anyway, but still need a valid pivot block to avoid some code hitting 520 // nil panics on access. 521 if mode == SnapSync && pivot == nil { 522 pivot = d.blockchain.CurrentBlock() 523 } 524 height := latest.Number.Uint64() 525 526 var origin uint64 527 if !beaconMode { 528 // In legacy mode, reach out to the network and find the ancestor 529 origin, err = d.findAncestor(p, latest) 530 if err != nil { 531 return err 532 } 533 } else { 534 // In beacon mode, use the skeleton chain for the ancestor lookup 535 origin, err = d.findBeaconAncestor() 536 if err != nil { 537 return err 538 } 539 } 540 d.syncStatsLock.Lock() 541 if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { 542 d.syncStatsChainOrigin = origin 543 } 544 d.syncStatsChainHeight = height 545 d.syncStatsLock.Unlock() 546 547 // Ensure our origin point is below any snap sync pivot point 548 if mode == SnapSync { 549 if height <= uint64(fsMinFullBlocks) { 550 origin = 0 551 } else { 552 pivotNumber := pivot.Number.Uint64() 553 if pivotNumber <= origin { 554 origin = pivotNumber - 1 555 } 556 // Write out the pivot into the database so a rollback beyond it will 557 // reenable snap sync 558 rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber) 559 } 560 } 561 d.committed.Store(true) 562 if mode == SnapSync && pivot.Number.Uint64() != 0 { 563 d.committed.Store(false) 564 } 565 if mode == SnapSync { 566 // Set the ancient data limitation. If we are running snap sync, all block 567 // data older than ancientLimit will be written to the ancient store. More 568 // recent data will be written to the active database and will wait for the 569 // freezer to migrate. 570 // 571 // If the network is post-merge, use either the last announced finalized 572 // block as the ancient limit, or if we haven't yet received one, the head- 573 // a max fork ancestry limit. One quirky case if we've already passed the 574 // finalized block, in which case the skeleton.Bounds will return nil and 575 // we'll revert to head - 90K. That's fine, we're finishing sync anyway. 576 // 577 // For non-merged networks, if there is a checkpoint available, then calculate 578 // the ancientLimit through that. Otherwise calculate the ancient limit through 579 // the advertised height of the remote peer. This most is mostly a fallback for 580 // legacy networks, but should eventually be droppped. TODO(karalabe). 581 if beaconMode { 582 // Beacon sync, use the latest finalized block as the ancient limit 583 // or a reasonable height if no finalized block is yet announced. 584 if final != nil { 585 d.ancientLimit = final.Number.Uint64() 586 } else if height > fullMaxForkAncestry+1 { 587 d.ancientLimit = height - fullMaxForkAncestry - 1 588 } else { 589 d.ancientLimit = 0 590 } 591 } else { 592 // Legacy sync, use the best announcement we have from the remote peer. 593 // TODO(karalabe): Drop this pathway. 594 if height > fullMaxForkAncestry+1 { 595 d.ancientLimit = height - fullMaxForkAncestry - 1 596 } else { 597 d.ancientLimit = 0 598 } 599 } 600 frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here. 601 602 // If a part of blockchain data has already been written into active store, 603 // disable the ancient style insertion explicitly. 604 if origin >= frozen && frozen != 0 { 605 d.ancientLimit = 0 606 log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1) 607 } else if d.ancientLimit > 0 { 608 log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit) 609 } 610 // Rewind the ancient store and blockchain if reorg happens. 611 if origin+1 < frozen { 612 if err := d.lightchain.SetHead(origin); err != nil { 613 return err 614 } 615 } 616 } 617 // Initiate the sync using a concurrent header and content retrieval algorithm 618 d.queue.Prepare(origin+1, mode) 619 if d.syncInitHook != nil { 620 d.syncInitHook(origin, height) 621 } 622 var headerFetcher func() error 623 if !beaconMode { 624 // In legacy mode, headers are retrieved from the network 625 headerFetcher = func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) } 626 } else { 627 // In beacon mode, headers are served by the skeleton syncer 628 headerFetcher = func() error { return d.fetchBeaconHeaders(origin + 1) } 629 } 630 fetchers := []func() error{ 631 headerFetcher, // Headers are always retrieved 632 func() error { return d.fetchBodies(origin+1, beaconMode) }, // Bodies are retrieved during normal and snap sync 633 func() error { return d.fetchReceipts(origin+1, beaconMode) }, // Receipts are retrieved during snap sync 634 func() error { return d.processHeaders(origin+1, td, ttd, beaconMode) }, 635 } 636 if mode == SnapSync { 637 d.pivotLock.Lock() 638 d.pivotHeader = pivot 639 d.pivotLock.Unlock() 640 641 fetchers = append(fetchers, func() error { return d.processSnapSyncContent() }) 642 } else if mode == FullSync { 643 fetchers = append(fetchers, func() error { return d.processFullSyncContent(ttd, beaconMode) }) 644 } 645 return d.spawnSync(fetchers) 646 } 647 648 // spawnSync runs d.process and all given fetcher functions to completion in 649 // separate goroutines, returning the first error that appears. 650 func (d *Downloader) spawnSync(fetchers []func() error) error { 651 errc := make(chan error, len(fetchers)) 652 d.cancelWg.Add(len(fetchers)) 653 for _, fn := range fetchers { 654 fn := fn 655 go func() { defer d.cancelWg.Done(); errc <- fn() }() 656 } 657 // Wait for the first error, then terminate the others. 658 var err error 659 for i := 0; i < len(fetchers); i++ { 660 if i == len(fetchers)-1 { 661 // Close the queue when all fetchers have exited. 662 // This will cause the block processor to end when 663 // it has processed the queue. 664 d.queue.Close() 665 } 666 if got := <-errc; got != nil { 667 err = got 668 if got != errCanceled { 669 break // receive a meaningful error, bubble it up 670 } 671 } 672 } 673 d.queue.Close() 674 d.Cancel() 675 return err 676 } 677 678 // cancel aborts all of the operations and resets the queue. However, cancel does 679 // not wait for the running download goroutines to finish. This method should be 680 // used when cancelling the downloads from inside the downloader. 681 func (d *Downloader) cancel() { 682 // Close the current cancel channel 683 d.cancelLock.Lock() 684 defer d.cancelLock.Unlock() 685 686 if d.cancelCh != nil { 687 select { 688 case <-d.cancelCh: 689 // Channel was already closed 690 default: 691 close(d.cancelCh) 692 } 693 } 694 } 695 696 // Cancel aborts all of the operations and waits for all download goroutines to 697 // finish before returning. 698 func (d *Downloader) Cancel() { 699 d.cancel() 700 d.cancelWg.Wait() 701 } 702 703 // Terminate interrupts the downloader, canceling all pending operations. 704 // The downloader cannot be reused after calling Terminate. 705 func (d *Downloader) Terminate() { 706 // Close the termination channel (make sure double close is allowed) 707 d.quitLock.Lock() 708 select { 709 case <-d.quitCh: 710 default: 711 close(d.quitCh) 712 713 // Terminate the internal beacon syncer 714 d.skeleton.Terminate() 715 } 716 d.quitLock.Unlock() 717 718 // Cancel any pending download requests 719 d.Cancel() 720 } 721 722 // fetchHead retrieves the head header and prior pivot block (if available) from 723 // a remote peer. 724 func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) { 725 p.log.Debug("Retrieving remote chain head") 726 mode := d.getMode() 727 728 // Request the advertised remote head block and wait for the response 729 latest, _ := p.peer.Head() 730 fetch := 1 731 if mode == SnapSync { 732 fetch = 2 // head + pivot headers 733 } 734 headers, hashes, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true) 735 if err != nil { 736 return nil, nil, err 737 } 738 // Make sure the peer gave us at least one and at most the requested headers 739 if len(headers) == 0 || len(headers) > fetch { 740 return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch) 741 } 742 // The first header needs to be the head, validate against the request. If 743 // only 1 header was returned, make sure there's no pivot or there was not 744 // one requested. 745 head = headers[0] 746 if len(headers) == 1 { 747 if mode == SnapSync && head.Number.Uint64() > uint64(fsMinFullBlocks) { 748 return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer) 749 } 750 p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", hashes[0]) 751 return head, nil, nil 752 } 753 // At this point we have 2 headers in total and the first is the 754 // validated head of the chain. Check the pivot number and return, 755 pivot = headers[1] 756 if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) { 757 return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks)) 758 } 759 return head, pivot, nil 760 } 761 762 // calculateRequestSpan calculates what headers to request from a peer when trying to determine the 763 // common ancestor. 764 // It returns parameters to be used for peer.RequestHeadersByNumber: 765 // 766 // from - starting block number 767 // count - number of headers to request 768 // skip - number of headers to skip 769 // 770 // and also returns 'max', the last block which is expected to be returned by the remote peers, 771 // given the (from,count,skip) 772 func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) { 773 var ( 774 from int 775 count int 776 MaxCount = MaxHeaderFetch / 16 777 ) 778 // requestHead is the highest block that we will ask for. If requestHead is not offset, 779 // the highest block that we will get is 16 blocks back from head, which means we 780 // will fetch 14 or 15 blocks unnecessarily in the case the height difference 781 // between us and the peer is 1-2 blocks, which is most common 782 requestHead := int(remoteHeight) - 1 783 if requestHead < 0 { 784 requestHead = 0 785 } 786 // requestBottom is the lowest block we want included in the query 787 // Ideally, we want to include the one just below our own head 788 requestBottom := int(localHeight - 1) 789 if requestBottom < 0 { 790 requestBottom = 0 791 } 792 totalSpan := requestHead - requestBottom 793 span := 1 + totalSpan/MaxCount 794 if span < 2 { 795 span = 2 796 } 797 if span > 16 { 798 span = 16 799 } 800 801 count = 1 + totalSpan/span 802 if count > MaxCount { 803 count = MaxCount 804 } 805 if count < 2 { 806 count = 2 807 } 808 from = requestHead - (count-1)*span 809 if from < 0 { 810 from = 0 811 } 812 max := from + (count-1)*span 813 return int64(from), count, span - 1, uint64(max) 814 } 815 816 // findAncestor tries to locate the common ancestor link of the local chain and 817 // a remote peers blockchain. In the general case when our node was in sync and 818 // on the correct chain, checking the top N links should already get us a match. 819 // In the rare scenario when we ended up on a long reorganisation (i.e. none of 820 // the head links match), we do a binary search to find the common ancestor. 821 func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) { 822 // Figure out the valid ancestor range to prevent rewrite attacks 823 var ( 824 floor = int64(-1) 825 localHeight uint64 826 remoteHeight = remoteHeader.Number.Uint64() 827 ) 828 mode := d.getMode() 829 switch mode { 830 case FullSync: 831 localHeight = d.blockchain.CurrentBlock().Number.Uint64() 832 case SnapSync: 833 localHeight = d.blockchain.CurrentSnapBlock().Number.Uint64() 834 default: 835 localHeight = d.lightchain.CurrentHeader().Number.Uint64() 836 } 837 p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight) 838 839 // Recap floor value for binary search 840 maxForkAncestry := fullMaxForkAncestry 841 if d.getMode() == LightSync { 842 maxForkAncestry = lightMaxForkAncestry 843 } 844 if localHeight >= maxForkAncestry { 845 // We're above the max reorg threshold, find the earliest fork point 846 floor = int64(localHeight - maxForkAncestry) 847 } 848 // If we're doing a light sync, ensure the floor doesn't go below the CHT, as 849 // all headers before that point will be missing. 850 if mode == LightSync { 851 // If we don't know the current CHT position, find it 852 if d.genesis == 0 { 853 header := d.lightchain.CurrentHeader() 854 for header != nil { 855 d.genesis = header.Number.Uint64() 856 if floor >= int64(d.genesis)-1 { 857 break 858 } 859 header = d.lightchain.GetHeaderByHash(header.ParentHash) 860 } 861 } 862 // We already know the "genesis" block number, cap floor to that 863 if floor < int64(d.genesis)-1 { 864 floor = int64(d.genesis) - 1 865 } 866 } 867 868 ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor) 869 if err == nil { 870 return ancestor, nil 871 } 872 // The returned error was not nil. 873 // If the error returned does not reflect that a common ancestor was not found, return it. 874 // If the error reflects that a common ancestor was not found, continue to binary search, 875 // where the error value will be reassigned. 876 if !errors.Is(err, errNoAncestorFound) { 877 return 0, err 878 } 879 880 ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor) 881 if err != nil { 882 return 0, err 883 } 884 return ancestor, nil 885 } 886 887 func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (uint64, error) { 888 from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight) 889 890 p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip) 891 headers, hashes, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false) 892 if err != nil { 893 return 0, err 894 } 895 // Wait for the remote response to the head fetch 896 number, hash := uint64(0), common.Hash{} 897 898 // Make sure the peer actually gave something valid 899 if len(headers) == 0 { 900 p.log.Warn("Empty head header set") 901 return 0, errEmptyHeaderSet 902 } 903 // Make sure the peer's reply conforms to the request 904 for i, header := range headers { 905 expectNumber := from + int64(i)*int64(skip+1) 906 if number := header.Number.Int64(); number != expectNumber { 907 p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number) 908 return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering")) 909 } 910 } 911 // Check if a common ancestor was found 912 for i := len(headers) - 1; i >= 0; i-- { 913 // Skip any headers that underflow/overflow our requested set 914 if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max { 915 continue 916 } 917 // Otherwise check if we already know the header or not 918 h := hashes[i] 919 n := headers[i].Number.Uint64() 920 921 var known bool 922 switch mode { 923 case FullSync: 924 known = d.blockchain.HasBlock(h, n) 925 case SnapSync: 926 known = d.blockchain.HasFastBlock(h, n) 927 default: 928 known = d.lightchain.HasHeader(h, n) 929 } 930 if known { 931 number, hash = n, h 932 break 933 } 934 } 935 // If the head fetch already found an ancestor, return 936 if hash != (common.Hash{}) { 937 if int64(number) <= floor { 938 p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor) 939 return 0, errInvalidAncestor 940 } 941 p.log.Debug("Found common ancestor", "number", number, "hash", hash) 942 return number, nil 943 } 944 return 0, errNoAncestorFound 945 } 946 947 func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (uint64, error) { 948 hash := common.Hash{} 949 950 // Ancestor not found, we need to binary search over our chain 951 start, end := uint64(0), remoteHeight 952 if floor > 0 { 953 start = uint64(floor) 954 } 955 p.log.Trace("Binary searching for common ancestor", "start", start, "end", end) 956 957 for start+1 < end { 958 // Split our chain interval in two, and request the hash to cross check 959 check := (start + end) / 2 960 961 headers, hashes, err := d.fetchHeadersByNumber(p, check, 1, 0, false) 962 if err != nil { 963 return 0, err 964 } 965 // Make sure the peer actually gave something valid 966 if len(headers) != 1 { 967 p.log.Warn("Multiple headers for single request", "headers", len(headers)) 968 return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers)) 969 } 970 // Modify the search interval based on the response 971 h := hashes[0] 972 n := headers[0].Number.Uint64() 973 974 var known bool 975 switch mode { 976 case FullSync: 977 known = d.blockchain.HasBlock(h, n) 978 case SnapSync: 979 known = d.blockchain.HasFastBlock(h, n) 980 default: 981 known = d.lightchain.HasHeader(h, n) 982 } 983 if !known { 984 end = check 985 continue 986 } 987 header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists 988 if header.Number.Uint64() != check { 989 p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) 990 return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number) 991 } 992 start = check 993 hash = h 994 } 995 // Ensure valid ancestry and return 996 if int64(start) <= floor { 997 p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor) 998 return 0, errInvalidAncestor 999 } 1000 p.log.Debug("Found common ancestor", "number", start, "hash", hash) 1001 return start, nil 1002 } 1003 1004 // fetchHeaders keeps retrieving headers concurrently from the number 1005 // requested, until no more are returned, potentially throttling on the way. To 1006 // facilitate concurrency but still protect against malicious nodes sending bad 1007 // headers, we construct a header chain skeleton using the "origin" peer we are 1008 // syncing with, and fill in the missing headers using anyone else. Headers from 1009 // other peers are only accepted if they map cleanly to the skeleton. If no one 1010 // can fill in the skeleton - not even the origin peer - it's assumed invalid and 1011 // the origin is dropped. 1012 func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) error { 1013 p.log.Debug("Directing header downloads", "origin", from) 1014 defer p.log.Debug("Header download terminated") 1015 1016 // Start pulling the header chain skeleton until all is done 1017 var ( 1018 skeleton = true // Skeleton assembly phase or finishing up 1019 pivoting = false // Whether the next request is pivot verification 1020 ancestor = from 1021 mode = d.getMode() 1022 ) 1023 for { 1024 // Pull the next batch of headers, it either: 1025 // - Pivot check to see if the chain moved too far 1026 // - Skeleton retrieval to permit concurrent header fetches 1027 // - Full header retrieval if we're near the chain head 1028 var ( 1029 headers []*types.Header 1030 hashes []common.Hash 1031 err error 1032 ) 1033 switch { 1034 case pivoting: 1035 d.pivotLock.RLock() 1036 pivot := d.pivotHeader.Number.Uint64() 1037 d.pivotLock.RUnlock() 1038 1039 p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks)) 1040 headers, hashes, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep 1041 1042 case skeleton: 1043 p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) 1044 headers, hashes, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) 1045 1046 default: 1047 p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) 1048 headers, hashes, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false) 1049 } 1050 switch err { 1051 case nil: 1052 // Headers retrieved, continue with processing 1053 1054 case errCanceled: 1055 // Sync cancelled, no issue, propagate up 1056 return err 1057 1058 default: 1059 // Header retrieval either timed out, or the peer failed in some strange way 1060 // (e.g. disconnect). Consider the master peer bad and drop 1061 d.dropPeer(p.id) 1062 1063 // Finish the sync gracefully instead of dumping the gathered data though 1064 for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { 1065 select { 1066 case ch <- false: 1067 case <-d.cancelCh: 1068 } 1069 } 1070 select { 1071 case d.headerProcCh <- nil: 1072 case <-d.cancelCh: 1073 } 1074 return fmt.Errorf("%w: header request failed: %v", errBadPeer, err) 1075 } 1076 // If the pivot is being checked, move if it became stale and run the real retrieval 1077 var pivot uint64 1078 1079 d.pivotLock.RLock() 1080 if d.pivotHeader != nil { 1081 pivot = d.pivotHeader.Number.Uint64() 1082 } 1083 d.pivotLock.RUnlock() 1084 1085 if pivoting { 1086 if len(headers) == 2 { 1087 if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want { 1088 log.Warn("Peer sent invalid next pivot", "have", have, "want", want) 1089 return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want) 1090 } 1091 if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want { 1092 log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want) 1093 return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want) 1094 } 1095 log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number) 1096 pivot = headers[0].Number.Uint64() 1097 1098 d.pivotLock.Lock() 1099 d.pivotHeader = headers[0] 1100 d.pivotLock.Unlock() 1101 1102 // Write out the pivot into the database so a rollback beyond 1103 // it will reenable snap sync and update the state root that 1104 // the state syncer will be downloading. 1105 rawdb.WriteLastPivotNumber(d.stateDB, pivot) 1106 } 1107 // Disable the pivot check and fetch the next batch of headers 1108 pivoting = false 1109 continue 1110 } 1111 // If the skeleton's finished, pull any remaining head headers directly from the origin 1112 if skeleton && len(headers) == 0 { 1113 // A malicious node might withhold advertised headers indefinitely 1114 if from+uint64(MaxHeaderFetch)-1 <= head { 1115 p.log.Warn("Peer withheld skeleton headers", "advertised", head, "withheld", from+uint64(MaxHeaderFetch)-1) 1116 return fmt.Errorf("%w: withheld skeleton headers: advertised %d, withheld #%d", errStallingPeer, head, from+uint64(MaxHeaderFetch)-1) 1117 } 1118 p.log.Debug("No skeleton, fetching headers directly") 1119 skeleton = false 1120 continue 1121 } 1122 // If no more headers are inbound, notify the content fetchers and return 1123 if len(headers) == 0 { 1124 // Don't abort header fetches while the pivot is downloading 1125 if !d.committed.Load() && pivot <= from { 1126 p.log.Debug("No headers, waiting for pivot commit") 1127 select { 1128 case <-time.After(fsHeaderContCheck): 1129 continue 1130 case <-d.cancelCh: 1131 return errCanceled 1132 } 1133 } 1134 // Pivot done (or not in snap sync) and no more headers, terminate the process 1135 p.log.Debug("No more headers available") 1136 select { 1137 case d.headerProcCh <- nil: 1138 return nil 1139 case <-d.cancelCh: 1140 return errCanceled 1141 } 1142 } 1143 // If we received a skeleton batch, resolve internals concurrently 1144 var progressed bool 1145 if skeleton { 1146 filled, hashset, proced, err := d.fillHeaderSkeleton(from, headers) 1147 if err != nil { 1148 p.log.Debug("Skeleton chain invalid", "err", err) 1149 return fmt.Errorf("%w: %v", errInvalidChain, err) 1150 } 1151 headers = filled[proced:] 1152 hashes = hashset[proced:] 1153 1154 progressed = proced > 0 1155 from += uint64(proced) 1156 } else { 1157 // A malicious node might withhold advertised headers indefinitely 1158 if n := len(headers); n < MaxHeaderFetch && headers[n-1].Number.Uint64() < head { 1159 p.log.Warn("Peer withheld headers", "advertised", head, "delivered", headers[n-1].Number.Uint64()) 1160 return fmt.Errorf("%w: withheld headers: advertised %d, delivered %d", errStallingPeer, head, headers[n-1].Number.Uint64()) 1161 } 1162 // If we're closing in on the chain head, but haven't yet reached it, delay 1163 // the last few headers so mini reorgs on the head don't cause invalid hash 1164 // chain errors. 1165 if n := len(headers); n > 0 { 1166 // Retrieve the current head we're at 1167 var head uint64 1168 if mode == LightSync { 1169 head = d.lightchain.CurrentHeader().Number.Uint64() 1170 } else { 1171 head = d.blockchain.CurrentSnapBlock().Number.Uint64() 1172 if full := d.blockchain.CurrentBlock().Number.Uint64(); head < full { 1173 head = full 1174 } 1175 } 1176 // If the head is below the common ancestor, we're actually deduplicating 1177 // already existing chain segments, so use the ancestor as the fake head. 1178 // Otherwise, we might end up delaying header deliveries pointlessly. 1179 if head < ancestor { 1180 head = ancestor 1181 } 1182 // If the head is way older than this batch, delay the last few headers 1183 if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() { 1184 delay := reorgProtHeaderDelay 1185 if delay > n { 1186 delay = n 1187 } 1188 headers = headers[:n-delay] 1189 hashes = hashes[:n-delay] 1190 } 1191 } 1192 } 1193 // If no headers have been delivered, or all of them have been delayed, 1194 // sleep a bit and retry. Take care with headers already consumed during 1195 // skeleton filling 1196 if len(headers) == 0 && !progressed { 1197 p.log.Trace("All headers delayed, waiting") 1198 select { 1199 case <-time.After(fsHeaderContCheck): 1200 continue 1201 case <-d.cancelCh: 1202 return errCanceled 1203 } 1204 } 1205 // Insert any remaining new headers and fetch the next batch 1206 if len(headers) > 0 { 1207 p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) 1208 select { 1209 case d.headerProcCh <- &headerTask{ 1210 headers: headers, 1211 hashes: hashes, 1212 }: 1213 case <-d.cancelCh: 1214 return errCanceled 1215 } 1216 from += uint64(len(headers)) 1217 } 1218 // If we're still skeleton filling snap sync, check pivot staleness 1219 // before continuing to the next skeleton filling 1220 if skeleton && pivot > 0 { 1221 pivoting = true 1222 } 1223 } 1224 } 1225 1226 // fillHeaderSkeleton concurrently retrieves headers from all our available peers 1227 // and maps them to the provided skeleton header chain. 1228 // 1229 // Any partial results from the beginning of the skeleton is (if possible) forwarded 1230 // immediately to the header processor to keep the rest of the pipeline full even 1231 // in the case of header stalls. 1232 // 1233 // The method returns the entire filled skeleton and also the number of headers 1234 // already forwarded for processing. 1235 func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, []common.Hash, int, error) { 1236 log.Debug("Filling up skeleton", "from", from) 1237 d.queue.ScheduleSkeleton(from, skeleton) 1238 1239 err := d.concurrentFetch((*headerQueue)(d), false) 1240 if err != nil { 1241 log.Debug("Skeleton fill failed", "err", err) 1242 } 1243 filled, hashes, proced := d.queue.RetrieveHeaders() 1244 if err == nil { 1245 log.Debug("Skeleton fill succeeded", "filled", len(filled), "processed", proced) 1246 } 1247 return filled, hashes, proced, err 1248 } 1249 1250 // fetchBodies iteratively downloads the scheduled block bodies, taking any 1251 // available peers, reserving a chunk of blocks for each, waiting for delivery 1252 // and also periodically checking for timeouts. 1253 func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error { 1254 log.Debug("Downloading block bodies", "origin", from) 1255 err := d.concurrentFetch((*bodyQueue)(d), beaconMode) 1256 1257 log.Debug("Block body download terminated", "err", err) 1258 return err 1259 } 1260 1261 // fetchReceipts iteratively downloads the scheduled block receipts, taking any 1262 // available peers, reserving a chunk of receipts for each, waiting for delivery 1263 // and also periodically checking for timeouts. 1264 func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error { 1265 log.Debug("Downloading receipts", "origin", from) 1266 err := d.concurrentFetch((*receiptQueue)(d), beaconMode) 1267 1268 log.Debug("Receipt download terminated", "err", err) 1269 return err 1270 } 1271 1272 // processHeaders takes batches of retrieved headers from an input channel and 1273 // keeps processing and scheduling them into the header chain and downloader's 1274 // queue until the stream ends or a failure occurs. 1275 func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error { 1276 // Keep a count of uncertain headers to roll back 1277 var ( 1278 rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis) 1279 rollbackErr error 1280 mode = d.getMode() 1281 ) 1282 defer func() { 1283 if rollback > 0 { 1284 lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 1285 if mode != LightSync { 1286 lastFastBlock = d.blockchain.CurrentSnapBlock().Number 1287 lastBlock = d.blockchain.CurrentBlock().Number 1288 } 1289 if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block 1290 // We're already unwinding the stack, only print the error to make it more visible 1291 log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err) 1292 } 1293 curFastBlock, curBlock := common.Big0, common.Big0 1294 if mode != LightSync { 1295 curFastBlock = d.blockchain.CurrentSnapBlock().Number 1296 curBlock = d.blockchain.CurrentBlock().Number 1297 } 1298 log.Warn("Rolled back chain segment", 1299 "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), 1300 "snap", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), 1301 "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr) 1302 } 1303 }() 1304 // Wait for batches of headers to process 1305 gotHeaders := false 1306 1307 for { 1308 select { 1309 case <-d.cancelCh: 1310 rollbackErr = errCanceled 1311 return errCanceled 1312 1313 case task := <-d.headerProcCh: 1314 // Terminate header processing if we synced up 1315 if task == nil || len(task.headers) == 0 { 1316 // Notify everyone that headers are fully processed 1317 for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { 1318 select { 1319 case ch <- false: 1320 case <-d.cancelCh: 1321 } 1322 } 1323 // If we're in legacy sync mode, we need to check total difficulty 1324 // violations from malicious peers. That is not needed in beacon 1325 // mode and we can skip to terminating sync. 1326 if !beaconMode { 1327 // If no headers were retrieved at all, the peer violated its TD promise that it had a 1328 // better chain compared to ours. The only exception is if its promised blocks were 1329 // already imported by other means (e.g. fetcher): 1330 // 1331 // R <remote peer>, L <local node>: Both at block 10 1332 // R: Mine block 11, and propagate it to L 1333 // L: Queue block 11 for import 1334 // L: Notice that R's head and TD increased compared to ours, start sync 1335 // L: Import of block 11 finishes 1336 // L: Sync begins, and finds common ancestor at 11 1337 // L: Request new headers up from 11 (R's TD was higher, it must have something) 1338 // R: Nothing to give 1339 if mode != LightSync { 1340 head := d.blockchain.CurrentBlock() 1341 if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { 1342 return errStallingPeer 1343 } 1344 } 1345 // If snap or light syncing, ensure promised headers are indeed delivered. This is 1346 // needed to detect scenarios where an attacker feeds a bad pivot and then bails out 1347 // of delivering the post-pivot blocks that would flag the invalid content. 1348 // 1349 // This check cannot be executed "as is" for full imports, since blocks may still be 1350 // queued for processing when the header download completes. However, as long as the 1351 // peer gave us something useful, we're already happy/progressed (above check). 1352 if mode == SnapSync || mode == LightSync { 1353 head := d.lightchain.CurrentHeader() 1354 if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { 1355 return errStallingPeer 1356 } 1357 } 1358 } 1359 // Disable any rollback and return 1360 rollback = 0 1361 return nil 1362 } 1363 // Otherwise split the chunk of headers into batches and process them 1364 headers, hashes := task.headers, task.hashes 1365 1366 gotHeaders = true 1367 for len(headers) > 0 { 1368 // Terminate if something failed in between processing chunks 1369 select { 1370 case <-d.cancelCh: 1371 rollbackErr = errCanceled 1372 return errCanceled 1373 default: 1374 } 1375 // Select the next chunk of headers to import 1376 limit := maxHeadersProcess 1377 if limit > len(headers) { 1378 limit = len(headers) 1379 } 1380 chunkHeaders := headers[:limit] 1381 chunkHashes := hashes[:limit] 1382 1383 // In case of header only syncing, validate the chunk immediately 1384 if mode == SnapSync || mode == LightSync { 1385 // Although the received headers might be all valid, a legacy 1386 // PoW/PoA sync must not accept post-merge headers. Make sure 1387 // that any transition is rejected at this point. 1388 var ( 1389 rejected []*types.Header 1390 td *big.Int 1391 ) 1392 if !beaconMode && ttd != nil { 1393 td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1) 1394 if td == nil { 1395 // This should never really happen, but handle gracefully for now 1396 log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash) 1397 return fmt.Errorf("%w: parent TD missing", errInvalidChain) 1398 } 1399 for i, header := range chunkHeaders { 1400 td = new(big.Int).Add(td, header.Difficulty) 1401 if td.Cmp(ttd) >= 0 { 1402 // Terminal total difficulty reached, allow the last header in 1403 if new(big.Int).Sub(td, header.Difficulty).Cmp(ttd) < 0 { 1404 chunkHeaders, rejected = chunkHeaders[:i+1], chunkHeaders[i+1:] 1405 if len(rejected) > 0 { 1406 // Make a nicer user log as to the first TD truly rejected 1407 td = new(big.Int).Add(td, rejected[0].Difficulty) 1408 } 1409 } else { 1410 chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:] 1411 } 1412 break 1413 } 1414 } 1415 } 1416 if len(chunkHeaders) > 0 { 1417 if n, err := d.lightchain.InsertHeaderChain(chunkHeaders); err != nil { 1418 rollbackErr = err 1419 1420 // If some headers were inserted, track them as uncertain 1421 if mode == SnapSync && n > 0 && rollback == 0 { 1422 rollback = chunkHeaders[0].Number.Uint64() 1423 } 1424 log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) 1425 return fmt.Errorf("%w: %v", errInvalidChain, err) 1426 } 1427 // All verifications passed, track all headers within the allowed limits 1428 if mode == SnapSync { 1429 head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64() 1430 if head-rollback > uint64(fsHeaderSafetyNet) { 1431 rollback = head - uint64(fsHeaderSafetyNet) 1432 } else { 1433 rollback = 1 1434 } 1435 } 1436 } 1437 if len(rejected) != 0 { 1438 // Merge threshold reached, stop importing, but don't roll back 1439 rollback = 0 1440 1441 log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd) 1442 return ErrMergeTransition 1443 } 1444 } 1445 // Unless we're doing light chains, schedule the headers for associated content retrieval 1446 if mode == FullSync || mode == SnapSync { 1447 // If we've reached the allowed number of pending headers, stall a bit 1448 for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { 1449 select { 1450 case <-d.cancelCh: 1451 rollbackErr = errCanceled 1452 return errCanceled 1453 case <-time.After(time.Second): 1454 } 1455 } 1456 // Otherwise insert the headers for content retrieval 1457 inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin) 1458 if len(inserts) != len(chunkHeaders) { 1459 rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunkHeaders)) 1460 return fmt.Errorf("%w: stale headers", errBadPeer) 1461 } 1462 } 1463 headers = headers[limit:] 1464 hashes = hashes[limit:] 1465 origin += uint64(limit) 1466 } 1467 // Update the highest block number we know if a higher one is found. 1468 d.syncStatsLock.Lock() 1469 if d.syncStatsChainHeight < origin { 1470 d.syncStatsChainHeight = origin - 1 1471 } 1472 d.syncStatsLock.Unlock() 1473 1474 // Signal the content downloaders of the availability of new tasks 1475 for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { 1476 select { 1477 case ch <- true: 1478 default: 1479 } 1480 } 1481 } 1482 } 1483 } 1484 1485 // processFullSyncContent takes fetch results from the queue and imports them into the chain. 1486 func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error { 1487 for { 1488 results := d.queue.Results(true) 1489 if len(results) == 0 { 1490 return nil 1491 } 1492 if d.chainInsertHook != nil { 1493 d.chainInsertHook(results) 1494 } 1495 // Although the received blocks might be all valid, a legacy PoW/PoA sync 1496 // must not accept post-merge blocks. Make sure that pre-merge blocks are 1497 // imported, but post-merge ones are rejected. 1498 var ( 1499 rejected []*fetchResult 1500 td *big.Int 1501 ) 1502 if !beaconMode && ttd != nil { 1503 td = d.blockchain.GetTd(results[0].Header.ParentHash, results[0].Header.Number.Uint64()-1) 1504 if td == nil { 1505 // This should never really happen, but handle gracefully for now 1506 log.Error("Failed to retrieve parent block TD", "number", results[0].Header.Number.Uint64()-1, "hash", results[0].Header.ParentHash) 1507 return fmt.Errorf("%w: parent TD missing", errInvalidChain) 1508 } 1509 for i, result := range results { 1510 td = new(big.Int).Add(td, result.Header.Difficulty) 1511 if td.Cmp(ttd) >= 0 { 1512 // Terminal total difficulty reached, allow the last block in 1513 if new(big.Int).Sub(td, result.Header.Difficulty).Cmp(ttd) < 0 { 1514 results, rejected = results[:i+1], results[i+1:] 1515 if len(rejected) > 0 { 1516 // Make a nicer user log as to the first TD truly rejected 1517 td = new(big.Int).Add(td, rejected[0].Header.Difficulty) 1518 } 1519 } else { 1520 results, rejected = results[:i], results[i:] 1521 } 1522 break 1523 } 1524 } 1525 } 1526 if err := d.importBlockResults(results); err != nil { 1527 return err 1528 } 1529 if len(rejected) != 0 { 1530 log.Info("Legacy sync reached merge threshold", "number", rejected[0].Header.Number, "hash", rejected[0].Header.Hash(), "td", td, "ttd", ttd) 1531 return ErrMergeTransition 1532 } 1533 } 1534 } 1535 1536 func (d *Downloader) importBlockResults(results []*fetchResult) error { 1537 // Check for any early termination requests 1538 if len(results) == 0 { 1539 return nil 1540 } 1541 select { 1542 case <-d.quitCh: 1543 return errCancelContentProcessing 1544 default: 1545 } 1546 // Retrieve a batch of results to import 1547 first, last := results[0].Header, results[len(results)-1].Header 1548 log.Debug("Inserting downloaded chain", "items", len(results), 1549 "firstnum", first.Number, "firsthash", first.Hash(), 1550 "lastnum", last.Number, "lasthash", last.Hash(), 1551 ) 1552 blocks := make([]*types.Block, len(results)) 1553 for i, result := range results { 1554 blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) 1555 } 1556 // Downloaded blocks are always regarded as trusted after the 1557 // transition. Because the downloaded chain is guided by the 1558 // consensus-layer. 1559 if index, err := d.blockchain.InsertChain(blocks); err != nil { 1560 if index < len(results) { 1561 log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) 1562 1563 // In post-merge, notify the engine API of encountered bad chains 1564 if d.badBlock != nil { 1565 head, _, _, err := d.skeleton.Bounds() 1566 if err != nil { 1567 log.Error("Failed to retrieve beacon bounds for bad block reporting", "err", err) 1568 } else { 1569 d.badBlock(blocks[index].Header(), head) 1570 } 1571 } 1572 } else { 1573 // The InsertChain method in blockchain.go will sometimes return an out-of-bounds index, 1574 // when it needs to preprocess blocks to import a sidechain. 1575 // The importer will put together a new list of blocks to import, which is a superset 1576 // of the blocks delivered from the downloader, and the indexing will be off. 1577 log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err) 1578 } 1579 return fmt.Errorf("%w: %v", errInvalidChain, err) 1580 } 1581 return nil 1582 } 1583 1584 // processSnapSyncContent takes fetch results from the queue and writes them to the 1585 // database. It also controls the synchronisation of state nodes of the pivot block. 1586 func (d *Downloader) processSnapSyncContent() error { 1587 // Start syncing state of the reported head block. This should get us most of 1588 // the state of the pivot block. 1589 d.pivotLock.RLock() 1590 sync := d.syncState(d.pivotHeader.Root) 1591 d.pivotLock.RUnlock() 1592 1593 defer func() { 1594 // The `sync` object is replaced every time the pivot moves. We need to 1595 // defer close the very last active one, hence the lazy evaluation vs. 1596 // calling defer sync.Cancel() !!! 1597 sync.Cancel() 1598 }() 1599 1600 closeOnErr := func(s *stateSync) { 1601 if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled { 1602 d.queue.Close() // wake up Results 1603 } 1604 } 1605 go closeOnErr(sync) 1606 1607 // To cater for moving pivot points, track the pivot block and subsequently 1608 // accumulated download results separately. 1609 var ( 1610 oldPivot *fetchResult // Locked in pivot block, might change eventually 1611 oldTail []*fetchResult // Downloaded content after the pivot 1612 ) 1613 for { 1614 // Wait for the next batch of downloaded data to be available, and if the pivot 1615 // block became stale, move the goalpost 1616 results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness 1617 if len(results) == 0 { 1618 // If pivot sync is done, stop 1619 if oldPivot == nil { 1620 d.reportSnapSyncProgress(true) 1621 return sync.Cancel() 1622 } 1623 // If sync failed, stop 1624 select { 1625 case <-d.cancelCh: 1626 sync.Cancel() 1627 return errCanceled 1628 default: 1629 } 1630 } 1631 if d.chainInsertHook != nil { 1632 d.chainInsertHook(results) 1633 } 1634 d.reportSnapSyncProgress(false) 1635 1636 // If we haven't downloaded the pivot block yet, check pivot staleness 1637 // notifications from the header downloader 1638 d.pivotLock.RLock() 1639 pivot := d.pivotHeader 1640 d.pivotLock.RUnlock() 1641 1642 if oldPivot == nil { 1643 if pivot.Root != sync.root { 1644 sync.Cancel() 1645 sync = d.syncState(pivot.Root) 1646 1647 go closeOnErr(sync) 1648 } 1649 } else { 1650 results = append(append([]*fetchResult{oldPivot}, oldTail...), results...) 1651 } 1652 // Split around the pivot block and process the two sides via snap/full sync 1653 if !d.committed.Load() { 1654 latest := results[len(results)-1].Header 1655 // If the height is above the pivot block by 2 sets, it means the pivot 1656 // become stale in the network and it was garbage collected, move to a 1657 // new pivot. 1658 // 1659 // Note, we have `reorgProtHeaderDelay` number of blocks withheld, Those 1660 // need to be taken into account, otherwise we're detecting the pivot move 1661 // late and will drop peers due to unavailable state!!! 1662 if height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) { 1663 log.Warn("Pivot became stale, moving", "old", pivot.Number.Uint64(), "new", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay)) 1664 pivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted 1665 1666 d.pivotLock.Lock() 1667 d.pivotHeader = pivot 1668 d.pivotLock.Unlock() 1669 1670 // Write out the pivot into the database so a rollback beyond it will 1671 // reenable snap sync 1672 rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64()) 1673 } 1674 } 1675 P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results) 1676 if err := d.commitSnapSyncData(beforeP, sync); err != nil { 1677 return err 1678 } 1679 if P != nil { 1680 // If new pivot block found, cancel old state retrieval and restart 1681 if oldPivot != P { 1682 sync.Cancel() 1683 sync = d.syncState(P.Header.Root) 1684 1685 go closeOnErr(sync) 1686 oldPivot = P 1687 } 1688 // Wait for completion, occasionally checking for pivot staleness 1689 select { 1690 case <-sync.done: 1691 if sync.err != nil { 1692 return sync.err 1693 } 1694 if err := d.commitPivotBlock(P); err != nil { 1695 return err 1696 } 1697 oldPivot = nil 1698 1699 case <-time.After(time.Second): 1700 oldTail = afterP 1701 continue 1702 } 1703 } 1704 // Fast sync done, pivot commit done, full import 1705 if err := d.importBlockResults(afterP); err != nil { 1706 return err 1707 } 1708 } 1709 } 1710 1711 func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) { 1712 if len(results) == 0 { 1713 return nil, nil, nil 1714 } 1715 if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot { 1716 // the pivot is somewhere in the future 1717 return nil, results, nil 1718 } 1719 // This can also be optimized, but only happens very seldom 1720 for _, result := range results { 1721 num := result.Header.Number.Uint64() 1722 switch { 1723 case num < pivot: 1724 before = append(before, result) 1725 case num == pivot: 1726 p = result 1727 default: 1728 after = append(after, result) 1729 } 1730 } 1731 return p, before, after 1732 } 1733 1734 func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *stateSync) error { 1735 // Check for any early termination requests 1736 if len(results) == 0 { 1737 return nil 1738 } 1739 select { 1740 case <-d.quitCh: 1741 return errCancelContentProcessing 1742 case <-stateSync.done: 1743 if err := stateSync.Wait(); err != nil { 1744 return err 1745 } 1746 default: 1747 } 1748 // Retrieve the batch of results to import 1749 first, last := results[0].Header, results[len(results)-1].Header 1750 log.Debug("Inserting snap-sync blocks", "items", len(results), 1751 "firstnum", first.Number, "firsthash", first.Hash(), 1752 "lastnumn", last.Number, "lasthash", last.Hash(), 1753 ) 1754 blocks := make([]*types.Block, len(results)) 1755 receipts := make([]types.Receipts, len(results)) 1756 for i, result := range results { 1757 blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) 1758 receipts[i] = result.Receipts 1759 } 1760 if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil { 1761 log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err) 1762 return fmt.Errorf("%w: %v", errInvalidChain, err) 1763 } 1764 return nil 1765 } 1766 1767 func (d *Downloader) commitPivotBlock(result *fetchResult) error { 1768 block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals) 1769 log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash()) 1770 1771 // Commit the pivot block as the new head, will require full sync from here on 1772 if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil { 1773 return err 1774 } 1775 if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil { 1776 return err 1777 } 1778 d.committed.Store(true) 1779 return nil 1780 } 1781 1782 // DeliverSnapPacket is invoked from a peer's message handler when it transmits a 1783 // data packet for the local node to consume. 1784 func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error { 1785 switch packet := packet.(type) { 1786 case *snap.AccountRangePacket: 1787 hashes, accounts, err := packet.Unpack() 1788 if err != nil { 1789 return err 1790 } 1791 return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof) 1792 1793 case *snap.StorageRangesPacket: 1794 hashset, slotset := packet.Unpack() 1795 return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof) 1796 1797 case *snap.ByteCodesPacket: 1798 return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes) 1799 1800 case *snap.TrieNodesPacket: 1801 return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes) 1802 1803 default: 1804 return fmt.Errorf("unexpected snap packet type: %T", packet) 1805 } 1806 } 1807 1808 // readHeaderRange returns a list of headers, using the given last header as the base, 1809 // and going backwards towards genesis. This method assumes that the caller already has 1810 // placed a reasonable cap on count. 1811 func (d *Downloader) readHeaderRange(last *types.Header, count int) []*types.Header { 1812 var ( 1813 current = last 1814 headers []*types.Header 1815 ) 1816 for { 1817 parent := d.lightchain.GetHeaderByHash(current.ParentHash) 1818 if parent == nil { 1819 break // The chain is not continuous, or the chain is exhausted 1820 } 1821 headers = append(headers, parent) 1822 if len(headers) >= count { 1823 break 1824 } 1825 current = parent 1826 } 1827 return headers 1828 } 1829 1830 // reportSnapSyncProgress calculates various status reports and provides it to the user. 1831 func (d *Downloader) reportSnapSyncProgress(force bool) { 1832 // Initialize the sync start time if it's the first time we're reporting 1833 if d.syncStartTime.IsZero() { 1834 d.syncStartTime = time.Now().Add(-time.Millisecond) // -1ms offset to avoid division by zero 1835 } 1836 // Don't report all the events, just occasionally 1837 if !force && time.Since(d.syncLogTime) < 8*time.Second { 1838 return 1839 } 1840 // Don't report anything until we have a meaningful progress 1841 var ( 1842 headerBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerHeaderTable) 1843 bodyBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerBodiesTable) 1844 receiptBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerReceiptTable) 1845 ) 1846 syncedBytes := common.StorageSize(headerBytes + bodyBytes + receiptBytes) 1847 if syncedBytes == 0 { 1848 return 1849 } 1850 var ( 1851 header = d.blockchain.CurrentHeader() 1852 block = d.blockchain.CurrentSnapBlock() 1853 ) 1854 syncedBlocks := block.Number.Uint64() - d.syncStartBlock 1855 if syncedBlocks == 0 { 1856 return 1857 } 1858 // Retrieve the current chain head and calculate the ETA 1859 latest, _, _, err := d.skeleton.Bounds() 1860 if err != nil { 1861 // We're going to cheat for non-merged networks, but that's fine 1862 latest = d.pivotHeader 1863 } 1864 if latest == nil { 1865 // This should really never happen, but add some defensive code for now. 1866 // TODO(karalabe): Remove it eventually if we don't see it blow. 1867 log.Error("Nil latest block in sync progress report") 1868 return 1869 } 1870 var ( 1871 left = latest.Number.Uint64() - block.Number.Uint64() 1872 eta = time.Since(d.syncStartTime) / time.Duration(syncedBlocks) * time.Duration(left) 1873 1874 progress = fmt.Sprintf("%.2f%%", float64(block.Number.Uint64())*100/float64(latest.Number.Uint64())) 1875 headers = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(header.Number.Uint64()), common.StorageSize(headerBytes).TerminalString()) 1876 bodies = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(bodyBytes).TerminalString()) 1877 receipts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(receiptBytes).TerminalString()) 1878 ) 1879 log.Info("Syncing: chain download in progress", "synced", progress, "chain", syncedBytes, "headers", headers, "bodies", bodies, "receipts", receipts, "eta", common.PrettyDuration(eta)) 1880 d.syncLogTime = time.Now() 1881 }