github.com/tacshi/go-ethereum@v0.0.0-20230616113857-84a434e20921/eth/downloader/downloader.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package downloader contains the manual full chain synchronisation.
package downloader

import (
	"errors"
	"fmt"
	"math/big"
	"sync"
	"sync/atomic"
	"time"

	"github.com/tacshi/go-ethereum"
	"github.com/tacshi/go-ethereum/common"
	"github.com/tacshi/go-ethereum/core/rawdb"
	"github.com/tacshi/go-ethereum/core/state/snapshot"
	"github.com/tacshi/go-ethereum/core/types"
	"github.com/tacshi/go-ethereum/eth/protocols/snap"
	"github.com/tacshi/go-ethereum/ethdb"
	"github.com/tacshi/go-ethereum/event"
	"github.com/tacshi/go-ethereum/log"
	"github.com/tacshi/go-ethereum/params"
	"github.com/tacshi/go-ethereum/trie"
)

var (
	MaxBlockFetch   = 128 // Amount of blocks to be fetched per retrieval request
	MaxHeaderFetch  = 192 // Amount of block headers to be fetched per retrieval request
	MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly
	MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request

	maxQueuedHeaders            = 32 * 1024                         // [eth/62] Maximum number of headers to queue for import (DOS protection)
	maxHeadersProcess           = 2048                              // Number of header download results to import at once into the chain
	maxResultsProcess           = 2048                              // Number of content download results to import at once into the chain
	fullMaxForkAncestry  uint64 = params.FullImmutabilityThreshold  // Maximum chain reorganisation (locally redeclared so tests can reduce it)
	lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it)

	reorgProtThreshold   = 48 // Threshold number of recent blocks to disable mini reorg protection
	reorgProtHeaderDelay = 2  // Number of headers to delay delivering to cover mini reorgs

	fsHeaderCheckFrequency = 100             // Verification frequency of the downloaded headers during snap sync
	fsHeaderSafetyNet      = 2048            // Number of headers to discard in case a chain violation is detected
	fsHeaderForceVerify    = 24              // Number of headers to verify before and after the pivot to accept it
	fsHeaderContCheck      = 3 * time.Second // Time interval to check for header continuations during state download
	fsMinFullBlocks        = 64              // Number of blocks to retrieve fully even in snap sync
)

var (
	errBusy                    = errors.New("busy")
	errUnknownPeer             = errors.New("peer is unknown or unhealthy")
	errBadPeer                 = errors.New("action from bad peer ignored")
	errStallingPeer            = errors.New("peer is stalling")
	errUnsyncedPeer            = errors.New("unsynced peer")
	errNoPeers                 = errors.New("no peers to keep download active")
download active") 70 errTimeout = errors.New("timeout") 71 errEmptyHeaderSet = errors.New("empty header set by peer") 72 errPeersUnavailable = errors.New("no peers available or all tried for download") 73 errInvalidAncestor = errors.New("retrieved ancestor is invalid") 74 errInvalidChain = errors.New("retrieved hash chain is invalid") 75 errInvalidBody = errors.New("retrieved block body is invalid") 76 errInvalidReceipt = errors.New("retrieved receipt is invalid") 77 errCancelStateFetch = errors.New("state data download canceled (requested)") 78 errCancelContentProcessing = errors.New("content processing canceled (requested)") 79 errCanceled = errors.New("syncing canceled (requested)") 80 errTooOld = errors.New("peer's protocol version too old") 81 errNoAncestorFound = errors.New("no common ancestor found") 82 errNoPivotHeader = errors.New("pivot header is not found") 83 ErrMergeTransition = errors.New("legacy sync reached the merge") 84 ) 85 86 // peerDropFn is a callback type for dropping a peer detected as malicious. 87 type peerDropFn func(id string) 88 89 // badBlockFn is a callback for the async beacon sync to notify the caller that 90 // the origin header requested to sync to, produced a chain with a bad block. 91 type badBlockFn func(invalid *types.Header, origin *types.Header) 92 93 // headerTask is a set of downloaded headers to queue along with their precomputed 94 // hashes to avoid constant rehashing. 95 type headerTask struct { 96 headers []*types.Header 97 hashes []common.Hash 98 } 99 100 type Downloader struct { 101 mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode 102 mux *event.TypeMux // Event multiplexer to announce sync operation events 103 104 checkpoint uint64 // Checkpoint block number to enforce head against (e.g. snap sync) 105 genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT) 106 queue *queue // Scheduler for selecting the hashes to download 107 peers *peerSet // Set of active peers from which download can proceed 108 109 stateDB ethdb.Database // Database to state sync into (and deduplicate via) 110 111 // Statistics 112 syncStatsChainOrigin uint64 // Origin block number where syncing started at 113 syncStatsChainHeight uint64 // Highest block number known when syncing started 114 syncStatsLock sync.RWMutex // Lock protecting the sync stats fields 115 116 lightchain LightChain 117 blockchain BlockChain 118 119 // Callbacks 120 dropPeer peerDropFn // Drops a peer for misbehaving 121 badBlock badBlockFn // Reports a block as rejected by the chain 122 123 // Status 124 synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing 125 synchronising int32 126 notified int32 127 committed int32 128 ancientLimit uint64 // The maximum block number which can be regarded as ancient data. 129 130 // Channels 131 headerProcCh chan *headerTask // Channel to feed the header processor new tasks 132 133 // Skeleton sync 134 skeleton *skeleton // Header skeleton to backfill the chain with (eth2 mode) 135 136 // State sync 137 pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root 138 pivotLock sync.RWMutex // Lock protecting pivot header reads from updates 139 140 SnapSyncer *snap.Syncer // TODO(karalabe): make private! 
	stateSyncStart chan *stateSync

	// Cancellation and termination
	cancelPeer string         // Identifier of the peer currently being used as the master (cancel on drop)
	cancelCh   chan struct{}  // Channel to cancel mid-flight syncs
	cancelLock sync.RWMutex   // Lock to protect the cancel channel and peer in delivers
	cancelWg   sync.WaitGroup // Make sure all fetcher goroutines have exited.

	quitCh   chan struct{} // Quit channel to signal termination
	quitLock sync.Mutex    // Lock to prevent double closes

	// Testing hooks
	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)

	// Progress reporting metrics
	syncStartBlock uint64    // Head snap block when Geth was started
	syncStartTime  time.Time // Time instance when chain sync started
	syncLogTime    time.Time // Time instance when status was last reported
}

// LightChain encapsulates functions required to synchronise a light chain.
type LightChain interface {
	// HasHeader verifies a header's presence in the local chain.
	HasHeader(common.Hash, uint64) bool

	// GetHeaderByHash retrieves a header from the local chain.
	GetHeaderByHash(common.Hash) *types.Header

	// CurrentHeader retrieves the head header from the local chain.
	CurrentHeader() *types.Header

	// GetTd returns the total difficulty of a local block.
	GetTd(common.Hash, uint64) *big.Int

	// InsertHeaderChain inserts a batch of headers into the local chain.
	InsertHeaderChain([]*types.Header, int) (int, error)

	// SetHead rewinds the local chain to a new head.
	SetHead(uint64) error
}

// BlockChain encapsulates functions required to sync a (full or snap) blockchain.
type BlockChain interface {
	LightChain

	// HasBlock verifies a block's presence in the local chain.
	HasBlock(common.Hash, uint64) bool

	// HasFastBlock verifies a snap block's presence in the local chain.
	HasFastBlock(common.Hash, uint64) bool

	// GetBlockByHash retrieves a block from the local chain.
	GetBlockByHash(common.Hash) *types.Block

	// CurrentBlock retrieves the head block from the local chain.
	CurrentBlock() *types.Header

	// CurrentSnapBlock retrieves the head snap block from the local chain.
	CurrentSnapBlock() *types.Header

	// SnapSyncCommitHead directly commits the head block to a certain entity.
	SnapSyncCommitHead(common.Hash) error

	// InsertChain inserts a batch of blocks into the local chain.
	InsertChain(types.Blocks) (int, error)

	// InsertReceiptChain inserts a batch of receipts into the local chain.
	InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error)

	// Snapshots returns the blockchain snapshot tree to pause it during sync.
	Snapshots() *snapshot.Tree

	// TrieDB retrieves the low level trie database used for interacting
	// with trie nodes.
	TrieDB() *trie.Database
}

// New creates a new downloader to fetch hashes and blocks from remote peers.
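//
// A minimal construction sketch (added for illustration, not part of the upstream
// code; `db`, `chain` and `dropPeer` are assumed to satisfy ethdb.Database,
// BlockChain and peerDropFn respectively):
//
//	dl := New(0, db, new(event.TypeMux), chain, nil, dropPeer, func() {})
//	defer dl.Terminate()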
func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader {
	if lightchain == nil {
		lightchain = chain
	}
	dl := &Downloader{
		stateDB:        stateDb,
		mux:            mux,
		checkpoint:     checkpoint,
		queue:          newQueue(blockCacheMaxItems, blockCacheInitialItems),
		peers:          newPeerSet(),
		blockchain:     chain,
		lightchain:     lightchain,
		dropPeer:       dropPeer,
		headerProcCh:   make(chan *headerTask, 1),
		quitCh:         make(chan struct{}),
		SnapSyncer:     snap.NewSyncer(stateDb, chain.TrieDB().Scheme()),
		stateSyncStart: make(chan *stateSync),
		syncStartBlock: chain.CurrentSnapBlock().Number.Uint64(),
	}
	// Create the post-merge skeleton syncer and start the process
	dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success))

	go dl.stateFetcher()
	return dl
}

// Progress retrieves the synchronisation boundaries, specifically the origin
// block where synchronisation started at (may have failed/suspended); the block
// or header sync is currently at; and the latest known block which the sync targets.
//
// In addition, during the state download phase of snap synchronisation the number
// of processed and the total number of known states are also returned. Otherwise
// these are zero.
func (d *Downloader) Progress() ethereum.SyncProgress {
	// Lock the current stats and return the progress
	d.syncStatsLock.RLock()
	defer d.syncStatsLock.RUnlock()

	current := uint64(0)
	mode := d.getMode()
	switch {
	case d.blockchain != nil && mode == FullSync:
		current = d.blockchain.CurrentBlock().Number.Uint64()
	case d.blockchain != nil && mode == SnapSync:
		current = d.blockchain.CurrentSnapBlock().Number.Uint64()
	case d.lightchain != nil:
		current = d.lightchain.CurrentHeader().Number.Uint64()
	default:
		log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode)
	}
	progress, pending := d.SnapSyncer.Progress()

	return ethereum.SyncProgress{
		StartingBlock:       d.syncStatsChainOrigin,
		CurrentBlock:        current,
		HighestBlock:        d.syncStatsChainHeight,
		SyncedAccounts:      progress.AccountSynced,
		SyncedAccountBytes:  uint64(progress.AccountBytes),
		SyncedBytecodes:     progress.BytecodeSynced,
		SyncedBytecodeBytes: uint64(progress.BytecodeBytes),
		SyncedStorage:       progress.StorageSynced,
		SyncedStorageBytes:  uint64(progress.StorageBytes),
		HealedTrienodes:     progress.TrienodeHealSynced,
		HealedTrienodeBytes: uint64(progress.TrienodeHealBytes),
		HealedBytecodes:     progress.BytecodeHealSynced,
		HealedBytecodeBytes: uint64(progress.BytecodeHealBytes),
		HealingTrienodes:    pending.TrienodeHeal,
		HealingBytecode:     pending.BytecodeHeal,
	}
}

// Synchronising returns whether the downloader is currently retrieving blocks.
func (d *Downloader) Synchronising() bool {
	return atomic.LoadInt32(&d.synchronising) > 0
}

// RegisterPeer injects a new download peer into the set of block sources to be
// used for fetching hashes and blocks from.
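//
// A hedged usage sketch (the peer value `p` and its accessors are illustrative,
// not defined in this file):
//
//	if err := dl.RegisterPeer(p.ID(), p.Version(), p); err != nil {
//		log.Warn("Failed to register sync peer", "peer", p.ID(), "err", err)
//	}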
func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error {
	var logger log.Logger
	if len(id) < 16 {
		// Tests use short IDs, don't choke on them
		logger = log.New("peer", id)
	} else {
		logger = log.New("peer", id[:8])
	}
	logger.Trace("Registering sync peer")
	if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil {
		logger.Error("Failed to register sync peer", "err", err)
		return err
	}
	return nil
}

// RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer.
func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error {
	return d.RegisterPeer(id, version, &lightPeerWrapper{peer})
}

// UnregisterPeer removes a peer from the known list, preventing any action from
// the specified peer. An effort is also made to return any pending fetches into
// the queue.
func (d *Downloader) UnregisterPeer(id string) error {
	// Unregister the peer from the active peer set and revoke any fetch tasks
	var logger log.Logger
	if len(id) < 16 {
		// Tests use short IDs, don't choke on them
		logger = log.New("peer", id)
	} else {
		logger = log.New("peer", id[:8])
	}
	logger.Trace("Unregistering sync peer")
	if err := d.peers.Unregister(id); err != nil {
		logger.Error("Failed to unregister sync peer", "err", err)
		return err
	}
	d.queue.Revoke(id)

	return nil
}

// LegacySync tries to sync up our local block chain with a remote peer, both
// adding various sanity checks as well as wrapping it with various log entries.
func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, mode SyncMode) error {
	err := d.synchronise(id, head, td, ttd, mode, false, nil)

	switch err {
	case nil, errBusy, errCanceled:
		return err
	}
	if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) ||
		errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) ||
		errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) {
		log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err)
		if d.dropPeer == nil {
			// The dropPeer method is nil when `--copydb` is used for a local copy.
			// Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored
			log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id)
		} else {
			d.dropPeer(id)
		}
		return err
	}
	if errors.Is(err, ErrMergeTransition) {
		return err // This is an expected fault, don't keep printing it in a spin-loop
	}
	log.Warn("Synchronisation failed, retrying", "err", err)
	return err
}

// synchronise will select the peer and use it for synchronising. If an empty string is given
// it will use the best peer possible and synchronize if its TD is higher than our own. If any of the
// checks fail an error will be returned. This method is synchronous.
func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, mode SyncMode, beaconMode bool, beaconPing chan struct{}) error {
	// The beacon header syncer is async. It will start this synchronization and
	// will continue doing other tasks. However, if synchronization needs to be
	// cancelled, the syncer needs to know if we reached the startup point (and
	// inited the cancel channel) or not yet. Make sure that we'll signal even in
	// case of a failure.
	if beaconPing != nil {
		defer func() {
			select {
			case <-beaconPing: // already notified
			default:
				close(beaconPing) // weird exit condition, notify that it's safe to cancel (the nothing)
			}
		}()
	}
	// Mock out the synchronisation if testing
	if d.synchroniseMock != nil {
		return d.synchroniseMock(id, hash)
	}
	// Make sure only one goroutine is ever allowed past this point at once
	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&d.synchronising, 0)

	// Post a user notification of the sync (only once per session)
	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
		log.Info("Block synchronisation started")
	}
	if mode == SnapSync {
		// Snap sync uses the snapshot namespace to store potentially flakey data until
		// sync completely heals and finishes. Pause snapshot maintenance in the mean-
		// time to prevent access.
		if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
			snapshots.Disable()
		}
	}
	// Reset the queue, peer set and wake channels to clean any internal leftover state
	d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)
	d.peers.Reset()

	for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
		select {
		case <-ch:
		default:
		}
	}
	for empty := false; !empty; {
		select {
		case <-d.headerProcCh:
		default:
			empty = true
		}
	}
	// Create cancel channel for aborting mid-flight and mark the master peer
	d.cancelLock.Lock()
	d.cancelCh = make(chan struct{})
	d.cancelPeer = id
	d.cancelLock.Unlock()

	defer d.Cancel() // No matter what, we can't leave the cancel channel open

	// Atomically set the requested sync mode
	atomic.StoreUint32(&d.mode, uint32(mode))

	// Retrieve the origin peer and initiate the downloading process
	var p *peerConnection
	if !beaconMode { // Beacon mode doesn't need a peer to sync from
		p = d.peers.Peer(id)
		if p == nil {
			return errUnknownPeer
		}
	}
	if beaconPing != nil {
		close(beaconPing)
	}
	return d.syncWithPeer(p, hash, td, ttd, beaconMode)
}

func (d *Downloader) getMode() SyncMode {
	return SyncMode(atomic.LoadUint32(&d.mode))
}

// syncWithPeer starts a block synchronization based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *big.Int, beaconMode bool) (err error) {
	d.mux.Post(StartEvent{})
	defer func() {
		// reset on error
		if err != nil {
			d.mux.Post(FailedEvent{err})
		} else {
			latest := d.lightchain.CurrentHeader()
			d.mux.Post(DoneEvent{latest})
		}
	}()
	mode := d.getMode()

	if !beaconMode {
		log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode)
	} else {
		log.Debug("Backfilling with the network", "mode", mode)
	}
	defer func(start time.Time) {
		log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
	}(time.Now())

	// Look up the sync boundaries: the common ancestor and the target block
	var latest, pivot, final *types.Header
	if !beaconMode {
		// In legacy mode, use the master peer to retrieve the headers from
		latest, pivot, err = d.fetchHead(p)
		if err != nil {
			return err
		}
	} else {
		// In beacon mode, use the skeleton chain to retrieve the headers from
		latest, _, final, err = d.skeleton.Bounds()
		if err != nil {
			return err
		}
		if latest.Number.Uint64() > uint64(fsMinFullBlocks) {
			number := latest.Number.Uint64() - uint64(fsMinFullBlocks)

			// Retrieve the pivot header from the skeleton chain segment but
			// fallback to local chain if it's not found in skeleton space.
			if pivot = d.skeleton.Header(number); pivot == nil {
				_, oldest, _, _ := d.skeleton.Bounds() // error is already checked
				if number < oldest.Number.Uint64() {
					count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks
					headers := d.readHeaderRange(oldest, count)
					if len(headers) == count {
						pivot = headers[len(headers)-1]
						log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number)
					}
				}
			}
			// Print an error log and return directly in case the pivot header
			// is still not found. It means the skeleton chain is not linked
			// correctly with local chain.
			if pivot == nil {
				log.Error("Pivot header is not found", "number", number)
				return errNoPivotHeader
			}
		}
	}
	// If no pivot block was returned, the head is below the min full block
	// threshold (i.e. new chain). In that case we won't really snap sync
	// anyway, but still need a valid pivot block to avoid some code hitting
	// nil panics on access.
	if mode == SnapSync && pivot == nil {
		pivot = d.blockchain.CurrentBlock()
	}
	height := latest.Number.Uint64()

	var origin uint64
	if !beaconMode {
		// In legacy mode, reach out to the network and find the ancestor
		origin, err = d.findAncestor(p, latest)
		if err != nil {
			return err
		}
	} else {
		// In beacon mode, use the skeleton chain for the ancestor lookup
		origin, err = d.findBeaconAncestor()
		if err != nil {
			return err
		}
	}
	d.syncStatsLock.Lock()
	if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin {
		d.syncStatsChainOrigin = origin
	}
	d.syncStatsChainHeight = height
	d.syncStatsLock.Unlock()

	// Ensure our origin point is below any snap sync pivot point
	if mode == SnapSync {
		if height <= uint64(fsMinFullBlocks) {
			origin = 0
		} else {
			pivotNumber := pivot.Number.Uint64()
			if pivotNumber <= origin {
				origin = pivotNumber - 1
			}
			// Write out the pivot into the database so a rollback beyond it will
			// reenable snap sync
			rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber)
		}
	}
	d.committed = 1
	if mode == SnapSync && pivot.Number.Uint64() != 0 {
		d.committed = 0
	}
	if mode == SnapSync {
		// Set the ancient data limitation. If we are running snap sync, all block
		// data older than ancientLimit will be written to the ancient store. More
		// recent data will be written to the active database and will wait for the
		// freezer to migrate.
		//
		// If the network is post-merge, use either the last announced finalized
		// block as the ancient limit, or if we haven't yet received one, the head -
		// a max fork ancestry limit. One quirky case is if we've already passed the
		// finalized block, in which case the skeleton.Bounds will return nil and
		// we'll revert to head - 90K. That's fine, we're finishing sync anyway.
		//
		// For non-merged networks, if there is a checkpoint available, then calculate
		// the ancientLimit through that. Otherwise calculate the ancient limit through
		// the advertised height of the remote peer. This is mostly a fallback for
		// legacy networks, but should eventually be dropped. TODO(karalabe).
		if beaconMode {
			// Beacon sync, use the latest finalized block as the ancient limit
			// or a reasonable height if no finalized block is yet announced.
			if final != nil {
				d.ancientLimit = final.Number.Uint64()
			} else if height > fullMaxForkAncestry+1 {
				d.ancientLimit = height - fullMaxForkAncestry - 1
			} else {
				d.ancientLimit = 0
			}
		} else {
			// Legacy sync, use any hardcoded checkpoints or the best announcement
			// we have from the remote peer. TODO(karalabe): Drop this pathway.
			if d.checkpoint != 0 && d.checkpoint > fullMaxForkAncestry+1 {
				d.ancientLimit = d.checkpoint
			} else if height > fullMaxForkAncestry+1 {
				d.ancientLimit = height - fullMaxForkAncestry - 1
			} else {
				d.ancientLimit = 0
			}
		}
		frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here.

		// If a part of blockchain data has already been written into active store,
		// disable the ancient style insertion explicitly.
		if origin >= frozen && frozen != 0 {
			d.ancientLimit = 0
			log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1)
		} else if d.ancientLimit > 0 {
			log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit)
		}
		// Rewind the ancient store and blockchain if reorg happens.
		if origin+1 < frozen {
			if err := d.lightchain.SetHead(origin); err != nil {
				return err
			}
		}
	}
	// Initiate the sync using a concurrent header and content retrieval algorithm
	d.queue.Prepare(origin+1, mode)
	if d.syncInitHook != nil {
		d.syncInitHook(origin, height)
	}
	var headerFetcher func() error
	if !beaconMode {
		// In legacy mode, headers are retrieved from the network
		headerFetcher = func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) }
	} else {
		// In beacon mode, headers are served by the skeleton syncer
		headerFetcher = func() error { return d.fetchBeaconHeaders(origin + 1) }
	}
	fetchers := []func() error{
		headerFetcher, // Headers are always retrieved
		func() error { return d.fetchBodies(origin+1, beaconMode) },   // Bodies are retrieved during normal and snap sync
		func() error { return d.fetchReceipts(origin+1, beaconMode) }, // Receipts are retrieved during snap sync
		func() error { return d.processHeaders(origin+1, td, ttd, beaconMode) },
	}
	if mode == SnapSync {
		d.pivotLock.Lock()
		d.pivotHeader = pivot
		d.pivotLock.Unlock()

		fetchers = append(fetchers, func() error { return d.processSnapSyncContent() })
	} else if mode == FullSync {
		fetchers = append(fetchers, func() error { return d.processFullSyncContent(ttd, beaconMode) })
	}
	return d.spawnSync(fetchers)
}

// spawnSync runs d.process and all given fetcher functions to completion in
// separate goroutines, returning the first error that appears.
func (d *Downloader) spawnSync(fetchers []func() error) error {
	errc := make(chan error, len(fetchers))
	d.cancelWg.Add(len(fetchers))
	for _, fn := range fetchers {
		fn := fn
		go func() { defer d.cancelWg.Done(); errc <- fn() }()
	}
	// Wait for the first error, then terminate the others.
	var err error
	for i := 0; i < len(fetchers); i++ {
		if i == len(fetchers)-1 {
			// Close the queue when all fetchers have exited.
			// This will cause the block processor to end when
			// it has processed the queue.
			d.queue.Close()
		}
		if err = <-errc; err != nil && err != errCanceled {
			break
		}
	}
	d.queue.Close()
	d.Cancel()
	return err
}

// cancel aborts all of the operations and resets the queue. However, cancel does
// not wait for the running download goroutines to finish. This method should be
// used when cancelling the downloads from inside the downloader.
func (d *Downloader) cancel() {
	// Close the current cancel channel
	d.cancelLock.Lock()
	defer d.cancelLock.Unlock()

	if d.cancelCh != nil {
		select {
		case <-d.cancelCh:
			// Channel was already closed
		default:
			close(d.cancelCh)
		}
	}
}

// Cancel aborts all of the operations and waits for all download goroutines to
// finish before returning.
func (d *Downloader) Cancel() {
	d.cancel()
	d.cancelWg.Wait()
}

// Terminate interrupts the downloader, canceling all pending operations.
// The downloader cannot be reused after calling Terminate.
func (d *Downloader) Terminate() {
	// Close the termination channel (make sure double close is allowed)
	d.quitLock.Lock()
	select {
	case <-d.quitCh:
	default:
		close(d.quitCh)

		// Terminate the internal beacon syncer
		d.skeleton.Terminate()
	}
	d.quitLock.Unlock()

	// Cancel any pending download requests
	d.Cancel()
}

// fetchHead retrieves the head header and prior pivot block (if available) from
// a remote peer.
func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) {
	p.log.Debug("Retrieving remote chain head")
	mode := d.getMode()

	// Request the advertised remote head block and wait for the response
	latest, _ := p.peer.Head()
	fetch := 1
	if mode == SnapSync {
		fetch = 2 // head + pivot headers
	}
	headers, hashes, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true)
	if err != nil {
		return nil, nil, err
	}
	// Make sure the peer gave us at least one and at most the requested headers
	if len(headers) == 0 || len(headers) > fetch {
		return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch)
	}
	// The first header needs to be the head, validate against the checkpoint
	// and request. If only 1 header was returned, make sure there's no pivot
	// or there was not one requested.
	head = headers[0]
	if (mode == SnapSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint {
		return nil, nil, fmt.Errorf("%w: remote head %d below checkpoint %d", errUnsyncedPeer, head.Number, d.checkpoint)
	}
	if len(headers) == 1 {
		if mode == SnapSync && head.Number.Uint64() > uint64(fsMinFullBlocks) {
			return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer)
		}
		p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", hashes[0])
		return head, nil, nil
	}
	// At this point we have 2 headers in total and the first is the
	// validated head of the chain. Check the pivot number and return.
	pivot = headers[1]
	if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) {
		return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks))
	}
	return head, pivot, nil
}

// calculateRequestSpan calculates what headers to request from a peer when trying to determine the
// common ancestor.
// It returns parameters to be used for peer.RequestHeadersByNumber:
//
//	from  - starting block number
//	count - number of headers to request
//	skip  - number of headers to skip
//
// and also returns 'max', the last block which is expected to be returned by the remote peers,
// given the (from,count,skip). For example, with remoteHeight=1500 and localHeight=1000 the
// function requests 12 headers spaced 16 blocks apart: from=1323, count=12, skip=15, max=1499.
func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) {
	var (
		from     int
		count    int
		MaxCount = MaxHeaderFetch / 16
	)
	// requestHead is the highest block that we will ask for. If requestHead is not offset,
	// the highest block that we will get is 16 blocks back from head, which means we
	// will fetch 14 or 15 blocks unnecessarily in the case the height difference
	// between us and the peer is 1-2 blocks, which is most common
	requestHead := int(remoteHeight) - 1
	if requestHead < 0 {
		requestHead = 0
	}
	// requestBottom is the lowest block we want included in the query
	// Ideally, we want to include the one just below our own head
	requestBottom := int(localHeight - 1)
	if requestBottom < 0 {
		requestBottom = 0
	}
	totalSpan := requestHead - requestBottom
	span := 1 + totalSpan/MaxCount
	if span < 2 {
		span = 2
	}
	if span > 16 {
		span = 16
	}

	count = 1 + totalSpan/span
	if count > MaxCount {
		count = MaxCount
	}
	if count < 2 {
		count = 2
	}
	from = requestHead - (count-1)*span
	if from < 0 {
		from = 0
	}
	max := from + (count-1)*span
	return int64(from), count, span - 1, uint64(max)
}

// findAncestor tries to locate the common ancestor link of the local chain and
// a remote peer's blockchain. In the general case when our node was in sync and
// on the correct chain, checking the top N links should already get us a match.
// In the rare scenario when we ended up on a long reorganisation (i.e. none of
// the head links match), we do a binary search to find the common ancestor.
func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) {
	// Figure out the valid ancestor range to prevent rewrite attacks
	var (
		floor        = int64(-1)
		localHeight  uint64
		remoteHeight = remoteHeader.Number.Uint64()
	)
	mode := d.getMode()
	switch mode {
	case FullSync:
		localHeight = d.blockchain.CurrentBlock().Number.Uint64()
	case SnapSync:
		localHeight = d.blockchain.CurrentSnapBlock().Number.Uint64()
	default:
		localHeight = d.lightchain.CurrentHeader().Number.Uint64()
	}
	p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight)

	// Recap floor value for binary search
	maxForkAncestry := fullMaxForkAncestry
	if d.getMode() == LightSync {
		maxForkAncestry = lightMaxForkAncestry
	}
	if localHeight >= maxForkAncestry {
		// We're above the max reorg threshold, find the earliest fork point
		floor = int64(localHeight - maxForkAncestry)
	}
	// If we're doing a light sync, ensure the floor doesn't go below the CHT, as
	// all headers before that point will be missing.
	if mode == LightSync {
		// If we don't know the current CHT position, find it
		if d.genesis == 0 {
			header := d.lightchain.CurrentHeader()
			for header != nil {
				d.genesis = header.Number.Uint64()
				if floor >= int64(d.genesis)-1 {
					break
				}
				header = d.lightchain.GetHeaderByHash(header.ParentHash)
			}
		}
		// We already know the "genesis" block number, cap floor to that
		if floor < int64(d.genesis)-1 {
			floor = int64(d.genesis) - 1
		}
	}

	ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor)
	if err == nil {
		return ancestor, nil
	}
	// The returned error was not nil.
	// If the error returned does not reflect that a common ancestor was not found, return it.
	// If the error reflects that a common ancestor was not found, continue to binary search,
	// where the error value will be reassigned.
	if !errors.Is(err, errNoAncestorFound) {
		return 0, err
	}

	ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor)
	if err != nil {
		return 0, err
	}
	return ancestor, nil
}

func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (uint64, error) {
	from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight)

	p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip)
	headers, hashes, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false)
	if err != nil {
		return 0, err
	}
	// Wait for the remote response to the head fetch
	number, hash := uint64(0), common.Hash{}

	// Make sure the peer actually gave something valid
	if len(headers) == 0 {
		p.log.Warn("Empty head header set")
		return 0, errEmptyHeaderSet
	}
	// Make sure the peer's reply conforms to the request
	for i, header := range headers {
		expectNumber := from + int64(i)*int64(skip+1)
		if number := header.Number.Int64(); number != expectNumber {
			p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number)
			return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering"))
		}
	}
	// Check if a common ancestor was found
	for i := len(headers) - 1; i >= 0; i-- {
		// Skip any headers that underflow/overflow our requested set
		if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max {
			continue
		}
		// Otherwise check if we already know the header or not
		h := hashes[i]
		n := headers[i].Number.Uint64()

		var known bool
		switch mode {
		case FullSync:
			known = d.blockchain.HasBlock(h, n)
		case SnapSync:
			known = d.blockchain.HasFastBlock(h, n)
		default:
			known = d.lightchain.HasHeader(h, n)
		}
		if known {
			number, hash = n, h
			break
		}
	}
	// If the head fetch already found an ancestor, return
	if hash != (common.Hash{}) {
		if int64(number) <= floor {
			p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor)
			return 0, errInvalidAncestor
		}
		p.log.Debug("Found common ancestor", "number", number, "hash", hash)
		return number, nil
	}
	return 0, errNoAncestorFound
}

func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (uint64, error) {
	hash := common.Hash{}

	// Ancestor not found, we need to binary search over our chain
	start, end := uint64(0), remoteHeight
	if floor > 0 {
		start = uint64(floor)
	}
	p.log.Trace("Binary searching for common ancestor", "start", start, "end", end)

	for start+1 < end {
		// Split our chain interval in two, and request the hash to cross check
		check := (start + end) / 2

		headers, hashes, err := d.fetchHeadersByNumber(p, check, 1, 0, false)
		if err != nil {
			return 0, err
		}
		// Make sure the peer actually gave something valid
		if len(headers) != 1 {
			p.log.Warn("Multiple headers for single request", "headers", len(headers))
			return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers))
		}
		// Modify the search interval based on the response
		h := hashes[0]
		n := headers[0].Number.Uint64()

		var known bool
		switch mode {
		case FullSync:
			known = d.blockchain.HasBlock(h, n)
		case SnapSync:
			known = d.blockchain.HasFastBlock(h, n)
		default:
			known = d.lightchain.HasHeader(h, n)
		}
		if !known {
			end = check
			continue
		}
		header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists
		if header.Number.Uint64() != check {
			p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check)
			return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number)
		}
		start = check
		hash = h
	}
	// Ensure valid ancestry and return
	if int64(start) <= floor {
		p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor)
		return 0, errInvalidAncestor
	}
	p.log.Debug("Found common ancestor", "number", start, "hash", hash)
	return start, nil
}

// fetchHeaders keeps retrieving headers concurrently from the number
// requested, until no more are returned, potentially throttling on the way. To
// facilitate concurrency but still protect against malicious nodes sending bad
// headers, we construct a header chain skeleton using the "origin" peer we are
// syncing with, and fill in the missing headers using anyone else. Headers from
// other peers are only accepted if they map cleanly to the skeleton. If no one
// can fill in the skeleton - not even the origin peer - it's assumed invalid and
// the origin is dropped.
func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) error {
	p.log.Debug("Directing header downloads", "origin", from)
	defer p.log.Debug("Header download terminated")

	// Start pulling the header chain skeleton until all is done
	var (
		skeleton = true  // Skeleton assembly phase or finishing up
		pivoting = false // Whether the next request is pivot verification
		ancestor = from
		mode     = d.getMode()
	)
	for {
		// Pull the next batch of headers, it either:
		//   - Pivot check to see if the chain moved too far
		//   - Skeleton retrieval to permit concurrent header fetches
		//   - Full header retrieval if we're near the chain head
		var (
			headers []*types.Header
			hashes  []common.Hash
			err     error
		)
		switch {
		case pivoting:
			d.pivotLock.RLock()
			pivot := d.pivotHeader.Number.Uint64()
			d.pivotLock.RUnlock()

			p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks))
			headers, hashes, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep

		case skeleton:
			p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from)
			headers, hashes, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false)

		default:
			p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from)
			headers, hashes, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false)
		}
		switch err {
		case nil:
			// Headers retrieved, continue with processing

		case errCanceled:
			// Sync cancelled, no issue, propagate up
			return err

		default:
			// Header retrieval either timed out, or the peer failed in some strange way
			// (e.g. disconnect). Consider the master peer bad and drop
			d.dropPeer(p.id)

			// Finish the sync gracefully instead of dumping the gathered data though
			for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
				select {
				case ch <- false:
				case <-d.cancelCh:
				}
			}
			select {
			case d.headerProcCh <- nil:
			case <-d.cancelCh:
			}
			return fmt.Errorf("%w: header request failed: %v", errBadPeer, err)
		}
		// If the pivot is being checked, move if it became stale and run the real retrieval
		var pivot uint64

		d.pivotLock.RLock()
		if d.pivotHeader != nil {
			pivot = d.pivotHeader.Number.Uint64()
		}
		d.pivotLock.RUnlock()

		if pivoting {
			if len(headers) == 2 {
				if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want {
					log.Warn("Peer sent invalid next pivot", "have", have, "want", want)
					return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want)
				}
				if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want {
					log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want)
					return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want)
				}
				log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number)
				pivot = headers[0].Number.Uint64()

				d.pivotLock.Lock()
				d.pivotHeader = headers[0]
				d.pivotLock.Unlock()

				// Write out the pivot into the database so a rollback beyond
				// it will reenable snap sync and update the state root that
				// the state syncer will be downloading.
				rawdb.WriteLastPivotNumber(d.stateDB, pivot)
			}
			// Disable the pivot check and fetch the next batch of headers
			pivoting = false
			continue
		}
		// If the skeleton's finished, pull any remaining head headers directly from the origin
		if skeleton && len(headers) == 0 {
			// A malicious node might withhold advertised headers indefinitely
			if from+uint64(MaxHeaderFetch)-1 <= head {
				p.log.Warn("Peer withheld skeleton headers", "advertised", head, "withheld", from+uint64(MaxHeaderFetch)-1)
				return fmt.Errorf("%w: withheld skeleton headers: advertised %d, withheld #%d", errStallingPeer, head, from+uint64(MaxHeaderFetch)-1)
			}
			p.log.Debug("No skeleton, fetching headers directly")
			skeleton = false
			continue
		}
		// If no more headers are inbound, notify the content fetchers and return
		if len(headers) == 0 {
			// Don't abort header fetches while the pivot is downloading
			if atomic.LoadInt32(&d.committed) == 0 && pivot <= from {
				p.log.Debug("No headers, waiting for pivot commit")
				select {
				case <-time.After(fsHeaderContCheck):
					continue
				case <-d.cancelCh:
					return errCanceled
				}
			}
			// Pivot done (or not in snap sync) and no more headers, terminate the process
			p.log.Debug("No more headers available")
			select {
			case d.headerProcCh <- nil:
				return nil
			case <-d.cancelCh:
				return errCanceled
			}
		}
		// If we received a skeleton batch, resolve internals concurrently
		var progressed bool
		if skeleton {
			filled, hashset, proced, err := d.fillHeaderSkeleton(from, headers)
			if err != nil {
				p.log.Debug("Skeleton chain invalid", "err", err)
				return fmt.Errorf("%w: %v", errInvalidChain, err)
			}
			headers = filled[proced:]
			hashes = hashset[proced:]

			progressed = proced > 0
			from += uint64(proced)
		} else {
			// A malicious node might withhold advertised headers indefinitely
			if n := len(headers); n < MaxHeaderFetch && headers[n-1].Number.Uint64() < head {
				p.log.Warn("Peer withheld headers", "advertised", head, "delivered", headers[n-1].Number.Uint64())
				return fmt.Errorf("%w: withheld headers: advertised %d, delivered %d", errStallingPeer, head, headers[n-1].Number.Uint64())
			}
			// If we're closing in on the chain head, but haven't yet reached it, delay
			// the last few headers so mini reorgs on the head don't cause invalid hash
			// chain errors.
			if n := len(headers); n > 0 {
				// Retrieve the current head we're at
				var head uint64
				if mode == LightSync {
					head = d.lightchain.CurrentHeader().Number.Uint64()
				} else {
					head = d.blockchain.CurrentSnapBlock().Number.Uint64()
					if full := d.blockchain.CurrentBlock().Number.Uint64(); head < full {
						head = full
					}
				}
				// If the head is below the common ancestor, we're actually deduplicating
				// already existing chain segments, so use the ancestor as the fake head.
				// Otherwise, we might end up delaying header deliveries pointlessly.
				if head < ancestor {
					head = ancestor
				}
				// If the head is way older than this batch, delay the last few headers
				if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
					delay := reorgProtHeaderDelay
					if delay > n {
						delay = n
					}
					headers = headers[:n-delay]
					hashes = hashes[:n-delay]
				}
			}
		}
		// If no headers have been delivered, or all of them have been delayed,
		// sleep a bit and retry. Take care with headers already consumed during
		// skeleton filling
		if len(headers) == 0 && !progressed {
			p.log.Trace("All headers delayed, waiting")
			select {
			case <-time.After(fsHeaderContCheck):
				continue
			case <-d.cancelCh:
				return errCanceled
			}
		}
		// Insert any remaining new headers and fetch the next batch
		if len(headers) > 0 {
			p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
			select {
			case d.headerProcCh <- &headerTask{
				headers: headers,
				hashes:  hashes,
			}:
			case <-d.cancelCh:
				return errCanceled
			}
			from += uint64(len(headers))
		}
		// If we're still skeleton filling snap sync, check pivot staleness
		// before continuing to the next skeleton filling
		if skeleton && pivot > 0 {
			pivoting = true
		}
	}
}

// fillHeaderSkeleton concurrently retrieves headers from all our available peers
// and maps them to the provided skeleton header chain.
//
// Any partial results from the beginning of the skeleton are (if possible) forwarded
// immediately to the header processor to keep the rest of the pipeline full even
// in the case of header stalls.
//
// The method returns the entire filled skeleton and also the number of headers
// already forwarded for processing.
func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, []common.Hash, int, error) {
	log.Debug("Filling up skeleton", "from", from)
	d.queue.ScheduleSkeleton(from, skeleton)

	err := d.concurrentFetch((*headerQueue)(d), false)
	if err != nil {
		log.Debug("Skeleton fill failed", "err", err)
	}
	filled, hashes, proced := d.queue.RetrieveHeaders()
	if err == nil {
		log.Debug("Skeleton fill succeeded", "filled", len(filled), "processed", proced)
	}
	return filled, hashes, proced, err
}

// fetchBodies iteratively downloads the scheduled block bodies, taking any
// available peers, reserving a chunk of blocks for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error {
	log.Debug("Downloading block bodies", "origin", from)
	err := d.concurrentFetch((*bodyQueue)(d), beaconMode)

	log.Debug("Block body download terminated", "err", err)
	return err
}

// fetchReceipts iteratively downloads the scheduled block receipts, taking any
// available peers, reserving a chunk of receipts for each, waiting for delivery
// and also periodically checking for timeouts.
func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error {
	log.Debug("Downloading receipts", "origin", from)
	err := d.concurrentFetch((*receiptQueue)(d), beaconMode)

	log.Debug("Receipt download terminated", "err", err)
	return err
}

// processHeaders takes batches of retrieved headers from an input channel and
// keeps processing and scheduling them into the header chain and downloader's
// queue until the stream ends or a failure occurs.
func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error {
	// Keep a count of uncertain headers to roll back
	var (
		rollback    uint64 // Zero means no rollback (fine as you can't unroll the genesis)
		rollbackErr error
		mode        = d.getMode()
	)
	defer func() {
		if rollback > 0 {
			lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0
			if mode != LightSync {
				lastFastBlock = d.blockchain.CurrentSnapBlock().Number
				lastBlock = d.blockchain.CurrentBlock().Number
			}
			if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block
				// We're already unwinding the stack, only print the error to make it more visible
				log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err)
			}
			curFastBlock, curBlock := common.Big0, common.Big0
			if mode != LightSync {
				curFastBlock = d.blockchain.CurrentSnapBlock().Number
				curBlock = d.blockchain.CurrentBlock().Number
			}
			log.Warn("Rolled back chain segment",
				"header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number),
				"snap", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock),
				"block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr)
		}
	}()
	// Wait for batches of headers to process
	gotHeaders := false

	for {
		select {
		case <-d.cancelCh:
			rollbackErr = errCanceled
			return errCanceled

		case task := <-d.headerProcCh:
			// Terminate header processing if we synced up
			if task == nil || len(task.headers) == 0 {
				// Notify everyone that headers are fully processed
				for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
					select {
					case ch <- false:
					case <-d.cancelCh:
					}
				}
				// If we're in legacy sync mode, we need to check total difficulty
				// violations from malicious peers. That is not needed in beacon
				// mode and we can skip to terminating sync.
				if !beaconMode {
					// If no headers were retrieved at all, the peer violated its TD promise that it had a
					// better chain compared to ours. The only exception is if its promised blocks were
					// already imported by other means (e.g. fetcher):
					//
					// R <remote peer>, L <local node>: Both at block 10
					// R: Mine block 11, and propagate it to L
					// L: Queue block 11 for import
					// L: Notice that R's head and TD increased compared to ours, start sync
					// L: Import of block 11 finishes
					// L: Sync begins, and finds common ancestor at 11
					// L: Request new headers up from 11 (R's TD was higher, it must have something)
					// R: Nothing to give
					if mode != LightSync {
						head := d.blockchain.CurrentBlock()
						if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
							return errStallingPeer
						}
					}
					// If snap or light syncing, ensure promised headers are indeed delivered. This is
					// needed to detect scenarios where an attacker feeds a bad pivot and then bails out
					// of delivering the post-pivot blocks that would flag the invalid content.
					//
					// This check cannot be executed "as is" for full imports, since blocks may still be
					// queued for processing when the header download completes. However, as long as the
					// peer gave us something useful, we're already happy/progressed (above check).
					if mode == SnapSync || mode == LightSync {
						head := d.lightchain.CurrentHeader()
						if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 {
							return errStallingPeer
						}
					}
				}
				// Disable any rollback and return
				rollback = 0
				return nil
			}
			// Otherwise split the chunk of headers into batches and process them
			headers, hashes := task.headers, task.hashes

			gotHeaders = true
			for len(headers) > 0 {
				// Terminate if something failed in between processing chunks
				select {
				case <-d.cancelCh:
					rollbackErr = errCanceled
					return errCanceled
				default:
				}
				// Select the next chunk of headers to import
				limit := maxHeadersProcess
				if limit > len(headers) {
					limit = len(headers)
				}
				chunkHeaders := headers[:limit]
				chunkHashes := hashes[:limit]

				// In case of header only syncing, validate the chunk immediately
				if mode == SnapSync || mode == LightSync {
					// If we're importing pure headers, verify based on their recentness
					var pivot uint64

					d.pivotLock.RLock()
					if d.pivotHeader != nil {
						pivot = d.pivotHeader.Number.Uint64()
					}
					d.pivotLock.RUnlock()

					frequency := fsHeaderCheckFrequency
					if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot {
						frequency = 1
					}
					// Although the received headers might be all valid, a legacy
					// PoW/PoA sync must not accept post-merge headers. Make sure
					// that any transition is rejected at this point.
					var (
						rejected []*types.Header
						td       *big.Int
					)
					if !beaconMode && ttd != nil {
						td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1)
						if td == nil {
							// This should never really happen, but handle gracefully for now
							log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash)
							return fmt.Errorf("%w: parent TD missing", errInvalidChain)
						}
						for i, header := range chunkHeaders {
							td = new(big.Int).Add(td, header.Difficulty)
							if td.Cmp(ttd) >= 0 {
								// Terminal total difficulty reached, allow the last header in
								if new(big.Int).Sub(td, header.Difficulty).Cmp(ttd) < 0 {
									chunkHeaders, rejected = chunkHeaders[:i+1], chunkHeaders[i+1:]
									if len(rejected) > 0 {
										// Make a nicer user log as to the first TD truly rejected
										td = new(big.Int).Add(td, rejected[0].Difficulty)
									}
								} else {
									chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:]
								}
								break
							}
						}
					}
					if len(chunkHeaders) > 0 {
						if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil {
							rollbackErr = err

							// If some headers were inserted, track them as uncertain
							if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 {
								rollback = chunkHeaders[0].Number.Uint64()
							}
							log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err)
							return fmt.Errorf("%w: %v", errInvalidChain, err)
						}
						// All verifications passed, track all headers within the allowed limits
						if mode == SnapSync {
							head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64()
							if head-rollback > uint64(fsHeaderSafetyNet) {
								rollback = head - uint64(fsHeaderSafetyNet)
							} else {
								rollback = 1
							}
						}
					}
					if len(rejected) != 0 {
						// Merge threshold reached, stop importing, but don't roll back
						rollback = 0

						log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd)
						return ErrMergeTransition
					}
				}
				// Unless we're doing light chains, schedule the headers for associated content retrieval
				if mode == FullSync || mode == SnapSync {
					// If we've reached the allowed number of pending headers, stall a bit
					for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
						select {
						case <-d.cancelCh:
							rollbackErr = errCanceled
							return errCanceled
						case <-time.After(time.Second):
						}
					}
					// Otherwise insert the headers for content retrieval
					inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin)
					if len(inserts) != len(chunkHeaders) {
						rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunkHeaders))
						return fmt.Errorf("%w: stale headers", errBadPeer)
					}
				}
				headers = headers[limit:]
				hashes = hashes[limit:]
				origin += uint64(limit)
			}
			// Update the highest block number we know if a higher one is found.
			d.syncStatsLock.Lock()
			if d.syncStatsChainHeight < origin {
				d.syncStatsChainHeight = origin - 1
			}
			d.syncStatsLock.Unlock()

			// Signal the content downloaders of the availability of new tasks
			for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
				select {
				case ch <- true:
				default:
				}
			}
		}
	}
}

// processFullSyncContent takes fetch results from the queue and imports them into the chain.
func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error {
	for {
		results := d.queue.Results(true)
		if len(results) == 0 {
			return nil
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		// Although the received blocks might be all valid, a legacy PoW/PoA sync
		// must not accept post-merge blocks. Make sure that pre-merge blocks are
		// imported, but post-merge ones are rejected.
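		// For illustration (hypothetical numbers): with ttd = 100 and a parent TD
		// of 90, a block adding difficulty 8 (TD 98) is still pre-merge, the next
		// block adding 5 (TD 103) is the terminal block and is kept, and every
		// result after it ends up in `rejected`.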
		var (
			rejected []*fetchResult
			td       *big.Int
		)
		if !beaconMode && ttd != nil {
			td = d.blockchain.GetTd(results[0].Header.ParentHash, results[0].Header.Number.Uint64()-1)
			if td == nil {
				// This should never really happen, but handle gracefully for now
				log.Error("Failed to retrieve parent block TD", "number", results[0].Header.Number.Uint64()-1, "hash", results[0].Header.ParentHash)
				return fmt.Errorf("%w: parent TD missing", errInvalidChain)
			}
			for i, result := range results {
				td = new(big.Int).Add(td, result.Header.Difficulty)
				if td.Cmp(ttd) >= 0 {
					// Terminal total difficulty reached, allow the last block in
					if new(big.Int).Sub(td, result.Header.Difficulty).Cmp(ttd) < 0 {
						results, rejected = results[:i+1], results[i+1:]
						if len(rejected) > 0 {
							// Make a nicer user log as to the first TD truly rejected
							td = new(big.Int).Add(td, rejected[0].Header.Difficulty)
						}
					} else {
						results, rejected = results[:i], results[i:]
					}
					break
				}
			}
		}
		if err := d.importBlockResults(results); err != nil {
			return err
		}
		if len(rejected) != 0 {
			log.Info("Legacy sync reached merge threshold", "number", rejected[0].Header.Number, "hash", rejected[0].Header.Hash(), "td", td, "ttd", ttd)
			return ErrMergeTransition
		}
	}
}

func (d *Downloader) importBlockResults(results []*fetchResult) error {
	// Check for any early termination requests
	if len(results) == 0 {
		return nil
	}
	select {
	case <-d.quitCh:
		return errCancelContentProcessing
	default:
	}
	// Retrieve a batch of results to import
	first, last := results[0].Header, results[len(results)-1].Header
	log.Debug("Inserting downloaded chain", "items", len(results),
		"firstnum", first.Number, "firsthash", first.Hash(),
		"lastnum", last.Number, "lasthash", last.Hash(),
	)
	blocks := make([]*types.Block, len(results))
	for i, result := range results {
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals)
	}
	// Downloaded blocks are always regarded as trusted after the transition,
	// because the downloaded chain is guided by the consensus layer.
	if index, err := d.blockchain.InsertChain(blocks); err != nil {
		if index < len(results) {
			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)

			// In post-merge, notify the engine API of encountered bad chains
			if d.badBlock != nil {
				head, _, _, err := d.skeleton.Bounds()
				if err != nil {
					log.Error("Failed to retrieve beacon bounds for bad block reporting", "err", err)
				} else {
					d.badBlock(blocks[index].Header(), head)
				}
			}
		} else {
			// The InsertChain method in blockchain.go will sometimes return an out-of-bounds
			// index when it needs to preprocess blocks to import a sidechain.
			// The importer will put together a new list of blocks to import, which is a
			// superset of the blocks delivered from the downloader, so the indexing will be off.
			log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
		}
		return fmt.Errorf("%w: %v", errInvalidChain, err)
	}
	return nil
}

// processSnapSyncContent takes fetch results from the queue and writes them to the
// database. It also controls the synchronisation of state nodes of the pivot block.
func (d *Downloader) processSnapSyncContent() error {
	// Start syncing state of the reported head block. This should get us most of
	// the state of the pivot block.
	d.pivotLock.RLock()
	sync := d.syncState(d.pivotHeader.Root)
	d.pivotLock.RUnlock()

	defer func() {
		// The `sync` object is replaced every time the pivot moves. We need to
		// defer-close the very last active one, hence the lazy evaluation here
		// instead of a direct `defer sync.Cancel()`.
		sync.Cancel()
	}()

	closeOnErr := func(s *stateSync) {
		if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled {
			d.queue.Close() // wake up Results
		}
	}
	go closeOnErr(sync)

	// To cater for moving pivot points, track the pivot block and subsequently
	// accumulated download results separately.
	var (
		oldPivot *fetchResult   // Locked in pivot block, might change eventually
		oldTail  []*fetchResult // Downloaded content after the pivot
	)
	for {
		// Wait for the next batch of downloaded data to be available, and if the pivot
		// block became stale, move the goalpost
		results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
		if len(results) == 0 {
			// If pivot sync is done, stop
			if oldPivot == nil {
				d.reportSnapSyncProgress(true)
				return sync.Cancel()
			}
			// If sync failed, stop
			select {
			case <-d.cancelCh:
				sync.Cancel()
				return errCanceled
			default:
			}
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		d.reportSnapSyncProgress(false)

		// If we haven't downloaded the pivot block yet, check pivot staleness
		// notifications from the header downloader
		d.pivotLock.RLock()
		pivot := d.pivotHeader
		d.pivotLock.RUnlock()

		if oldPivot == nil {
			if pivot.Root != sync.root {
				sync.Cancel()
				sync = d.syncState(pivot.Root)

				go closeOnErr(sync)
			}
		} else {
			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
		}
		// Split around the pivot block and process the two sides via snap/full sync
		if atomic.LoadInt32(&d.committed) == 0 {
			latest := results[len(results)-1].Header
			// If the height is above the pivot block by 2 sets, it means the pivot
			// became stale in the network and was garbage collected, so move to a
			// new pivot.
			//
			// Note, we have `reorgProtHeaderDelay` blocks withheld. Those need to
			// be taken into account, otherwise we detect the pivot move too late
			// and will drop peers due to unavailable state!
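			// For illustration (assuming the current defaults fsMinFullBlocks = 64 and
			// reorgProtHeaderDelay = 2, and contiguous results): a pivot at block N is
			// treated as stale once the downloaded results reach N+126, and the pivot is
			// then advanced to the result 62 positions behind the latest one.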
			if height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) {
				log.Warn("Pivot became stale, moving", "old", pivot.Number.Uint64(), "new", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay))
				pivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted

				d.pivotLock.Lock()
				d.pivotHeader = pivot
				d.pivotLock.Unlock()

				// Write out the pivot into the database so a rollback beyond it will
				// reenable snap sync
				rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64())
			}
		}
		P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results)
		if err := d.commitSnapSyncData(beforeP, sync); err != nil {
			return err
		}
		if P != nil {
			// If new pivot block found, cancel old state retrieval and restart
			if oldPivot != P {
				sync.Cancel()
				sync = d.syncState(P.Header.Root)

				go closeOnErr(sync)
				oldPivot = P
			}
			// Wait for completion, occasionally checking for pivot staleness
			select {
			case <-sync.done:
				if sync.err != nil {
					return sync.err
				}
				if err := d.commitPivotBlock(P); err != nil {
					return err
				}
				oldPivot = nil

			case <-time.After(time.Second):
				oldTail = afterP
				continue
			}
		}
		// Snap sync done, pivot commit done, full import
		if err := d.importBlockResults(afterP); err != nil {
			return err
		}
	}
}

func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
	if len(results) == 0 {
		return nil, nil, nil
	}
	if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {
		// The pivot is somewhere in the future
		return nil, results, nil
	}
	// This can also be optimized, but it only happens very rarely
	for _, result := range results {
		num := result.Header.Number.Uint64()
		switch {
		case num < pivot:
			before = append(before, result)
		case num == pivot:
			p = result
		default:
			after = append(after, result)
		}
	}
	return p, before, after
}

func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *stateSync) error {
	// Check for any early termination requests
	if len(results) == 0 {
		return nil
	}
	select {
	case <-d.quitCh:
		return errCancelContentProcessing
	case <-stateSync.done:
		if err := stateSync.Wait(); err != nil {
			return err
		}
	default:
	}
	// Retrieve the batch of results to import
	first, last := results[0].Header, results[len(results)-1].Header
	log.Debug("Inserting snap-sync blocks", "items", len(results),
		"firstnum", first.Number, "firsthash", first.Hash(),
		"lastnum", last.Number, "lasthash", last.Hash(),
	)
	blocks := make([]*types.Block, len(results))
	receipts := make([]types.Receipts, len(results))
	for i, result := range results {
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals)
		receipts[i] = result.Receipts
	}
	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
		return fmt.Errorf("%w: %v", errInvalidChain, err)
	}
	return nil
}

func (d *Downloader) commitPivotBlock(result *fetchResult) error {
	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles).WithWithdrawals(result.Withdrawals)
	log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash())

	// Commit the pivot block as the new head, will require full sync from here on
	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {
		return err
	}
	if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil {
		return err
	}
	atomic.StoreInt32(&d.committed, 1)
	return nil
}

// DeliverSnapPacket is invoked from a peer's message handler when it transmits a
// data packet for the local node to consume.
func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error {
	switch packet := packet.(type) {
	case *snap.AccountRangePacket:
		hashes, accounts, err := packet.Unpack()
		if err != nil {
			return err
		}
		return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof)

	case *snap.StorageRangesPacket:
		hashset, slotset := packet.Unpack()
		return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof)

	case *snap.ByteCodesPacket:
		return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)

	case *snap.TrieNodesPacket:
		return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)

	default:
		return fmt.Errorf("unexpected snap packet type: %T", packet)
	}
}

// readHeaderRange returns a list of headers, using the given last header as the base,
// and going backwards towards genesis. The returned headers start at last's parent and
// are ordered from highest to lowest number; last itself is not included. This method
// assumes that the caller has already placed a reasonable cap on count.
func (d *Downloader) readHeaderRange(last *types.Header, count int) []*types.Header {
	var (
		current = last
		headers []*types.Header
	)
	for {
		parent := d.lightchain.GetHeaderByHash(current.ParentHash)
		if parent == nil {
			break // The chain is not continuous, or the chain is exhausted
		}
		headers = append(headers, parent)
		if len(headers) >= count {
			break
		}
		current = parent
	}
	return headers
}

// reportSnapSyncProgress calculates various status reports and provides them to the user.
func (d *Downloader) reportSnapSyncProgress(force bool) {
	// Initialize the sync start time if it's the first time we're reporting
	if d.syncStartTime.IsZero() {
		d.syncStartTime = time.Now().Add(-time.Millisecond) // -1ms offset to avoid division by zero
	}
	// Don't report all the events, just occasionally
	if !force && time.Since(d.syncLogTime) < 8*time.Second {
		return
	}
	// Don't report anything until we have meaningful progress
	var (
		headerBytes, _  = d.stateDB.AncientSize(rawdb.ChainFreezerHeaderTable)
		bodyBytes, _    = d.stateDB.AncientSize(rawdb.ChainFreezerBodiesTable)
		receiptBytes, _ = d.stateDB.AncientSize(rawdb.ChainFreezerReceiptTable)
	)
	syncedBytes := common.StorageSize(headerBytes + bodyBytes + receiptBytes)
	if syncedBytes == 0 {
		return
	}
	var (
		header = d.blockchain.CurrentHeader()
		block  = d.blockchain.CurrentSnapBlock()
	)
	syncedBlocks := block.Number.Uint64() - d.syncStartBlock
	if syncedBlocks == 0 {
		return
	}
	// Retrieve the current chain head and calculate the ETA
	latest, _, _, err := d.skeleton.Bounds()
	if err != nil {
		// We're going to cheat for non-merged networks, but that's fine
		latest = d.pivotHeader
	}
	if latest == nil {
		// This should really never happen, but add some defensive code for now.
		// TODO(karalabe): Remove it eventually if we don't see it blow.
		log.Error("Nil latest block in sync progress report")
		return
	}
	var (
		left = latest.Number.Uint64() - block.Number.Uint64()
		eta  = time.Since(d.syncStartTime) / time.Duration(syncedBlocks) * time.Duration(left)

		progress = fmt.Sprintf("%.2f%%", float64(block.Number.Uint64())*100/float64(latest.Number.Uint64()))
		headers  = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(header.Number.Uint64()), common.StorageSize(headerBytes).TerminalString())
		bodies   = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(bodyBytes).TerminalString())
		receipts = fmt.Sprintf("%v@%v", log.FormatLogfmtUint64(block.Number.Uint64()), common.StorageSize(receiptBytes).TerminalString())
	)
	log.Info("Syncing: chain download in progress", "synced", progress, "chain", syncedBytes, "headers", headers, "bodies", bodies, "receipts", receipts, "eta", common.PrettyDuration(eta))
	d.syncLogTime = time.Now()
}
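
// The following function is an illustrative sketch, not part of the upstream
// downloader: it demonstrates how splitAroundPivot partitions a batch of fetch
// results around a pivot height. The block numbers used here are hypothetical.
func exampleSplitAroundPivot() {
	// Build three dummy results at heights 99, 100 and 101.
	results := []*fetchResult{
		{Header: &types.Header{Number: big.NewInt(99)}},
		{Header: &types.Header{Number: big.NewInt(100)}},
		{Header: &types.Header{Number: big.NewInt(101)}},
	}
	// Splitting around pivot 100 yields the pivot result itself, the results
	// strictly below it, and the results strictly above it.
	p, before, after := splitAroundPivot(100, results)
	fmt.Println(p.Header.Number, len(before), len(after)) // 100 1 1
}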