github.com/codysnider/go-ethereum@v1.10.18-0.20220420071915-14f4ae99222a/eth/downloader/downloader.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package downloader contains the manual full chain synchronisation. 18 package downloader 19 20 import ( 21 "errors" 22 "fmt" 23 "math/big" 24 "sync" 25 "sync/atomic" 26 "time" 27 28 "github.com/ethereum/go-ethereum" 29 "github.com/ethereum/go-ethereum/common" 30 "github.com/ethereum/go-ethereum/core/rawdb" 31 "github.com/ethereum/go-ethereum/core/state/snapshot" 32 "github.com/ethereum/go-ethereum/core/types" 33 "github.com/ethereum/go-ethereum/eth/protocols/snap" 34 "github.com/ethereum/go-ethereum/ethdb" 35 "github.com/ethereum/go-ethereum/event" 36 "github.com/ethereum/go-ethereum/log" 37 "github.com/ethereum/go-ethereum/params" 38 ) 39 40 var ( 41 MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request 42 MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request 43 MaxSkeletonSize = 128 // Number of header fetches to need for a skeleton assembly 44 MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request 45 46 maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) 47 maxHeadersProcess = 2048 // Number of header download results to import at once into the chain 48 maxResultsProcess = 2048 // Number of content download results to import at once into the chain 49 fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) 50 lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) 51 52 reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection 53 reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs 54 55 fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during snap sync 56 fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected 57 fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it 58 fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download 59 fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in snap sync 60 ) 61 62 var ( 63 errBusy = errors.New("busy") 64 errUnknownPeer = errors.New("peer is unknown or unhealthy") 65 errBadPeer = errors.New("action from bad peer ignored") 66 errStallingPeer = errors.New("peer is stalling") 67 errUnsyncedPeer = errors.New("unsynced peer") 68 errNoPeers = errors.New("no peers to keep download active") 
69 errTimeout = errors.New("timeout") 70 errEmptyHeaderSet = errors.New("empty header set by peer") 71 errPeersUnavailable = errors.New("no peers available or all tried for download") 72 errInvalidAncestor = errors.New("retrieved ancestor is invalid") 73 errInvalidChain = errors.New("retrieved hash chain is invalid") 74 errInvalidBody = errors.New("retrieved block body is invalid") 75 errInvalidReceipt = errors.New("retrieved receipt is invalid") 76 errCancelStateFetch = errors.New("state data download canceled (requested)") 77 errCancelContentProcessing = errors.New("content processing canceled (requested)") 78 errCanceled = errors.New("syncing canceled (requested)") 79 errTooOld = errors.New("peer's protocol version too old") 80 errNoAncestorFound = errors.New("no common ancestor found") 81 errNoPivotHeader = errors.New("pivot header is not found") 82 ErrMergeTransition = errors.New("legacy sync reached the merge") 83 ) 84 85 // peerDropFn is a callback type for dropping a peer detected as malicious. 86 type peerDropFn func(id string) 87 88 // headerTask is a set of downloaded headers to queue along with their precomputed 89 // hashes to avoid constant rehashing. 90 type headerTask struct { 91 headers []*types.Header 92 hashes []common.Hash 93 } 94 95 type Downloader struct { 96 mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode 97 mux *event.TypeMux // Event multiplexer to announce sync operation events 98 99 checkpoint uint64 // Checkpoint block number to enforce head against (e.g. snap sync) 100 genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT) 101 queue *queue // Scheduler for selecting the hashes to download 102 peers *peerSet // Set of active peers from which download can proceed 103 104 stateDB ethdb.Database // Database to state sync into (and deduplicate via) 105 106 // Statistics 107 syncStatsChainOrigin uint64 // Origin block number where syncing started at 108 syncStatsChainHeight uint64 // Highest block number known when syncing started 109 syncStatsLock sync.RWMutex // Lock protecting the sync stats fields 110 111 lightchain LightChain 112 blockchain BlockChain 113 114 // Callbacks 115 dropPeer peerDropFn // Drops a peer for misbehaving 116 117 // Status 118 synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing 119 synchronising int32 120 notified int32 121 committed int32 122 ancientLimit uint64 // The maximum block number which can be regarded as ancient data. 123 124 // Channels 125 headerProcCh chan *headerTask // Channel to feed the header processor new tasks 126 127 // Skeleton sync 128 skeleton *skeleton // Header skeleton to backfill the chain with (eth2 mode) 129 130 // State sync 131 pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root 132 pivotLock sync.RWMutex // Lock protecting pivot header reads from updates 133 134 snapSync bool // Whether to run state sync over the snap protocol 135 SnapSyncer *snap.Syncer // TODO(karalabe): make private! hack for now 136 stateSyncStart chan *stateSync 137 138 // Cancellation and termination 139 cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) 140 cancelCh chan struct{} // Channel to cancel mid-flight syncs 141 cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers 142 cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited. 
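	// Note: cancelCh and cancelPeer above are recreated for every sync cycle
	// (see synchronise), whereas quitCh below lives for the whole lifetime of
	// the Downloader and is only ever closed by Terminate. Cancel aborts the
	// current cycle; Terminate shuts the component down for good.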

	quitCh   chan struct{} // Quit channel to signal termination
	quitLock sync.Mutex    // Lock to prevent double closes

	// Testing hooks
	syncInitHook     func(uint64, uint64)  // Method to call upon initiating a new sync run
	bodyFetchHook    func([]*types.Header) // Method to call upon starting a block body fetch
	receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch
	chainInsertHook  func([]*fetchResult)  // Method to call upon inserting a chain of blocks (possibly in multiple invocations)
}

// LightChain encapsulates functions required to synchronise a light chain.
type LightChain interface {
	// HasHeader verifies a header's presence in the local chain.
	HasHeader(common.Hash, uint64) bool

	// GetHeaderByHash retrieves a header from the local chain.
	GetHeaderByHash(common.Hash) *types.Header

	// CurrentHeader retrieves the head header from the local chain.
	CurrentHeader() *types.Header

	// GetTd returns the total difficulty of a local block.
	GetTd(common.Hash, uint64) *big.Int

	// InsertHeaderChain inserts a batch of headers into the local chain.
	InsertHeaderChain([]*types.Header, int) (int, error)

	// SetHead rewinds the local chain to a new head.
	SetHead(uint64) error
}

// BlockChain encapsulates functions required to sync a (full or snap) blockchain.
type BlockChain interface {
	LightChain

	// HasBlock verifies a block's presence in the local chain.
	HasBlock(common.Hash, uint64) bool

	// HasFastBlock verifies a snap block's presence in the local chain.
	HasFastBlock(common.Hash, uint64) bool

	// GetBlockByHash retrieves a block from the local chain.
	GetBlockByHash(common.Hash) *types.Block

	// CurrentBlock retrieves the head block from the local chain.
	CurrentBlock() *types.Block

	// CurrentFastBlock retrieves the head snap block from the local chain.
	CurrentFastBlock() *types.Block

	// SnapSyncCommitHead directly commits the head block to a certain entity.
	SnapSyncCommitHead(common.Hash) error

	// InsertChain inserts a batch of blocks into the local chain.
	InsertChain(types.Blocks) (int, error)

	// InsertReceiptChain inserts a batch of receipts into the local chain.
	InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error)

	// Snapshots returns the blockchain snapshot tree to pause it during sync.
	Snapshots() *snapshot.Tree
}

// New creates a new downloader to fetch hashes and blocks from remote peers.
208 func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader { 209 if lightchain == nil { 210 lightchain = chain 211 } 212 dl := &Downloader{ 213 stateDB: stateDb, 214 mux: mux, 215 checkpoint: checkpoint, 216 queue: newQueue(blockCacheMaxItems, blockCacheInitialItems), 217 peers: newPeerSet(), 218 blockchain: chain, 219 lightchain: lightchain, 220 dropPeer: dropPeer, 221 headerProcCh: make(chan *headerTask, 1), 222 quitCh: make(chan struct{}), 223 SnapSyncer: snap.NewSyncer(stateDb), 224 stateSyncStart: make(chan *stateSync), 225 } 226 dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success)) 227 228 go dl.stateFetcher() 229 return dl 230 } 231 232 // Progress retrieves the synchronisation boundaries, specifically the origin 233 // block where synchronisation started at (may have failed/suspended); the block 234 // or header sync is currently at; and the latest known block which the sync targets. 235 // 236 // In addition, during the state download phase of snap synchronisation the number 237 // of processed and the total number of known states are also returned. Otherwise 238 // these are zero. 239 func (d *Downloader) Progress() ethereum.SyncProgress { 240 // Lock the current stats and return the progress 241 d.syncStatsLock.RLock() 242 defer d.syncStatsLock.RUnlock() 243 244 current := uint64(0) 245 mode := d.getMode() 246 switch { 247 case d.blockchain != nil && mode == FullSync: 248 current = d.blockchain.CurrentBlock().NumberU64() 249 case d.blockchain != nil && mode == SnapSync: 250 current = d.blockchain.CurrentFastBlock().NumberU64() 251 case d.lightchain != nil: 252 current = d.lightchain.CurrentHeader().Number.Uint64() 253 default: 254 log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode) 255 } 256 progress, pending := d.SnapSyncer.Progress() 257 258 return ethereum.SyncProgress{ 259 StartingBlock: d.syncStatsChainOrigin, 260 CurrentBlock: current, 261 HighestBlock: d.syncStatsChainHeight, 262 SyncedAccounts: progress.AccountSynced, 263 SyncedAccountBytes: uint64(progress.AccountBytes), 264 SyncedBytecodes: progress.BytecodeSynced, 265 SyncedBytecodeBytes: uint64(progress.BytecodeBytes), 266 SyncedStorage: progress.StorageSynced, 267 SyncedStorageBytes: uint64(progress.StorageBytes), 268 HealedTrienodes: progress.TrienodeHealSynced, 269 HealedTrienodeBytes: uint64(progress.TrienodeHealBytes), 270 HealedBytecodes: progress.BytecodeHealSynced, 271 HealedBytecodeBytes: uint64(progress.BytecodeHealBytes), 272 HealingTrienodes: pending.TrienodeHeal, 273 HealingBytecode: pending.BytecodeHeal, 274 } 275 } 276 277 // Synchronising returns whether the downloader is currently retrieving blocks. 278 func (d *Downloader) Synchronising() bool { 279 return atomic.LoadInt32(&d.synchronising) > 0 280 } 281 282 // RegisterPeer injects a new download peer into the set of block source to be 283 // used for fetching hashes and blocks from. 
284 func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { 285 var logger log.Logger 286 if len(id) < 16 { 287 // Tests use short IDs, don't choke on them 288 logger = log.New("peer", id) 289 } else { 290 logger = log.New("peer", id[:8]) 291 } 292 logger.Trace("Registering sync peer") 293 if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil { 294 logger.Error("Failed to register sync peer", "err", err) 295 return err 296 } 297 return nil 298 } 299 300 // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer. 301 func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error { 302 return d.RegisterPeer(id, version, &lightPeerWrapper{peer}) 303 } 304 305 // UnregisterPeer remove a peer from the known list, preventing any action from 306 // the specified peer. An effort is also made to return any pending fetches into 307 // the queue. 308 func (d *Downloader) UnregisterPeer(id string) error { 309 // Unregister the peer from the active peer set and revoke any fetch tasks 310 var logger log.Logger 311 if len(id) < 16 { 312 // Tests use short IDs, don't choke on them 313 logger = log.New("peer", id) 314 } else { 315 logger = log.New("peer", id[:8]) 316 } 317 logger.Trace("Unregistering sync peer") 318 if err := d.peers.Unregister(id); err != nil { 319 logger.Error("Failed to unregister sync peer", "err", err) 320 return err 321 } 322 d.queue.Revoke(id) 323 324 return nil 325 } 326 327 // LegacySync tries to sync up our local block chain with a remote peer, both 328 // adding various sanity checks as well as wrapping it with various log entries. 329 func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, mode SyncMode) error { 330 err := d.synchronise(id, head, td, ttd, mode, false, nil) 331 332 switch err { 333 case nil, errBusy, errCanceled: 334 return err 335 } 336 if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) || 337 errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) || 338 errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) { 339 log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) 340 if d.dropPeer == nil { 341 // The dropPeer method is nil when `--copydb` is used for a local copy. 342 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 343 log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id) 344 } else { 345 d.dropPeer(id) 346 } 347 return err 348 } 349 if errors.Is(err, ErrMergeTransition) { 350 return err // This is an expected fault, don't keep printing it in a spin-loop 351 } 352 log.Warn("Synchronisation failed, retrying", "err", err) 353 return err 354 } 355 356 // synchronise will select the peer and use it for synchronising. If an empty string is given 357 // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the 358 // checks fail an error will be returned. This method is synchronous 359 func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, mode SyncMode, beaconMode bool, beaconPing chan struct{}) error { 360 // The beacon header syncer is async. It will start this synchronization and 361 // will continue doing other tasks. 
	// However, if synchronization needs to be
	// cancelled, the syncer needs to know if we reached the startup point (and
	// inited the cancel channel) or not yet. Make sure that we'll signal even in
	// case of a failure.
	if beaconPing != nil {
		defer func() {
			select {
			case <-beaconPing: // already notified
			default:
				close(beaconPing) // weird exit condition, notify that it's safe to cancel (the nothing)
			}
		}()
	}
	// Mock out the synchronisation if testing
	if d.synchroniseMock != nil {
		return d.synchroniseMock(id, hash)
	}
	// Make sure only one goroutine is ever allowed past this point at once
	if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) {
		return errBusy
	}
	defer atomic.StoreInt32(&d.synchronising, 0)

	// Post a user notification of the sync (only once per session)
	if atomic.CompareAndSwapInt32(&d.notified, 0, 1) {
		log.Info("Block synchronisation started")
	}
	if mode == SnapSync {
		// Snap sync uses the snapshot namespace to store potentially flakey data until
		// sync completely heals and finishes. Pause snapshot maintenance in the mean-
		// time to prevent access.
		if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests
			snapshots.Disable()
		}
	}
	// Reset the queue, peer set and wake channels to clean any internal leftover state
	d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems)
	d.peers.Reset()

	for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
		select {
		case <-ch:
		default:
		}
	}
	for empty := false; !empty; {
		select {
		case <-d.headerProcCh:
		default:
			empty = true
		}
	}
	// Create cancel channel for aborting mid-flight and mark the master peer
	d.cancelLock.Lock()
	d.cancelCh = make(chan struct{})
	d.cancelPeer = id
	d.cancelLock.Unlock()

	defer d.Cancel() // No matter what, we can't leave the cancel channel open

	// Atomically set the requested sync mode
	atomic.StoreUint32(&d.mode, uint32(mode))

	// Retrieve the origin peer and initiate the downloading process
	var p *peerConnection
	if !beaconMode { // Beacon mode doesn't need a peer to sync from
		p = d.peers.Peer(id)
		if p == nil {
			return errUnknownPeer
		}
	}
	if beaconPing != nil {
		close(beaconPing)
	}
	return d.syncWithPeer(p, hash, td, ttd, beaconMode)
}

func (d *Downloader) getMode() SyncMode {
	return SyncMode(atomic.LoadUint32(&d.mode))
}

// syncWithPeer starts a block synchronization based on the hash chain from the
// specified peer and head hash.
func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *big.Int, beaconMode bool) (err error) {
	d.mux.Post(StartEvent{})
	defer func() {
		// reset on error
		if err != nil {
			d.mux.Post(FailedEvent{err})
		} else {
			latest := d.lightchain.CurrentHeader()
			d.mux.Post(DoneEvent{latest})
		}
	}()
	mode := d.getMode()

	if !beaconMode {
		log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode)
	} else {
		log.Debug("Backfilling with the network", "mode", mode)
	}
	defer func(start time.Time) {
		log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start)))
	}(time.Now())

	// Look up the sync boundaries: the common ancestor and the target block
	var latest, pivot *types.Header
	if !beaconMode {
		// In legacy mode, use the master peer to retrieve the headers from
		latest, pivot, err = d.fetchHead(p)
		if err != nil {
			return err
		}
	} else {
		// In beacon mode, use the skeleton chain to retrieve the headers from
		latest, _, err = d.skeleton.Bounds()
		if err != nil {
			return err
		}
		if latest.Number.Uint64() > uint64(fsMinFullBlocks) {
			number := latest.Number.Uint64() - uint64(fsMinFullBlocks)

			// Retrieve the pivot header from the skeleton chain segment but
			// fall back to the local chain if it's not found in skeleton space.
			if pivot = d.skeleton.Header(number); pivot == nil {
				_, oldest, _ := d.skeleton.Bounds() // error is already checked
				if number < oldest.Number.Uint64() {
					count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks
					headers := d.readHeaderRange(oldest, count)
					if len(headers) == count {
						pivot = headers[len(headers)-1]
						log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number)
					}
				}
			}
			// Print an error log and return directly in case the pivot header
			// is still not found. It means the skeleton chain is not linked
			// correctly with the local chain.
			if pivot == nil {
				log.Error("Pivot header is not found", "number", number)
				return errNoPivotHeader
			}
		}
	}
	// If no pivot block was returned, the head is below the min full block
	// threshold (i.e. new chain). In that case we won't really snap sync
	// anyway, but still need a valid pivot block to avoid some code hitting
	// nil panics on access.
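	// (For orientation: in both legacy and beacon mode the pivot selected
	// above sits fsMinFullBlocks (64) blocks behind the remote head, so a
	// chain whose head is still below block 64 has no such header yet and the
	// local head is used as a stand-in pivot instead.)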
509 if mode == SnapSync && pivot == nil { 510 pivot = d.blockchain.CurrentBlock().Header() 511 } 512 height := latest.Number.Uint64() 513 514 var origin uint64 515 if !beaconMode { 516 // In legacy mode, reach out to the network and find the ancestor 517 origin, err = d.findAncestor(p, latest) 518 if err != nil { 519 return err 520 } 521 } else { 522 // In beacon mode, use the skeleton chain for the ancestor lookup 523 origin, err = d.findBeaconAncestor() 524 if err != nil { 525 return err 526 } 527 } 528 d.syncStatsLock.Lock() 529 if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { 530 d.syncStatsChainOrigin = origin 531 } 532 d.syncStatsChainHeight = height 533 d.syncStatsLock.Unlock() 534 535 // Ensure our origin point is below any snap sync pivot point 536 if mode == SnapSync { 537 if height <= uint64(fsMinFullBlocks) { 538 origin = 0 539 } else { 540 pivotNumber := pivot.Number.Uint64() 541 if pivotNumber <= origin { 542 origin = pivotNumber - 1 543 } 544 // Write out the pivot into the database so a rollback beyond it will 545 // reenable snap sync 546 rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber) 547 } 548 } 549 d.committed = 1 550 if mode == SnapSync && pivot.Number.Uint64() != 0 { 551 d.committed = 0 552 } 553 if mode == SnapSync { 554 // Set the ancient data limitation. 555 // If we are running snap sync, all block data older than ancientLimit will be 556 // written to the ancient store. More recent data will be written to the active 557 // database and will wait for the freezer to migrate. 558 // 559 // If there is a checkpoint available, then calculate the ancientLimit through 560 // that. Otherwise calculate the ancient limit through the advertised height 561 // of the remote peer. 562 // 563 // The reason for picking checkpoint first is that a malicious peer can give us 564 // a fake (very high) height, forcing the ancient limit to also be very high. 565 // The peer would start to feed us valid blocks until head, resulting in all of 566 // the blocks might be written into the ancient store. A following mini-reorg 567 // could cause issues. 568 if d.checkpoint != 0 && d.checkpoint > fullMaxForkAncestry+1 { 569 d.ancientLimit = d.checkpoint 570 } else if height > fullMaxForkAncestry+1 { 571 d.ancientLimit = height - fullMaxForkAncestry - 1 572 } else { 573 d.ancientLimit = 0 574 } 575 frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here. 576 577 // If a part of blockchain data has already been written into active store, 578 // disable the ancient style insertion explicitly. 579 if origin >= frozen && frozen != 0 { 580 d.ancientLimit = 0 581 log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1) 582 } else if d.ancientLimit > 0 { 583 log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit) 584 } 585 // Rewind the ancient store and blockchain if reorg happens. 
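		// For example (hypothetical numbers): if the freezer already holds
		// blocks 0..1499 (frozen == 1500) but the common ancestor with the
		// sync target is block 1000, the chain is rewound to 1000 so that the
		// ancient store and the freshly downloaded segment cannot diverge.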
586 if origin+1 < frozen { 587 if err := d.lightchain.SetHead(origin); err != nil { 588 return err 589 } 590 } 591 } 592 // Initiate the sync using a concurrent header and content retrieval algorithm 593 d.queue.Prepare(origin+1, mode) 594 if d.syncInitHook != nil { 595 d.syncInitHook(origin, height) 596 } 597 var headerFetcher func() error 598 if !beaconMode { 599 // In legacy mode, headers are retrieved from the network 600 headerFetcher = func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) } 601 } else { 602 // In beacon mode, headers are served by the skeleton syncer 603 headerFetcher = func() error { return d.fetchBeaconHeaders(origin + 1) } 604 } 605 fetchers := []func() error{ 606 headerFetcher, // Headers are always retrieved 607 func() error { return d.fetchBodies(origin+1, beaconMode) }, // Bodies are retrieved during normal and snap sync 608 func() error { return d.fetchReceipts(origin+1, beaconMode) }, // Receipts are retrieved during snap sync 609 func() error { return d.processHeaders(origin+1, td, ttd, beaconMode) }, 610 } 611 if mode == SnapSync { 612 d.pivotLock.Lock() 613 d.pivotHeader = pivot 614 d.pivotLock.Unlock() 615 616 fetchers = append(fetchers, func() error { return d.processSnapSyncContent() }) 617 } else if mode == FullSync { 618 fetchers = append(fetchers, func() error { return d.processFullSyncContent(ttd, beaconMode) }) 619 } 620 return d.spawnSync(fetchers) 621 } 622 623 // spawnSync runs d.process and all given fetcher functions to completion in 624 // separate goroutines, returning the first error that appears. 625 func (d *Downloader) spawnSync(fetchers []func() error) error { 626 errc := make(chan error, len(fetchers)) 627 d.cancelWg.Add(len(fetchers)) 628 for _, fn := range fetchers { 629 fn := fn 630 go func() { defer d.cancelWg.Done(); errc <- fn() }() 631 } 632 // Wait for the first error, then terminate the others. 633 var err error 634 for i := 0; i < len(fetchers); i++ { 635 if i == len(fetchers)-1 { 636 // Close the queue when all fetchers have exited. 637 // This will cause the block processor to end when 638 // it has processed the queue. 639 d.queue.Close() 640 } 641 if err = <-errc; err != nil && err != errCanceled { 642 break 643 } 644 } 645 d.queue.Close() 646 d.Cancel() 647 return err 648 } 649 650 // cancel aborts all of the operations and resets the queue. However, cancel does 651 // not wait for the running download goroutines to finish. This method should be 652 // used when cancelling the downloads from inside the downloader. 653 func (d *Downloader) cancel() { 654 // Close the current cancel channel 655 d.cancelLock.Lock() 656 defer d.cancelLock.Unlock() 657 658 if d.cancelCh != nil { 659 select { 660 case <-d.cancelCh: 661 // Channel was already closed 662 default: 663 close(d.cancelCh) 664 } 665 } 666 } 667 668 // Cancel aborts all of the operations and waits for all download goroutines to 669 // finish before returning. 670 func (d *Downloader) Cancel() { 671 d.cancel() 672 d.cancelWg.Wait() 673 } 674 675 // Terminate interrupts the downloader, canceling all pending operations. 676 // The downloader cannot be reused after calling Terminate. 
677 func (d *Downloader) Terminate() { 678 // Close the termination channel (make sure double close is allowed) 679 d.quitLock.Lock() 680 select { 681 case <-d.quitCh: 682 default: 683 close(d.quitCh) 684 685 // Terminate the internal beacon syncer 686 d.skeleton.Terminate() 687 } 688 d.quitLock.Unlock() 689 690 // Cancel any pending download requests 691 d.Cancel() 692 } 693 694 // fetchHead retrieves the head header and prior pivot block (if available) from 695 // a remote peer. 696 func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) { 697 p.log.Debug("Retrieving remote chain head") 698 mode := d.getMode() 699 700 // Request the advertised remote head block and wait for the response 701 latest, _ := p.peer.Head() 702 fetch := 1 703 if mode == SnapSync { 704 fetch = 2 // head + pivot headers 705 } 706 headers, hashes, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true) 707 if err != nil { 708 return nil, nil, err 709 } 710 // Make sure the peer gave us at least one and at most the requested headers 711 if len(headers) == 0 || len(headers) > fetch { 712 return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch) 713 } 714 // The first header needs to be the head, validate against the checkpoint 715 // and request. If only 1 header was returned, make sure there's no pivot 716 // or there was not one requested. 717 head = headers[0] 718 if (mode == SnapSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint { 719 return nil, nil, fmt.Errorf("%w: remote head %d below checkpoint %d", errUnsyncedPeer, head.Number, d.checkpoint) 720 } 721 if len(headers) == 1 { 722 if mode == SnapSync && head.Number.Uint64() > uint64(fsMinFullBlocks) { 723 return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer) 724 } 725 p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", hashes[0]) 726 return head, nil, nil 727 } 728 // At this point we have 2 headers in total and the first is the 729 // validated head of the chain. Check the pivot number and return, 730 pivot = headers[1] 731 if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) { 732 return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks)) 733 } 734 return head, pivot, nil 735 } 736 737 // calculateRequestSpan calculates what headers to request from a peer when trying to determine the 738 // common ancestor. 739 // It returns parameters to be used for peer.RequestHeadersByNumber: 740 // from - starting block number 741 // count - number of headers to request 742 // skip - number of headers to skip 743 // and also returns 'max', the last block which is expected to be returned by the remote peers, 744 // given the (from,count,skip) 745 func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) { 746 var ( 747 from int 748 count int 749 MaxCount = MaxHeaderFetch / 16 750 ) 751 // requestHead is the highest block that we will ask for. 
If requestHead is not offset, 752 // the highest block that we will get is 16 blocks back from head, which means we 753 // will fetch 14 or 15 blocks unnecessarily in the case the height difference 754 // between us and the peer is 1-2 blocks, which is most common 755 requestHead := int(remoteHeight) - 1 756 if requestHead < 0 { 757 requestHead = 0 758 } 759 // requestBottom is the lowest block we want included in the query 760 // Ideally, we want to include the one just below our own head 761 requestBottom := int(localHeight - 1) 762 if requestBottom < 0 { 763 requestBottom = 0 764 } 765 totalSpan := requestHead - requestBottom 766 span := 1 + totalSpan/MaxCount 767 if span < 2 { 768 span = 2 769 } 770 if span > 16 { 771 span = 16 772 } 773 774 count = 1 + totalSpan/span 775 if count > MaxCount { 776 count = MaxCount 777 } 778 if count < 2 { 779 count = 2 780 } 781 from = requestHead - (count-1)*span 782 if from < 0 { 783 from = 0 784 } 785 max := from + (count-1)*span 786 return int64(from), count, span - 1, uint64(max) 787 } 788 789 // findAncestor tries to locate the common ancestor link of the local chain and 790 // a remote peers blockchain. In the general case when our node was in sync and 791 // on the correct chain, checking the top N links should already get us a match. 792 // In the rare scenario when we ended up on a long reorganisation (i.e. none of 793 // the head links match), we do a binary search to find the common ancestor. 794 func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) { 795 // Figure out the valid ancestor range to prevent rewrite attacks 796 var ( 797 floor = int64(-1) 798 localHeight uint64 799 remoteHeight = remoteHeader.Number.Uint64() 800 ) 801 mode := d.getMode() 802 switch mode { 803 case FullSync: 804 localHeight = d.blockchain.CurrentBlock().NumberU64() 805 case SnapSync: 806 localHeight = d.blockchain.CurrentFastBlock().NumberU64() 807 default: 808 localHeight = d.lightchain.CurrentHeader().Number.Uint64() 809 } 810 p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight) 811 812 // Recap floor value for binary search 813 maxForkAncestry := fullMaxForkAncestry 814 if d.getMode() == LightSync { 815 maxForkAncestry = lightMaxForkAncestry 816 } 817 if localHeight >= maxForkAncestry { 818 // We're above the max reorg threshold, find the earliest fork point 819 floor = int64(localHeight - maxForkAncestry) 820 } 821 // If we're doing a light sync, ensure the floor doesn't go below the CHT, as 822 // all headers before that point will be missing. 823 if mode == LightSync { 824 // If we don't know the current CHT position, find it 825 if d.genesis == 0 { 826 header := d.lightchain.CurrentHeader() 827 for header != nil { 828 d.genesis = header.Number.Uint64() 829 if floor >= int64(d.genesis)-1 { 830 break 831 } 832 header = d.lightchain.GetHeaderByHash(header.ParentHash) 833 } 834 } 835 // We already know the "genesis" block number, cap floor to that 836 if floor < int64(d.genesis)-1 { 837 floor = int64(d.genesis) - 1 838 } 839 } 840 841 ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor) 842 if err == nil { 843 return ancestor, nil 844 } 845 // The returned error was not nil. 846 // If the error returned does not reflect that a common ancestor was not found, return it. 847 // If the error reflects that a common ancestor was not found, continue to binary search, 848 // where the error value will be reassigned. 
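	// Rough illustration (hypothetical numbers): with localHeight 4000 and
	// remoteHeight 4100, the span search above asks for roughly a dozen
	// headers spaced evenly between the two heads; only if none of them is
	// known locally does the binary search below bisect the (floor, remote]
	// range with single-header requests.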
849 if !errors.Is(err, errNoAncestorFound) { 850 return 0, err 851 } 852 853 ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor) 854 if err != nil { 855 return 0, err 856 } 857 return ancestor, nil 858 } 859 860 func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (uint64, error) { 861 from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight) 862 863 p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip) 864 headers, hashes, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false) 865 if err != nil { 866 return 0, err 867 } 868 // Wait for the remote response to the head fetch 869 number, hash := uint64(0), common.Hash{} 870 871 // Make sure the peer actually gave something valid 872 if len(headers) == 0 { 873 p.log.Warn("Empty head header set") 874 return 0, errEmptyHeaderSet 875 } 876 // Make sure the peer's reply conforms to the request 877 for i, header := range headers { 878 expectNumber := from + int64(i)*int64(skip+1) 879 if number := header.Number.Int64(); number != expectNumber { 880 p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number) 881 return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering")) 882 } 883 } 884 // Check if a common ancestor was found 885 for i := len(headers) - 1; i >= 0; i-- { 886 // Skip any headers that underflow/overflow our requested set 887 if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max { 888 continue 889 } 890 // Otherwise check if we already know the header or not 891 h := hashes[i] 892 n := headers[i].Number.Uint64() 893 894 var known bool 895 switch mode { 896 case FullSync: 897 known = d.blockchain.HasBlock(h, n) 898 case SnapSync: 899 known = d.blockchain.HasFastBlock(h, n) 900 default: 901 known = d.lightchain.HasHeader(h, n) 902 } 903 if known { 904 number, hash = n, h 905 break 906 } 907 } 908 // If the head fetch already found an ancestor, return 909 if hash != (common.Hash{}) { 910 if int64(number) <= floor { 911 p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor) 912 return 0, errInvalidAncestor 913 } 914 p.log.Debug("Found common ancestor", "number", number, "hash", hash) 915 return number, nil 916 } 917 return 0, errNoAncestorFound 918 } 919 920 func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (uint64, error) { 921 hash := common.Hash{} 922 923 // Ancestor not found, we need to binary search over our chain 924 start, end := uint64(0), remoteHeight 925 if floor > 0 { 926 start = uint64(floor) 927 } 928 p.log.Trace("Binary searching for common ancestor", "start", start, "end", end) 929 930 for start+1 < end { 931 // Split our chain interval in two, and request the hash to cross check 932 check := (start + end) / 2 933 934 headers, hashes, err := d.fetchHeadersByNumber(p, check, 1, 0, false) 935 if err != nil { 936 return 0, err 937 } 938 // Make sure the peer actually gave something valid 939 if len(headers) != 1 { 940 p.log.Warn("Multiple headers for single request", "headers", len(headers)) 941 return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers)) 942 } 943 // Modify the search interval based on the response 944 h := hashes[0] 945 n := headers[0].Number.Uint64() 946 947 var known bool 948 switch mode { 949 case FullSync: 950 known 
= d.blockchain.HasBlock(h, n) 951 case SnapSync: 952 known = d.blockchain.HasFastBlock(h, n) 953 default: 954 known = d.lightchain.HasHeader(h, n) 955 } 956 if !known { 957 end = check 958 continue 959 } 960 header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists 961 if header.Number.Uint64() != check { 962 p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) 963 return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number) 964 } 965 start = check 966 hash = h 967 } 968 // Ensure valid ancestry and return 969 if int64(start) <= floor { 970 p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor) 971 return 0, errInvalidAncestor 972 } 973 p.log.Debug("Found common ancestor", "number", start, "hash", hash) 974 return start, nil 975 } 976 977 // fetchHeaders keeps retrieving headers concurrently from the number 978 // requested, until no more are returned, potentially throttling on the way. To 979 // facilitate concurrency but still protect against malicious nodes sending bad 980 // headers, we construct a header chain skeleton using the "origin" peer we are 981 // syncing with, and fill in the missing headers using anyone else. Headers from 982 // other peers are only accepted if they map cleanly to the skeleton. If no one 983 // can fill in the skeleton - not even the origin peer - it's assumed invalid and 984 // the origin is dropped. 985 func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) error { 986 p.log.Debug("Directing header downloads", "origin", from) 987 defer p.log.Debug("Header download terminated") 988 989 // Start pulling the header chain skeleton until all is done 990 var ( 991 skeleton = true // Skeleton assembly phase or finishing up 992 pivoting = false // Whether the next request is pivot verification 993 ancestor = from 994 mode = d.getMode() 995 ) 996 for { 997 // Pull the next batch of headers, it either: 998 // - Pivot check to see if the chain moved too far 999 // - Skeleton retrieval to permit concurrent header fetches 1000 // - Full header retrieval if we're near the chain head 1001 var ( 1002 headers []*types.Header 1003 hashes []common.Hash 1004 err error 1005 ) 1006 switch { 1007 case pivoting: 1008 d.pivotLock.RLock() 1009 pivot := d.pivotHeader.Number.Uint64() 1010 d.pivotLock.RUnlock() 1011 1012 p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks)) 1013 headers, hashes, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep 1014 1015 case skeleton: 1016 p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) 1017 headers, hashes, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) 1018 1019 default: 1020 p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) 1021 headers, hashes, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false) 1022 } 1023 switch err { 1024 case nil: 1025 // Headers retrieved, continue with processing 1026 1027 case errCanceled: 1028 // Sync cancelled, no issue, propagate up 1029 return err 1030 1031 default: 1032 // Header retrieval either timed out, or the peer failed in some strange way 1033 // (e.g. disconnect). 
Consider the master peer bad and drop 1034 d.dropPeer(p.id) 1035 1036 // Finish the sync gracefully instead of dumping the gathered data though 1037 for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { 1038 select { 1039 case ch <- false: 1040 case <-d.cancelCh: 1041 } 1042 } 1043 select { 1044 case d.headerProcCh <- nil: 1045 case <-d.cancelCh: 1046 } 1047 return fmt.Errorf("%w: header request failed: %v", errBadPeer, err) 1048 } 1049 // If the pivot is being checked, move if it became stale and run the real retrieval 1050 var pivot uint64 1051 1052 d.pivotLock.RLock() 1053 if d.pivotHeader != nil { 1054 pivot = d.pivotHeader.Number.Uint64() 1055 } 1056 d.pivotLock.RUnlock() 1057 1058 if pivoting { 1059 if len(headers) == 2 { 1060 if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want { 1061 log.Warn("Peer sent invalid next pivot", "have", have, "want", want) 1062 return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want) 1063 } 1064 if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want { 1065 log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want) 1066 return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want) 1067 } 1068 log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number) 1069 pivot = headers[0].Number.Uint64() 1070 1071 d.pivotLock.Lock() 1072 d.pivotHeader = headers[0] 1073 d.pivotLock.Unlock() 1074 1075 // Write out the pivot into the database so a rollback beyond 1076 // it will reenable snap sync and update the state root that 1077 // the state syncer will be downloading. 1078 rawdb.WriteLastPivotNumber(d.stateDB, pivot) 1079 } 1080 // Disable the pivot check and fetch the next batch of headers 1081 pivoting = false 1082 continue 1083 } 1084 // If the skeleton's finished, pull any remaining head headers directly from the origin 1085 if skeleton && len(headers) == 0 { 1086 // A malicious node might withhold advertised headers indefinitely 1087 if from+uint64(MaxHeaderFetch)-1 <= head { 1088 p.log.Warn("Peer withheld skeleton headers", "advertised", head, "withheld", from+uint64(MaxHeaderFetch)-1) 1089 return fmt.Errorf("%w: withheld skeleton headers: advertised %d, withheld #%d", errStallingPeer, head, from+uint64(MaxHeaderFetch)-1) 1090 } 1091 p.log.Debug("No skeleton, fetching headers directly") 1092 skeleton = false 1093 continue 1094 } 1095 // If no more headers are inbound, notify the content fetchers and return 1096 if len(headers) == 0 { 1097 // Don't abort header fetches while the pivot is downloading 1098 if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { 1099 p.log.Debug("No headers, waiting for pivot commit") 1100 select { 1101 case <-time.After(fsHeaderContCheck): 1102 continue 1103 case <-d.cancelCh: 1104 return errCanceled 1105 } 1106 } 1107 // Pivot done (or not in snap sync) and no more headers, terminate the process 1108 p.log.Debug("No more headers available") 1109 select { 1110 case d.headerProcCh <- nil: 1111 return nil 1112 case <-d.cancelCh: 1113 return errCanceled 1114 } 1115 } 1116 // If we received a skeleton batch, resolve internals concurrently 1117 var progressed bool 1118 if skeleton { 1119 filled, hashset, proced, err := d.fillHeaderSkeleton(from, headers) 1120 if err != nil { 1121 p.log.Debug("Skeleton chain invalid", "err", err) 1122 return fmt.Errorf("%w: %v", errInvalidChain, err) 1123 } 1124 headers = filled[proced:] 1125 
			hashes = hashset[proced:]

			progressed = proced > 0
			from += uint64(proced)
		} else {
			// A malicious node might withhold advertised headers indefinitely
			if n := len(headers); n < MaxHeaderFetch && headers[n-1].Number.Uint64() < head {
				p.log.Warn("Peer withheld headers", "advertised", head, "delivered", headers[n-1].Number.Uint64())
				return fmt.Errorf("%w: withheld headers: advertised %d, delivered %d", errStallingPeer, head, headers[n-1].Number.Uint64())
			}
			// If we're closing in on the chain head, but haven't yet reached it, delay
			// the last few headers so mini reorgs on the head don't cause invalid hash
			// chain errors.
			if n := len(headers); n > 0 {
				// Retrieve the current head we're at
				var head uint64
				if mode == LightSync {
					head = d.lightchain.CurrentHeader().Number.Uint64()
				} else {
					head = d.blockchain.CurrentFastBlock().NumberU64()
					if full := d.blockchain.CurrentBlock().NumberU64(); head < full {
						head = full
					}
				}
				// If the head is below the common ancestor, we're actually deduplicating
				// already existing chain segments, so use the ancestor as the fake head.
				// Otherwise, we might end up delaying header deliveries pointlessly.
				if head < ancestor {
					head = ancestor
				}
				// If the head is way older than this batch, delay the last few headers
				if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() {
					delay := reorgProtHeaderDelay
					if delay > n {
						delay = n
					}
					headers = headers[:n-delay]
					hashes = hashes[:n-delay]
				}
			}
		}
		// If no headers have been delivered, or all of them have been delayed,
		// sleep a bit and retry. Take care with headers already consumed during
		// skeleton filling
		if len(headers) == 0 && !progressed {
			p.log.Trace("All headers delayed, waiting")
			select {
			case <-time.After(fsHeaderContCheck):
				continue
			case <-d.cancelCh:
				return errCanceled
			}
		}
		// Insert any remaining new headers and fetch the next batch
		if len(headers) > 0 {
			p.log.Trace("Scheduling new headers", "count", len(headers), "from", from)
			select {
			case d.headerProcCh <- &headerTask{
				headers: headers,
				hashes:  hashes,
			}:
			case <-d.cancelCh:
				return errCanceled
			}
			from += uint64(len(headers))
		}
		// If we're still skeleton filling snap sync, check pivot staleness
		// before continuing to the next skeleton filling
		if skeleton && pivot > 0 {
			pivoting = true
		}
	}
}

// fillHeaderSkeleton concurrently retrieves headers from all our available peers
// and maps them to the provided skeleton header chain.
//
// Any partial results from the beginning of the skeleton are (if possible) forwarded
// immediately to the header processor to keep the rest of the pipeline full even
// in the case of header stalls.
//
// The method returns the entire filled skeleton and also the number of headers
// already forwarded for processing.
1208 func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, []common.Hash, int, error) { 1209 log.Debug("Filling up skeleton", "from", from) 1210 d.queue.ScheduleSkeleton(from, skeleton) 1211 1212 err := d.concurrentFetch((*headerQueue)(d), false) 1213 if err != nil { 1214 log.Debug("Skeleton fill failed", "err", err) 1215 } 1216 filled, hashes, proced := d.queue.RetrieveHeaders() 1217 if err == nil { 1218 log.Debug("Skeleton fill succeeded", "filled", len(filled), "processed", proced) 1219 } 1220 return filled, hashes, proced, err 1221 } 1222 1223 // fetchBodies iteratively downloads the scheduled block bodies, taking any 1224 // available peers, reserving a chunk of blocks for each, waiting for delivery 1225 // and also periodically checking for timeouts. 1226 func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error { 1227 log.Debug("Downloading block bodies", "origin", from) 1228 err := d.concurrentFetch((*bodyQueue)(d), beaconMode) 1229 1230 log.Debug("Block body download terminated", "err", err) 1231 return err 1232 } 1233 1234 // fetchReceipts iteratively downloads the scheduled block receipts, taking any 1235 // available peers, reserving a chunk of receipts for each, waiting for delivery 1236 // and also periodically checking for timeouts. 1237 func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error { 1238 log.Debug("Downloading receipts", "origin", from) 1239 err := d.concurrentFetch((*receiptQueue)(d), beaconMode) 1240 1241 log.Debug("Receipt download terminated", "err", err) 1242 return err 1243 } 1244 1245 // processHeaders takes batches of retrieved headers from an input channel and 1246 // keeps processing and scheduling them into the header chain and downloader's 1247 // queue until the stream ends or a failure occurs. 
1248 func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error { 1249 // Keep a count of uncertain headers to roll back 1250 var ( 1251 rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis) 1252 rollbackErr error 1253 mode = d.getMode() 1254 ) 1255 defer func() { 1256 if rollback > 0 { 1257 lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 1258 if mode != LightSync { 1259 lastFastBlock = d.blockchain.CurrentFastBlock().Number() 1260 lastBlock = d.blockchain.CurrentBlock().Number() 1261 } 1262 if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block 1263 // We're already unwinding the stack, only print the error to make it more visible 1264 log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err) 1265 } 1266 curFastBlock, curBlock := common.Big0, common.Big0 1267 if mode != LightSync { 1268 curFastBlock = d.blockchain.CurrentFastBlock().Number() 1269 curBlock = d.blockchain.CurrentBlock().Number() 1270 } 1271 log.Warn("Rolled back chain segment", 1272 "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), 1273 "snap", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), 1274 "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr) 1275 } 1276 }() 1277 // Wait for batches of headers to process 1278 gotHeaders := false 1279 1280 for { 1281 select { 1282 case <-d.cancelCh: 1283 rollbackErr = errCanceled 1284 return errCanceled 1285 1286 case task := <-d.headerProcCh: 1287 // Terminate header processing if we synced up 1288 if task == nil || len(task.headers) == 0 { 1289 // Notify everyone that headers are fully processed 1290 for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { 1291 select { 1292 case ch <- false: 1293 case <-d.cancelCh: 1294 } 1295 } 1296 // If we're in legacy sync mode, we need to check total difficulty 1297 // violations from malicious peers. That is not needed in beacon 1298 // mode and we can skip to terminating sync. 1299 if !beaconMode { 1300 // If no headers were retrieved at all, the peer violated its TD promise that it had a 1301 // better chain compared to ours. The only exception is if its promised blocks were 1302 // already imported by other means (e.g. fetcher): 1303 // 1304 // R <remote peer>, L <local node>: Both at block 10 1305 // R: Mine block 11, and propagate it to L 1306 // L: Queue block 11 for import 1307 // L: Notice that R's head and TD increased compared to ours, start sync 1308 // L: Import of block 11 finishes 1309 // L: Sync begins, and finds common ancestor at 11 1310 // L: Request new headers up from 11 (R's TD was higher, it must have something) 1311 // R: Nothing to give 1312 if mode != LightSync { 1313 head := d.blockchain.CurrentBlock() 1314 if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 { 1315 return errStallingPeer 1316 } 1317 } 1318 // If snap or light syncing, ensure promised headers are indeed delivered. This is 1319 // needed to detect scenarios where an attacker feeds a bad pivot and then bails out 1320 // of delivering the post-pivot blocks that would flag the invalid content. 1321 // 1322 // This check cannot be executed "as is" for full imports, since blocks may still be 1323 // queued for processing when the header download completes. 
However, as long as the 1324 // peer gave us something useful, we're already happy/progressed (above check). 1325 if mode == SnapSync || mode == LightSync { 1326 head := d.lightchain.CurrentHeader() 1327 if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { 1328 return errStallingPeer 1329 } 1330 } 1331 } 1332 // Disable any rollback and return 1333 rollback = 0 1334 return nil 1335 } 1336 // Otherwise split the chunk of headers into batches and process them 1337 headers, hashes := task.headers, task.hashes 1338 1339 gotHeaders = true 1340 for len(headers) > 0 { 1341 // Terminate if something failed in between processing chunks 1342 select { 1343 case <-d.cancelCh: 1344 rollbackErr = errCanceled 1345 return errCanceled 1346 default: 1347 } 1348 // Select the next chunk of headers to import 1349 limit := maxHeadersProcess 1350 if limit > len(headers) { 1351 limit = len(headers) 1352 } 1353 chunkHeaders := headers[:limit] 1354 chunkHashes := hashes[:limit] 1355 1356 // In case of header only syncing, validate the chunk immediately 1357 if mode == SnapSync || mode == LightSync { 1358 // If we're importing pure headers, verify based on their recentness 1359 var pivot uint64 1360 1361 d.pivotLock.RLock() 1362 if d.pivotHeader != nil { 1363 pivot = d.pivotHeader.Number.Uint64() 1364 } 1365 d.pivotLock.RUnlock() 1366 1367 frequency := fsHeaderCheckFrequency 1368 if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { 1369 frequency = 1 1370 } 1371 // Although the received headers might be all valid, a legacy 1372 // PoW/PoA sync must not accept post-merge headers. Make sure 1373 // that any transition is rejected at this point. 1374 var ( 1375 rejected []*types.Header 1376 td *big.Int 1377 ) 1378 if !beaconMode && ttd != nil { 1379 td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1) 1380 if td == nil { 1381 // This should never really happen, but handle gracefully for now 1382 log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash) 1383 return fmt.Errorf("%w: parent TD missing", errInvalidChain) 1384 } 1385 for i, header := range chunkHeaders { 1386 td = new(big.Int).Add(td, header.Difficulty) 1387 if td.Cmp(ttd) >= 0 { 1388 // Terminal total difficulty reached, allow the last header in 1389 if new(big.Int).Sub(td, header.Difficulty).Cmp(ttd) < 0 { 1390 chunkHeaders, rejected = chunkHeaders[:i+1], chunkHeaders[i+1:] 1391 if len(rejected) > 0 { 1392 // Make a nicer user log as to the first TD truly rejected 1393 td = new(big.Int).Add(td, rejected[0].Difficulty) 1394 } 1395 } else { 1396 chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:] 1397 } 1398 break 1399 } 1400 } 1401 } 1402 if len(chunkHeaders) > 0 { 1403 if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil { 1404 rollbackErr = err 1405 1406 // If some headers were inserted, track them as uncertain 1407 if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 { 1408 rollback = chunkHeaders[0].Number.Uint64() 1409 } 1410 log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) 1411 return fmt.Errorf("%w: %v", errInvalidChain, err) 1412 } 1413 // All verifications passed, track all headers within the allowed limits 1414 if mode == SnapSync { 1415 head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64() 1416 if head-rollback > uint64(fsHeaderSafetyNet) 
						{
								rollback = head - uint64(fsHeaderSafetyNet)
							} else {
								rollback = 1
							}
						}
					}
					if len(rejected) != 0 {
						// Merge threshold reached, stop importing, but don't roll back
						rollback = 0

						log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd)
						return ErrMergeTransition
					}
				}
				// Unless we're doing light chains, schedule the headers for associated content retrieval
				if mode == FullSync || mode == SnapSync {
					// If we've reached the allowed number of pending headers, stall a bit
					for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders {
						select {
						case <-d.cancelCh:
							rollbackErr = errCanceled
							return errCanceled
						case <-time.After(time.Second):
						}
					}
					// Otherwise insert the headers for content retrieval
					inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin)
					if len(inserts) != len(chunkHeaders) {
						rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunkHeaders))
						return fmt.Errorf("%w: stale headers", errBadPeer)
					}
				}
				headers = headers[limit:]
				hashes = hashes[limit:]
				origin += uint64(limit)
			}
			// Update the highest block number we know if a higher one is found.
			d.syncStatsLock.Lock()
			if d.syncStatsChainHeight < origin {
				d.syncStatsChainHeight = origin - 1
			}
			d.syncStatsLock.Unlock()

			// Signal the content downloaders of the availability of new tasks
			for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} {
				select {
				case ch <- true:
				default:
				}
			}
		}
	}
}

// processFullSyncContent takes fetch results from the queue and imports them into the chain.
func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error {
	for {
		results := d.queue.Results(true)
		if len(results) == 0 {
			return nil
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		// Although the received blocks might be all valid, a legacy PoW/PoA sync
		// must not accept post-merge blocks. Make sure that pre-merge blocks are
		// imported, but post-merge ones are rejected.
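		// Concretely (hypothetical numbers): with ttd = 100, the block whose
		// cumulative total difficulty first reaches 100 is still imported
		// below, while every later block is split off into rejected and the
		// sync is terminated with ErrMergeTransition after the import.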
		var (
			rejected []*fetchResult
			td       *big.Int
		)
		if !beaconMode && ttd != nil {
			td = d.blockchain.GetTd(results[0].Header.ParentHash, results[0].Header.Number.Uint64()-1)
			if td == nil {
				// This should never really happen, but handle gracefully for now
				log.Error("Failed to retrieve parent block TD", "number", results[0].Header.Number.Uint64()-1, "hash", results[0].Header.ParentHash)
				return fmt.Errorf("%w: parent TD missing", errInvalidChain)
			}
			for i, result := range results {
				td = new(big.Int).Add(td, result.Header.Difficulty)
				if td.Cmp(ttd) >= 0 {
					// Terminal total difficulty reached, allow the last block in
					if new(big.Int).Sub(td, result.Header.Difficulty).Cmp(ttd) < 0 {
						results, rejected = results[:i+1], results[i+1:]
						if len(rejected) > 0 {
							// Make a nicer user log as to the first TD truly rejected
							td = new(big.Int).Add(td, rejected[0].Header.Difficulty)
						}
					} else {
						results, rejected = results[:i], results[i:]
					}
					break
				}
			}
		}
		if err := d.importBlockResults(results); err != nil {
			return err
		}
		if len(rejected) != 0 {
			log.Info("Legacy sync reached merge threshold", "number", rejected[0].Header.Number, "hash", rejected[0].Header.Hash(), "td", td, "ttd", ttd)
			return ErrMergeTransition
		}
	}
}

func (d *Downloader) importBlockResults(results []*fetchResult) error {
	// Check for any early termination requests
	if len(results) == 0 {
		return nil
	}
	select {
	case <-d.quitCh:
		return errCancelContentProcessing
	default:
	}
	// Retrieve a batch of results to import
	first, last := results[0].Header, results[len(results)-1].Header
	log.Debug("Inserting downloaded chain", "items", len(results),
		"firstnum", first.Number, "firsthash", first.Hash(),
		"lastnum", last.Number, "lasthash", last.Hash(),
	)
	blocks := make([]*types.Block, len(results))
	for i, result := range results {
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
	}
	// Downloaded blocks are always regarded as trusted after the transition,
	// because the downloaded chain is guided by the consensus layer.
	if index, err := d.blockchain.InsertChain(blocks); err != nil {
		if index < len(results) {
			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
		} else {
			// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
			// when it needs to preprocess blocks to import a sidechain.
			// The importer will put together a new list of blocks to import, which is a superset
			// of the blocks delivered from the downloader, and the indexing will be off.
			log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
		}
		return fmt.Errorf("%w: %v", errInvalidChain, err)
	}
	return nil
}

// processSnapSyncContent takes fetch results from the queue and writes them to the
// database. It also controls the synchronisation of state nodes of the pivot block.
func (d *Downloader) processSnapSyncContent() error {
	// Start syncing state of the reported head block. This should get us most of
	// the state of the pivot block.
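	//
	// The loop below interleaves three tasks: it streams pre-pivot blocks (with
	// receipts) into the database via commitSnapSyncData, it waits for the state
	// sync of the pivot block to finish before committing it as the new head via
	// commitPivotBlock, and it fully imports anything beyond the pivot via
	// importBlockResults. Whenever the pivot header moves, the running state
	// sync is cancelled and restarted on the new pivot's state root.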
	d.pivotLock.RLock()
	sync := d.syncState(d.pivotHeader.Root)
	d.pivotLock.RUnlock()

	defer func() {
		// The `sync` object is replaced every time the pivot moves. We need to
		// defer-close the very last active one, hence the closure here instead
		// of a direct `defer sync.Cancel()`.
		sync.Cancel()
	}()

	closeOnErr := func(s *stateSync) {
		if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled {
			d.queue.Close() // wake up Results
		}
	}
	go closeOnErr(sync)

	// To cater for moving pivot points, track the pivot block and subsequently
	// accumulated download results separately.
	var (
		oldPivot *fetchResult   // Locked in pivot block, might change eventually
		oldTail  []*fetchResult // Downloaded content after the pivot
	)
	for {
		// Wait for the next batch of downloaded data to be available, and if the pivot
		// block became stale, move the goalpost
		results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
		if len(results) == 0 {
			// If pivot sync is done, stop
			if oldPivot == nil {
				return sync.Cancel()
			}
			// If sync failed, stop
			select {
			case <-d.cancelCh:
				sync.Cancel()
				return errCanceled
			default:
			}
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		// If we haven't downloaded the pivot block yet, check pivot staleness
		// notifications from the header downloader
		d.pivotLock.RLock()
		pivot := d.pivotHeader
		d.pivotLock.RUnlock()

		if oldPivot == nil {
			if pivot.Root != sync.root {
				sync.Cancel()
				sync = d.syncState(pivot.Root)

				go closeOnErr(sync)
			}
		} else {
			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
		}
		// Split around the pivot block and process the two sides via snap/full sync
		if atomic.LoadInt32(&d.committed) == 0 {
			latest := results[len(results)-1].Header
			// If the height is above the pivot block by 2 sets, it means the pivot
			// became stale in the network and was garbage collected, so move to a
			// new pivot.
			//
			// Note that `reorgProtHeaderDelay` blocks are withheld; those need to be
			// taken into account, otherwise we detect the pivot move too late and
			// will drop peers due to unavailable state!!!
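			//
			// With the current defaults (fsMinFullBlocks = 64, reorgProtHeaderDelay = 2)
			// this means the pivot is declared stale once the newest queued result is
			// at least 126 blocks above it, and the new pivot is picked 62 blocks
			// behind the newest result.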
			if height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) {
				log.Warn("Pivot became stale, moving", "old", pivot.Number.Uint64(), "new", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay))
				pivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted

				d.pivotLock.Lock()
				d.pivotHeader = pivot
				d.pivotLock.Unlock()

				// Write out the pivot into the database so a rollback beyond it will
				// reenable snap sync
				rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64())
			}
		}
		P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results)
		if err := d.commitSnapSyncData(beforeP, sync); err != nil {
			return err
		}
		if P != nil {
			// If new pivot block found, cancel old state retrieval and restart
			if oldPivot != P {
				sync.Cancel()
				sync = d.syncState(P.Header.Root)

				go closeOnErr(sync)
				oldPivot = P
			}
			// Wait for completion, occasionally checking for pivot staleness
			select {
			case <-sync.done:
				if sync.err != nil {
					return sync.err
				}
				if err := d.commitPivotBlock(P); err != nil {
					return err
				}
				oldPivot = nil

			case <-time.After(time.Second):
				oldTail = afterP
				continue
			}
		}
		// Snap sync done, pivot commit done, full import
		if err := d.importBlockResults(afterP); err != nil {
			return err
		}
	}
}

func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
	if len(results) == 0 {
		return nil, nil, nil
	}
	if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {
		// The pivot is somewhere in the future
		return nil, results, nil
	}
	// This could also be optimized, but it only happens very rarely
	for _, result := range results {
		num := result.Header.Number.Uint64()
		switch {
		case num < pivot:
			before = append(before, result)
		case num == pivot:
			p = result
		default:
			after = append(after, result)
		}
	}
	return p, before, after
}

func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *stateSync) error {
	// Check for any early termination requests
	if len(results) == 0 {
		return nil
	}
	select {
	case <-d.quitCh:
		return errCancelContentProcessing
	case <-stateSync.done:
		if err := stateSync.Wait(); err != nil {
			return err
		}
	default:
	}
	// Retrieve a batch of results to import
	first, last := results[0].Header, results[len(results)-1].Header
	log.Debug("Inserting snap-sync blocks", "items", len(results),
		"firstnum", first.Number, "firsthash", first.Hash(),
		"lastnum", last.Number, "lasthash", last.Hash(),
	)
	blocks := make([]*types.Block, len(results))
	receipts := make([]types.Receipts, len(results))
	for i, result := range results {
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
		receipts[i] = result.Receipts
	}
	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
		return fmt.Errorf("%w: %v", errInvalidChain, err)
	}
	return nil
}

func (d *Downloader) commitPivotBlock(result *fetchResult) error {
	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
	log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash())

	// Commit the pivot block as the new head; full sync will be required from here on
	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {
		return err
	}
	if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil {
		return err
	}
	atomic.StoreInt32(&d.committed, 1)
	return nil
}

// DeliverSnapPacket is invoked from a peer's message handler when it transmits a
// data packet for the local node to consume.
func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error {
	switch packet := packet.(type) {
	case *snap.AccountRangePacket:
		hashes, accounts, err := packet.Unpack()
		if err != nil {
			return err
		}
		return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof)

	case *snap.StorageRangesPacket:
		hashset, slotset := packet.Unpack()
		return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof)

	case *snap.ByteCodesPacket:
		return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)

	case *snap.TrieNodesPacket:
		return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)

	default:
		return fmt.Errorf("unexpected snap packet type: %T", packet)
	}
}

// readHeaderRange returns a list of headers, using the given last header as the base,
// and going backwards towards genesis. This method assumes that the caller already has
// placed a reasonable cap on count.
func (d *Downloader) readHeaderRange(last *types.Header, count int) []*types.Header {
	var (
		current = last
		headers []*types.Header
	)
	for {
		parent := d.lightchain.GetHeaderByHash(current.ParentHash)
		if parent == nil {
			break // The chain is not continuous, or the chain is exhausted
		}
		headers = append(headers, parent)
		if len(headers) >= count {
			break
		}
		current = parent
	}
	return headers
}
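
// splitAroundPivotExample is an illustrative sketch, not used by the sync code
// itself: it shows how splitAroundPivot partitions a batch of fetch results into
// the pre-pivot blocks (committed with receipts during snap sync), the pivot
// block itself and the post-pivot blocks (imported fully). The block numbers
// used here are made up purely for demonstration.
func splitAroundPivotExample() {
	// Build five dummy results numbered 98..102, with 100 acting as the pivot.
	var results []*fetchResult
	for n := uint64(98); n <= 102; n++ {
		results = append(results, &fetchResult{Header: &types.Header{Number: new(big.Int).SetUint64(n)}})
	}
	pivot, before, after := splitAroundPivot(100, results)

	// pivot holds block 100, before holds 98-99, after holds 101-102.
	fmt.Println("pivot", pivot.Header.Number, "before", len(before), "after", len(after))
}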