github.com/aiyaya188/klaytn@v0.0.0-20220629133911-2c66fd5546f4/eth/downloader/downloader.go 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 // Package downloader contains the manual full chain synchronisation. 18 package downloader 19 20 import ( 21 "errors" 22 "fmt" 23 "math/big" 24 "sync" 25 "sync/atomic" 26 "time" 27 28 "github.com/aiyaya188/klaytn" 29 "github.com/aiyaya188/klaytn/common" 30 "github.com/aiyaya188/klaytn/core/rawdb" 31 "github.com/aiyaya188/klaytn/core/state/snapshot" 32 "github.com/aiyaya188/klaytn/core/types" 33 "github.com/aiyaya188/klaytn/eth/protocols/snap" 34 "github.com/aiyaya188/klaytn/ethdb" 35 "github.com/aiyaya188/klaytn/event" 36 "github.com/aiyaya188/klaytn/log" 37 "github.com/aiyaya188/klaytn/params" 38 ) 39 40 var ( 41 MaxBlockFetch = 128 // Amount of blocks to be fetched per retrieval request 42 MaxHeaderFetch = 192 // Amount of block headers to be fetched per retrieval request 43 MaxSkeletonSize = 128 // Number of header fetches needed for a skeleton assembly 44 MaxReceiptFetch = 256 // Amount of transaction receipts to allow fetching per request 45 46 maxQueuedHeaders = 32 * 1024 // [eth/62] Maximum number of headers to queue for import (DOS protection) 47 maxHeadersProcess = 2048 // Number of header download results to import at once into the chain 48 maxResultsProcess = 2048 // Number of content download results to import at once into the chain 49 fullMaxForkAncestry uint64 = params.FullImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) 50 lightMaxForkAncestry uint64 = params.LightImmutabilityThreshold // Maximum chain reorganisation (locally redeclared so tests can reduce it) 51 52 reorgProtThreshold = 48 // Threshold number of recent blocks to disable mini reorg protection 53 reorgProtHeaderDelay = 2 // Number of headers to delay delivering to cover mini reorgs 54 55 fsHeaderCheckFrequency = 100 // Verification frequency of the downloaded headers during snap sync 56 fsHeaderSafetyNet = 2048 // Number of headers to discard in case a chain violation is detected 57 fsHeaderForceVerify = 24 // Number of headers to verify before and after the pivot to accept it 58 fsHeaderContCheck = 3 * time.Second // Time interval to check for header continuations during state download 59 fsMinFullBlocks = 64 // Number of blocks to retrieve fully even in snap sync 60 ) 61 62 var ( 63 errBusy = errors.New("busy") 64 errUnknownPeer = errors.New("peer is unknown or unhealthy") 65 errBadPeer = errors.New("action from bad peer ignored") 66 errStallingPeer = errors.New("peer is stalling") 67 errUnsyncedPeer = errors.New("unsynced peer") 68 errNoPeers = errors.New("no peers to keep download active") 69 errTimeout = errors.New("timeout") 70
errEmptyHeaderSet = errors.New("empty header set by peer") 71 errPeersUnavailable = errors.New("no peers available or all tried for download") 72 errInvalidAncestor = errors.New("retrieved ancestor is invalid") 73 errInvalidChain = errors.New("retrieved hash chain is invalid") 74 errInvalidBody = errors.New("retrieved block body is invalid") 75 errInvalidReceipt = errors.New("retrieved receipt is invalid") 76 errCancelStateFetch = errors.New("state data download canceled (requested)") 77 errCancelContentProcessing = errors.New("content processing canceled (requested)") 78 errCanceled = errors.New("syncing canceled (requested)") 79 errTooOld = errors.New("peer's protocol version too old") 80 errNoAncestorFound = errors.New("no common ancestor found") 81 errNoPivotHeader = errors.New("pivot header is not found") 82 ErrMergeTransition = errors.New("legacy sync reached the merge") 83 ) 84 85 // peerDropFn is a callback type for dropping a peer detected as malicious. 86 type peerDropFn func(id string) 87 88 // headerTask is a set of downloaded headers to queue along with their precomputed 89 // hashes to avoid constant rehashing. 90 type headerTask struct { 91 headers []*types.Header 92 hashes []common.Hash 93 } 94 95 type Downloader struct { 96 mode uint32 // Synchronisation mode defining the strategy used (per sync cycle), use d.getMode() to get the SyncMode 97 mux *event.TypeMux // Event multiplexer to announce sync operation events 98 99 checkpoint uint64 // Checkpoint block number to enforce head against (e.g. snap sync) 100 genesis uint64 // Genesis block number to limit sync to (e.g. light client CHT) 101 queue *queue // Scheduler for selecting the hashes to download 102 peers *peerSet // Set of active peers from which download can proceed 103 104 stateDB ethdb.Database // Database to state sync into (and deduplicate via) 105 106 // Statistics 107 syncStatsChainOrigin uint64 // Origin block number where syncing started at 108 syncStatsChainHeight uint64 // Highest block number known when syncing started 109 syncStatsLock sync.RWMutex // Lock protecting the sync stats fields 110 111 lightchain LightChain 112 blockchain BlockChain 113 114 // Callbacks 115 dropPeer peerDropFn // Drops a peer for misbehaving 116 117 // Status 118 synchroniseMock func(id string, hash common.Hash) error // Replacement for synchronise during testing 119 synchronising int32 120 notified int32 121 committed int32 122 ancientLimit uint64 // The maximum block number which can be regarded as ancient data. 123 124 // Channels 125 headerProcCh chan *headerTask // Channel to feed the header processor new tasks 126 127 // Skeleton sync 128 skeleton *skeleton // Header skeleton to backfill the chain with (eth2 mode) 129 130 // State sync 131 pivotHeader *types.Header // Pivot block header to dynamically push the syncing state root 132 pivotLock sync.RWMutex // Lock protecting pivot header reads from updates 133 134 SnapSyncer *snap.Syncer // TODO(karalabe): make private! hack for now 135 stateSyncStart chan *stateSync 136 137 // Cancellation and termination 138 cancelPeer string // Identifier of the peer currently being used as the master (cancel on drop) 139 cancelCh chan struct{} // Channel to cancel mid-flight syncs 140 cancelLock sync.RWMutex // Lock to protect the cancel channel and peer in delivers 141 cancelWg sync.WaitGroup // Make sure all fetcher goroutines have exited. 
142 143 quitCh chan struct{} // Quit channel to signal termination 144 quitLock sync.Mutex // Lock to prevent double closes 145 146 // Testing hooks 147 syncInitHook func(uint64, uint64) // Method to call upon initiating a new sync run 148 bodyFetchHook func([]*types.Header) // Method to call upon starting a block body fetch 149 receiptFetchHook func([]*types.Header) // Method to call upon starting a receipt fetch 150 chainInsertHook func([]*fetchResult) // Method to call upon inserting a chain of blocks (possibly in multiple invocations) 151 } 152 153 // LightChain encapsulates functions required to synchronise a light chain. 154 type LightChain interface { 155 // HasHeader verifies a header's presence in the local chain. 156 HasHeader(common.Hash, uint64) bool 157 158 // GetHeaderByHash retrieves a header from the local chain. 159 GetHeaderByHash(common.Hash) *types.Header 160 161 // CurrentHeader retrieves the head header from the local chain. 162 CurrentHeader() *types.Header 163 164 // GetTd returns the total difficulty of a local block. 165 GetTd(common.Hash, uint64) *big.Int 166 167 // InsertHeaderChain inserts a batch of headers into the local chain. 168 InsertHeaderChain([]*types.Header, int) (int, error) 169 170 // SetHead rewinds the local chain to a new head. 171 SetHead(uint64) error 172 } 173 174 // BlockChain encapsulates functions required to sync a (full or snap) blockchain. 175 type BlockChain interface { 176 LightChain 177 178 // HasBlock verifies a block's presence in the local chain. 179 HasBlock(common.Hash, uint64) bool 180 181 // HasFastBlock verifies a snap block's presence in the local chain. 182 HasFastBlock(common.Hash, uint64) bool 183 184 // GetBlockByHash retrieves a block from the local chain. 185 GetBlockByHash(common.Hash) *types.Block 186 187 // CurrentBlock retrieves the head block from the local chain. 188 CurrentBlock() *types.Block 189 190 // CurrentFastBlock retrieves the head snap block from the local chain. 191 CurrentFastBlock() *types.Block 192 193 // SnapSyncCommitHead directly commits the head block to a certain entity. 194 SnapSyncCommitHead(common.Hash) error 195 196 // InsertChain inserts a batch of blocks into the local chain. 197 InsertChain(types.Blocks) (int, error) 198 199 // InsertReceiptChain inserts a batch of receipts into the local chain. 200 InsertReceiptChain(types.Blocks, []types.Receipts, uint64) (int, error) 201 202 // Snapshots returns the blockchain snapshot tree to paused it during sync. 203 Snapshots() *snapshot.Tree 204 } 205 206 // New creates a new downloader to fetch hashes and blocks from remote peers. 
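//
// A minimal construction sketch (hypothetical wiring; the database, event mux,
// chain implementation and peer-drop callback all come from the embedding node,
// and passing a nil LightChain makes the downloader fall back to the full chain):
//
//	dl := New(0, db, new(event.TypeMux), chain, nil, func(id string) {}, func() {})
//	defer dl.Terminate()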
207 func New(checkpoint uint64, stateDb ethdb.Database, mux *event.TypeMux, chain BlockChain, lightchain LightChain, dropPeer peerDropFn, success func()) *Downloader { 208 if lightchain == nil { 209 lightchain = chain 210 } 211 dl := &Downloader{ 212 stateDB: stateDb, 213 mux: mux, 214 checkpoint: checkpoint, 215 queue: newQueue(blockCacheMaxItems, blockCacheInitialItems), 216 peers: newPeerSet(), 217 blockchain: chain, 218 lightchain: lightchain, 219 dropPeer: dropPeer, 220 headerProcCh: make(chan *headerTask, 1), 221 quitCh: make(chan struct{}), 222 SnapSyncer: snap.NewSyncer(stateDb), 223 stateSyncStart: make(chan *stateSync), 224 } 225 dl.skeleton = newSkeleton(stateDb, dl.peers, dropPeer, newBeaconBackfiller(dl, success)) 226 227 go dl.stateFetcher() 228 return dl 229 } 230 231 // Progress retrieves the synchronisation boundaries, specifically the origin 232 // block where synchronisation started at (may have failed/suspended); the block 233 // or header sync is currently at; and the latest known block which the sync targets. 234 // 235 // In addition, during the state download phase of snap synchronisation the number 236 // of processed and the total number of known states are also returned. Otherwise 237 // these are zero. 238 func (d *Downloader) Progress() ethereum.SyncProgress { 239 // Lock the current stats and return the progress 240 d.syncStatsLock.RLock() 241 defer d.syncStatsLock.RUnlock() 242 243 current := uint64(0) 244 mode := d.getMode() 245 switch { 246 case d.blockchain != nil && mode == FullSync: 247 current = d.blockchain.CurrentBlock().NumberU64() 248 case d.blockchain != nil && mode == SnapSync: 249 current = d.blockchain.CurrentFastBlock().NumberU64() 250 case d.lightchain != nil: 251 current = d.lightchain.CurrentHeader().Number.Uint64() 252 default: 253 log.Error("Unknown downloader chain/mode combo", "light", d.lightchain != nil, "full", d.blockchain != nil, "mode", mode) 254 } 255 progress, pending := d.SnapSyncer.Progress() 256 257 return ethereum.SyncProgress{ 258 StartingBlock: d.syncStatsChainOrigin, 259 CurrentBlock: current, 260 HighestBlock: d.syncStatsChainHeight, 261 SyncedAccounts: progress.AccountSynced, 262 SyncedAccountBytes: uint64(progress.AccountBytes), 263 SyncedBytecodes: progress.BytecodeSynced, 264 SyncedBytecodeBytes: uint64(progress.BytecodeBytes), 265 SyncedStorage: progress.StorageSynced, 266 SyncedStorageBytes: uint64(progress.StorageBytes), 267 HealedTrienodes: progress.TrienodeHealSynced, 268 HealedTrienodeBytes: uint64(progress.TrienodeHealBytes), 269 HealedBytecodes: progress.BytecodeHealSynced, 270 HealedBytecodeBytes: uint64(progress.BytecodeHealBytes), 271 HealingTrienodes: pending.TrienodeHeal, 272 HealingBytecode: pending.BytecodeHeal, 273 } 274 } 275 276 // Synchronising returns whether the downloader is currently retrieving blocks. 277 func (d *Downloader) Synchronising() bool { 278 return atomic.LoadInt32(&d.synchronising) > 0 279 } 280 281 // RegisterPeer injects a new download peer into the set of block source to be 282 // used for fetching hashes and blocks from. 
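//
// A hypothetical registration call (peerID, version and conn are placeholders;
// conn must satisfy the Peer interface and version is the negotiated protocol
// version):
//
//	if err := d.RegisterPeer(peerID, version, conn); err != nil {
//		log.Warn("Peer registration failed", "peer", peerID, "err", err)
//	}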
283 func (d *Downloader) RegisterPeer(id string, version uint, peer Peer) error { 284 var logger log.Logger 285 if len(id) < 16 { 286 // Tests use short IDs, don't choke on them 287 logger = log.New("peer", id) 288 } else { 289 logger = log.New("peer", id[:8]) 290 } 291 logger.Trace("Registering sync peer") 292 if err := d.peers.Register(newPeerConnection(id, version, peer, logger)); err != nil { 293 logger.Error("Failed to register sync peer", "err", err) 294 return err 295 } 296 return nil 297 } 298 299 // RegisterLightPeer injects a light client peer, wrapping it so it appears as a regular peer. 300 func (d *Downloader) RegisterLightPeer(id string, version uint, peer LightPeer) error { 301 return d.RegisterPeer(id, version, &lightPeerWrapper{peer}) 302 } 303 304 // UnregisterPeer remove a peer from the known list, preventing any action from 305 // the specified peer. An effort is also made to return any pending fetches into 306 // the queue. 307 func (d *Downloader) UnregisterPeer(id string) error { 308 // Unregister the peer from the active peer set and revoke any fetch tasks 309 var logger log.Logger 310 if len(id) < 16 { 311 // Tests use short IDs, don't choke on them 312 logger = log.New("peer", id) 313 } else { 314 logger = log.New("peer", id[:8]) 315 } 316 logger.Trace("Unregistering sync peer") 317 if err := d.peers.Unregister(id); err != nil { 318 logger.Error("Failed to unregister sync peer", "err", err) 319 return err 320 } 321 d.queue.Revoke(id) 322 323 return nil 324 } 325 326 // LegacySync tries to sync up our local block chain with a remote peer, both 327 // adding various sanity checks as well as wrapping it with various log entries. 328 func (d *Downloader) LegacySync(id string, head common.Hash, td, ttd *big.Int, mode SyncMode) error { 329 err := d.synchronise(id, head, td, ttd, mode, false, nil) 330 331 switch err { 332 case nil, errBusy, errCanceled: 333 return err 334 } 335 if errors.Is(err, errInvalidChain) || errors.Is(err, errBadPeer) || errors.Is(err, errTimeout) || 336 errors.Is(err, errStallingPeer) || errors.Is(err, errUnsyncedPeer) || errors.Is(err, errEmptyHeaderSet) || 337 errors.Is(err, errPeersUnavailable) || errors.Is(err, errTooOld) || errors.Is(err, errInvalidAncestor) { 338 log.Warn("Synchronisation failed, dropping peer", "peer", id, "err", err) 339 if d.dropPeer == nil { 340 // The dropPeer method is nil when `--copydb` is used for a local copy. 341 // Timeouts can occur if e.g. compaction hits at the wrong time, and can be ignored 342 log.Warn("Downloader wants to drop peer, but peerdrop-function is not set", "peer", id) 343 } else { 344 d.dropPeer(id) 345 } 346 return err 347 } 348 if errors.Is(err, ErrMergeTransition) { 349 return err // This is an expected fault, don't keep printing it in a spin-loop 350 } 351 log.Warn("Synchronisation failed, retrying", "err", err) 352 return err 353 } 354 355 // synchronise will select the peer and use it for synchronising. If an empty string is given 356 // it will use the best peer possible and synchronize if its TD is higher than our own. If any of the 357 // checks fail an error will be returned. This method is synchronous 358 func (d *Downloader) synchronise(id string, hash common.Hash, td, ttd *big.Int, mode SyncMode, beaconMode bool, beaconPing chan struct{}) error { 359 // The beacon header syncer is async. It will start this synchronization and 360 // will continue doing other tasks. 
However, if synchronization needs to be 361 // cancelled, the syncer needs to know if we reached the startup point (and 362 // inited the cancel channel) or not yet. Make sure that we'll signal even in 363 // case of a failure. 364 if beaconPing != nil { 365 defer func() { 366 select { 367 case <-beaconPing: // already notified 368 default: 369 close(beaconPing) // weird exit condition, notify that it's safe to cancel (the nothing) 370 } 371 }() 372 } 373 // Mock out the synchronisation if testing 374 if d.synchroniseMock != nil { 375 return d.synchroniseMock(id, hash) 376 } 377 // Make sure only one goroutine is ever allowed past this point at once 378 if !atomic.CompareAndSwapInt32(&d.synchronising, 0, 1) { 379 return errBusy 380 } 381 defer atomic.StoreInt32(&d.synchronising, 0) 382 383 // Post a user notification of the sync (only once per session) 384 if atomic.CompareAndSwapInt32(&d.notified, 0, 1) { 385 log.Info("Block synchronisation started") 386 } 387 if mode == SnapSync { 388 // Snap sync uses the snapshot namespace to store potentially flakey data until 389 // sync completely heals and finishes. Pause snapshot maintenance in the mean- 390 // time to prevent access. 391 if snapshots := d.blockchain.Snapshots(); snapshots != nil { // Only nil in tests 392 snapshots.Disable() 393 } 394 } 395 // Reset the queue, peer set and wake channels to clean any internal leftover state 396 d.queue.Reset(blockCacheMaxItems, blockCacheInitialItems) 397 d.peers.Reset() 398 399 for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { 400 select { 401 case <-ch: 402 default: 403 } 404 } 405 for empty := false; !empty; { 406 select { 407 case <-d.headerProcCh: 408 default: 409 empty = true 410 } 411 } 412 // Create cancel channel for aborting mid-flight and mark the master peer 413 d.cancelLock.Lock() 414 d.cancelCh = make(chan struct{}) 415 d.cancelPeer = id 416 d.cancelLock.Unlock() 417 418 defer d.Cancel() // No matter what, we can't leave the cancel channel open 419 420 // Atomically set the requested sync mode 421 atomic.StoreUint32(&d.mode, uint32(mode)) 422 423 // Retrieve the origin peer and initiate the downloading process 424 var p *peerConnection 425 if !beaconMode { // Beacon mode doesn't need a peer to sync from 426 p = d.peers.Peer(id) 427 if p == nil { 428 return errUnknownPeer 429 } 430 } 431 if beaconPing != nil { 432 close(beaconPing) 433 } 434 return d.syncWithPeer(p, hash, td, ttd, beaconMode) 435 } 436 437 func (d *Downloader) getMode() SyncMode { 438 return SyncMode(atomic.LoadUint32(&d.mode)) 439 } 440 441 // syncWithPeer starts a block synchronization based on the hash chain from the 442 // specified peer and head hash.
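//
// It resolves the sync boundaries (head, pivot and common ancestor) and then
// spawns the concurrent fetcher/processor pipeline: header retrieval (from the
// master peer in legacy mode, or from the skeleton in beacon mode), body and
// receipt retrieval, header processing, and finally either the full-sync or
// the snap-sync content processor.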
443 func (d *Downloader) syncWithPeer(p *peerConnection, hash common.Hash, td, ttd *big.Int, beaconMode bool) (err error) { 444 d.mux.Post(StartEvent{}) 445 defer func() { 446 // reset on error 447 if err != nil { 448 d.mux.Post(FailedEvent{err}) 449 } else { 450 latest := d.lightchain.CurrentHeader() 451 d.mux.Post(DoneEvent{latest}) 452 } 453 }() 454 mode := d.getMode() 455 456 if !beaconMode { 457 log.Debug("Synchronising with the network", "peer", p.id, "eth", p.version, "head", hash, "td", td, "mode", mode) 458 } else { 459 log.Debug("Backfilling with the network", "mode", mode) 460 } 461 defer func(start time.Time) { 462 log.Debug("Synchronisation terminated", "elapsed", common.PrettyDuration(time.Since(start))) 463 }(time.Now()) 464 465 // Look up the sync boundaries: the common ancestor and the target block 466 var latest, pivot *types.Header 467 if !beaconMode { 468 // In legacy mode, use the master peer to retrieve the headers from 469 latest, pivot, err = d.fetchHead(p) 470 if err != nil { 471 return err 472 } 473 } else { 474 // In beacon mode, use the skeleton chain to retrieve the headers from 475 latest, _, err = d.skeleton.Bounds() 476 if err != nil { 477 return err 478 } 479 if latest.Number.Uint64() > uint64(fsMinFullBlocks) { 480 number := latest.Number.Uint64() - uint64(fsMinFullBlocks) 481 482 // Retrieve the pivot header from the skeleton chain segment but 483 // fall back to the local chain if it's not found in skeleton space. 484 if pivot = d.skeleton.Header(number); pivot == nil { 485 _, oldest, _ := d.skeleton.Bounds() // error is already checked 486 if number < oldest.Number.Uint64() { 487 count := int(oldest.Number.Uint64() - number) // it's capped by fsMinFullBlocks 488 headers := d.readHeaderRange(oldest, count) 489 if len(headers) == count { 490 pivot = headers[len(headers)-1] 491 log.Warn("Retrieved pivot header from local", "number", pivot.Number, "hash", pivot.Hash(), "latest", latest.Number, "oldest", oldest.Number) 492 } 493 } 494 } 495 // Print an error log and return directly in case the pivot header 496 // is still not found. It means the skeleton chain is not linked 497 // correctly with local chain. 498 if pivot == nil { 499 log.Error("Pivot header is not found", "number", number) 500 return errNoPivotHeader 501 } 502 } 503 } 504 // If no pivot block was returned, the head is below the min full block 505 // threshold (i.e. new chain). In that case we won't really snap sync 506 // anyway, but still need a valid pivot block to avoid some code hitting 507 // nil panics on access.
508 if mode == SnapSync && pivot == nil { 509 pivot = d.blockchain.CurrentBlock().Header() 510 } 511 height := latest.Number.Uint64() 512 513 var origin uint64 514 if !beaconMode { 515 // In legacy mode, reach out to the network and find the ancestor 516 origin, err = d.findAncestor(p, latest) 517 if err != nil { 518 return err 519 } 520 } else { 521 // In beacon mode, use the skeleton chain for the ancestor lookup 522 origin, err = d.findBeaconAncestor() 523 if err != nil { 524 return err 525 } 526 } 527 d.syncStatsLock.Lock() 528 if d.syncStatsChainHeight <= origin || d.syncStatsChainOrigin > origin { 529 d.syncStatsChainOrigin = origin 530 } 531 d.syncStatsChainHeight = height 532 d.syncStatsLock.Unlock() 533 534 // Ensure our origin point is below any snap sync pivot point 535 if mode == SnapSync { 536 if height <= uint64(fsMinFullBlocks) { 537 origin = 0 538 } else { 539 pivotNumber := pivot.Number.Uint64() 540 if pivotNumber <= origin { 541 origin = pivotNumber - 1 542 } 543 // Write out the pivot into the database so a rollback beyond it will 544 // reenable snap sync 545 rawdb.WriteLastPivotNumber(d.stateDB, pivotNumber) 546 } 547 } 548 d.committed = 1 549 if mode == SnapSync && pivot.Number.Uint64() != 0 { 550 d.committed = 0 551 } 552 if mode == SnapSync { 553 // Set the ancient data limitation. 554 // If we are running snap sync, all block data older than ancientLimit will be 555 // written to the ancient store. More recent data will be written to the active 556 // database and will wait for the freezer to migrate. 557 // 558 // If there is a checkpoint available, then calculate the ancientLimit through 559 // that. Otherwise calculate the ancient limit through the advertised height 560 // of the remote peer. 561 // 562 // The reason for picking checkpoint first is that a malicious peer can give us 563 // a fake (very high) height, forcing the ancient limit to also be very high. 564 // The peer would start to feed us valid blocks until head, resulting in all of 565 // the blocks might be written into the ancient store. A following mini-reorg 566 // could cause issues. 567 if d.checkpoint != 0 && d.checkpoint > fullMaxForkAncestry+1 { 568 d.ancientLimit = d.checkpoint 569 } else if height > fullMaxForkAncestry+1 { 570 d.ancientLimit = height - fullMaxForkAncestry - 1 571 } else { 572 d.ancientLimit = 0 573 } 574 frozen, _ := d.stateDB.Ancients() // Ignore the error here since light client can also hit here. 575 576 // If a part of blockchain data has already been written into active store, 577 // disable the ancient style insertion explicitly. 578 if origin >= frozen && frozen != 0 { 579 d.ancientLimit = 0 580 log.Info("Disabling direct-ancient mode", "origin", origin, "ancient", frozen-1) 581 } else if d.ancientLimit > 0 { 582 log.Debug("Enabling direct-ancient mode", "ancient", d.ancientLimit) 583 } 584 // Rewind the ancient store and blockchain if reorg happens. 
585 if origin+1 < frozen { 586 if err := d.lightchain.SetHead(origin); err != nil { 587 return err 588 } 589 } 590 } 591 // Initiate the sync using a concurrent header and content retrieval algorithm 592 d.queue.Prepare(origin+1, mode) 593 if d.syncInitHook != nil { 594 d.syncInitHook(origin, height) 595 } 596 var headerFetcher func() error 597 if !beaconMode { 598 // In legacy mode, headers are retrieved from the network 599 headerFetcher = func() error { return d.fetchHeaders(p, origin+1, latest.Number.Uint64()) } 600 } else { 601 // In beacon mode, headers are served by the skeleton syncer 602 headerFetcher = func() error { return d.fetchBeaconHeaders(origin + 1) } 603 } 604 fetchers := []func() error{ 605 headerFetcher, // Headers are always retrieved 606 func() error { return d.fetchBodies(origin+1, beaconMode) }, // Bodies are retrieved during normal and snap sync 607 func() error { return d.fetchReceipts(origin+1, beaconMode) }, // Receipts are retrieved during snap sync 608 func() error { return d.processHeaders(origin+1, td, ttd, beaconMode) }, 609 } 610 if mode == SnapSync { 611 d.pivotLock.Lock() 612 d.pivotHeader = pivot 613 d.pivotLock.Unlock() 614 615 fetchers = append(fetchers, func() error { return d.processSnapSyncContent() }) 616 } else if mode == FullSync { 617 fetchers = append(fetchers, func() error { return d.processFullSyncContent(ttd, beaconMode) }) 618 } 619 return d.spawnSync(fetchers) 620 } 621 622 // spawnSync runs d.process and all given fetcher functions to completion in 623 // separate goroutines, returning the first error that appears. 624 func (d *Downloader) spawnSync(fetchers []func() error) error { 625 errc := make(chan error, len(fetchers)) 626 d.cancelWg.Add(len(fetchers)) 627 for _, fn := range fetchers { 628 fn := fn 629 go func() { defer d.cancelWg.Done(); errc <- fn() }() 630 } 631 // Wait for the first error, then terminate the others. 632 var err error 633 for i := 0; i < len(fetchers); i++ { 634 if i == len(fetchers)-1 { 635 // Close the queue when all fetchers have exited. 636 // This will cause the block processor to end when 637 // it has processed the queue. 638 d.queue.Close() 639 } 640 if err = <-errc; err != nil && err != errCanceled { 641 break 642 } 643 } 644 d.queue.Close() 645 d.Cancel() 646 return err 647 } 648 649 // cancel aborts all of the operations and resets the queue. However, cancel does 650 // not wait for the running download goroutines to finish. This method should be 651 // used when cancelling the downloads from inside the downloader. 652 func (d *Downloader) cancel() { 653 // Close the current cancel channel 654 d.cancelLock.Lock() 655 defer d.cancelLock.Unlock() 656 657 if d.cancelCh != nil { 658 select { 659 case <-d.cancelCh: 660 // Channel was already closed 661 default: 662 close(d.cancelCh) 663 } 664 } 665 } 666 667 // Cancel aborts all of the operations and waits for all download goroutines to 668 // finish before returning. 669 func (d *Downloader) Cancel() { 670 d.cancel() 671 d.cancelWg.Wait() 672 } 673 674 // Terminate interrupts the downloader, canceling all pending operations. 675 // The downloader cannot be reused after calling Terminate. 
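//
// Unlike Cancel, which only aborts the currently running sync cycle, Terminate
// closes the quit channel, shuts down the internal beacon/skeleton syncer and
// then cancels any pending download requests.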
676 func (d *Downloader) Terminate() { 677 // Close the termination channel (make sure double close is allowed) 678 d.quitLock.Lock() 679 select { 680 case <-d.quitCh: 681 default: 682 close(d.quitCh) 683 684 // Terminate the internal beacon syncer 685 d.skeleton.Terminate() 686 } 687 d.quitLock.Unlock() 688 689 // Cancel any pending download requests 690 d.Cancel() 691 } 692 693 // fetchHead retrieves the head header and prior pivot block (if available) from 694 // a remote peer. 695 func (d *Downloader) fetchHead(p *peerConnection) (head *types.Header, pivot *types.Header, err error) { 696 p.log.Debug("Retrieving remote chain head") 697 mode := d.getMode() 698 699 // Request the advertised remote head block and wait for the response 700 latest, _ := p.peer.Head() 701 fetch := 1 702 if mode == SnapSync { 703 fetch = 2 // head + pivot headers 704 } 705 headers, hashes, err := d.fetchHeadersByHash(p, latest, fetch, fsMinFullBlocks-1, true) 706 if err != nil { 707 return nil, nil, err 708 } 709 // Make sure the peer gave us at least one and at most the requested headers 710 if len(headers) == 0 || len(headers) > fetch { 711 return nil, nil, fmt.Errorf("%w: returned headers %d != requested %d", errBadPeer, len(headers), fetch) 712 } 713 // The first header needs to be the head, validate against the checkpoint 714 // and request. If only 1 header was returned, make sure there's no pivot 715 // or there was not one requested. 716 head = headers[0] 717 if (mode == SnapSync || mode == LightSync) && head.Number.Uint64() < d.checkpoint { 718 return nil, nil, fmt.Errorf("%w: remote head %d below checkpoint %d", errUnsyncedPeer, head.Number, d.checkpoint) 719 } 720 if len(headers) == 1 { 721 if mode == SnapSync && head.Number.Uint64() > uint64(fsMinFullBlocks) { 722 return nil, nil, fmt.Errorf("%w: no pivot included along head header", errBadPeer) 723 } 724 p.log.Debug("Remote head identified, no pivot", "number", head.Number, "hash", hashes[0]) 725 return head, nil, nil 726 } 727 // At this point we have 2 headers in total and the first is the 728 // validated head of the chain. Check the pivot number and return, 729 pivot = headers[1] 730 if pivot.Number.Uint64() != head.Number.Uint64()-uint64(fsMinFullBlocks) { 731 return nil, nil, fmt.Errorf("%w: remote pivot %d != requested %d", errInvalidChain, pivot.Number, head.Number.Uint64()-uint64(fsMinFullBlocks)) 732 } 733 return head, pivot, nil 734 } 735 736 // calculateRequestSpan calculates what headers to request from a peer when trying to determine the 737 // common ancestor. 738 // It returns parameters to be used for peer.RequestHeadersByNumber: 739 // from - starting block number 740 // count - number of headers to request 741 // skip - number of headers to skip 742 // and also returns 'max', the last block which is expected to be returned by the remote peers, 743 // given the (from,count,skip) 744 func calculateRequestSpan(remoteHeight, localHeight uint64) (int64, int, int, uint64) { 745 var ( 746 from int 747 count int 748 MaxCount = MaxHeaderFetch / 16 749 ) 750 // requestHead is the highest block that we will ask for. 
If requestHead is not offset, 751 // the highest block that we will get is 16 blocks back from head, which means we 752 // will fetch 14 or 15 blocks unnecessarily in the case the height difference 753 // between us and the peer is 1-2 blocks, which is most common 754 requestHead := int(remoteHeight) - 1 755 if requestHead < 0 { 756 requestHead = 0 757 } 758 // requestBottom is the lowest block we want included in the query 759 // Ideally, we want to include the one just below our own head 760 requestBottom := int(localHeight - 1) 761 if requestBottom < 0 { 762 requestBottom = 0 763 } 764 totalSpan := requestHead - requestBottom 765 span := 1 + totalSpan/MaxCount 766 if span < 2 { 767 span = 2 768 } 769 if span > 16 { 770 span = 16 771 } 772 773 count = 1 + totalSpan/span 774 if count > MaxCount { 775 count = MaxCount 776 } 777 if count < 2 { 778 count = 2 779 } 780 from = requestHead - (count-1)*span 781 if from < 0 { 782 from = 0 783 } 784 max := from + (count-1)*span 785 return int64(from), count, span - 1, uint64(max) 786 } 787 788 // findAncestor tries to locate the common ancestor link of the local chain and 789 // a remote peers blockchain. In the general case when our node was in sync and 790 // on the correct chain, checking the top N links should already get us a match. 791 // In the rare scenario when we ended up on a long reorganisation (i.e. none of 792 // the head links match), we do a binary search to find the common ancestor. 793 func (d *Downloader) findAncestor(p *peerConnection, remoteHeader *types.Header) (uint64, error) { 794 // Figure out the valid ancestor range to prevent rewrite attacks 795 var ( 796 floor = int64(-1) 797 localHeight uint64 798 remoteHeight = remoteHeader.Number.Uint64() 799 ) 800 mode := d.getMode() 801 switch mode { 802 case FullSync: 803 localHeight = d.blockchain.CurrentBlock().NumberU64() 804 case SnapSync: 805 localHeight = d.blockchain.CurrentFastBlock().NumberU64() 806 default: 807 localHeight = d.lightchain.CurrentHeader().Number.Uint64() 808 } 809 p.log.Debug("Looking for common ancestor", "local", localHeight, "remote", remoteHeight) 810 811 // Recap floor value for binary search 812 maxForkAncestry := fullMaxForkAncestry 813 if d.getMode() == LightSync { 814 maxForkAncestry = lightMaxForkAncestry 815 } 816 if localHeight >= maxForkAncestry { 817 // We're above the max reorg threshold, find the earliest fork point 818 floor = int64(localHeight - maxForkAncestry) 819 } 820 // If we're doing a light sync, ensure the floor doesn't go below the CHT, as 821 // all headers before that point will be missing. 822 if mode == LightSync { 823 // If we don't know the current CHT position, find it 824 if d.genesis == 0 { 825 header := d.lightchain.CurrentHeader() 826 for header != nil { 827 d.genesis = header.Number.Uint64() 828 if floor >= int64(d.genesis)-1 { 829 break 830 } 831 header = d.lightchain.GetHeaderByHash(header.ParentHash) 832 } 833 } 834 // We already know the "genesis" block number, cap floor to that 835 if floor < int64(d.genesis)-1 { 836 floor = int64(d.genesis) - 1 837 } 838 } 839 840 ancestor, err := d.findAncestorSpanSearch(p, mode, remoteHeight, localHeight, floor) 841 if err == nil { 842 return ancestor, nil 843 } 844 // The returned error was not nil. 845 // If the error returned does not reflect that a common ancestor was not found, return it. 846 // If the error reflects that a common ancestor was not found, continue to binary search, 847 // where the error value will be reassigned. 
848 if !errors.Is(err, errNoAncestorFound) { 849 return 0, err 850 } 851 852 ancestor, err = d.findAncestorBinarySearch(p, mode, remoteHeight, floor) 853 if err != nil { 854 return 0, err 855 } 856 return ancestor, nil 857 } 858 859 func (d *Downloader) findAncestorSpanSearch(p *peerConnection, mode SyncMode, remoteHeight, localHeight uint64, floor int64) (uint64, error) { 860 from, count, skip, max := calculateRequestSpan(remoteHeight, localHeight) 861 862 p.log.Trace("Span searching for common ancestor", "count", count, "from", from, "skip", skip) 863 headers, hashes, err := d.fetchHeadersByNumber(p, uint64(from), count, skip, false) 864 if err != nil { 865 return 0, err 866 } 867 // Wait for the remote response to the head fetch 868 number, hash := uint64(0), common.Hash{} 869 870 // Make sure the peer actually gave something valid 871 if len(headers) == 0 { 872 p.log.Warn("Empty head header set") 873 return 0, errEmptyHeaderSet 874 } 875 // Make sure the peer's reply conforms to the request 876 for i, header := range headers { 877 expectNumber := from + int64(i)*int64(skip+1) 878 if number := header.Number.Int64(); number != expectNumber { 879 p.log.Warn("Head headers broke chain ordering", "index", i, "requested", expectNumber, "received", number) 880 return 0, fmt.Errorf("%w: %v", errInvalidChain, errors.New("head headers broke chain ordering")) 881 } 882 } 883 // Check if a common ancestor was found 884 for i := len(headers) - 1; i >= 0; i-- { 885 // Skip any headers that underflow/overflow our requested set 886 if headers[i].Number.Int64() < from || headers[i].Number.Uint64() > max { 887 continue 888 } 889 // Otherwise check if we already know the header or not 890 h := hashes[i] 891 n := headers[i].Number.Uint64() 892 893 var known bool 894 switch mode { 895 case FullSync: 896 known = d.blockchain.HasBlock(h, n) 897 case SnapSync: 898 known = d.blockchain.HasFastBlock(h, n) 899 default: 900 known = d.lightchain.HasHeader(h, n) 901 } 902 if known { 903 number, hash = n, h 904 break 905 } 906 } 907 // If the head fetch already found an ancestor, return 908 if hash != (common.Hash{}) { 909 if int64(number) <= floor { 910 p.log.Warn("Ancestor below allowance", "number", number, "hash", hash, "allowance", floor) 911 return 0, errInvalidAncestor 912 } 913 p.log.Debug("Found common ancestor", "number", number, "hash", hash) 914 return number, nil 915 } 916 return 0, errNoAncestorFound 917 } 918 919 func (d *Downloader) findAncestorBinarySearch(p *peerConnection, mode SyncMode, remoteHeight uint64, floor int64) (uint64, error) { 920 hash := common.Hash{} 921 922 // Ancestor not found, we need to binary search over our chain 923 start, end := uint64(0), remoteHeight 924 if floor > 0 { 925 start = uint64(floor) 926 } 927 p.log.Trace("Binary searching for common ancestor", "start", start, "end", end) 928 929 for start+1 < end { 930 // Split our chain interval in two, and request the hash to cross check 931 check := (start + end) / 2 932 933 headers, hashes, err := d.fetchHeadersByNumber(p, check, 1, 0, false) 934 if err != nil { 935 return 0, err 936 } 937 // Make sure the peer actually gave something valid 938 if len(headers) != 1 { 939 p.log.Warn("Multiple headers for single request", "headers", len(headers)) 940 return 0, fmt.Errorf("%w: multiple headers (%d) for single request", errBadPeer, len(headers)) 941 } 942 // Modify the search interval based on the response 943 h := hashes[0] 944 n := headers[0].Number.Uint64() 945 946 var known bool 947 switch mode { 948 case FullSync: 949 known 
= d.blockchain.HasBlock(h, n) 950 case SnapSync: 951 known = d.blockchain.HasFastBlock(h, n) 952 default: 953 known = d.lightchain.HasHeader(h, n) 954 } 955 if !known { 956 end = check 957 continue 958 } 959 header := d.lightchain.GetHeaderByHash(h) // Independent of sync mode, header surely exists 960 if header.Number.Uint64() != check { 961 p.log.Warn("Received non requested header", "number", header.Number, "hash", header.Hash(), "request", check) 962 return 0, fmt.Errorf("%w: non-requested header (%d)", errBadPeer, header.Number) 963 } 964 start = check 965 hash = h 966 } 967 // Ensure valid ancestry and return 968 if int64(start) <= floor { 969 p.log.Warn("Ancestor below allowance", "number", start, "hash", hash, "allowance", floor) 970 return 0, errInvalidAncestor 971 } 972 p.log.Debug("Found common ancestor", "number", start, "hash", hash) 973 return start, nil 974 } 975 976 // fetchHeaders keeps retrieving headers concurrently from the number 977 // requested, until no more are returned, potentially throttling on the way. To 978 // facilitate concurrency but still protect against malicious nodes sending bad 979 // headers, we construct a header chain skeleton using the "origin" peer we are 980 // syncing with, and fill in the missing headers using anyone else. Headers from 981 // other peers are only accepted if they map cleanly to the skeleton. If no one 982 // can fill in the skeleton - not even the origin peer - it's assumed invalid and 983 // the origin is dropped. 984 func (d *Downloader) fetchHeaders(p *peerConnection, from uint64, head uint64) error { 985 p.log.Debug("Directing header downloads", "origin", from) 986 defer p.log.Debug("Header download terminated") 987 988 // Start pulling the header chain skeleton until all is done 989 var ( 990 skeleton = true // Skeleton assembly phase or finishing up 991 pivoting = false // Whether the next request is pivot verification 992 ancestor = from 993 mode = d.getMode() 994 ) 995 for { 996 // Pull the next batch of headers, it either: 997 // - Pivot check to see if the chain moved too far 998 // - Skeleton retrieval to permit concurrent header fetches 999 // - Full header retrieval if we're near the chain head 1000 var ( 1001 headers []*types.Header 1002 hashes []common.Hash 1003 err error 1004 ) 1005 switch { 1006 case pivoting: 1007 d.pivotLock.RLock() 1008 pivot := d.pivotHeader.Number.Uint64() 1009 d.pivotLock.RUnlock() 1010 1011 p.log.Trace("Fetching next pivot header", "number", pivot+uint64(fsMinFullBlocks)) 1012 headers, hashes, err = d.fetchHeadersByNumber(p, pivot+uint64(fsMinFullBlocks), 2, fsMinFullBlocks-9, false) // move +64 when it's 2x64-8 deep 1013 1014 case skeleton: 1015 p.log.Trace("Fetching skeleton headers", "count", MaxHeaderFetch, "from", from) 1016 headers, hashes, err = d.fetchHeadersByNumber(p, from+uint64(MaxHeaderFetch)-1, MaxSkeletonSize, MaxHeaderFetch-1, false) 1017 1018 default: 1019 p.log.Trace("Fetching full headers", "count", MaxHeaderFetch, "from", from) 1020 headers, hashes, err = d.fetchHeadersByNumber(p, from, MaxHeaderFetch, 0, false) 1021 } 1022 switch err { 1023 case nil: 1024 // Headers retrieved, continue with processing 1025 1026 case errCanceled: 1027 // Sync cancelled, no issue, propagate up 1028 return err 1029 1030 default: 1031 // Header retrieval either timed out, or the peer failed in some strange way 1032 // (e.g. disconnect). 
Consider the master peer bad and drop 1033 d.dropPeer(p.id) 1034 1035 // Finish the sync gracefully instead of dumping the gathered data though 1036 for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { 1037 select { 1038 case ch <- false: 1039 case <-d.cancelCh: 1040 } 1041 } 1042 select { 1043 case d.headerProcCh <- nil: 1044 case <-d.cancelCh: 1045 } 1046 return fmt.Errorf("%w: header request failed: %v", errBadPeer, err) 1047 } 1048 // If the pivot is being checked, move if it became stale and run the real retrieval 1049 var pivot uint64 1050 1051 d.pivotLock.RLock() 1052 if d.pivotHeader != nil { 1053 pivot = d.pivotHeader.Number.Uint64() 1054 } 1055 d.pivotLock.RUnlock() 1056 1057 if pivoting { 1058 if len(headers) == 2 { 1059 if have, want := headers[0].Number.Uint64(), pivot+uint64(fsMinFullBlocks); have != want { 1060 log.Warn("Peer sent invalid next pivot", "have", have, "want", want) 1061 return fmt.Errorf("%w: next pivot number %d != requested %d", errInvalidChain, have, want) 1062 } 1063 if have, want := headers[1].Number.Uint64(), pivot+2*uint64(fsMinFullBlocks)-8; have != want { 1064 log.Warn("Peer sent invalid pivot confirmer", "have", have, "want", want) 1065 return fmt.Errorf("%w: next pivot confirmer number %d != requested %d", errInvalidChain, have, want) 1066 } 1067 log.Warn("Pivot seemingly stale, moving", "old", pivot, "new", headers[0].Number) 1068 pivot = headers[0].Number.Uint64() 1069 1070 d.pivotLock.Lock() 1071 d.pivotHeader = headers[0] 1072 d.pivotLock.Unlock() 1073 1074 // Write out the pivot into the database so a rollback beyond 1075 // it will reenable snap sync and update the state root that 1076 // the state syncer will be downloading. 1077 rawdb.WriteLastPivotNumber(d.stateDB, pivot) 1078 } 1079 // Disable the pivot check and fetch the next batch of headers 1080 pivoting = false 1081 continue 1082 } 1083 // If the skeleton's finished, pull any remaining head headers directly from the origin 1084 if skeleton && len(headers) == 0 { 1085 // A malicious node might withhold advertised headers indefinitely 1086 if from+uint64(MaxHeaderFetch)-1 <= head { 1087 p.log.Warn("Peer withheld skeleton headers", "advertised", head, "withheld", from+uint64(MaxHeaderFetch)-1) 1088 return fmt.Errorf("%w: withheld skeleton headers: advertised %d, withheld #%d", errStallingPeer, head, from+uint64(MaxHeaderFetch)-1) 1089 } 1090 p.log.Debug("No skeleton, fetching headers directly") 1091 skeleton = false 1092 continue 1093 } 1094 // If no more headers are inbound, notify the content fetchers and return 1095 if len(headers) == 0 { 1096 // Don't abort header fetches while the pivot is downloading 1097 if atomic.LoadInt32(&d.committed) == 0 && pivot <= from { 1098 p.log.Debug("No headers, waiting for pivot commit") 1099 select { 1100 case <-time.After(fsHeaderContCheck): 1101 continue 1102 case <-d.cancelCh: 1103 return errCanceled 1104 } 1105 } 1106 // Pivot done (or not in snap sync) and no more headers, terminate the process 1107 p.log.Debug("No more headers available") 1108 select { 1109 case d.headerProcCh <- nil: 1110 return nil 1111 case <-d.cancelCh: 1112 return errCanceled 1113 } 1114 } 1115 // If we received a skeleton batch, resolve internals concurrently 1116 var progressed bool 1117 if skeleton { 1118 filled, hashset, proced, err := d.fillHeaderSkeleton(from, headers) 1119 if err != nil { 1120 p.log.Debug("Skeleton chain invalid", "err", err) 1121 return fmt.Errorf("%w: %v", errInvalidChain, err) 1122 } 1123 headers = filled[proced:] 1124 
hashes = hashset[proced:] 1125 1126 progressed = proced > 0 1127 from += uint64(proced) 1128 } else { 1129 // A malicious node might withhold advertised headers indefinitely 1130 if n := len(headers); n < MaxHeaderFetch && headers[n-1].Number.Uint64() < head { 1131 p.log.Warn("Peer withheld headers", "advertised", head, "delivered", headers[n-1].Number.Uint64()) 1132 return fmt.Errorf("%w: withheld headers: advertised %d, delivered %d", errStallingPeer, head, headers[n-1].Number.Uint64()) 1133 } 1134 // If we're closing in on the chain head, but haven't yet reached it, delay 1135 // the last few headers so mini reorgs on the head don't cause invalid hash 1136 // chain errors. 1137 if n := len(headers); n > 0 { 1138 // Retrieve the current head we're at 1139 var head uint64 1140 if mode == LightSync { 1141 head = d.lightchain.CurrentHeader().Number.Uint64() 1142 } else { 1143 head = d.blockchain.CurrentFastBlock().NumberU64() 1144 if full := d.blockchain.CurrentBlock().NumberU64(); head < full { 1145 head = full 1146 } 1147 } 1148 // If the head is below the common ancestor, we're actually deduplicating 1149 // already existing chain segments, so use the ancestor as the fake head. 1150 // Otherwise, we might end up delaying header deliveries pointlessly. 1151 if head < ancestor { 1152 head = ancestor 1153 } 1154 // If the head is way older than this batch, delay the last few headers 1155 if head+uint64(reorgProtThreshold) < headers[n-1].Number.Uint64() { 1156 delay := reorgProtHeaderDelay 1157 if delay > n { 1158 delay = n 1159 } 1160 headers = headers[:n-delay] 1161 hashes = hashes[:n-delay] 1162 } 1163 } 1164 } 1165 // If no headers have been delivered, or all of them have been delayed, 1166 // sleep a bit and retry. Take care with headers already consumed during 1167 // skeleton filling 1168 if len(headers) == 0 && !progressed { 1169 p.log.Trace("All headers delayed, waiting") 1170 select { 1171 case <-time.After(fsHeaderContCheck): 1172 continue 1173 case <-d.cancelCh: 1174 return errCanceled 1175 } 1176 } 1177 // Insert any remaining new headers and fetch the next batch 1178 if len(headers) > 0 { 1179 p.log.Trace("Scheduling new headers", "count", len(headers), "from", from) 1180 select { 1181 case d.headerProcCh <- &headerTask{ 1182 headers: headers, 1183 hashes: hashes, 1184 }: 1185 case <-d.cancelCh: 1186 return errCanceled 1187 } 1188 from += uint64(len(headers)) 1189 } 1190 // If we're still skeleton filling snap sync, check pivot staleness 1191 // before continuing to the next skeleton filling 1192 if skeleton && pivot > 0 { 1193 pivoting = true 1194 } 1195 } 1196 } 1197 1198 // fillHeaderSkeleton concurrently retrieves headers from all our available peers 1199 // and maps them to the provided skeleton header chain. 1200 // 1201 // Any partial results from the beginning of the skeleton are (if possible) forwarded 1202 // immediately to the header processor to keep the rest of the pipeline full even 1203 // in the case of header stalls. 1204 // 1205 // The method returns the entire filled skeleton and also the number of headers 1206 // already forwarded for processing.
1207 func (d *Downloader) fillHeaderSkeleton(from uint64, skeleton []*types.Header) ([]*types.Header, []common.Hash, int, error) { 1208 log.Debug("Filling up skeleton", "from", from) 1209 d.queue.ScheduleSkeleton(from, skeleton) 1210 1211 err := d.concurrentFetch((*headerQueue)(d), false) 1212 if err != nil { 1213 log.Debug("Skeleton fill failed", "err", err) 1214 } 1215 filled, hashes, proced := d.queue.RetrieveHeaders() 1216 if err == nil { 1217 log.Debug("Skeleton fill succeeded", "filled", len(filled), "processed", proced) 1218 } 1219 return filled, hashes, proced, err 1220 } 1221 1222 // fetchBodies iteratively downloads the scheduled block bodies, taking any 1223 // available peers, reserving a chunk of blocks for each, waiting for delivery 1224 // and also periodically checking for timeouts. 1225 func (d *Downloader) fetchBodies(from uint64, beaconMode bool) error { 1226 log.Debug("Downloading block bodies", "origin", from) 1227 err := d.concurrentFetch((*bodyQueue)(d), beaconMode) 1228 1229 log.Debug("Block body download terminated", "err", err) 1230 return err 1231 } 1232 1233 // fetchReceipts iteratively downloads the scheduled block receipts, taking any 1234 // available peers, reserving a chunk of receipts for each, waiting for delivery 1235 // and also periodically checking for timeouts. 1236 func (d *Downloader) fetchReceipts(from uint64, beaconMode bool) error { 1237 log.Debug("Downloading receipts", "origin", from) 1238 err := d.concurrentFetch((*receiptQueue)(d), beaconMode) 1239 1240 log.Debug("Receipt download terminated", "err", err) 1241 return err 1242 } 1243 1244 // processHeaders takes batches of retrieved headers from an input channel and 1245 // keeps processing and scheduling them into the header chain and downloader's 1246 // queue until the stream ends or a failure occurs. 
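//
// While snap or light syncing, headers are committed ahead of their associated
// block data, so the most recently inserted ones are tracked as uncertain: if a
// chain violation is detected later, the local chain is rewound by up to
// fsHeaderSafetyNet headers via SetHead before the error is surfaced.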
1247 func (d *Downloader) processHeaders(origin uint64, td, ttd *big.Int, beaconMode bool) error { 1248 // Keep a count of uncertain headers to roll back 1249 var ( 1250 rollback uint64 // Zero means no rollback (fine as you can't unroll the genesis) 1251 rollbackErr error 1252 mode = d.getMode() 1253 ) 1254 defer func() { 1255 if rollback > 0 { 1256 lastHeader, lastFastBlock, lastBlock := d.lightchain.CurrentHeader().Number, common.Big0, common.Big0 1257 if mode != LightSync { 1258 lastFastBlock = d.blockchain.CurrentFastBlock().Number() 1259 lastBlock = d.blockchain.CurrentBlock().Number() 1260 } 1261 if err := d.lightchain.SetHead(rollback - 1); err != nil { // -1 to target the parent of the first uncertain block 1262 // We're already unwinding the stack, only print the error to make it more visible 1263 log.Error("Failed to roll back chain segment", "head", rollback-1, "err", err) 1264 } 1265 curFastBlock, curBlock := common.Big0, common.Big0 1266 if mode != LightSync { 1267 curFastBlock = d.blockchain.CurrentFastBlock().Number() 1268 curBlock = d.blockchain.CurrentBlock().Number() 1269 } 1270 log.Warn("Rolled back chain segment", 1271 "header", fmt.Sprintf("%d->%d", lastHeader, d.lightchain.CurrentHeader().Number), 1272 "snap", fmt.Sprintf("%d->%d", lastFastBlock, curFastBlock), 1273 "block", fmt.Sprintf("%d->%d", lastBlock, curBlock), "reason", rollbackErr) 1274 } 1275 }() 1276 // Wait for batches of headers to process 1277 gotHeaders := false 1278 1279 for { 1280 select { 1281 case <-d.cancelCh: 1282 rollbackErr = errCanceled 1283 return errCanceled 1284 1285 case task := <-d.headerProcCh: 1286 // Terminate header processing if we synced up 1287 if task == nil || len(task.headers) == 0 { 1288 // Notify everyone that headers are fully processed 1289 for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { 1290 select { 1291 case ch <- false: 1292 case <-d.cancelCh: 1293 } 1294 } 1295 // If we're in legacy sync mode, we need to check total difficulty 1296 // violations from malicious peers. That is not needed in beacon 1297 // mode and we can skip to terminating sync. 1298 if !beaconMode { 1299 // If no headers were retrieved at all, the peer violated its TD promise that it had a 1300 // better chain compared to ours. The only exception is if its promised blocks were 1301 // already imported by other means (e.g. fetcher): 1302 // 1303 // R <remote peer>, L <local node>: Both at block 10 1304 // R: Mine block 11, and propagate it to L 1305 // L: Queue block 11 for import 1306 // L: Notice that R's head and TD increased compared to ours, start sync 1307 // L: Import of block 11 finishes 1308 // L: Sync begins, and finds common ancestor at 11 1309 // L: Request new headers up from 11 (R's TD was higher, it must have something) 1310 // R: Nothing to give 1311 if mode != LightSync { 1312 head := d.blockchain.CurrentBlock() 1313 if !gotHeaders && td.Cmp(d.blockchain.GetTd(head.Hash(), head.NumberU64())) > 0 { 1314 return errStallingPeer 1315 } 1316 } 1317 // If snap or light syncing, ensure promised headers are indeed delivered. This is 1318 // needed to detect scenarios where an attacker feeds a bad pivot and then bails out 1319 // of delivering the post-pivot blocks that would flag the invalid content. 1320 // 1321 // This check cannot be executed "as is" for full imports, since blocks may still be 1322 // queued for processing when the header download completes. 
However, as long as the 1323 // peer gave us something useful, we're already happy/progressed (above check). 1324 if mode == SnapSync || mode == LightSync { 1325 head := d.lightchain.CurrentHeader() 1326 if td.Cmp(d.lightchain.GetTd(head.Hash(), head.Number.Uint64())) > 0 { 1327 return errStallingPeer 1328 } 1329 } 1330 } 1331 // Disable any rollback and return 1332 rollback = 0 1333 return nil 1334 } 1335 // Otherwise split the chunk of headers into batches and process them 1336 headers, hashes := task.headers, task.hashes 1337 1338 gotHeaders = true 1339 for len(headers) > 0 { 1340 // Terminate if something failed in between processing chunks 1341 select { 1342 case <-d.cancelCh: 1343 rollbackErr = errCanceled 1344 return errCanceled 1345 default: 1346 } 1347 // Select the next chunk of headers to import 1348 limit := maxHeadersProcess 1349 if limit > len(headers) { 1350 limit = len(headers) 1351 } 1352 chunkHeaders := headers[:limit] 1353 chunkHashes := hashes[:limit] 1354 1355 // In case of header only syncing, validate the chunk immediately 1356 if mode == SnapSync || mode == LightSync { 1357 // If we're importing pure headers, verify based on their recentness 1358 var pivot uint64 1359 1360 d.pivotLock.RLock() 1361 if d.pivotHeader != nil { 1362 pivot = d.pivotHeader.Number.Uint64() 1363 } 1364 d.pivotLock.RUnlock() 1365 1366 frequency := fsHeaderCheckFrequency 1367 if chunkHeaders[len(chunkHeaders)-1].Number.Uint64()+uint64(fsHeaderForceVerify) > pivot { 1368 frequency = 1 1369 } 1370 // Although the received headers might be all valid, a legacy 1371 // PoW/PoA sync must not accept post-merge headers. Make sure 1372 // that any transition is rejected at this point. 1373 var ( 1374 rejected []*types.Header 1375 td *big.Int 1376 ) 1377 if !beaconMode && ttd != nil { 1378 td = d.blockchain.GetTd(chunkHeaders[0].ParentHash, chunkHeaders[0].Number.Uint64()-1) 1379 if td == nil { 1380 // This should never really happen, but handle gracefully for now 1381 log.Error("Failed to retrieve parent header TD", "number", chunkHeaders[0].Number.Uint64()-1, "hash", chunkHeaders[0].ParentHash) 1382 return fmt.Errorf("%w: parent TD missing", errInvalidChain) 1383 } 1384 for i, header := range chunkHeaders { 1385 td = new(big.Int).Add(td, header.Difficulty) 1386 if td.Cmp(ttd) >= 0 { 1387 // Terminal total difficulty reached, allow the last header in 1388 if new(big.Int).Sub(td, header.Difficulty).Cmp(ttd) < 0 { 1389 chunkHeaders, rejected = chunkHeaders[:i+1], chunkHeaders[i+1:] 1390 if len(rejected) > 0 { 1391 // Make a nicer user log as to the first TD truly rejected 1392 td = new(big.Int).Add(td, rejected[0].Difficulty) 1393 } 1394 } else { 1395 chunkHeaders, rejected = chunkHeaders[:i], chunkHeaders[i:] 1396 } 1397 break 1398 } 1399 } 1400 } 1401 if len(chunkHeaders) > 0 { 1402 if n, err := d.lightchain.InsertHeaderChain(chunkHeaders, frequency); err != nil { 1403 rollbackErr = err 1404 1405 // If some headers were inserted, track them as uncertain 1406 if (mode == SnapSync || frequency > 1) && n > 0 && rollback == 0 { 1407 rollback = chunkHeaders[0].Number.Uint64() 1408 } 1409 log.Warn("Invalid header encountered", "number", chunkHeaders[n].Number, "hash", chunkHashes[n], "parent", chunkHeaders[n].ParentHash, "err", err) 1410 return fmt.Errorf("%w: %v", errInvalidChain, err) 1411 } 1412 // All verifications passed, track all headers within the allowed limits 1413 if mode == SnapSync { 1414 head := chunkHeaders[len(chunkHeaders)-1].Number.Uint64() 1415 if head-rollback > uint64(fsHeaderSafetyNet) 
{ 1416 rollback = head - uint64(fsHeaderSafetyNet) 1417 } else { 1418 rollback = 1 1419 } 1420 } 1421 } 1422 if len(rejected) != 0 { 1423 // Merge threshold reached, stop importing, but don't roll back 1424 rollback = 0 1425 1426 log.Info("Legacy sync reached merge threshold", "number", rejected[0].Number, "hash", rejected[0].Hash(), "td", td, "ttd", ttd) 1427 return ErrMergeTransition 1428 } 1429 } 1430 // Unless we're doing light chains, schedule the headers for associated content retrieval 1431 if mode == FullSync || mode == SnapSync { 1432 // If we've reached the allowed number of pending headers, stall a bit 1433 for d.queue.PendingBodies() >= maxQueuedHeaders || d.queue.PendingReceipts() >= maxQueuedHeaders { 1434 select { 1435 case <-d.cancelCh: 1436 rollbackErr = errCanceled 1437 return errCanceled 1438 case <-time.After(time.Second): 1439 } 1440 } 1441 // Otherwise insert the headers for content retrieval 1442 inserts := d.queue.Schedule(chunkHeaders, chunkHashes, origin) 1443 if len(inserts) != len(chunkHeaders) { 1444 rollbackErr = fmt.Errorf("stale headers: len inserts %v len(chunk) %v", len(inserts), len(chunkHeaders)) 1445 return fmt.Errorf("%w: stale headers", errBadPeer) 1446 } 1447 } 1448 headers = headers[limit:] 1449 hashes = hashes[limit:] 1450 origin += uint64(limit) 1451 } 1452 // Update the highest block number we know if a higher one is found. 1453 d.syncStatsLock.Lock() 1454 if d.syncStatsChainHeight < origin { 1455 d.syncStatsChainHeight = origin - 1 1456 } 1457 d.syncStatsLock.Unlock() 1458 1459 // Signal the content downloaders of the availability of new tasks 1460 for _, ch := range []chan bool{d.queue.blockWakeCh, d.queue.receiptWakeCh} { 1461 select { 1462 case ch <- true: 1463 default: 1464 } 1465 } 1466 } 1467 } 1468 } 1469 1470 // processFullSyncContent takes fetch results from the queue and imports them into the chain. 1471 func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error { 1472 for { 1473 results := d.queue.Results(true) 1474 if len(results) == 0 { 1475 return nil 1476 } 1477 if d.chainInsertHook != nil { 1478 d.chainInsertHook(results) 1479 } 1480 // Although the received blocks might be all valid, a legacy PoW/PoA sync 1481 // must not accept post-merge blocks. Make sure that pre-merge blocks are 1482 // imported, but post-merge ones are rejected.
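// The batch is therefore split at the first block whose cumulative total
// difficulty reaches ttd: the transition block itself is still imported below,
// while the remainder is rejected and the sync returns ErrMergeTransition.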
// processFullSyncContent takes fetch results from the queue and imports them into the chain.
func (d *Downloader) processFullSyncContent(ttd *big.Int, beaconMode bool) error {
	for {
		results := d.queue.Results(true)
		if len(results) == 0 {
			return nil
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		// Although the received blocks might be all valid, a legacy PoW/PoA sync
		// must not accept post-merge blocks. Make sure that pre-merge blocks are
		// imported, but post-merge ones are rejected.
		var (
			rejected []*fetchResult
			td       *big.Int
		)
		if !beaconMode && ttd != nil {
			td = d.blockchain.GetTd(results[0].Header.ParentHash, results[0].Header.Number.Uint64()-1)
			if td == nil {
				// This should never really happen, but handle gracefully for now
				log.Error("Failed to retrieve parent block TD", "number", results[0].Header.Number.Uint64()-1, "hash", results[0].Header.ParentHash)
				return fmt.Errorf("%w: parent TD missing", errInvalidChain)
			}
			for i, result := range results {
				td = new(big.Int).Add(td, result.Header.Difficulty)
				if td.Cmp(ttd) >= 0 {
					// Terminal total difficulty reached, allow the last block in
					if new(big.Int).Sub(td, result.Header.Difficulty).Cmp(ttd) < 0 {
						results, rejected = results[:i+1], results[i+1:]
						if len(rejected) > 0 {
							// Make a nicer user log as to the first TD truly rejected
							td = new(big.Int).Add(td, rejected[0].Header.Difficulty)
						}
					} else {
						results, rejected = results[:i], results[i:]
					}
					break
				}
			}
		}
		if err := d.importBlockResults(results); err != nil {
			return err
		}
		if len(rejected) != 0 {
			log.Info("Legacy sync reached merge threshold", "number", rejected[0].Header.Number, "hash", rejected[0].Header.Hash(), "td", td, "ttd", ttd)
			return ErrMergeTransition
		}
	}
}

func (d *Downloader) importBlockResults(results []*fetchResult) error {
	// Check for any early termination requests
	if len(results) == 0 {
		return nil
	}
	select {
	case <-d.quitCh:
		return errCancelContentProcessing
	default:
	}
	// Retrieve a batch of results to import
	first, last := results[0].Header, results[len(results)-1].Header
	log.Debug("Inserting downloaded chain", "items", len(results),
		"firstnum", first.Number, "firsthash", first.Hash(),
		"lastnum", last.Number, "lasthash", last.Hash(),
	)
	blocks := make([]*types.Block, len(results))
	for i, result := range results {
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
	}
	// Downloaded blocks are always regarded as trusted after the transition,
	// because the downloaded chain is guided by the consensus layer.
	if index, err := d.blockchain.InsertChain(blocks); err != nil {
		if index < len(results) {
			log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
		} else {
			// The InsertChain method in blockchain.go will sometimes return an out-of-bounds index,
			// when it needs to preprocess blocks to import a sidechain.
			// The importer will put together a new list of blocks to import, which is a superset
			// of the blocks delivered from the downloader, and the indexing will be off.
			log.Debug("Downloaded item processing failed on sidechain import", "index", index, "err", err)
		}
		return fmt.Errorf("%w: %v", errInvalidChain, err)
	}
	return nil
}
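// isMergeOrChainFault is an illustrative sketch and NOT part of the original
// file (the helper name is hypothetical). It only demonstrates that, because
// the failures above are wrapped with %w, callers can branch on the sentinel
// values with errors.Is instead of comparing error strings: errInvalidChain
// usually means the delivering peer should be dropped, while ErrMergeTransition
// is the expected, clean stop of a legacy sync at the merge.
func isMergeOrChainFault(err error) (invalidChain, mergeReached bool) {
	return errors.Is(err, errInvalidChain), errors.Is(err, ErrMergeTransition)
}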
// processSnapSyncContent takes fetch results from the queue and writes them to the
// database. It also controls the synchronisation of state nodes of the pivot block.
func (d *Downloader) processSnapSyncContent() error {
	// Start syncing state of the reported head block. This should get us most of
	// the state of the pivot block.
	d.pivotLock.RLock()
	sync := d.syncState(d.pivotHeader.Root)
	d.pivotLock.RUnlock()

	defer func() {
		// The `sync` object is replaced every time the pivot moves. We need to
		// defer close the very last active one, hence the lazy evaluation vs.
		// calling defer sync.Cancel() !!!
		sync.Cancel()
	}()

	closeOnErr := func(s *stateSync) {
		if err := s.Wait(); err != nil && err != errCancelStateFetch && err != errCanceled && err != snap.ErrCancelled {
			d.queue.Close() // wake up Results
		}
	}
	go closeOnErr(sync)

	// To cater for moving pivot points, track the pivot block and subsequently
	// accumulated download results separately.
	var (
		oldPivot *fetchResult   // Locked in pivot block, might change eventually
		oldTail  []*fetchResult // Downloaded content after the pivot
	)
	for {
		// Wait for the next batch of downloaded data to be available, and if the pivot
		// block became stale, move the goalpost
		results := d.queue.Results(oldPivot == nil) // Block if we're not monitoring pivot staleness
		if len(results) == 0 {
			// If pivot sync is done, stop
			if oldPivot == nil {
				return sync.Cancel()
			}
			// If sync failed, stop
			select {
			case <-d.cancelCh:
				sync.Cancel()
				return errCanceled
			default:
			}
		}
		if d.chainInsertHook != nil {
			d.chainInsertHook(results)
		}
		// If we haven't downloaded the pivot block yet, check pivot staleness
		// notifications from the header downloader
		d.pivotLock.RLock()
		pivot := d.pivotHeader
		d.pivotLock.RUnlock()

		if oldPivot == nil {
			if pivot.Root != sync.root {
				sync.Cancel()
				sync = d.syncState(pivot.Root)

				go closeOnErr(sync)
			}
		} else {
			results = append(append([]*fetchResult{oldPivot}, oldTail...), results...)
		}
		// Split around the pivot block and process the two sides via snap/full sync
		if atomic.LoadInt32(&d.committed) == 0 {
			latest := results[len(results)-1].Header
			// If the height is above the pivot block by 2 sets, it means the pivot
			// became stale in the network and was garbage collected, so move to a
			// new pivot.
			//
			// Note that we have `reorgProtHeaderDelay` blocks withheld; those need
			// to be taken into account, otherwise we're detecting the pivot move
			// late and will drop peers due to unavailable state!!!
			if height := latest.Number.Uint64(); height >= pivot.Number.Uint64()+2*uint64(fsMinFullBlocks)-uint64(reorgProtHeaderDelay) {
				log.Warn("Pivot became stale, moving", "old", pivot.Number.Uint64(), "new", height-uint64(fsMinFullBlocks)+uint64(reorgProtHeaderDelay))
				pivot = results[len(results)-1-fsMinFullBlocks+reorgProtHeaderDelay].Header // must exist as lower old pivot is uncommitted

				d.pivotLock.Lock()
				d.pivotHeader = pivot
				d.pivotLock.Unlock()

				// Write out the pivot into the database so a rollback beyond it will
				// reenable snap sync
				rawdb.WriteLastPivotNumber(d.stateDB, pivot.Number.Uint64())
			}
		}
		P, beforeP, afterP := splitAroundPivot(pivot.Number.Uint64(), results)
		if err := d.commitSnapSyncData(beforeP, sync); err != nil {
			return err
		}
		if P != nil {
			// If new pivot block found, cancel old state retrieval and restart
			if oldPivot != P {
				sync.Cancel()
				sync = d.syncState(P.Header.Root)

				go closeOnErr(sync)
				oldPivot = P
			}
			// Wait for completion, occasionally checking for pivot staleness
			select {
			case <-sync.done:
				if sync.err != nil {
					return sync.err
				}
				if err := d.commitPivotBlock(P); err != nil {
					return err
				}
				oldPivot = nil

			case <-time.After(time.Second):
				oldTail = afterP
				continue
			}
		}
		// Snap sync done, pivot commit done, full import
		if err := d.importBlockResults(afterP); err != nil {
			return err
		}
	}
}

func splitAroundPivot(pivot uint64, results []*fetchResult) (p *fetchResult, before, after []*fetchResult) {
	if len(results) == 0 {
		return nil, nil, nil
	}
	if lastNum := results[len(results)-1].Header.Number.Uint64(); lastNum < pivot {
		// the pivot is somewhere in the future
		return nil, results, nil
	}
	// This can also be optimized, but only happens very seldom
	for _, result := range results {
		num := result.Header.Number.Uint64()
		switch {
		case num < pivot:
			before = append(before, result)
		case num == pivot:
			p = result
		default:
			after = append(after, result)
		}
	}
	return p, before, after
}
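// exampleSplitAroundPivot is an illustrative sketch and NOT part of the original
// file (the function and the literal block numbers are hypothetical, and it
// constructs fetchResult values directly purely for demonstration). With results
// numbered 8..12 and a pivot of 10, blocks 8-9 end up in `before` (committed as
// snap-sync data), block 10 is returned as the pivot itself, and 11-12 land in
// `after` (imported via the regular block import path).
func exampleSplitAroundPivot() (pivotFound bool, before, after int) {
	results := make([]*fetchResult, 0, 5)
	for n := uint64(8); n <= 12; n++ {
		results = append(results, &fetchResult{Header: &types.Header{Number: new(big.Int).SetUint64(n)}})
	}
	p, b, a := splitAroundPivot(10, results)
	return p != nil, len(b), len(a) // true, 2, 2
}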
func (d *Downloader) commitSnapSyncData(results []*fetchResult, stateSync *stateSync) error {
	// Check for any early termination requests
	if len(results) == 0 {
		return nil
	}
	select {
	case <-d.quitCh:
		return errCancelContentProcessing
	case <-stateSync.done:
		if err := stateSync.Wait(); err != nil {
			return err
		}
	default:
	}
	// Retrieve a batch of results to import
	first, last := results[0].Header, results[len(results)-1].Header
	log.Debug("Inserting snap-sync blocks", "items", len(results),
		"firstnum", first.Number, "firsthash", first.Hash(),
		"lastnum", last.Number, "lasthash", last.Hash(),
	)
	blocks := make([]*types.Block, len(results))
	receipts := make([]types.Receipts, len(results))
	for i, result := range results {
		blocks[i] = types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
		receipts[i] = result.Receipts
	}
	if index, err := d.blockchain.InsertReceiptChain(blocks, receipts, d.ancientLimit); err != nil {
		log.Debug("Downloaded item processing failed", "number", results[index].Header.Number, "hash", results[index].Header.Hash(), "err", err)
		return fmt.Errorf("%w: %v", errInvalidChain, err)
	}
	return nil
}

func (d *Downloader) commitPivotBlock(result *fetchResult) error {
	block := types.NewBlockWithHeader(result.Header).WithBody(result.Transactions, result.Uncles)
	log.Debug("Committing snap sync pivot as new head", "number", block.Number(), "hash", block.Hash())

	// Commit the pivot block as the new head, will require full sync from here on
	if _, err := d.blockchain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{result.Receipts}, d.ancientLimit); err != nil {
		return err
	}
	if err := d.blockchain.SnapSyncCommitHead(block.Hash()); err != nil {
		return err
	}
	atomic.StoreInt32(&d.committed, 1)
	return nil
}

// DeliverSnapPacket is invoked from a peer's message handler when it transmits a
// data packet for the local node to consume.
func (d *Downloader) DeliverSnapPacket(peer *snap.Peer, packet snap.Packet) error {
	switch packet := packet.(type) {
	case *snap.AccountRangePacket:
		hashes, accounts, err := packet.Unpack()
		if err != nil {
			return err
		}
		return d.SnapSyncer.OnAccounts(peer, packet.ID, hashes, accounts, packet.Proof)

	case *snap.StorageRangesPacket:
		hashset, slotset := packet.Unpack()
		return d.SnapSyncer.OnStorage(peer, packet.ID, hashset, slotset, packet.Proof)

	case *snap.ByteCodesPacket:
		return d.SnapSyncer.OnByteCodes(peer, packet.ID, packet.Codes)

	case *snap.TrieNodesPacket:
		return d.SnapSyncer.OnTrieNodes(peer, packet.ID, packet.Nodes)

	default:
		return fmt.Errorf("unexpected snap packet type: %T", packet)
	}
}

// readHeaderRange returns a list of headers, using the given last header as the base,
// and going backwards towards genesis. This method assumes that the caller already has
// placed a reasonable cap on count.
func (d *Downloader) readHeaderRange(last *types.Header, count int) []*types.Header {
	var (
		current = last
		headers []*types.Header
	)
	for {
		parent := d.lightchain.GetHeaderByHash(current.ParentHash)
		if parent == nil {
			break // The chain is not continuous, or the chain is exhausted
		}
		headers = append(headers, parent)
		if len(headers) >= count {
			break
		}
		current = parent
	}
	return headers
}
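// ascendingHeaderRange is an illustrative sketch and NOT part of the original
// file (the helper name is hypothetical). readHeaderRange yields the immediate
// parent of `last` first and then walks backwards towards genesis, so a caller
// that wants the range oldest-first simply reverses the returned slice.
func ascendingHeaderRange(d *Downloader, last *types.Header, count int) []*types.Header {
	headers := d.readHeaderRange(last, count)
	for i, j := 0, len(headers)-1; i < j; i, j = i+1, j-1 {
		headers[i], headers[j] = headers[j], headers[i]
	}
	return headers
}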