// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package downloader

import (
	"encoding/json"
	"errors"
	"fmt"
	"math/rand"
	"sort"
	"time"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/core/rawdb"
	"github.com/theQRL/go-zond/core/types"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/zond/protocols/zond"
	"github.com/theQRL/go-zond/zonddb"
)

// scratchHeaders is the number of headers to store in a scratch space to allow
// concurrent downloads. A header is about 0.5KB in size, so there is no worry
// about using too much memory. The only catch is that we can only validate gaps
// after they're linked to the head, so the bigger the scratch space, the larger
// the potential for invalid headers.
//
// The current scratch space of 131072 headers is expected to use 64MB RAM.
const scratchHeaders = 131072

// requestHeaders is the number of headers to request from a remote peer in a single
// network packet. Although the skeleton downloader takes into consideration peer
// capacities when picking idlers, the packet size was decided to remain constant
// since headers are relatively small and it's easier to work with fixed batches
// vs. dynamic interval fillings.
const requestHeaders = 512

// errSyncLinked is an internal helper error to signal that the current sync
// cycle linked up to the genesis block, thus the skeleton syncer should ping
// the backfiller to resume. Since we already have that logic on sync start,
// piggy-back on that instead of two entrypoints.
var errSyncLinked = errors.New("sync linked")

// errSyncMerged is an internal helper error to signal that the current sync
// cycle merged with a previously aborted subchain, thus the skeleton syncer
// should abort and restart with the new state.
var errSyncMerged = errors.New("sync merged")

// errSyncReorged is an internal helper error to signal that the head chain of
// the current sync cycle was (partially) reorged, thus the skeleton syncer
// should abort and restart with the new state.
var errSyncReorged = errors.New("sync reorged")

// errTerminated is returned if the sync mechanism was terminated for this run of
// the process. This is usually the case when Geth is shutting down and some events
// might still be propagating.
var errTerminated = errors.New("terminated")

// errReorgDenied is returned if an attempt is made to extend the beacon chain
// with a new header, but it does not link up to the existing sync.
var errReorgDenied = errors.New("non-forced head reorg denied")
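
// The two constants above interact: the scratch space is carved into
// scratchHeaders/requestHeaders = 131072/512 = 256 fixed-size groups, each
// assigned to at most one peer at a time. At the ~0.5KB per header assumed
// above, a full scratch space works out to the quoted 64MB.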
func init() {
	// Tuning parameters is nice, but the scratch space must be assignable in
	// full to peers. It's a useless cornercase to support a dangling half-group.
	if scratchHeaders%requestHeaders != 0 {
		panic("Please make scratchHeaders divisible by requestHeaders")
	}
}

// subchain is a contiguous header chain segment that is backed by the database,
// but may not be linked to the live chain. The skeleton downloader may produce
// a new one of these every time it is restarted until the subchain grows large
// enough to connect with a previous subchain.
//
// The subchains use the exact same database namespace and are not disjoint from
// each other. As such, extending one to overlap the other entails reducing the
// second one first. This combined buffer model is used to avoid having to move
// data on disk when two subchains are joined together.
type subchain struct {
	Head uint64      // Block number of the newest header in the subchain
	Tail uint64      // Block number of the oldest header in the subchain
	Next common.Hash // Block hash of the next oldest header in the subchain
}

// skeletonProgress is a database entry to allow suspending and resuming a chain
// sync. As the skeleton header chain is downloaded backwards, restarts can and
// will produce temporarily disjoint subchains. There is no way to restart a
// suspended skeleton sync without prior knowledge of all prior suspension points.
type skeletonProgress struct {
	Subchains []*subchain // Disjoint subchains downloaded until now
	Finalized *uint64     // Last known finalized block number
}

// headUpdate is a notification that the beacon sync should switch to a new target.
// The update may force the new target, or only attempt to extend the current one
// and fail if that's not possible.
type headUpdate struct {
	header *types.Header // Header to update the sync target to
	final  *types.Header // Finalized header to use as thresholds
	force  bool          // Whether to force the update or only extend if possible
	errc   chan error    // Channel to signal acceptance of the new head
}

// headerRequest tracks a pending header request to ensure responses are to
// actual requests and to validate any security constraints.
//
// Concurrency note: header requests and responses are handled concurrently from
// the main runloop to allow Keccak256 hash verifications on the peer's thread and
// to drop the peer on an invalid response. The request struct must contain all
// the data to construct the response without accessing runloop internals (i.e.
// subchains). The head number is only included to allow the runloop to match a
// response to the task being synced without having yet another set of maps.
type headerRequest struct {
	peer string // Peer to which this request is assigned
	id   uint64 // Request ID of this request

	deliver chan *headerResponse // Channel to deliver successful response on
	revert  chan *headerRequest  // Channel to deliver request failure on
	cancel  chan struct{}        // Channel to track sync cancellation
	stale   chan struct{}        // Channel to signal the request was dropped

	head uint64 // Head number of the requested batch of headers
}
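
// The lifecycle of a headerRequest, as implemented below: assignTasks allocates
// it to an idle peer and spawns executeTask on a fresh goroutine; that goroutine
// either pushes a verified headerResponse on deliver, or asks the runloop to
// reschedule the task via revert. The cancel channel is closed when the sync
// cycle ends, and stale is closed once the runloop has reverted the request, so
// late signals can be told apart from live ones.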

// headerResponse is an already verified remote response to a header request.
type headerResponse struct {
	peer    *peerConnection // Peer from which this response originates
	reqid   uint64          // Request ID that this response fulfils
	headers []*types.Header // Chain of headers
}

// backfiller is a callback interface through which the skeleton sync can tell
// the downloader that it should suspend or resume backfilling on specific head
// events (e.g. suspend on forks or gaps, resume on successful linkups).
type backfiller interface {
	// suspend requests the backfiller to abort any running full or snap sync
	// based on the skeleton chain as it might be invalid. The backfiller should
	// gracefully handle multiple consecutive suspends without a resume, even
	// on initial startup.
	//
	// The method should return the last block header that has been successfully
	// backfilled, or nil if the backfiller was not resumed.
	suspend() *types.Header

	// resume requests the backfiller to start running full or snap sync based on
	// the skeleton chain as it has successfully been linked. Appending new heads
	// to the end of the chain will not result in suspend/resume cycles.
	resume()
}

// skeleton represents a header chain synchronized after the merge where blocks
// aren't validated any more via PoW in a forward fashion, rather are dictated
// and extended at the head via the beacon chain and backfilled on the original
// Ethereum block sync protocol.
//
// Since the skeleton is grown backwards from head to genesis, it is handled as
// a separate entity, not mixed in with the logical sequential transition of the
// blocks. Once the skeleton is connected to an existing, validated chain, the
// headers will be moved into the main downloader for filling and execution.
//
// As opposed to the original Ethereum block synchronization which is trustless
// (and uses a master peer to minimize the attack surface), post-merge block sync
// starts from a trusted head. As such, there is no need for a master peer any
// more and headers can be requested fully concurrently (though some batches
// might be discarded if they don't link up correctly).
//
// Although a skeleton is part of a sync cycle, it is not recreated, rather stays
// alive throughout the lifetime of the downloader. This allows it to be extended
// concurrently with the sync cycle, since extensions arrive from an API surface,
// not from within (vs. legacy Ethereum sync).
//
// Since the skeleton tracks the entire header chain until it is consumed by the
// forward block filling, it needs 0.5KB/block storage. At current mainnet sizes
// this is only possible with a disk backend. Since the skeleton is separate from
// the node's header chain, storing the headers ephemerally until sync finishes
// is wasted disk IO, but it's a price we're going to pay to keep things simple
// for now.
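//
// In terms of control flow, startup below runs one sync cycle at a time: sync
// returns errSyncLinked or errSyncMerged to restart the cycle with refreshed
// internal state, errSyncReorged to restart it on a new head, and errTerminated
// to shut the syncer down for good; any other error stalls the loop until
// termination is requested.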
type skeleton struct {
	db     zonddb.Database // Database backing the skeleton
	filler backfiller      // Chain syncer suspended/resumed by head events

	peers *peerSet                   // Set of peers we can sync from
	idles map[string]*peerConnection // Set of idle peers in the current sync cycle
	drop  peerDropFn                 // Drops a peer for misbehaving

	progress *skeletonProgress // Sync progress tracker for resumption and metrics
	started  time.Time         // Timestamp when the skeleton syncer was created
	logged   time.Time         // Timestamp when progress was last logged to the user
	pulled   uint64            // Number of headers downloaded in this run

	scratchSpace  []*types.Header // Scratch space to accumulate headers in (first = recent)
	scratchOwners []string        // Peer IDs owning chunks of the scratch space (pend or delivered)
	scratchHead   uint64          // Block number of the first item in the scratch space

	requests map[uint64]*headerRequest // Header requests currently running

	headEvents chan *headUpdate // Notification channel for new heads
	terminate  chan chan error  // Termination channel to abort sync
	terminated chan struct{}    // Channel to signal that the syncer is dead

	// Callback hooks used during testing
	syncStarting func() // callback triggered after a sync cycle is initialized but before started
}

// newSkeleton creates a new sync skeleton that tracks a potentially dangling
// header chain until it's linked into an existing set of blocks.
func newSkeleton(db zonddb.Database, peers *peerSet, drop peerDropFn, filler backfiller) *skeleton {
	sk := &skeleton{
		db:         db,
		filler:     filler,
		peers:      peers,
		drop:       drop,
		requests:   make(map[uint64]*headerRequest),
		headEvents: make(chan *headUpdate),
		terminate:  make(chan chan error),
		terminated: make(chan struct{}),
	}
	go sk.startup()
	return sk
}

// startup is an initial background loop which waits for an event to start or
// tear the syncer down. This is required to make the skeleton sync loop once
// per process but at the same time not start before the beacon chain announces
// a new (existing) head.
func (s *skeleton) startup() {
	// Close a notification channel so anyone sending us events will know if the
	// sync loop was torn down for good.
	defer close(s.terminated)

	// Wait for startup or teardown. This wait might loop a few times if a beacon
	// client requests sync head extensions, but not forced reorgs (i.e. they are
	// giving us new payloads without setting a starting head initially).
	for {
		select {
		case errc := <-s.terminate:
			// No head was announced but Geth is shutting down
			errc <- nil
			return

		case event := <-s.headEvents:
			// New head announced, start syncing to it, looping every time a current
			// cycle is terminated due to a chain event (head reorg, old chain merge).
			if !event.force {
				event.errc <- errors.New("forced head needed for startup")
				continue
			}
			event.errc <- nil // forced head accepted for startup
			head := event.header
			s.started = time.Now()

			for {
				// If the sync cycle terminated or was terminated, propagate up when
				// higher layers request termination. There's no fancy explicit error
				// signalling as the sync loop should never terminate (TM).
				newhead, err := s.sync(head)
				switch {
				case err == errSyncLinked:
					// Sync cycle linked up to the genesis block. Tear down the loop
					// and restart it so it can properly notify the backfiller. Don't
					// account for a new head.
					head = nil

				case err == errSyncMerged:
					// Subchains were merged, we just need to reinit the internal
					// state to continue from the tail of the merged chain. Don't
					// announce a new head.
					head = nil

				case err == errSyncReorged:
					// The subchain being synced got modified at the head in a
					// way that requires resyncing it. Restart sync with the new
					// head to force a cleanup.
					head = newhead

				case err == errTerminated:
					// Sync was requested to be terminated from within, stop and
					// return (no need to pass a message, was already done internally)
					return

				default:
					// Sync either successfully terminated or failed with an unhandled
					// error. Abort and wait until Geth requests a termination.
					errc := <-s.terminate
					errc <- err
					return
				}
			}
		}
	}
}

// Terminate tears down the syncer indefinitely.
func (s *skeleton) Terminate() error {
	// Request termination and fetch any errors
	errc := make(chan error)
	s.terminate <- errc
	err := <-errc

	// Wait for full shutdown (not necessary, but cleaner)
	<-s.terminated
	return err
}

// Sync starts or resumes a previous sync cycle to download and maintain a reverse
// header chain starting at the head and leading towards genesis to an available
// ancestor.
//
// This method does not block, rather it just waits until the syncer receives the
// fed header. What the syncer does with it is the syncer's problem.
func (s *skeleton) Sync(head *types.Header, final *types.Header, force bool) error {
	log.Trace("New skeleton head announced", "number", head.Number, "hash", head.Hash(), "force", force)
	errc := make(chan error)

	select {
	case s.headEvents <- &headUpdate{header: head, final: final, force: force, errc: errc}:
		return <-errc
	case <-s.terminated:
		return errTerminated
	}
}

// sync is the internal version of Sync that executes a single sync cycle, either
// until some termination condition is reached, or until the current cycle merges
// with a previously aborted run.
func (s *skeleton) sync(head *types.Header) (*types.Header, error) {
	// If we're continuing a previous merge interrupt, just access the existing
	// old state without initing from disk.
	if head == nil {
		head = rawdb.ReadSkeletonHeader(s.db, s.progress.Subchains[0].Head)
	} else {
		// Otherwise, initialize the sync, trimming any previous leftovers until
		// we're consistent with the newly requested chain head
		s.initSync(head)
	}
	// Create the scratch space to fill with concurrently downloaded headers
	s.scratchSpace = make([]*types.Header, scratchHeaders)
	defer func() { s.scratchSpace = nil }() // don't hold on to references after sync

	s.scratchOwners = make([]string, scratchHeaders/requestHeaders)
	defer func() { s.scratchOwners = nil }() // don't hold on to references after sync

	s.scratchHead = s.progress.Subchains[0].Tail - 1 // tail must not be 0!
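
	// The scratch space is reverse ordered: index 0 holds the header at number
	// scratchHead (the block just below the leading subchain's tail), index 1
	// the one below that, and so on. Each contiguous requestHeaders-sized slice
	// of it corresponds to one entry in scratchOwners.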

	// If the sync is already done, resume the backfiller. When the loop stops,
	// terminate the backfiller too.
	linked := len(s.progress.Subchains) == 1 &&
		rawdb.HasHeader(s.db, s.progress.Subchains[0].Next, s.scratchHead) &&
		rawdb.HasBody(s.db, s.progress.Subchains[0].Next, s.scratchHead) &&
		rawdb.HasReceipts(s.db, s.progress.Subchains[0].Next, s.scratchHead)
	if linked {
		s.filler.resume()
	}
	defer func() {
		// The filler needs to be suspended, but since it can block for a while
		// when there are many blocks queued up for full-sync importing, run it
		// on a separate goroutine and consume head messages that need instant
		// replies.
		done := make(chan struct{})
		go func() {
			defer close(done)
			if filled := s.filler.suspend(); filled != nil {
				// If something was filled, try to delete stale sync helpers. If
				// unsuccessful, warn the user, but not much else we can do (it's
				// a programming error, just let users report an issue and don't
				// choke in the meantime).
				if err := s.cleanStales(filled); err != nil {
					log.Error("Failed to clean stale beacon headers", "err", err)
				}
			}
		}()
		// Wait for the suspend to finish, consuming head events in the meantime
		// and dropping them on the floor.
		for {
			select {
			case <-done:
				return
			case event := <-s.headEvents:
				event.errc <- errors.New("beacon syncer reorging")
			}
		}
	}()
	// Create a set of unique channels for this sync cycle. We need these to be
	// ephemeral so a data race doesn't accidentally deliver something stale on
	// a persistent channel across syncs (yup, this happened)
	var (
		requestFails = make(chan *headerRequest)
		responses    = make(chan *headerResponse)
	)
	cancel := make(chan struct{})
	defer close(cancel)

	log.Debug("Starting reverse header sync cycle", "head", head.Number, "hash", head.Hash(), "cont", s.scratchHead)

	// Whether sync completed or not, disregard any future packets
	defer func() {
		log.Debug("Terminating reverse header sync cycle", "head", head.Number, "hash", head.Hash(), "cont", s.scratchHead)
		s.requests = make(map[uint64]*headerRequest)
	}()

	// Start tracking idle peers for task assignments
	peering := make(chan *peeringEvent, 64) // arbitrary buffer, just some burst protection

	peeringSub := s.peers.SubscribeEvents(peering)
	defer peeringSub.Unsubscribe()

	s.idles = make(map[string]*peerConnection)
	for _, peer := range s.peers.AllPeers() {
		s.idles[peer.id] = peer
	}
	// Notify any tester listening for startup events
	if s.syncStarting != nil {
		s.syncStarting()
	}
	for {
		// Something happened, try to assign new tasks to any idle peers
		if !linked {
			s.assignTasks(responses, requestFails, cancel)
		}
		// Wait for something to happen
		select {
		case event := <-peering:
			// A peer joined or left, the tasks queue and allocations need to be
			// checked for potential assignment or reassignment
			peerid := event.peer.id
			if event.join {
				log.Debug("Joining skeleton peer", "id", peerid)
				s.idles[peerid] = event.peer
			} else {
				log.Debug("Leaving skeleton peer", "id", peerid)
				s.revertRequests(peerid)
				delete(s.idles, peerid)
			}

		case errc := <-s.terminate:
			errc <- nil
			return nil, errTerminated

		case event := <-s.headEvents:
			// New head was announced, try to integrate it. If successful, nothing
			// needs to be done as the head simply extended the last range. For now
			// we don't seamlessly integrate reorgs to keep things simple. If the
			// network starts doing many mini reorgs, it might be worthwhile handling
			// a limited depth without an error.
			if reorged := s.processNewHead(event.header, event.final, event.force); reorged {
				// If a reorg is needed, and we're forcing the new head, signal
				// the syncer to tear down and start over. Otherwise, drop the
				// non-force reorg.
				if event.force {
					event.errc <- nil // forced head reorg accepted
					return event.header, errSyncReorged
				}
				event.errc <- errReorgDenied
				continue
			}
			event.errc <- nil // head extension accepted

			// New head was integrated into the skeleton chain. If the backfiller
			// is still running, it will pick it up. If it already terminated,
			// a new cycle needs to be spun up.
			if linked {
				s.filler.resume()
			}

		case req := <-requestFails:
			s.revertRequest(req)

		case res := <-responses:
			// Process the batch of headers. If through processing we managed to
			// link the current subchain to a previously downloaded one, abort the
			// sync and restart with the merged subchains.
			//
			// If we managed to link to the existing local chain or genesis block,
			// abort sync altogether.
			linked, merged := s.processResponse(res)
			if linked {
				log.Debug("Beacon sync linked to local chain")
				return nil, errSyncLinked
			}
			if merged {
				log.Debug("Beacon sync merged subchains")
				return nil, errSyncMerged
			}
			// We still have work to do, loop and repeat
		}
	}
}

// initSync attempts to get the skeleton sync into a consistent state wrt any
// past state on disk and the newly requested head to sync to. If the new head
// is nil, the method will return and continue from the previous head.
func (s *skeleton) initSync(head *types.Header) {
	// Extract the head number, we'll need it all over
	number := head.Number.Uint64()

	// Retrieve the previously saved sync progress
	if status := rawdb.ReadSkeletonSyncStatus(s.db); len(status) > 0 {
		s.progress = new(skeletonProgress)
		if err := json.Unmarshal(status, s.progress); err != nil {
			log.Error("Failed to decode skeleton sync status", "err", err)
		} else {
			// Previous sync was available, print some continuation logs
			for _, subchain := range s.progress.Subchains {
				log.Debug("Restarting skeleton subchain", "head", subchain.Head, "tail", subchain.Tail)
			}
			// Create a new subchain for the head (unless the last can be extended),
			// trimming anything it would overwrite
			headchain := &subchain{
				Head: number,
				Tail: number,
				Next: head.ParentHash,
			}
			for len(s.progress.Subchains) > 0 {
				// If the last chain is above the new head, delete altogether
				lastchain := s.progress.Subchains[0]
				if lastchain.Tail >= headchain.Tail {
					log.Debug("Dropping skeleton subchain", "head", lastchain.Head, "tail", lastchain.Tail)
					s.progress.Subchains = s.progress.Subchains[1:]
					continue
				}
				// Otherwise truncate the last chain if needed and abort trimming
				if lastchain.Head >= headchain.Tail {
					log.Debug("Trimming skeleton subchain", "oldhead", lastchain.Head, "newhead", headchain.Tail-1, "tail", lastchain.Tail)
					lastchain.Head = headchain.Tail - 1
				}
				break
			}
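			// As a worked example of the trimming above (illustrative numbers
			// only): with a previous subchain {Head: 100, Tail: 50} on disk, a
			// new head at 80 truncates it to {Head: 79, Tail: 50}, whereas a
			// new head at 50 or below drops it entirely.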
			// If the last subchain can be extended, we're lucky. Otherwise, create
			// a new subchain sync task.
			var extended bool
			if n := len(s.progress.Subchains); n > 0 {
				lastchain := s.progress.Subchains[0]
				if lastchain.Head == headchain.Tail-1 {
					lasthead := rawdb.ReadSkeletonHeader(s.db, lastchain.Head)
					if lasthead.Hash() == head.ParentHash {
						log.Debug("Extended skeleton subchain with new head", "head", headchain.Tail, "tail", lastchain.Tail)
						lastchain.Head = headchain.Tail
						extended = true
					}
				}
			}
			if !extended {
				log.Debug("Created new skeleton subchain", "head", number, "tail", number)
				s.progress.Subchains = append([]*subchain{headchain}, s.progress.Subchains...)
			}
			// Update the database with the new sync stats and insert the new
			// head header. We won't delete any trimmed skeleton headers since
			// those will be outside the index space of the many subchains and
			// the database space will be reclaimed eventually when processing
			// blocks above the current head (TODO(karalabe): don't forget).
			batch := s.db.NewBatch()

			rawdb.WriteSkeletonHeader(batch, head)
			s.saveSyncStatus(batch)

			if err := batch.Write(); err != nil {
				log.Crit("Failed to write skeleton sync status", "err", err)
			}
			return
		}
	}
	// Either we've failed to decode the previous state, or there was none. Start
	// a fresh sync with a single subchain represented by the currently sent
	// chain head.
	s.progress = &skeletonProgress{
		Subchains: []*subchain{
			{
				Head: number,
				Tail: number,
				Next: head.ParentHash,
			},
		},
	}
	batch := s.db.NewBatch()

	rawdb.WriteSkeletonHeader(batch, head)
	s.saveSyncStatus(batch)

	if err := batch.Write(); err != nil {
		log.Crit("Failed to write initial skeleton sync status", "err", err)
	}
	log.Debug("Created initial skeleton subchain", "head", number, "tail", number)
}

// saveSyncStatus marshals the remaining sync tasks into leveldb.
func (s *skeleton) saveSyncStatus(db zonddb.KeyValueWriter) {
	status, err := json.Marshal(s.progress)
	if err != nil {
		panic(err) // This can only fail during implementation
	}
	rawdb.WriteSkeletonSyncStatus(db, status)
}

// processNewHead does the internal shuffling for a new head marker and either
// accepts and integrates it into the skeleton or requests a reorg. Upon reorg,
// the syncer will tear itself down and restart with a fresh head. It is simpler
// to reconstruct the sync state than to mutate it and hope for the best.
func (s *skeleton) processNewHead(head *types.Header, final *types.Header, force bool) bool {
	// If a new finalized block was announced, update the sync process independent
	// of what happens with the sync head below
	if final != nil {
		if number := final.Number.Uint64(); s.progress.Finalized == nil || *s.progress.Finalized != number {
			s.progress.Finalized = new(uint64)
			*s.progress.Finalized = final.Number.Uint64()

			s.saveSyncStatus(s.db)
		}
	}
	// If the header cannot be inserted without interruption, signal a reorg to
	// the outer loop so it can tear down the skeleton sync and restart it
	number := head.Number.Uint64()

	lastchain := s.progress.Subchains[0]
	if lastchain.Tail >= number {
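		// The announced head is at or below the current tail, so it cannot
		// extend the leading subchain: short of the noop special case below,
		// this forces a resync from the new head.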
		// If the chain is down to a single beacon header, and it is re-announced
		// once more, ignore it instead of tearing down sync for a noop.
		if lastchain.Head == lastchain.Tail {
			if current := rawdb.ReadSkeletonHeader(s.db, number); current.Hash() == head.Hash() {
				return false
			}
		}
		// Not a noop / double head announce, abort with a reorg
		if force {
			log.Warn("Beacon chain reorged", "tail", lastchain.Tail, "head", lastchain.Head, "newHead", number)
		}
		return true
	}
	if lastchain.Head+1 < number {
		if force {
			log.Warn("Beacon chain gapped", "head", lastchain.Head, "newHead", number)
		}
		return true
	}
	if parent := rawdb.ReadSkeletonHeader(s.db, number-1); parent.Hash() != head.ParentHash {
		if force {
			log.Warn("Beacon chain forked", "ancestor", number-1, "hash", parent.Hash(), "want", head.ParentHash)
		}
		return true
	}
	// New header seems to be in the last subchain range. Unwind any extra headers
	// from the chain tip and insert the new head. We won't delete any trimmed
	// skeleton headers since those will be outside the index space of the many
	// subchains and the database space will be reclaimed eventually when processing
	// blocks above the current head (TODO(karalabe): don't forget).
	batch := s.db.NewBatch()

	rawdb.WriteSkeletonHeader(batch, head)
	lastchain.Head = number
	s.saveSyncStatus(batch)

	if err := batch.Write(); err != nil {
		log.Crit("Failed to write skeleton sync status", "err", err)
	}
	return false
}

// assignTasks attempts to match idle peers to pending header retrievals.
func (s *skeleton) assignTasks(success chan *headerResponse, fail chan *headerRequest, cancel chan struct{}) {
	// Sort the peers by download capacity to use faster ones if many are available
	idlers := &peerCapacitySort{
		peers: make([]*peerConnection, 0, len(s.idles)),
		caps:  make([]int, 0, len(s.idles)),
	}
	targetTTL := s.peers.rates.TargetTimeout()
	for _, peer := range s.idles {
		idlers.peers = append(idlers.peers, peer)
		idlers.caps = append(idlers.caps, s.peers.rates.Capacity(peer.id, zond.BlockHeadersMsg, targetTTL))
	}
	if len(idlers.peers) == 0 {
		return
	}
	sort.Sort(idlers)

	// Find header regions not yet downloading and fill them
	for task, owner := range s.scratchOwners {
		// If we're out of idle peers, stop assigning tasks
		if len(idlers.peers) == 0 {
			return
		}
		// Skip any tasks already filling
		if owner != "" {
			continue
		}
		// If we've reached the genesis, stop assigning tasks
		if uint64(task*requestHeaders) >= s.scratchHead {
			return
		}
		// Found a task and have peers available, assign it
		idle := idlers.peers[0]

		idlers.peers = idlers.peers[1:]
		idlers.caps = idlers.caps[1:]

		// Matched a pending task to an idle peer, allocate a unique request id
		var reqid uint64
		for {
			reqid = uint64(rand.Int63())
			if reqid == 0 {
				continue
			}
			if _, ok := s.requests[reqid]; ok {
				continue
			}
			break
		}
		// Generate the network query and send it to the peer
		req := &headerRequest{
			peer:    idle.id,
			id:      reqid,
			deliver: success,
			revert:  fail,
			cancel:  cancel,
			stale:   make(chan struct{}),
			head:    s.scratchHead - uint64(task*requestHeaders),
		}
		s.requests[reqid] = req
		delete(s.idles, idle.id)

		// Dispatch the request on the peer's own goroutine, which delivers the
		// result (or failure) back on the provided channels
		go s.executeTask(idle, req)

		// Inject the request into the task to block further assignments
		s.scratchOwners[task] = idle.id
	}
}
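
// To make the task/head arithmetic above concrete (numbers illustrative): with
// scratchHead = 10_000, task 0 requests the batch anchored at head 10_000,
// task 1 the batch at 9_488 (10_000 - 512), task 2 the batch at 8_976, and so
// on, each batch counting downwards from its anchor.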

// executeTask executes a single fetch request, blocking until either a result
// arrives or a timeout / cancellation is triggered. The method should be run
// on its own goroutine and will deliver on the requested channels.
func (s *skeleton) executeTask(peer *peerConnection, req *headerRequest) {
	start := time.Now()
	resCh := make(chan *zond.Response)

	// Figure out how many headers to fetch. Usually this will be a full batch,
	// but for the very tail of the chain, trim the request to the number left.
	// Since nodes may or may not return the genesis header for a batch request,
	// don't even request it. The parent hash of block #1 is enough to link.
	requestCount := requestHeaders
	if req.head < requestHeaders {
		requestCount = int(req.head)
	}
	peer.log.Trace("Fetching skeleton headers", "from", req.head, "count", requestCount)
	netreq, err := peer.peer.RequestHeadersByNumber(req.head, requestCount, 0, true, resCh)
	if err != nil {
		peer.log.Trace("Failed to request headers", "err", err)
		s.scheduleRevertRequest(req)
		return
	}
	defer netreq.Close()

	// Wait until the response arrives, the request is cancelled or times out
	ttl := s.peers.rates.TargetTimeout()

	timeoutTimer := time.NewTimer(ttl)
	defer timeoutTimer.Stop()

	select {
	case <-req.cancel:
		peer.log.Debug("Header request cancelled")
		s.scheduleRevertRequest(req)

	case <-timeoutTimer.C:
		// Header retrieval timed out, update the metrics
		peer.log.Warn("Header request timed out, dropping peer", "elapsed", ttl)
		headerTimeoutMeter.Mark(1)
		s.peers.rates.Update(peer.id, zond.BlockHeadersMsg, 0, 0)
		s.scheduleRevertRequest(req)

		// At this point we either need to drop the offending peer, or we need a
		// mechanism to allow waiting for the response and not cancel it. For now
		// let's go with dropping since the header sizes are deterministic and the
		// beacon sync runs exclusive (downloader is idle) so there should be no
		// other load to make timeouts probable. If we notice that timeouts happen
		// more often than we'd like, we can introduce a tracker for the requests
		// gone stale and monitor them. However, in that case too, we need a way
		// to protect against malicious peers never responding, so it would need
		// a second, hard-timeout mechanism.
		s.drop(peer.id)
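
	// The response path below runs a cascade of structural checks before the
	// batch is handed to the runloop: non-empty delivery, correct anchor number,
	// correct batch size (full batches above the genesis region, exact remainder
	// below it) and, finally, parent-hash continuity between the headers.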
	case res := <-resCh:
		// Headers successfully retrieved, update the metrics
		headers := *res.Res.(*zond.BlockHeadersPacket)

		headerReqTimer.Update(time.Since(start))
		s.peers.rates.Update(peer.id, zond.BlockHeadersMsg, res.Time, len(headers))

		// Cross validate the headers with the request
		switch {
		case len(headers) == 0:
			// No headers were delivered, reject the response and reschedule
			peer.log.Debug("No headers delivered")
			res.Done <- errors.New("no headers delivered")
			s.scheduleRevertRequest(req)

		case headers[0].Number.Uint64() != req.head:
			// Header batch anchored at non-requested number
			peer.log.Debug("Invalid header response head", "have", headers[0].Number, "want", req.head)
			res.Done <- errors.New("invalid header batch anchor")
			s.scheduleRevertRequest(req)

		case req.head >= requestHeaders && len(headers) != requestHeaders:
			// Invalid number of non-genesis headers delivered, reject the response and reschedule
			peer.log.Debug("Invalid non-genesis header count", "have", len(headers), "want", requestHeaders)
			res.Done <- errors.New("not enough non-genesis headers delivered")
			s.scheduleRevertRequest(req)

		case req.head < requestHeaders && uint64(len(headers)) != req.head:
			// Invalid number of genesis headers delivered, reject the response and reschedule
			peer.log.Debug("Invalid genesis header count", "have", len(headers), "want", headers[0].Number.Uint64())
			res.Done <- errors.New("not enough genesis headers delivered")
			s.scheduleRevertRequest(req)

		default:
			// Packet seems structurally valid, check hash progression and if it
			// is correct too, deliver for storage
			for i := 0; i < len(headers)-1; i++ {
				if headers[i].ParentHash != headers[i+1].Hash() {
					peer.log.Debug("Invalid hash progression", "index", i, "wantparenthash", headers[i].ParentHash, "haveparenthash", headers[i+1].Hash())
					res.Done <- errors.New("invalid hash progression")
					s.scheduleRevertRequest(req)
					return
				}
			}
			// Hash chain is valid. The delivery might still be junk as we're
			// downloading batches concurrently (so no way to link the headers
			// until gaps are filled); in that case, we'll nuke the peer when
			// we detect the fault.
			res.Done <- nil

			select {
			case req.deliver <- &headerResponse{
				peer:    peer,
				reqid:   req.id,
				headers: headers,
			}:
			case <-req.cancel:
			}
		}
	}
}

// revertRequests locates all the currently pending requests from a particular
// peer and reverts them, rescheduling for others to fulfill.
func (s *skeleton) revertRequests(peer string) {
	// Gather the requests first, reverts need the lock too
	var requests []*headerRequest
	for _, req := range s.requests {
		if req.peer == peer {
			requests = append(requests, req)
		}
	}
	// Revert all the requests matching the peer
	for _, req := range requests {
		s.revertRequest(req)
	}
}

// scheduleRevertRequest asks the event loop to clean up a request and return
// all failed retrieval tasks to the scheduler for reassignment.
func (s *skeleton) scheduleRevertRequest(req *headerRequest) {
	select {
	case req.revert <- req:
		// Sync event loop notified
	case <-req.cancel:
		// Sync cycle got cancelled
	case <-req.stale:
		// Request already reverted
	}
}

// revertRequest cleans up a request and returns all failed retrieval tasks to
// the scheduler for reassignment.
//
// Note, this needs to run on the event runloop thread to reschedule to idle peers.
// On peer threads, use scheduleRevertRequest.
func (s *skeleton) revertRequest(req *headerRequest) {
	log.Trace("Reverting header request", "peer", req.peer, "reqid", req.id)
	select {
	case <-req.stale:
		log.Trace("Header request already reverted", "peer", req.peer, "reqid", req.id)
		return
	default:
	}
	close(req.stale)

	// Remove the request from the tracked set
	delete(s.requests, req.id)

	// Mark the task as not-pending, ready for rescheduling
	s.scratchOwners[(s.scratchHead-req.head)/requestHeaders] = ""
}

// processResponse integrates a delivered batch of headers into the scratch
// space and consumes any contiguous headers off its head into the leading
// subchain. It reports whether the subchain got linked to the local chain
// (linked) and whether it merged with a previously interrupted one (merged).
func (s *skeleton) processResponse(res *headerResponse) (linked bool, merged bool) {
	res.peer.log.Trace("Processing header response", "head", res.headers[0].Number, "hash", res.headers[0].Hash(), "count", len(res.headers))

	// Whether or not the response is valid, we can mark the peer as idle and
	// notify the scheduler to assign a new task. If the response is invalid,
	// we'll drop the peer in a bit.
	s.idles[res.peer.id] = res.peer

	// Ensure the response is for a valid request
	if _, ok := s.requests[res.reqid]; !ok {
		// Some internal accounting is broken. A request either times out or it
		// gets fulfilled successfully. It should not be possible to deliver a
		// response to a non-existing request.
		res.peer.log.Error("Unexpected header packet")
		return false, false
	}
	delete(s.requests, res.reqid)

	// Insert the delivered headers into the scratch space independent of the
	// content or continuation; those will be validated in a moment
	head := res.headers[0].Number.Uint64()
	copy(s.scratchSpace[s.scratchHead-head:], res.headers)

	// If there's still a gap in the head of the scratch space, abort
	if s.scratchSpace[0] == nil {
		return false, false
	}
	// Try to consume any head headers, validating the boundary conditions
	batch := s.db.NewBatch()
	for s.scratchSpace[0] != nil {
		// Next batch of headers available, cross-reference with the subchain
		// we are extending and either accept or discard
		if s.progress.Subchains[0].Next != s.scratchSpace[0].Hash() {
			// Print a log message to track what's going on
			tail := s.progress.Subchains[0].Tail
			want := s.progress.Subchains[0].Next
			have := s.scratchSpace[0].Hash()

			log.Warn("Invalid skeleton headers", "peer", s.scratchOwners[0], "number", tail-1, "want", want, "have", have)

			// The peer delivered junk, or at least not the subchain we are
			// syncing to. Free up the scratch space and assignment, reassign
			// and drop the original peer.
			for i := 0; i < requestHeaders; i++ {
				s.scratchSpace[i] = nil
			}
			s.drop(s.scratchOwners[0])
			s.scratchOwners[0] = ""
			break
		}
		// Scratch delivery matches required subchain, deliver the batch of
		// headers and push the subchain forward
		var consumed int
		for _, header := range s.scratchSpace[:requestHeaders] {
			if header != nil { // nil when the genesis is reached
				consumed++

				rawdb.WriteSkeletonHeader(batch, header)
				s.pulled++

				s.progress.Subchains[0].Tail--
				s.progress.Subchains[0].Next = header.ParentHash

				// If we've reached an existing block in the chain, stop retrieving
				// headers. Note, if we want to support light clients with the same
				// code we'd need to switch here based on the downloader mode. That
				// said, there's no such functionality for now, so don't complicate.
				//
				// In the case of full sync it would be enough to check for the body,
				// but even a full syncing node will generate a receipt once block
				// processing is done, so it's just one more "needless" check.
				//
				// The weird cascading checks are done to minimize the database reads.
				linked = rawdb.HasHeader(s.db, header.ParentHash, header.Number.Uint64()-1) &&
					rawdb.HasBody(s.db, header.ParentHash, header.Number.Uint64()-1) &&
					rawdb.HasReceipts(s.db, header.ParentHash, header.Number.Uint64()-1)
				if linked {
					break
				}
			}
		}
		head := s.progress.Subchains[0].Head
		tail := s.progress.Subchains[0].Tail
		next := s.progress.Subchains[0].Next

		log.Trace("Primary subchain extended", "head", head, "tail", tail, "next", next)

		// If the beacon chain was linked to the local chain, completely swap out
		// all internal progress and abort header synchronization.
		if linked {
			// Linking into the local chain should also mean that there are no
			// leftover subchains, but in the case of importing the blocks via
			// the engine API, we will not push the subchains forward. This will
			// lead to a gap between an old sync cycle and a future one.
			if subchains := len(s.progress.Subchains); subchains > 1 {
				switch {
				// If there are only 2 subchains - the current one and an older
				// one - and the old one consists of a single block, then it's
				// the expected new sync cycle after some propagated blocks. Log
				// it for debugging purposes, explicitly clean and don't escalate.
				case subchains == 2 && s.progress.Subchains[1].Head == s.progress.Subchains[1].Tail:
					// Remove the leftover skeleton header associated with old
					// skeleton chain only if it's not covered by the current
					// skeleton range.
					if s.progress.Subchains[1].Head < s.progress.Subchains[0].Tail {
						log.Debug("Cleaning previous beacon sync state", "head", s.progress.Subchains[1].Head)
						rawdb.DeleteSkeletonHeader(batch, s.progress.Subchains[1].Head)
					}
					// Drop the leftover skeleton chain since it's stale.
					s.progress.Subchains = s.progress.Subchains[:1]

				// If there are more than two subchains, or the leftover subchain
				// spans more than a single header, the syncer's internal state is
				// corrupted. Do try to fix it, but be very vocal about the fault.
				default:
					var context []interface{}

					for i := range s.progress.Subchains[1:] {
						context = append(context, fmt.Sprintf("stale_head_%d", i+1))
						context = append(context, s.progress.Subchains[i+1].Head)
						context = append(context, fmt.Sprintf("stale_tail_%d", i+1))
						context = append(context, s.progress.Subchains[i+1].Tail)
						context = append(context, fmt.Sprintf("stale_next_%d", i+1))
						context = append(context, s.progress.Subchains[i+1].Next)
					}
					log.Error("Cleaning spurious beacon sync leftovers", context...)
					s.progress.Subchains = s.progress.Subchains[:1]

					// Note, here we didn't actually delete the headers at all,
					// just the metadata. We could implement a cleanup mechanism,
					// but further modifying corrupted state is kind of asking
					// for it. Unless there's a good enough reason to risk it,
					// better to live with the small database junk.
				}
			}
			break
		}
		// Batch of headers consumed, shift the download window forward
		copy(s.scratchSpace, s.scratchSpace[requestHeaders:])
		for i := 0; i < requestHeaders; i++ {
			s.scratchSpace[scratchHeaders-i-1] = nil
		}
		copy(s.scratchOwners, s.scratchOwners[1:])
		s.scratchOwners[scratchHeaders/requestHeaders-1] = ""

		s.scratchHead -= uint64(consumed)

		// If the subchain extended into the next subchain, we need to handle
		// the overlap. Since there could be many overlaps (come on), do this
		// in a loop.
		for len(s.progress.Subchains) > 1 && s.progress.Subchains[1].Head >= s.progress.Subchains[0].Tail {
			// Extract some stats from the second subchain
			head := s.progress.Subchains[1].Head
			tail := s.progress.Subchains[1].Tail
			next := s.progress.Subchains[1].Next

			// Since we just overwrote part of the next subchain, we need to trim
			// its head independent of matching or mismatching content
			if s.progress.Subchains[1].Tail >= s.progress.Subchains[0].Tail {
				// Fully overwritten, get rid of the subchain as a whole
				log.Debug("Previous subchain fully overwritten", "head", head, "tail", tail, "next", next)
				s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...)
				continue
			} else {
				// Partially overwritten, trim the head to the overwritten size
				log.Debug("Previous subchain partially overwritten", "head", head, "tail", tail, "next", next)
				s.progress.Subchains[1].Head = s.progress.Subchains[0].Tail - 1
			}
			// If the old subchain is an extension of the new one, merge the two
			// and let the skeleton syncer restart (to clean internal state)
			if rawdb.ReadSkeletonHeader(s.db, s.progress.Subchains[1].Head).Hash() == s.progress.Subchains[0].Next {
				log.Debug("Previous subchain merged", "head", head, "tail", tail, "next", next)
				s.progress.Subchains[0].Tail = s.progress.Subchains[1].Tail
				s.progress.Subchains[0].Next = s.progress.Subchains[1].Next

				s.progress.Subchains = append(s.progress.Subchains[:1], s.progress.Subchains[2:]...)
				merged = true
			}
		}
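		// To make the overlap handling above concrete (illustrative numbers):
		// with subchains [{Head: 1000, Tail: 700}, {Head: 850, Tail: 600}], the
		// older chain is partially overwritten and trimmed to {Head: 699, Tail:
		// 600}; if header 699's hash then matches the leading chain's Next, the
		// two merge into a single {Head: 1000, Tail: 600} subchain.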
		// If subchains were merged, all further available headers in the scratch
		// space are invalid since we skipped ahead. Stop processing the scratch
		// space to avoid dropping peers thinking they delivered invalid data.
		if merged {
			break
		}
	}
	s.saveSyncStatus(batch)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write skeleton headers and progress", "err", err)
	}
	// Print a progress report making the UX a bit nicer
	left := s.progress.Subchains[0].Tail - 1
	if linked {
		left = 0
	}
	if time.Since(s.logged) > 8*time.Second || left == 0 {
		s.logged = time.Now()

		if s.pulled == 0 {
			log.Info("Beacon sync starting", "left", left)
		} else {
			eta := float64(time.Since(s.started)) / float64(s.pulled) * float64(left)
			log.Info("Syncing beacon headers", "downloaded", s.pulled, "left", left, "eta", common.PrettyDuration(eta))
		}
	}
	return linked, merged
}

// cleanStales removes previously synced beacon headers that have become stale
// due to the downloader backfilling past the tracked tail.
func (s *skeleton) cleanStales(filled *types.Header) error {
	number := filled.Number.Uint64()
	log.Trace("Cleaning stale beacon headers", "filled", number, "hash", filled.Hash())

	// If the filled header is below the linked subchain, something's corrupted
	// internally. Report an error and refuse to do anything.
	if number < s.progress.Subchains[0].Tail {
		return fmt.Errorf("filled header below beacon header tail: %d < %d", number, s.progress.Subchains[0].Tail)
	}
	// Subchain seems trimmable, push the tail forward up to the last
	// filled header and delete everything before it - if available. In
	// case we filled past the head, recreate the subchain with a new
	// head to keep it consistent with the data on disk.
	var (
		start = s.progress.Subchains[0].Tail // start deleting from the first known header
		end   = number                       // delete until the requested threshold
		batch = s.db.NewBatch()
	)
	s.progress.Subchains[0].Tail = number
	s.progress.Subchains[0].Next = filled.ParentHash

	if s.progress.Subchains[0].Head < number {
		// If more headers were filled than available, push the entire
		// subchain forward to keep tracking the node's block imports
		end = s.progress.Subchains[0].Head + 1 // delete the entire original range, including the head
		s.progress.Subchains[0].Head = number  // assign a new head (tail is already assigned to this)

		// The entire original skeleton chain was deleted and a new one
		// defined. Make sure the new single-header chain gets pushed to
		// disk to keep internal state consistent.
		rawdb.WriteSkeletonHeader(batch, filled)
	}
	// Execute the trimming and the potential rewiring of the progress
	s.saveSyncStatus(batch)
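	// Deleting a long range can overflow a single database batch, so the loop
	// below flushes periodically. The invariant kept throughout is that the
	// persisted sync metadata always describes what is actually on disk: before
	// each intermediate flush the progress entry is pointed at the current
	// deletion cursor, then restored once the batch is reset.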
	for n := start; n < end; n++ {
		// If the batch grew too big, flush it and continue with a new batch.
		// The catch is that the sync metadata needs to reflect the actually
		// flushed state, so temporarily change the subchain progress and
		// revert after the flush.
		if batch.ValueSize() >= zonddb.IdealBatchSize {
			tmpTail := s.progress.Subchains[0].Tail
			tmpNext := s.progress.Subchains[0].Next

			s.progress.Subchains[0].Tail = n
			s.progress.Subchains[0].Next = rawdb.ReadSkeletonHeader(s.db, n).ParentHash
			s.saveSyncStatus(batch)

			if err := batch.Write(); err != nil {
				log.Crit("Failed to write beacon trim data", "err", err)
			}
			batch.Reset()

			s.progress.Subchains[0].Tail = tmpTail
			s.progress.Subchains[0].Next = tmpNext
			s.saveSyncStatus(batch)
		}
		rawdb.DeleteSkeletonHeader(batch, n)
	}
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write beacon trim data", "err", err)
	}
	return nil
}

// Bounds retrieves the current head and tail tracked by the skeleton syncer
// and optionally the last known finalized header if any was announced and if
// it is still in the sync range. This method is used by the backfiller, whose
// life cycle is controlled by the skeleton syncer.
//
// Note, the method will not use the internal state of the skeleton, but will
// rather blindly pull stuff from the database. This is fine, because the
// backfiller will only run when the skeleton chain is fully downloaded and
// stable. There might be new heads appended, but those are atomic from the
// perspective of this method. Any head reorg will first tear down the
// backfiller and only then make the modification.
func (s *skeleton) Bounds() (head *types.Header, tail *types.Header, final *types.Header, err error) {
	// Read the current sync progress from disk and figure out the current head.
	// Although there's a lot of error handling here, these are mostly as sanity
	// checks to avoid crashing if a programming error happens. These should not
	// happen in live code.
	status := rawdb.ReadSkeletonSyncStatus(s.db)
	if len(status) == 0 {
		return nil, nil, nil, errors.New("beacon sync not yet started")
	}
	progress := new(skeletonProgress)
	if err := json.Unmarshal(status, progress); err != nil {
		return nil, nil, nil, err
	}
	head = rawdb.ReadSkeletonHeader(s.db, progress.Subchains[0].Head)
	if head == nil {
		return nil, nil, nil, fmt.Errorf("head skeleton header %d is missing", progress.Subchains[0].Head)
	}
	tail = rawdb.ReadSkeletonHeader(s.db, progress.Subchains[0].Tail)
	if tail == nil {
		return nil, nil, nil, fmt.Errorf("tail skeleton header %d is missing", progress.Subchains[0].Tail)
	}
	if progress.Finalized != nil && tail.Number.Uint64() <= *progress.Finalized && *progress.Finalized <= head.Number.Uint64() {
		final = rawdb.ReadSkeletonHeader(s.db, *progress.Finalized)
		if final == nil {
			return nil, nil, nil, fmt.Errorf("finalized skeleton header %d is missing", *progress.Finalized)
		}
	}
	return head, tail, final, nil
}

// Header retrieves a specific header tracked by the skeleton syncer. This method
// is meant to be used by the backfiller, whose life cycle is controlled by the
// skeleton syncer.
//
// Note, outside the permitted runtimes, this method might return nil results and
// subsequent calls might return headers from different chains.
func (s *skeleton) Header(number uint64) *types.Header {
	return rawdb.ReadSkeletonHeader(s.db, number)
}
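
// exampleWalkSkeleton is a minimal sketch (hypothetical helper, not used by the
// syncer) of how a backfiller-style consumer could walk the range tracked by
// the skeleton via Bounds and Header. It assumes it only runs while backfilling
// is permitted, per the contract documented on Bounds above.
func exampleWalkSkeleton(s *skeleton, visit func(*types.Header)) error {
	head, tail, _, err := s.Bounds()
	if err != nil {
		return err // beacon sync not yet started, nothing to walk
	}
	// Iterate the tracked range from tail to head, fetching each header by
	// number from the skeleton's database-backed index.
	for n := tail.Number.Uint64(); n <= head.Number.Uint64(); n++ {
		if header := s.Header(n); header != nil {
			visit(header)
		}
	}
	return nil
}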