github.com/ethereum/go-ethereum@v1.16.1/core/headerchain.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"errors"
	"fmt"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/lru"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

const (
	headerCacheLimit = 512
	numberCacheLimit = 2048
)

// HeaderChain implements the basic block header chain logic. It is not usable
// on its own, but rather is an internal structure of core.BlockChain.
//
// HeaderChain is responsible for maintaining the header chain, including
// header queries and updates.
//
// The data components maintained by HeaderChain include:
//
//   - total difficulty
//   - header
//   - block hash -> number mapping
//   - canonical number -> hash mapping
//   - head header flag.
//
// It is not thread safe; the encapsulating chain structures should do the
// necessary mutex locking/unlocking.
type HeaderChain struct {
	config        *params.ChainConfig
	chainDb       ethdb.Database
	genesisHeader *types.Header

	currentHeader     atomic.Pointer[types.Header] // Current head of the header chain (may be above the block chain!)
	currentHeaderHash common.Hash                  // Hash of the current head of the header chain (prevent recomputing all the time)

	headerCache *lru.Cache[common.Hash, *types.Header]
	numberCache *lru.Cache[common.Hash, uint64] // most recent block numbers

	procInterrupt func() bool
	engine        consensus.Engine
}
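// The components listed above are persisted through the rawdb accessors used
// throughout this file. A rough sketch of the read side, assuming db is an
// ethdb.Database and hash names a stored header (illustration only):
//
//	number := rawdb.ReadHeaderNumber(db, hash) // block hash -> number mapping (nil if unknown)
//	if number != nil {
//		canon := rawdb.ReadCanonicalHash(db, *number) // canonical number -> hash mapping
//		header := rawdb.ReadHeader(db, hash, *number) // the header itself
//		_, _ = canon, header
//	}
//	head := rawdb.ReadHeadBlockHash(db) // head marker consulted by NewHeaderChain below
//	_ = head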
// NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
// to the parent's interrupt semaphore.
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
	hc := &HeaderChain{
		config:        config,
		chainDb:       chainDb,
		headerCache:   lru.NewCache[common.Hash, *types.Header](headerCacheLimit),
		numberCache:   lru.NewCache[common.Hash, uint64](numberCacheLimit),
		procInterrupt: procInterrupt,
		engine:        engine,
	}
	hc.genesisHeader = hc.GetHeaderByNumber(0)
	if hc.genesisHeader == nil {
		return nil, ErrNoGenesis
	}
	hc.currentHeader.Store(hc.genesisHeader)
	if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
		if chead := hc.GetHeaderByHash(head); chead != nil {
			hc.currentHeader.Store(chead)
		}
	}
	hc.currentHeaderHash = hc.CurrentHeader().Hash()
	headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())
	return hc, nil
}
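// A minimal construction sketch. The database is assumed to already contain a
// committed genesis header at number 0 (otherwise ErrNoGenesis is returned),
// and "engine" stands in for any consensus.Engine implementation; both are
// assumptions for illustration, not requirements introduced here.
//
//	db := rawdb.NewMemoryDatabase() // assumed to have the genesis written beforehand
//	hc, err := NewHeaderChain(db, params.TestChainConfig, engine, func() bool { return false })
//	if err != nil {
//		// e.g. ErrNoGenesis when block 0 is missing
//	}
//	_ = hc.CurrentHeader() // genesis, or the stored head block's header if one exists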
// GetBlockNumber retrieves the block number belonging to the given hash
// from the cache or database.
func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
	if cached, ok := hc.numberCache.Get(hash); ok {
		return &cached
	}
	number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
	if number != nil {
		hc.numberCache.Add(hash, *number)
	}
	return number
}

type headerWriteResult struct {
	status     WriteStatus
	ignored    int
	imported   int
	lastHash   common.Hash
	lastHeader *types.Header
}

// Reorg reorgs the local canonical chain into the specified chain. The reorg
// can be classified into two cases: (a) extend the local chain (b) switch the
// head to the given header.
func (hc *HeaderChain) Reorg(headers []*types.Header) error {
	// Short circuit if nothing to reorg.
	if len(headers) == 0 {
		return nil
	}
	// If the parent of the (first) block is already the canon header,
	// we don't have to go backwards to delete canon blocks, but simply
	// pile them onto the existing chain. Otherwise, do the necessary
	// reorgs.
	var (
		first = headers[0]
		last  = headers[len(headers)-1]
		batch = hc.chainDb.NewBatch()
	)
	if first.ParentHash != hc.currentHeaderHash {
		// Delete any canonical number assignments above the new head
		for i := last.Number.Uint64() + 1; ; i++ {
			hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
			if hash == (common.Hash{}) {
				break
			}
			rawdb.DeleteCanonicalHash(batch, i)
		}
		// Overwrite any stale canonical number assignments, going
		// backwards from the first header in this import until the
		// cross link between two chains.
		var (
			header     = first
			headNumber = header.Number.Uint64()
			headHash   = header.Hash()
		)
		for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
			rawdb.WriteCanonicalHash(batch, headHash, headNumber)
			if headNumber == 0 {
				break // It shouldn't be reached
			}
			headHash, headNumber = header.ParentHash, header.Number.Uint64()-1
			header = hc.GetHeader(headHash, headNumber)
			if header == nil {
				return fmt.Errorf("missing parent %d %x", headNumber, headHash)
			}
		}
	}
	// Extend the canonical chain with the new headers
	for i := 0; i < len(headers)-1; i++ {
		hash := headers[i+1].ParentHash // Save some extra hashing
		num := headers[i].Number.Uint64()
		rawdb.WriteCanonicalHash(batch, hash, num)
		rawdb.WriteHeadHeaderHash(batch, hash)
	}
	// Write the last header
	hash := headers[len(headers)-1].Hash()
	num := headers[len(headers)-1].Number.Uint64()
	rawdb.WriteCanonicalHash(batch, hash, num)
	rawdb.WriteHeadHeaderHash(batch, hash)

	if err := batch.Write(); err != nil {
		return err
	}
	// Last step: update all in-memory head header markers
	hc.currentHeaderHash = last.Hash()
	hc.currentHeader.Store(types.CopyHeader(last))
	headHeaderGauge.Update(last.Number.Int64())
	return nil
}

// WriteHeaders writes a chain of headers into the local chain, given that the
// parents are already known. The chain head header won't be updated in this
// function; an additional SetCanonical call is expected in order to finish the
// entire procedure.
func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) {
	if len(headers) == 0 {
		return 0, nil
	}
	if !hc.HasHeader(headers[0].ParentHash, headers[0].Number.Uint64()-1) {
		return 0, consensus.ErrUnknownAncestor
	}
	var (
		inserted    []rawdb.NumberHash // Ephemeral lookup of number/hash for the chain
		parentKnown = true             // Set to true to force the hc.HasHeader check on the first iteration
		batch       = hc.chainDb.NewBatch()
	)
	for i, header := range headers {
		var hash common.Hash
		// The headers have already been validated at this point, so we already
		// know that it's a contiguous chain, where
		// headers[i].Hash() == headers[i+1].ParentHash
		if i < len(headers)-1 {
			hash = headers[i+1].ParentHash
		} else {
			hash = header.Hash()
		}
		number := header.Number.Uint64()

		// If the parent was not present, store it.
		// If the header is already known, skip it, otherwise store it.
		alreadyKnown := parentKnown && hc.HasHeader(hash, number)
		if !alreadyKnown {
			rawdb.WriteHeader(batch, header)
			inserted = append(inserted, rawdb.NumberHash{Number: number, Hash: hash})
			hc.headerCache.Add(hash, header)
			hc.numberCache.Add(hash, number)
		}
		parentKnown = alreadyKnown
	}
	// Skip the slow disk write of all headers if interrupted.
	if hc.procInterrupt() {
		log.Debug("Premature abort during headers import")
		return 0, errors.New("aborted")
	}
	// Commit to disk!
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write headers", "error", err)
	}
	return len(inserted), nil
}
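// A usage sketch for WriteHeaders, assuming "headers" is a contiguous,
// pre-validated chain whose parent is already stored (otherwise
// consensus.ErrUnknownAncestor is returned). The head marker is intentionally
// left untouched; a follow-up canonicalisation step finishes the import.
//
//	n, err := hc.WriteHeaders(headers)
//	if err != nil {
//		// unknown ancestor, or the import was interrupted
//	}
//	_ = n // number of headers actually written; already-known ones are skipped
//	// hc.CurrentHeader() still points at the old head here.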
// writeHeadersAndSetHead writes a batch of block headers and applies the last
// header as the chain head.
//
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header) (*headerWriteResult, error) {
	inserted, err := hc.WriteHeaders(headers)
	if err != nil {
		return nil, err
	}
	var (
		lastHeader = headers[len(headers)-1]
		lastHash   = headers[len(headers)-1].Hash()
		result     = &headerWriteResult{
			status:     NonStatTy,
			ignored:    len(headers) - inserted,
			imported:   inserted,
			lastHash:   lastHash,
			lastHeader: lastHeader,
		}
	)
	// Special case: all the inserted headers are already on the canonical
	// header chain, skip the reorg operation.
	if hc.GetCanonicalHash(lastHeader.Number.Uint64()) == lastHash && lastHeader.Number.Uint64() <= hc.CurrentHeader().Number.Uint64() {
		return result, nil
	}
	// Apply the reorg operation
	if err := hc.Reorg(headers); err != nil {
		return nil, err
	}
	result.status = CanonStatTy
	return result, nil
}

// ValidateHeaderChain verifies that the supplied header chain is contiguous
// and conforms to consensus rules.
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header) (int, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 {
			hash, parentHash := chain[i].Hash(), chain[i-1].Hash()

			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", hash,
				"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash)

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, chain[i-1].Number,
				parentHash.Bytes()[:4], i, chain[i].Number, hash.Bytes()[:4], chain[i].ParentHash[:4])
		}
	}
	// Start the parallel verifier
	abort, results := hc.engine.VerifyHeaders(hc, chain)
	defer close(abort)

	// Iterate over the headers and ensure they all check out
	for i := range chain {
		// If the chain is terminating, stop processing blocks
		if hc.procInterrupt() {
			log.Debug("Premature abort during headers verification")
			return 0, errors.New("aborted")
		}
		// Otherwise wait for headers checks and ensure they pass
		if err := <-results; err != nil {
			return i, err
		}
	}
	return 0, nil
}

// InsertHeaderChain inserts the given headers and does the reorganisations.
//
// The validity of the headers is NOT CHECKED by this method, i.e. they need to be
// validated by ValidateHeaderChain before calling InsertHeaderChain.
//
// This insert is all-or-nothing. If this returns an error, no headers were written;
// otherwise they were all processed successfully.
//
// The returned 'write status' says if the inserted headers are part of the canonical chain
// or a side chain.
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time) (WriteStatus, error) {
	if hc.procInterrupt() {
		return 0, errors.New("aborted")
	}
	res, err := hc.writeHeadersAndSetHead(chain)
	if err != nil {
		return 0, err
	}
	// Report some public statistics so the user has a clue what's going on
	context := []interface{}{
		"count", res.imported,
		"elapsed", common.PrettyDuration(time.Since(start)),
	}
	if last := res.lastHeader; last != nil {
		context = append(context, "number", last.Number, "hash", res.lastHash)
		if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {
			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
		}
	}
	if res.ignored > 0 {
		context = append(context, []interface{}{"ignored", res.ignored}...)
	}
	log.Debug("Imported new block headers", context...)
	return res.status, err
}
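// A sketch of the expected call sequence, since InsertHeaderChain performs no
// verification on its own. "chain" is a contiguous []*types.Header supplied by
// the caller (an assumption of this illustration).
//
//	if idx, err := hc.ValidateHeaderChain(chain); err != nil {
//		log.Warn("Header chain rejected", "index", idx, "err", err)
//	} else if status, err := hc.InsertHeaderChain(chain, time.Now()); err == nil {
//		_ = status // CanonStatTy when the head now points at the last header
//	}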
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	if ancestor > number {
		return common.Hash{}, 0
	}
	if ancestor == 1 {
		// in this case it is cheaper to just read the header
		if header := hc.GetHeader(hash, number); header != nil {
			return header.ParentHash, number - 1
		}
		return common.Hash{}, 0
	}
	for ancestor != 0 {
		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
			ancestorHash := rawdb.ReadCanonicalHash(hc.chainDb, number-ancestor)
			// Re-check that the queried header is still canonical before
			// trusting the shortcut lookup.
			if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
				number -= ancestor
				return ancestorHash, number
			}
		}
		if *maxNonCanonical == 0 {
			return common.Hash{}, 0
		}
		*maxNonCanonical--
		ancestor--
		header := hc.GetHeader(hash, number)
		if header == nil {
			return common.Hash{}, 0
		}
		hash = header.ParentHash
		number--
	}
	return hash, number
}
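// A usage sketch for GetAncestor: walk 100 blocks back from a known header,
// allowing at most 16 individual lookups before the canonical shortcut must be
// reachable. The hash/number pair is assumed to identify a stored header.
//
//	limit := uint64(16)
//	ancHash, ancNum := hc.GetAncestor(hash, number, 100, &limit)
//	if ancHash == (common.Hash{}) {
//		// unresolvable: too far back, header missing, or limit exhausted
//	}
//	_ = ancNum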
// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	// Short circuit if the header's already in the cache, retrieve otherwise
	if header, ok := hc.headerCache.Get(hash); ok {
		return header
	}
	header := rawdb.ReadHeader(hc.chainDb, hash, number)
	if header == nil {
		return nil
	}
	// Cache the found header for next time and return
	hc.headerCache.Add(hash, header)
	return header
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
	number := hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return hc.GetHeader(hash, *number)
}

// HasHeader checks if a block header is present in the database or not.
// In theory, if the header is present in the database, all related components
// like the td and the hash->number mapping should be present too.
func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
	if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) {
		return true
	}
	return rawdb.HasHeader(hc.chainDb, hash, number)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
	hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return hc.GetHeader(hash, number)
}

// GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going
// backwards from the given number.
// If the 'number' is higher than the highest local header, this method will
// return a best-effort response, containing the headers that we do have.
func (hc *HeaderChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
	// If the request is for future headers, we still return the portion of
	// headers that we are able to serve
	if current := hc.CurrentHeader().Number.Uint64(); current < number {
		if count > number-current {
			count -= number - current
			number = current
		} else {
			return nil
		}
	}
	var headers []rlp.RawValue
	// If we have some of the headers in cache already, use that before going to db.
	hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
	if hash == (common.Hash{}) {
		return nil
	}
	for count > 0 {
		header, ok := hc.headerCache.Get(hash)
		if !ok {
			break
		}
		rlpData, _ := rlp.EncodeToBytes(header)
		headers = append(headers, rlpData)
		hash = header.ParentHash
		count--
		number--
	}
	// Read the remaining headers from the database
	if count > 0 {
		headers = append(headers, rawdb.ReadHeaderRange(hc.chainDb, number, count)...)
	}
	return headers
}
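// A usage sketch for GetHeadersFrom. The results are raw RLP, newest first;
// decoding back into types.Header is shown purely to illustrate the encoding
// and is not required when forwarding the bytes over the wire.
//
//	for _, raw := range hc.GetHeadersFrom(number, 64) {
//		var h types.Header
//		if err := rlp.DecodeBytes(raw, &h); err != nil {
//			break
//		}
//		// h.Number decreases by one on every iteration
//	}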
func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
	return rawdb.ReadCanonicalHash(hc.chainDb, number)
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (hc *HeaderChain) CurrentHeader() *types.Header {
	return hc.currentHeader.Load()
}

// SetCurrentHeader sets the in-memory head header marker of the canonical chain
// as the given header.
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
	hc.currentHeader.Store(head)
	hc.currentHeaderHash = head.Hash()
	headHeaderGauge.Update(head.Number.Int64())
}

type (
	// UpdateHeadBlocksCallback is a callback function that is called by SetHead
	// before the head header is updated. The callback returns the actual block
	// the head was updated to (e.g. due to missing state) and a flag indicating
	// whether setHead should forcefully continue rewinding to it (e.g. because
	// ancient limits were exceeded).
	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (*types.Header, bool)

	// DeleteBlockContentCallback is a callback function that is called by SetHead
	// before each header is deleted.
	DeleteBlockContentCallback func(ethdb.KeyValueWriter, common.Hash, uint64)
)
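// A sketch of wiring the two callbacks into SetHead below. Both are optional
// (nil is handled); the bodies are placeholders for whatever the encapsulating
// chain needs to do and are assumptions of this illustration.
//
//	updateFn := func(db ethdb.KeyValueWriter, header *types.Header) (*types.Header, bool) {
//		// move auxiliary head markers (e.g. head block) alongside the header rewind
//		return header, false // no forced extra rewinding
//	}
//	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
//		// drop bodies/receipts that belong to the unwound header
//	}
//	hc.SetHead(1_000_000, updateFn, delFn)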
// SetHead rewinds the local chain to a new head. Everything above the new head
// will be deleted and the new one set.
func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	hc.setHead(head, 0, updateFn, delFn)
}

// SetHeadWithTimestamp rewinds the local chain to a new head timestamp. Everything
// above the new head will be deleted and the new one set.
func (hc *HeaderChain) SetHeadWithTimestamp(time uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	hc.setHead(0, time, updateFn, delFn)
}

// setHead rewinds the local chain to a new head block or a head timestamp.
// Everything above the new head will be deleted and the new one set.
func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	// Sanity check that there's no attempt to undo the genesis block. This is
	// a fairly synthetic case where someone enables a timestamp based fork
	// below the genesis timestamp. It's nice to not allow that instead of the
	// entire chain getting deleted.
	if headTime > 0 && hc.genesisHeader.Time > headTime {
		// Note, a critical error is quite brutal, but we should really not reach
		// this point. Since pre-timestamp based forks it was impossible to have
		// a fork before block 0, the setHead would always work. With timestamp
		// forks it becomes possible to specify below the genesis. That said, the
		// only time we setHead via timestamp is with chain config changes on the
		// startup, so failing hard there is ok.
		log.Crit("Rejecting genesis rewind via timestamp", "target", headTime, "genesis", hc.genesisHeader.Time)
	}
	var (
		parentHash common.Hash
		batch      = hc.chainDb.NewBatch()
		origin     = true
	)
	done := func(header *types.Header) bool {
		if headTime > 0 {
			return header.Time <= headTime
		}
		return header.Number.Uint64() <= headBlock
	}
	for hdr := hc.CurrentHeader(); hdr != nil && !done(hdr); hdr = hc.CurrentHeader() {
		num := hdr.Number.Uint64()

		// Rewind chain to new head
		parent := hc.GetHeader(hdr.ParentHash, num-1)
		if parent == nil {
			parent = hc.genesisHeader
		}
		parentHash = parent.Hash()

		// Notably, geth allows setting the head to a height that is even lower
		// than the ancient head.
		// In order to ensure that the head is always no higher than the data in
		// the database (ancient store or active store), we need to update the
		// head first and then remove the related data from the database.
		//
		// Update the head markers first (head fast block, head full block) before deleting the data.
		markerBatch := hc.chainDb.NewBatch()
		if updateFn != nil {
			newHead, force := updateFn(markerBatch, parent)
			if force && ((headTime > 0 && newHead.Time < headTime) || (headTime == 0 && newHead.Number.Uint64() < headBlock)) {
				log.Warn("Force rewinding till ancient limit", "head", newHead.Number.Uint64())
				headBlock, headTime = newHead.Number.Uint64(), 0 // Target timestamp passed, continue rewind in block mode (cleaner)
			}
		}
		// Update the head header then.
		rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
		if err := markerBatch.Write(); err != nil {
			log.Crit("Failed to update chain markers", "error", err)
		}
		hc.currentHeader.Store(parent)
		hc.currentHeaderHash = parentHash
		headHeaderGauge.Update(parent.Number.Int64())

		// If this is the first iteration, wipe any leftover data upwards too so
		// we don't end up with dangling data in the database
		var nums []uint64
		if origin {
			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
				nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
			}
			origin = false
		}
		nums = append(nums, num)

		// Remove the related data from the database on all sidechains
		for _, num := range nums {
			// Gather all the side fork hashes
			hashes := rawdb.ReadAllHashes(hc.chainDb, num)
			if len(hashes) == 0 {
				// No hashes in the database whatsoever, probably frozen already
				hashes = append(hashes, hdr.Hash())
			}
			for _, hash := range hashes {
				// Remove the associated block body and receipts if required.
				//
				// If the block is in the chain freezer, then this delete operation
				// is actually ineffective.
				if delFn != nil {
					delFn(batch, hash, num)
				}
				// Remove the hash->number mapping along with the header itself
				rawdb.DeleteHeader(batch, hash, num)
			}
			// Remove the number->hash mapping
			rawdb.DeleteCanonicalHash(batch, num)
		}
	}
	// Flush all accumulated deletions.
	if err := batch.Write(); err != nil {
		log.Crit("Failed to commit batch in setHead", "err", err)
	}
	// Explicitly flush the pending writes in the key-value store to disk, ensuring
	// data durability of the previous deletions.
	if err := hc.chainDb.SyncKeyValue(); err != nil {
		log.Crit("Failed to sync the key-value store in setHead", "err", err)
	}
	// Truncate the excessive chain segments in the ancient store.
	// These are actually deferred deletions from the loop above.
	//
	// This step must be performed after synchronizing the key-value store;
	// otherwise, in the event of a panic, it's theoretically possible to
	// lose recent key-value store writes while the ancient store deletions
	// remain, leading to data inconsistency, e.g., a gap between the key-value
	// store and the ancient store created by an unclean shutdown.
	if delFn != nil {
		// Ignore the error here since the light client won't hit this path
		frozen, _ := hc.chainDb.Ancients()
		header := hc.CurrentHeader()

		// Truncate the excessive chain segment above the current chain head
		// in the ancient store.
		if header.Number.Uint64()+1 < frozen {
			_, err := hc.chainDb.TruncateHead(header.Number.Uint64() + 1)
			if err != nil {
				log.Crit("Failed to truncate head block", "err", err)
			}
		}
	}
	// Clear out any stale content from the caches
	hc.headerCache.Purge()
	hc.numberCache.Purge()
}

// SetGenesis sets a new genesis block header for the chain
func (hc *HeaderChain) SetGenesis(head *types.Header) {
	hc.genesisHeader = head
}

// Config retrieves the header chain's chain configuration.
func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }
// Engine retrieves the header chain's consensus engine.
func (hc *HeaderChain) Engine() consensus.Engine { return hc.engine }

// GetBlock implements consensus.ChainReader, and returns nil for every input as
// a header chain does not have blocks available for retrieval.
func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	return nil
}