// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	"errors"
	"fmt"
	"math/big"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/lru"
	"github.com/ethereum/go-ethereum/consensus"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
)

const (
	headerCacheLimit = 512  // maximum number of recent headers kept in memory
	tdCacheLimit     = 1024 // maximum number of recent total difficulties kept in memory
	numberCacheLimit = 2048 // maximum number of recent hash->number mappings kept in memory
)

// HeaderChain implements the basic block header chain logic. It is not usable
// in itself, but rather an internal structure of core.Blockchain.
//
// HeaderChain is responsible for maintaining the header chain including the
// header query and updating.
//
// The data components maintained by HeaderChain include:
//
// - total difficulty
// - header
// - block hash -> number mapping
// - canonical number -> hash mapping
// - head header flag.
//
// It is not thread safe, the encapsulating chain structures should do the
// necessary mutex locking/unlocking.
type HeaderChain struct {
	config        *params.ChainConfig // chain configuration (fork rules etc.)
	chainDb       ethdb.Database      // backing key-value store for headers and markers
	genesisHeader *types.Header       // header of the genesis block (number 0)

	currentHeader     atomic.Pointer[types.Header] // Current head of the header chain (maybe above the block chain!)
	currentHeaderHash common.Hash                  // Hash of the current head of the header chain (prevent recomputing all the time)

	headerCache *lru.Cache[common.Hash, *types.Header] // most recent headers
	tdCache     *lru.Cache[common.Hash, *big.Int]      // most recent total difficulties
	numberCache *lru.Cache[common.Hash, uint64]        // most recent block numbers

	procInterrupt func() bool      // callback reporting whether the owning chain is shutting down
	engine        consensus.Engine // consensus engine used for header verification
}

// NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
// to the parent's interrupt semaphore.
77 func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) { 78 hc := &HeaderChain{ 79 config: config, 80 chainDb: chainDb, 81 headerCache: lru.NewCache[common.Hash, *types.Header](headerCacheLimit), 82 tdCache: lru.NewCache[common.Hash, *big.Int](tdCacheLimit), 83 numberCache: lru.NewCache[common.Hash, uint64](numberCacheLimit), 84 procInterrupt: procInterrupt, 85 engine: engine, 86 } 87 hc.genesisHeader = hc.GetHeaderByNumber(0) 88 if hc.genesisHeader == nil { 89 return nil, ErrNoGenesis 90 } 91 hc.currentHeader.Store(hc.genesisHeader) 92 if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) { 93 if chead := hc.GetHeaderByHash(head); chead != nil { 94 hc.currentHeader.Store(chead) 95 } 96 } 97 hc.currentHeaderHash = hc.CurrentHeader().Hash() 98 headHeaderGauge.Update(hc.CurrentHeader().Number.Int64()) 99 return hc, nil 100 } 101 102 // GetBlockNumber retrieves the block number belonging to the given hash 103 // from the cache or database 104 func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 { 105 if cached, ok := hc.numberCache.Get(hash); ok { 106 return &cached 107 } 108 number := rawdb.ReadHeaderNumber(hc.chainDb, hash) 109 if number != nil { 110 hc.numberCache.Add(hash, *number) 111 } 112 return number 113 } 114 115 type headerWriteResult struct { 116 status WriteStatus 117 ignored int 118 imported int 119 lastHash common.Hash 120 lastHeader *types.Header 121 } 122 123 // Reorg reorgs the local canonical chain into the specified chain. The reorg 124 // can be classified into two cases: (a) extend the local chain (b) switch the 125 // head to the given header. 126 func (hc *HeaderChain) Reorg(headers []*types.Header) error { 127 // Short circuit if nothing to reorg. 
128 if len(headers) == 0 { 129 return nil 130 } 131 // If the parent of the (first) block is already the canon header, 132 // we don't have to go backwards to delete canon blocks, but simply 133 // pile them onto the existing chain. Otherwise, do the necessary 134 // reorgs. 135 var ( 136 first = headers[0] 137 last = headers[len(headers)-1] 138 batch = hc.chainDb.NewBatch() 139 ) 140 if first.ParentHash != hc.currentHeaderHash { 141 // Delete any canonical number assignments above the new head 142 for i := last.Number.Uint64() + 1; ; i++ { 143 hash := rawdb.ReadCanonicalHash(hc.chainDb, i) 144 if hash == (common.Hash{}) { 145 break 146 } 147 rawdb.DeleteCanonicalHash(batch, i) 148 } 149 // Overwrite any stale canonical number assignments, going 150 // backwards from the first header in this import until the 151 // cross link between two chains. 152 var ( 153 header = first 154 headNumber = header.Number.Uint64() 155 headHash = header.Hash() 156 ) 157 for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash { 158 rawdb.WriteCanonicalHash(batch, headHash, headNumber) 159 if headNumber == 0 { 160 break // It shouldn't be reached 161 } 162 headHash, headNumber = header.ParentHash, header.Number.Uint64()-1 163 header = hc.GetHeader(headHash, headNumber) 164 if header == nil { 165 return fmt.Errorf("missing parent %d %x", headNumber, headHash) 166 } 167 } 168 } 169 // Extend the canonical chain with the new headers 170 for i := 0; i < len(headers)-1; i++ { 171 hash := headers[i+1].ParentHash // Save some extra hashing 172 num := headers[i].Number.Uint64() 173 rawdb.WriteCanonicalHash(batch, hash, num) 174 rawdb.WriteHeadHeaderHash(batch, hash) 175 } 176 // Write the last header 177 hash := headers[len(headers)-1].Hash() 178 num := headers[len(headers)-1].Number.Uint64() 179 rawdb.WriteCanonicalHash(batch, hash, num) 180 rawdb.WriteHeadHeaderHash(batch, hash) 181 182 if err := batch.Write(); err != nil { 183 return err 184 } 185 // Last step update all in-memory 
head header markers 186 hc.currentHeaderHash = last.Hash() 187 hc.currentHeader.Store(types.CopyHeader(last)) 188 headHeaderGauge.Update(last.Number.Int64()) 189 return nil 190 } 191 192 // WriteHeaders writes a chain of headers into the local chain, given that the 193 // parents are already known. The chain head header won't be updated in this 194 // function, the additional SetCanonical is expected in order to finish the entire 195 // procedure. 196 func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) { 197 if len(headers) == 0 { 198 return 0, nil 199 } 200 ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1) 201 if ptd == nil { 202 return 0, consensus.ErrUnknownAncestor 203 } 204 var ( 205 newTD = new(big.Int).Set(ptd) // Total difficulty of inserted chain 206 inserted []rawdb.NumberHash // Ephemeral lookup of number/hash for the chain 207 parentKnown = true // Set to true to force hc.HasHeader check the first iteration 208 batch = hc.chainDb.NewBatch() 209 ) 210 for i, header := range headers { 211 var hash common.Hash 212 // The headers have already been validated at this point, so we already 213 // know that it's a contiguous chain, where 214 // headers[i].Hash() == headers[i+1].ParentHash 215 if i < len(headers)-1 { 216 hash = headers[i+1].ParentHash 217 } else { 218 hash = header.Hash() 219 } 220 number := header.Number.Uint64() 221 newTD.Add(newTD, header.Difficulty) 222 223 // If the parent was not present, store it 224 // If the header is already known, skip it, otherwise store 225 alreadyKnown := parentKnown && hc.HasHeader(hash, number) 226 if !alreadyKnown { 227 // Irrelevant of the canonical status, write the TD and header to the database. 
228 rawdb.WriteTd(batch, hash, number, newTD) 229 hc.tdCache.Add(hash, new(big.Int).Set(newTD)) 230 231 rawdb.WriteHeader(batch, header) 232 inserted = append(inserted, rawdb.NumberHash{Number: number, Hash: hash}) 233 hc.headerCache.Add(hash, header) 234 hc.numberCache.Add(hash, number) 235 } 236 parentKnown = alreadyKnown 237 } 238 // Skip the slow disk write of all headers if interrupted. 239 if hc.procInterrupt() { 240 log.Debug("Premature abort during headers import") 241 return 0, errors.New("aborted") 242 } 243 // Commit to disk! 244 if err := batch.Write(); err != nil { 245 log.Crit("Failed to write headers", "error", err) 246 } 247 return len(inserted), nil 248 } 249 250 // writeHeadersAndSetHead writes a batch of block headers and applies the last 251 // header as the chain head if the fork choicer says it's ok to update the chain. 252 // Note: This method is not concurrent-safe with inserting blocks simultaneously 253 // into the chain, as side effects caused by reorganisations cannot be emulated 254 // without the real blocks. Hence, writing headers directly should only be done 255 // in two scenarios: pure-header mode of operation (light clients), or properly 256 // separated header/block phases (non-archive clients). 
func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *ForkChoice) (*headerWriteResult, error) {
	// Persist the headers first; the head is only moved afterwards, and only
	// if the fork choicer approves.
	inserted, err := hc.WriteHeaders(headers)
	if err != nil {
		return nil, err
	}
	var (
		lastHeader = headers[len(headers)-1]
		lastHash   = headers[len(headers)-1].Hash()
		result     = &headerWriteResult{
			status:     NonStatTy,
			ignored:    len(headers) - inserted,
			imported:   inserted,
			lastHash:   lastHash,
			lastHeader: lastHeader,
		}
	)
	// Ask the fork choicer if the reorg is necessary
	if reorg, err := forker.ReorgNeeded(hc.CurrentHeader(), lastHeader); err != nil {
		return nil, err
	} else if !reorg {
		// No head update: anything newly written belongs to a side chain.
		if inserted != 0 {
			result.status = SideStatTy
		}
		return result, nil
	}
	// Special case, all the inserted headers are already on the canonical
	// header chain, skip the reorg operation.
	if hc.GetCanonicalHash(lastHeader.Number.Uint64()) == lastHash && lastHeader.Number.Uint64() <= hc.CurrentHeader().Number.Uint64() {
		return result, nil
	}
	// Apply the reorg operation
	if err := hc.Reorg(headers); err != nil {
		return nil, err
	}
	result.status = CanonStatTy
	return result, nil
}

// ValidateHeaderChain sanity checks that the supplied headers are contiguous
// and runs the consensus engine's (parallel) header verification on them. It
// returns the index of the first failing header together with the error.
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header) (int, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 {
			hash := chain[i].Hash()
			parentHash := chain[i-1].Hash()
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", hash,
				"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash)

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, chain[i-1].Number,
				parentHash.Bytes()[:4], i, chain[i].Number, hash.Bytes()[:4], chain[i].ParentHash[:4])
		}
	}
	// Start the parallel verifier; closing abort on return tears it down if
	// we bail out early.
	abort, results := hc.engine.VerifyHeaders(hc, chain)
	defer close(abort)

	// Iterate over the headers and ensure they all check out
	for i := range chain {
		// If the chain is terminating, stop processing blocks
		if hc.procInterrupt() {
			log.Debug("Premature abort during headers verification")
			return 0, errors.New("aborted")
		}
		// Otherwise wait for headers checks and ensure they pass
		if err := <-results; err != nil {
			return i, err
		}
	}

	return 0, nil
}

// InsertHeaderChain inserts the given headers and does the reorganisations.
//
// The validity of the headers is NOT CHECKED by this method, i.e. they need to be
// validated by ValidateHeaderChain before calling InsertHeaderChain.
//
// This insert is all-or-nothing. If this returns an error, no headers were written,
// otherwise they were all processed successfully.
//
// The returned 'write status' says if the inserted headers are part of the canonical chain
// or a side chain.
339 func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time, forker *ForkChoice) (WriteStatus, error) { 340 if hc.procInterrupt() { 341 return 0, errors.New("aborted") 342 } 343 res, err := hc.writeHeadersAndSetHead(chain, forker) 344 if err != nil { 345 return 0, err 346 } 347 // Report some public statistics so the user has a clue what's going on 348 context := []interface{}{ 349 "count", res.imported, 350 "elapsed", common.PrettyDuration(time.Since(start)), 351 } 352 if last := res.lastHeader; last != nil { 353 context = append(context, "number", last.Number, "hash", res.lastHash) 354 if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute { 355 context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...) 356 } 357 } 358 if res.ignored > 0 { 359 context = append(context, []interface{}{"ignored", res.ignored}...) 360 } 361 log.Debug("Imported new block headers", context...) 362 return res.status, err 363 } 364 365 // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or 366 // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the 367 // number of blocks to be individually checked before we reach the canonical chain. 368 // 369 // Note: ancestor == 0 returns the same block, 1 returns its parent and so on. 
func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	// An ancestor below the genesis cannot exist.
	if ancestor > number {
		return common.Hash{}, 0
	}
	if ancestor == 1 {
		// in this case it is cheaper to just read the header
		if header := hc.GetHeader(hash, number); header != nil {
			return header.ParentHash, number - 1
		}
		return common.Hash{}, 0
	}
	for ancestor != 0 {
		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
			ancestorHash := rawdb.ReadCanonicalHash(hc.chainDb, number-ancestor)
			// Re-read the canonical mapping after fetching the ancestor hash;
			// presumably this guards against a concurrent reorg between the
			// two reads — the ancestor is only trusted if the mapping is
			// unchanged. NOTE(review): confirm against the callers' locking.
			if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
				number -= ancestor
				return ancestorHash, number
			}
		}
		// Off the canonical chain: walk back one header at a time, bounded by
		// the caller-supplied budget.
		if *maxNonCanonical == 0 {
			return common.Hash{}, 0
		}
		*maxNonCanonical--
		ancestor--
		header := hc.GetHeader(hash, number)
		if header == nil {
			return common.Hash{}, 0
		}
		hash = header.ParentHash
		number--
	}
	return hash, number
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
	// Short circuit if the td's already in the cache, retrieve otherwise
	if cached, ok := hc.tdCache.Get(hash); ok {
		return cached
	}
	td := rawdb.ReadTd(hc.chainDb, hash, number)
	if td == nil {
		return nil
	}
	// Cache the found td for next time and return
	hc.tdCache.Add(hash, td)
	return td
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
422 func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header { 423 // Short circuit if the header's already in the cache, retrieve otherwise 424 if header, ok := hc.headerCache.Get(hash); ok { 425 return header 426 } 427 header := rawdb.ReadHeader(hc.chainDb, hash, number) 428 if header == nil { 429 return nil 430 } 431 // Cache the found header for next time and return 432 hc.headerCache.Add(hash, header) 433 return header 434 } 435 436 // GetHeaderByHash retrieves a block header from the database by hash, caching it if 437 // found. 438 func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header { 439 number := hc.GetBlockNumber(hash) 440 if number == nil { 441 return nil 442 } 443 return hc.GetHeader(hash, *number) 444 } 445 446 // HasHeader checks if a block header is present in the database or not. 447 // In theory, if header is present in the database, all relative components 448 // like td and hash->number should be present too. 449 func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool { 450 if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) { 451 return true 452 } 453 return rawdb.HasHeader(hc.chainDb, hash, number) 454 } 455 456 // GetHeaderByNumber retrieves a block header from the database by number, 457 // caching it (associated with its hash) if found. 458 func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header { 459 hash := rawdb.ReadCanonicalHash(hc.chainDb, number) 460 if hash == (common.Hash{}) { 461 return nil 462 } 463 return hc.GetHeader(hash, number) 464 } 465 466 // GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going 467 // backwards from the given number. 468 // If the 'number' is higher than the highest local header, this method will 469 // return a best-effort response, containing the headers that we do have. 
func (hc *HeaderChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
	// If the request is for future headers, we still return the portion of
	// headers that we are able to serve
	if current := hc.CurrentHeader().Number.Uint64(); current < number {
		if count > number-current {
			count -= number - current
			number = current
		} else {
			return nil
		}
	}
	var headers []rlp.RawValue
	// If we have some of the headers in cache already, use that before going to db.
	hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
	if hash == (common.Hash{}) {
		return nil
	}
	for count > 0 {
		header, ok := hc.headerCache.Get(hash)
		if !ok {
			break
		}
		rlpData, _ := rlp.EncodeToBytes(header)
		headers = append(headers, rlpData)
		hash = header.ParentHash
		count--
		number--
	}
	// Read remaining from db
	if count > 0 {
		headers = append(headers, rawdb.ReadHeaderRange(hc.chainDb, number, count)...)
	}
	return headers
}

// GetCanonicalHash returns the hash of the canonical block at the given number.
func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
	return rawdb.ReadCanonicalHash(hc.chainDb, number)
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (hc *HeaderChain) CurrentHeader() *types.Header {
	return hc.currentHeader.Load()
}

// SetCurrentHeader sets the in-memory head header marker of the canonical chain
// as the given header.
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
	hc.currentHeader.Store(head)
	hc.currentHeaderHash = head.Hash()
	headHeaderGauge.Update(head.Number.Int64())
}

type (
	// UpdateHeadBlocksCallback is a callback function that is called by SetHead
	// before head header is updated. The method will return the actual block it
	// updated the head to (missing state) and a flag if setHead should continue
	// rewinding till that forcefully (exceeded ancient limits)
	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (*types.Header, bool)

	// DeleteBlockContentCallback is a callback function that is called by SetHead
	// before each header is deleted.
	DeleteBlockContentCallback func(ethdb.KeyValueWriter, common.Hash, uint64)
)

// SetHead rewinds the local chain to a new head. Everything above the new head
// will be deleted and the new one set.
func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	hc.setHead(head, 0, updateFn, delFn)
}

// SetHeadWithTimestamp rewinds the local chain to a new head timestamp. Everything
// above the new head will be deleted and the new one set.
func (hc *HeaderChain) SetHeadWithTimestamp(time uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	hc.setHead(0, time, updateFn, delFn)
}

// setHead rewinds the local chain to a new head block or a head timestamp.
// Everything above the new head will be deleted and the new one set.
func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	// Sanity check that there's no attempt to undo the genesis block. This is
	// a fairly synthetic case where someone enables a timestamp based fork
	// below the genesis timestamp. It's nice to not allow that instead of the
	// entire chain getting deleted.
	if headTime > 0 && hc.genesisHeader.Time > headTime {
		// Note, a critical error is quite brutal, but we should really not reach
		// this point. Since pre-timestamp based forks it was impossible to have
		// a fork before block 0, the setHead would always work. With timestamp
		// forks it becomes possible to specify below the genesis. That said, the
		// only time we setHead via timestamp is with chain config changes on the
		// startup, so failing hard there is ok.
		log.Crit("Rejecting genesis rewind via timestamp", "target", headTime, "genesis", hc.genesisHeader.Time)
	}
	var (
		parentHash common.Hash
		batch      = hc.chainDb.NewBatch()
		origin     = true
	)
	// done reports whether the rewind has reached its target; timestamp mode
	// takes precedence over block-number mode when headTime is set.
	done := func(header *types.Header) bool {
		if headTime > 0 {
			return header.Time <= headTime
		}
		return header.Number.Uint64() <= headBlock
	}
	// Rewind one header at a time, re-reading the current head each iteration.
	for hdr := hc.CurrentHeader(); hdr != nil && !done(hdr); hdr = hc.CurrentHeader() {
		num := hdr.Number.Uint64()

		// Rewind chain to new head
		parent := hc.GetHeader(hdr.ParentHash, num-1)
		if parent == nil {
			parent = hc.genesisHeader
		}
		parentHash = parent.Hash()

		// Notably, since geth has the possibility for setting the head to a low
		// height which is even lower than ancient head.
		// In order to ensure that the head is always no higher than the data in
		// the database (ancient store or active store), we need to update head
		// first then remove the relative data from the database.
		//
		// Update head first(head fast block, head full block) before deleting the data.
		markerBatch := hc.chainDb.NewBatch()
		if updateFn != nil {
			newHead, force := updateFn(markerBatch, parent)
			if force && ((headTime > 0 && newHead.Time < headTime) || (headTime == 0 && newHead.Number.Uint64() < headBlock)) {
				log.Warn("Force rewinding till ancient limit", "head", newHead.Number.Uint64())
				headBlock, headTime = newHead.Number.Uint64(), 0 // Target timestamp passed, continue rewind in block mode (cleaner)
			}
		}
		// Update head header then.
		rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
		if err := markerBatch.Write(); err != nil {
			log.Crit("Failed to update chain markers", "error", err)
		}
		hc.currentHeader.Store(parent)
		hc.currentHeaderHash = parentHash
		headHeaderGauge.Update(parent.Number.Int64())

		// If this is the first iteration, wipe any leftover data upwards too so
		// we don't end up with dangling gaps in the database
		var nums []uint64
		if origin {
			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
				nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
			}
			origin = false
		}
		nums = append(nums, num)

		// Remove the related data from the database on all sidechains
		for _, num := range nums {
			// Gather all the side fork hashes
			hashes := rawdb.ReadAllHashes(hc.chainDb, num)
			if len(hashes) == 0 {
				// No hashes in the database whatsoever, probably frozen already
				hashes = append(hashes, hdr.Hash())
			}
			for _, hash := range hashes {
				if delFn != nil {
					delFn(batch, hash, num)
				}
				rawdb.DeleteHeader(batch, hash, num)
				rawdb.DeleteTd(batch, hash, num)
			}
			rawdb.DeleteCanonicalHash(batch, num)
		}
	}
	// Flush all accumulated deletions.
	if err := batch.Write(); err != nil {
		log.Crit("Failed to rewind block", "error", err)
	}
	// Clear out any stale content from the caches
	hc.headerCache.Purge()
	hc.tdCache.Purge()
	hc.numberCache.Purge()
}

// SetGenesis sets a new genesis block header for the chain
func (hc *HeaderChain) SetGenesis(head *types.Header) {
	hc.genesisHeader = head
}

// Config retrieves the header chain's chain configuration.
func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }

// Engine retrieves the header chain's consensus engine.
656 func (hc *HeaderChain) Engine() consensus.Engine { return hc.engine } 657 658 // GetBlock implements consensus.ChainReader, and returns nil for every input as 659 // a header chain does not have blocks available for retrieval. 660 func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block { 661 return nil 662 }