github.com/Evanesco-Labs/go-evanesco@v1.0.1/core/headerchain.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	crand "crypto/rand"
	"errors"
	"fmt"
	"math"
	"math/big"
	mrand "math/rand"
	"sync/atomic"
	"time"

	"github.com/Evanesco-Labs/go-evanesco/common"
	"github.com/Evanesco-Labs/go-evanesco/consensus"
	"github.com/Evanesco-Labs/go-evanesco/core/rawdb"
	"github.com/Evanesco-Labs/go-evanesco/core/types"
	"github.com/Evanesco-Labs/go-evanesco/ethdb"
	"github.com/Evanesco-Labs/go-evanesco/log"
	"github.com/Evanesco-Labs/go-evanesco/params"
	lru "github.com/hashicorp/golang-lru"
)

const (
	headerCacheLimit = 512
	tdCacheLimit     = 1024
	numberCacheLimit = 2048
)

// HeaderChain implements the basic block header chain logic that is shared by
// core.BlockChain and light.LightChain. It is not usable in itself, only as
// a part of either structure.
//
// HeaderChain is responsible for maintaining the header chain, including
// header queries and updates.
//
// The components maintained by headerchain include: (1) total difficulty,
// (2) header, (3) block hash -> number mapping, (4) canonical number -> hash mapping
// and (5) head header flag.
//
// It is not thread safe either; the encapsulating chain structures should do
// the necessary mutex locking/unlocking.
type HeaderChain struct {
	config *params.ChainConfig

	chainDb       ethdb.Database
	genesisHeader *types.Header

	currentHeader     atomic.Value // Current head of the header chain (may be above the block chain!)
	currentHeaderHash common.Hash  // Hash of the current head of the header chain (prevent recomputing all the time)

	headerCache *lru.Cache // Cache for the most recent block headers
	tdCache     *lru.Cache // Cache for the most recent block total difficulties
	numberCache *lru.Cache // Cache for the most recent block numbers

	procInterrupt func() bool

	rand   *mrand.Rand
	engine consensus.Engine
}
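
// The five components listed above map onto the rawdb schema roughly as
// sketched below. This helper is illustrative only (it is not part of the
// upstream go-ethereum file) and assumes the fork keeps the standard rawdb
// accessors unchanged.
func headerChainSchemaSketch(db ethdb.Database, hash common.Hash, number uint64) {
	_ = rawdb.ReadTd(db, hash, number)      // (1) total difficulty, keyed by hash and number
	_ = rawdb.ReadHeader(db, hash, number)  // (2) header body
	_ = rawdb.ReadHeaderNumber(db, hash)    // (3) block hash -> number mapping
	_ = rawdb.ReadCanonicalHash(db, number) // (4) canonical number -> hash mapping
	_ = rawdb.ReadHeadHeaderHash(db)        // (5) head header flag
}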

// NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
// to the parent's interrupt semaphore.
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
	headerCache, _ := lru.New(headerCacheLimit)
	tdCache, _ := lru.New(tdCacheLimit)
	numberCache, _ := lru.New(numberCacheLimit)

	// Seed a fast but crypto originating random generator
	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
	if err != nil {
		return nil, err
	}

	hc := &HeaderChain{
		config:        config,
		chainDb:       chainDb,
		headerCache:   headerCache,
		tdCache:       tdCache,
		numberCache:   numberCache,
		procInterrupt: procInterrupt,
		rand:          mrand.New(mrand.NewSource(seed.Int64())),
		engine:        engine,
	}

	hc.genesisHeader = hc.GetHeaderByNumber(0)
	if hc.genesisHeader == nil {
		return nil, ErrNoGenesis
	}

	hc.currentHeader.Store(hc.genesisHeader)
	if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
		if chead := hc.GetHeaderByHash(head); chead != nil {
			hc.currentHeader.Store(chead)
		}
	}
	hc.currentHeaderHash = hc.CurrentHeader().Hash()
	headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())

	return hc, nil
}
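
// newHeaderChainSketch shows the expected construction order: the genesis
// header must already be committed to db before NewHeaderChain is called,
// otherwise ErrNoGenesis is returned. This is a minimal, illustrative sketch
// (not part of the upstream file); the database and consensus engine are
// assumed to be supplied by the caller, and params.TestChainConfig stands in
// for a real chain configuration.
func newHeaderChainSketch(db ethdb.Database, engine consensus.Engine) (*HeaderChain, error) {
	// The interrupt callback lets the enclosing chain abort long-running
	// imports; returning false means "never interrupt".
	neverInterrupt := func() bool { return false }

	hc, err := NewHeaderChain(db, params.TestChainConfig, engine, neverInterrupt)
	if err != nil {
		// Typically ErrNoGenesis when block 0 has not been written yet.
		return nil, err
	}
	return hc, nil
}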

// GetBlockNumber retrieves the block number belonging to the given hash
// from the cache or database.
func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
	if cached, ok := hc.numberCache.Get(hash); ok {
		number := cached.(uint64)
		return &number
	}
	number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
	if number != nil {
		hc.numberCache.Add(hash, *number)
	}
	return number
}

type headerWriteResult struct {
	status     WriteStatus
	ignored    int
	imported   int
	lastHash   common.Hash
	lastHeader *types.Header
}

// writeHeaders writes a chain of headers into the local chain, given that the parents
// are already known. If the total difficulty of the newly inserted chain becomes
// greater than the current known TD, the canonical chain is reorged.
//
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWriteResult, err error) {
	if len(headers) == 0 {
		return &headerWriteResult{}, nil
	}
	ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1)
	if ptd == nil {
		return &headerWriteResult{}, consensus.ErrUnknownAncestor
	}
	var (
		lastNumber = headers[0].Number.Uint64() - 1 // Last successfully imported number
		lastHash   = headers[0].ParentHash          // Last imported header hash
		newTD      = new(big.Int).Set(ptd)          // Total difficulty of inserted chain

		lastHeader    *types.Header
		inserted      []numberHash // Ephemeral lookup of number/hash for the chain
		firstInserted = -1         // Index of the first non-ignored header
	)

	batch := hc.chainDb.NewBatch()
	parentKnown := true // Set to true to force hc.HasHeader check the first iteration
	for i, header := range headers {
		var hash common.Hash
		// The headers have already been validated at this point, so we already
		// know that it's a contiguous chain, where
		// headers[i].Hash() == headers[i+1].ParentHash
		if i < len(headers)-1 {
			hash = headers[i+1].ParentHash
		} else {
			hash = header.Hash()
		}
		number := header.Number.Uint64()
		newTD.Add(newTD, header.Difficulty)

		// If the parent was not present, store it.
		// If the header is already known, skip it, otherwise store it.
		alreadyKnown := parentKnown && hc.HasHeader(hash, number)
		if !alreadyKnown {
			// Regardless of the canonical status, write the TD and header to the database.
			rawdb.WriteTd(batch, hash, number, newTD)
			hc.tdCache.Add(hash, new(big.Int).Set(newTD))

			rawdb.WriteHeader(batch, header)
			inserted = append(inserted, numberHash{number, hash})
			hc.headerCache.Add(hash, header)
			hc.numberCache.Add(hash, number)
			if firstInserted < 0 {
				firstInserted = i
			}
		}
		parentKnown = alreadyKnown
		lastHeader, lastHash, lastNumber = header, hash, number
	}

	// Skip the slow disk write of all headers if interrupted.
	if hc.procInterrupt() {
		log.Debug("Premature abort during headers import")
		return &headerWriteResult{}, errors.New("aborted")
	}
	// Commit to disk!
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write headers", "error", err)
	}
	batch.Reset()

	var (
		head    = hc.CurrentHeader().Number.Uint64()
		localTD = hc.GetTd(hc.currentHeaderHash, head)
		status  = SideStatTy
	)
	// If the total difficulty is higher than our known total difficulty, add the
	// new chain to the canonical chain.
	// The second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := newTD.Cmp(localTD) > 0
	if !reorg && newTD.Cmp(localTD) == 0 {
		if lastNumber < head {
			reorg = true
		} else if lastNumber == head {
			reorg = mrand.Float64() < 0.5
		}
	}
	// If the parent of the (first) block is already the canon header,
	// we don't have to go backwards to delete canon blocks, but
	// simply pile them onto the existing chain
	chainAlreadyCanon := headers[0].ParentHash == hc.currentHeaderHash
	if reorg {
		// If the header can be added into the canonical chain, adjust the
		// header chain markers (canonical indexes and head header flag).
		//
		// Note all markers should be written atomically.
		markerBatch := batch // we can reuse the batch to keep allocs down
		if !chainAlreadyCanon {
			// Delete any canonical number assignments above the new head
			for i := lastNumber + 1; ; i++ {
				hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
				if hash == (common.Hash{}) {
					break
				}
				rawdb.DeleteCanonicalHash(markerBatch, i)
			}
			// Overwrite any stale canonical number assignments, going
			// backwards from the first header in this import
			var (
				headHash   = headers[0].ParentHash          // inserted[0].parent?
				headNumber = headers[0].Number.Uint64() - 1 // inserted[0].num-1 ?
				headHeader = hc.GetHeader(headHash, headNumber)
			)
			for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
				rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber)
				headHash = headHeader.ParentHash
				headNumber = headHeader.Number.Uint64() - 1
				headHeader = hc.GetHeader(headHash, headNumber)
			}
			// If some of the older headers were already known, but obtained canon-status
			// during this import batch, then we need to write that now.
			// Further down, we continue writing the status for the ones that
			// were not already known.
			for i := 0; i < firstInserted; i++ {
				hash := headers[i].Hash()
				num := headers[i].Number.Uint64()
				rawdb.WriteCanonicalHash(markerBatch, hash, num)
				rawdb.WriteHeadHeaderHash(markerBatch, hash)
			}
		}
		// Extend the canonical chain with the new headers
		for _, hn := range inserted {
			rawdb.WriteCanonicalHash(markerBatch, hn.hash, hn.number)
			rawdb.WriteHeadHeaderHash(markerBatch, hn.hash)
		}
		if err := markerBatch.Write(); err != nil {
			log.Crit("Failed to write header markers into disk", "err", err)
		}
		markerBatch.Reset()
		// Last step: update all in-memory head header markers
		hc.currentHeaderHash = lastHash
		hc.currentHeader.Store(types.CopyHeader(lastHeader))
		headHeaderGauge.Update(lastHeader.Number.Int64())

		// Chain status is canonical since this insert was a reorg.
		// Note that all inserts which have higher TD than the existing chain are 'reorgs'.
		status = CanonStatTy
	}

	if len(inserted) == 0 {
		status = NonStatTy
	}
	return &headerWriteResult{
		status:     status,
		ignored:    len(headers) - len(inserted),
		imported:   len(inserted),
		lastHash:   lastHash,
		lastHeader: lastHeader,
	}, nil
}
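
// writeStatusSketch spells out how the status reported by writeHeaders is
// meant to be read. It is illustrative only (not part of the upstream file)
// and simply restates the three outcomes produced above.
func writeStatusSketch(res *headerWriteResult) string {
	switch res.status {
	case CanonStatTy:
		return "headers extended or reorged the canonical chain"
	case SideStatTy:
		return "headers were stored on a side chain (no total difficulty advantage)"
	case NonStatTy:
		return "all headers were already known; nothing new was written"
	default:
		return "unknown write status"
	}
}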

func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 {
			hash := chain[i].Hash()
			parentHash := chain[i-1].Hash()
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", hash,
				"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash)

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, chain[i-1].Number,
				parentHash.Bytes()[:4], i, chain[i].Number, hash.Bytes()[:4], chain[i].ParentHash[:4])
		}
		// If the header is a banned one, straight out abort
		if BadHashes[chain[i].ParentHash] {
			return i - 1, ErrBlacklistedHash
		}
		// If it's the last header in the chunk, we need to check it too
		if i == len(chain)-1 && BadHashes[chain[i].Hash()] {
			return i, ErrBlacklistedHash
		}
	}

	// Generate the list of seal verification requests, and start the parallel verifier
	seals := make([]bool, len(chain))
	if checkFreq != 0 {
		// In case of checkFreq == 0 all seals are left false.
		for i := 0; i <= len(seals)/checkFreq; i++ {
			index := i*checkFreq + hc.rand.Intn(checkFreq)
			if index >= len(seals) {
				index = len(seals) - 1
			}
			seals[index] = true
		}
		// Last should always be verified to avoid junk.
		seals[len(seals)-1] = true
	}

	abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
	defer close(abort)

	// Iterate over the headers and ensure they all check out
	for i := range chain {
		// If the chain is terminating, stop processing blocks
		if hc.procInterrupt() {
			log.Debug("Premature abort during headers verification")
			return 0, errors.New("aborted")
		}
		// Otherwise wait for headers checks and ensure they pass
		if err := <-results; err != nil {
			return i, err
		}
	}

	return 0, nil
}

// InsertHeaderChain inserts the given headers.
//
// The validity of the headers is NOT CHECKED by this method, i.e. they need to be
// validated by ValidateHeaderChain before calling InsertHeaderChain.
//
// This insert is all-or-nothing. If this returns an error, no headers were written,
// otherwise they were all processed successfully.
//
// The returned 'write status' says if the inserted headers are part of the canonical chain
// or a side chain.
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time) (WriteStatus, error) {
	if hc.procInterrupt() {
		return 0, errors.New("aborted")
	}
	res, err := hc.writeHeaders(chain)

	// Report some public statistics so the user has a clue what's going on
	context := []interface{}{
		"count", res.imported,
		"elapsed", common.PrettyDuration(time.Since(start)),
	}
	if err != nil {
		context = append(context, "err", err)
	}
	if last := res.lastHeader; last != nil {
		context = append(context, "number", last.Number, "hash", res.lastHash)
		if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {
			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
		}
	}
	if res.ignored > 0 {
		context = append(context, []interface{}{"ignored", res.ignored}...)
	}
	log.Info("Imported new block headers", context...)
	return res.status, err
}
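
// importHeadersSketch illustrates the documented contract: headers must be
// validated with ValidateHeaderChain before being handed to InsertHeaderChain.
// This is a minimal sketch (not part of the upstream file); the checkFreq of
// 100 is an arbitrary example value meaning roughly one seal check per 100
// headers, with the last header always verified.
func (hc *HeaderChain) importHeadersSketch(headers []*types.Header) (WriteStatus, error) {
	if n, err := hc.ValidateHeaderChain(headers, 100); err != nil {
		log.Warn("Header validation failed", "index", n, "err", err)
		return 0, err
	}
	// The insert is all-or-nothing; the returned status reports whether the
	// headers ended up on the canonical chain or a side chain.
	return hc.InsertHeaderChain(headers, time.Now())
}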

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	// Get the origin header from which to fetch
	header := hc.GetHeaderByHash(hash)
	if header == nil {
		return nil
	}
	// Iterate the headers until enough is collected or the genesis reached
	chain := make([]common.Hash, 0, max)
	for i := uint64(0); i < max; i++ {
		next := header.ParentHash
		if header = hc.GetHeader(next, header.Number.Uint64()-1); header == nil {
			break
		}
		chain = append(chain, next)
		if header.Number.Sign() == 0 {
			break
		}
	}
	return chain
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	if ancestor > number {
		return common.Hash{}, 0
	}
	if ancestor == 1 {
		// in this case it is cheaper to just read the header
		if header := hc.GetHeader(hash, number); header != nil {
			return header.ParentHash, number - 1
		}
		return common.Hash{}, 0
	}
	for ancestor != 0 {
		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
			ancestorHash := rawdb.ReadCanonicalHash(hc.chainDb, number-ancestor)
			if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
				number -= ancestor
				return ancestorHash, number
			}
		}
		if *maxNonCanonical == 0 {
			return common.Hash{}, 0
		}
		*maxNonCanonical--
		ancestor--
		header := hc.GetHeader(hash, number)
		if header == nil {
			return common.Hash{}, 0
		}
		hash = header.ParentHash
		number--
	}
	return hash, number
}
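
// getAncestorSketch shows the GetAncestor calling convention: ancestor == 0 is
// the block itself, ancestor == 1 its parent, and maxNonCanonical bounds how
// many non-canonical headers may be walked back one by one before giving up.
// Illustrative only (not part of the upstream file); the depth of 8 and the
// budget of 100 are arbitrary example values.
func (hc *HeaderChain) getAncestorSketch(hash common.Hash, number uint64) (common.Hash, uint64) {
	maxNonCanonical := uint64(100)
	// Fetch the 8th ancestor of (hash, number), i.e. the header 8 blocks
	// below it on the chain that (hash, number) belongs to.
	return hc.GetAncestor(hash, number, 8, &maxNonCanonical)
}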

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
	// Short circuit if the td's already in the cache, retrieve otherwise
	if cached, ok := hc.tdCache.Get(hash); ok {
		return cached.(*big.Int)
	}
	td := rawdb.ReadTd(hc.chainDb, hash, number)
	if td == nil {
		return nil
	}
	// Cache the found td for next time and return
	hc.tdCache.Add(hash, td)
	return td
}

// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (hc *HeaderChain) GetTdByHash(hash common.Hash) *big.Int {
	number := hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return hc.GetTd(hash, *number)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	// Short circuit if the header's already in the cache, retrieve otherwise
	if header, ok := hc.headerCache.Get(hash); ok {
		return header.(*types.Header)
	}
	header := rawdb.ReadHeader(hc.chainDb, hash, number)
	if header == nil {
		return nil
	}
	// Cache the found header for next time and return
	hc.headerCache.Add(hash, header)
	return header
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
	number := hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return hc.GetHeader(hash, *number)
}

// HasHeader checks if a block header is present in the database or not.
// In theory, if the header is present in the database, all related components
// like td and hash->number should be present too.
func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
	if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) {
		return true
	}
	return rawdb.HasHeader(hc.chainDb, hash, number)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
	hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return hc.GetHeader(hash, number)
}

// GetCanonicalHash retrieves the hash of the canonical block at the given number.
func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
	return rawdb.ReadCanonicalHash(hc.chainDb, number)
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (hc *HeaderChain) CurrentHeader() *types.Header {
	return hc.currentHeader.Load().(*types.Header)
}

// SetCurrentHeader sets the in-memory head header marker of the canonical chain
// as the given header.
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
	hc.currentHeader.Store(head)
	hc.currentHeaderHash = head.Hash()
	headHeaderGauge.Update(head.Number.Int64())
}

type (
	// UpdateHeadBlocksCallback is a callback function that is called by SetHead
	// before the head header is updated. The method returns the block number the
	// head has actually been updated to (missing state) and a flag indicating
	// whether SetHead should forcefully continue rewinding to that number
	// (exceeded ancient limits).
	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (uint64, bool)

	// DeleteBlockContentCallback is a callback function that is called by SetHead
	// before each header is deleted.
	DeleteBlockContentCallback func(ethdb.KeyValueWriter, common.Hash, uint64)
)
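
// exampleDeleteBlockContent is an illustrative DeleteBlockContentCallback (not
// part of the upstream file). A full node would typically delete block bodies
// and receipts alongside the headers that SetHead removes; a pure header chain
// can simply pass nil for both callbacks. It assumes the standard go-ethereum
// rawdb body/receipt schema is unchanged in this fork.
func exampleDeleteBlockContent(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
	rawdb.DeleteBody(db, hash, num)     // remove transactions and uncles
	rawdb.DeleteReceipts(db, hash, num) // remove the matching receipts
}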

// SetHead rewinds the local chain to a new head. Everything above the new head
// will be deleted and the new one set.
func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	var (
		parentHash common.Hash
		batch      = hc.chainDb.NewBatch()
		origin     = true
	)
	for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
		num := hdr.Number.Uint64()

		// Rewind block chain to new head.
		parent := hc.GetHeader(hdr.ParentHash, num-1)
		if parent == nil {
			parent = hc.genesisHeader
		}
		parentHash = hdr.ParentHash

		// Notably, geth allows setting the head to a height that is even lower
		// than the ancient head. In order to ensure that the head is always no
		// higher than the data in the database (ancient store or active store),
		// we need to update the head first and then remove the related data
		// from the database.
		//
		// Update the head markers first (head fast block, head full block)
		// before deleting the data.
		markerBatch := hc.chainDb.NewBatch()
		if updateFn != nil {
			newHead, force := updateFn(markerBatch, parent)
			if force && newHead < head {
				log.Warn("Force rewinding till ancient limit", "head", newHead)
				head = newHead
			}
		}
		// Update the head header then.
		rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
		if err := markerBatch.Write(); err != nil {
			log.Crit("Failed to update chain markers", "error", err)
		}
		hc.currentHeader.Store(parent)
		hc.currentHeaderHash = parentHash
		headHeaderGauge.Update(parent.Number.Int64())

		// If this is the first iteration, wipe any leftover data upwards too so
		// we don't end up with dangling gaps in the database
		var nums []uint64
		if origin {
			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
				nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
			}
			origin = false
		}
		nums = append(nums, num)

		// Remove the related data from the database on all sidechains
		for _, num := range nums {
			// Gather all the side fork hashes
			hashes := rawdb.ReadAllHashes(hc.chainDb, num)
			if len(hashes) == 0 {
				// No hashes in the database whatsoever, probably frozen already
				hashes = append(hashes, hdr.Hash())
			}
			for _, hash := range hashes {
				if delFn != nil {
					delFn(batch, hash, num)
				}
				rawdb.DeleteHeader(batch, hash, num)
				rawdb.DeleteTd(batch, hash, num)
			}
			rawdb.DeleteCanonicalHash(batch, num)
		}
	}
	// Flush all accumulated deletions.
	if err := batch.Write(); err != nil {
		log.Crit("Failed to rewind block", "error", err)
	}
	// Clear out any stale content from the caches
	hc.headerCache.Purge()
	hc.tdCache.Purge()
	hc.numberCache.Purge()
}

// SetGenesis sets a new genesis block header for the chain
func (hc *HeaderChain) SetGenesis(head *types.Header) {
	hc.genesisHeader = head
}

// Config retrieves the header chain's chain configuration.
func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }

// Engine retrieves the header chain's consensus engine.
func (hc *HeaderChain) Engine() consensus.Engine { return hc.engine }

// GetBlock implements consensus.ChainReader, and returns nil for every input as
// a header chain does not have blocks available for retrieval.
func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	return nil
}
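
// rewindSketch shows a minimal SetHead call for a header-only chain: with nil
// callbacks, only headers, total difficulties and canonical markers above the
// given height are removed. Illustrative only, not part of the upstream file.
func (hc *HeaderChain) rewindSketch(head uint64) {
	hc.SetHead(head, nil, nil)
	// After the rewind, the in-memory head is at most the requested height.
	log.Info("Rewound header chain", "head", hc.CurrentHeader().Number)
}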