github.com/core-coin/go-core/v2@v2.1.9/core/headerchain.go

// Copyright 2015 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	crand "crypto/rand"
	"errors"
	"fmt"
	"math"
	"math/big"
	mrand "math/rand"
	"sync/atomic"
	"time"

	lru "github.com/hashicorp/golang-lru"

	"github.com/core-coin/go-core/v2/xcbdb"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/consensus"
	"github.com/core-coin/go-core/v2/core/rawdb"
	"github.com/core-coin/go-core/v2/core/types"
	"github.com/core-coin/go-core/v2/log"
	"github.com/core-coin/go-core/v2/params"
)

const (
	headerCacheLimit = 512
	tdCacheLimit     = 1024
	numberCacheLimit = 2048
)

// HeaderChain implements the basic block header chain logic that is shared by
// core.BlockChain and light.LightChain. It is not usable in itself, only as
// a part of either structure.
//
// HeaderChain is responsible for maintaining the header chain, including
// header queries and updates.
//
// The components maintained by HeaderChain include: (1) total difficulty,
// (2) header, (3) block hash -> number mapping, (4) canonical number -> hash mapping,
// and (5) head header flag.
//
// It is not thread safe either; the encapsulating chain structures should do
// the necessary mutex locking/unlocking.
type HeaderChain struct {
	config *params.ChainConfig

	chainDb       xcbdb.Database
	genesisHeader *types.Header

	currentHeader     atomic.Value // Current head of the header chain (may be above the block chain!)
	currentHeaderHash common.Hash  // Hash of the current head of the header chain (prevent recomputing all the time)

	headerCache *lru.Cache // Cache for the most recent block headers
	tdCache     *lru.Cache // Cache for the most recent block total difficulties
	numberCache *lru.Cache // Cache for the most recent block numbers

	procInterrupt func() bool

	rand   *mrand.Rand
	engine consensus.Engine
}

// NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
// to the parent's interrupt semaphore.
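//
// A minimal construction sketch (the database, chain configuration, consensus
// engine and interrupt callback are assumed to be supplied by the enclosing
// chain structure, e.g. core.BlockChain):
//
//	hc, err := NewHeaderChain(chainDb, chainConfig, engine, func() bool { return false })
//	if err != nil {
//		return err // e.g. ErrNoGenesis when no genesis header is stored
//	}
//	head := hc.CurrentHeader()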
func NewHeaderChain(chainDb xcbdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
	headerCache, _ := lru.New(headerCacheLimit)
	tdCache, _ := lru.New(tdCacheLimit)
	numberCache, _ := lru.New(numberCacheLimit)

	// Seed a fast but crypto originating random generator
	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
	if err != nil {
		return nil, err
	}

	hc := &HeaderChain{
		config:        config,
		chainDb:       chainDb,
		headerCache:   headerCache,
		tdCache:       tdCache,
		numberCache:   numberCache,
		procInterrupt: procInterrupt,
		rand:          mrand.New(mrand.NewSource(seed.Int64())),
		engine:        engine,
	}

	hc.genesisHeader = hc.GetHeaderByNumber(0)
	if hc.genesisHeader == nil {
		return nil, ErrNoGenesis
	}

	hc.currentHeader.Store(hc.genesisHeader)
	if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
		if chead := hc.GetHeaderByHash(head); chead != nil {
			hc.currentHeader.Store(chead)
		}
	}
	hc.currentHeaderHash = hc.CurrentHeader().Hash()
	headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())

	return hc, nil
}

// GetBlockNumber retrieves the block number belonging to the given hash
// from the cache or database.
func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
	if cached, ok := hc.numberCache.Get(hash); ok {
		number := cached.(uint64)
		return &number
	}
	number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
	if number != nil {
		hc.numberCache.Add(hash, *number)
	}
	return number
}

type headerWriteResult struct {
	status     WriteStatus
	ignored    int
	imported   int
	lastHash   common.Hash
	lastHeader *types.Header
}

// writeHeaders writes a chain of headers into the local chain, given that the parents
// are already known. If the total difficulty of the newly inserted chain becomes
// greater than the currently known TD, the canonical chain is reorged.
//
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
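//
// A hedged sketch of how the result is meant to be interpreted (field names as
// in headerWriteResult above; locking is assumed to be handled by the caller):
//
//	res, err := hc.writeHeaders(headers)
//	if err == nil && res.status == CanonStatTy {
//		// res.lastHash/res.lastHeader form the new canonical head;
//		// res.imported headers were written, res.ignored were already known.
//	}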
func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWriteResult, err error) {
	if len(headers) == 0 {
		return &headerWriteResult{}, nil
	}
	ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1)
	if ptd == nil {
		return &headerWriteResult{}, consensus.ErrUnknownAncestor
	}
	var (
		lastNumber = headers[0].Number.Uint64() - 1 // Last successfully imported number
		lastHash   = headers[0].ParentHash          // Last imported header hash
		newTD      = new(big.Int).Set(ptd)          // Total difficulty of inserted chain

		lastHeader    *types.Header
		inserted      []numberHash // Ephemeral lookup of number/hash for the chain
		firstInserted = -1         // Index of the first non-ignored header
	)

	batch := hc.chainDb.NewBatch()
	for i, header := range headers {
		var hash common.Hash
		// The headers have already been validated at this point, so we already
		// know that it's a contiguous chain, where
		// headers[i].Hash() == headers[i+1].ParentHash
		if i < len(headers)-1 {
			hash = headers[i+1].ParentHash
		} else {
			hash = header.Hash()
		}
		number := header.Number.Uint64()
		newTD.Add(newTD, header.Difficulty)

		// If the header is already known, skip it, otherwise store
		if !hc.HasHeader(hash, number) {
			// Regardless of the canonical status, write the TD and header to the database.
			rawdb.WriteTd(batch, hash, number, newTD)
			hc.tdCache.Add(hash, new(big.Int).Set(newTD))

			rawdb.WriteHeader(batch, header)
			inserted = append(inserted, numberHash{number, hash})
			hc.headerCache.Add(hash, header)
			hc.numberCache.Add(hash, number)
			if firstInserted < 0 {
				firstInserted = i
			}
		}
		lastHeader, lastHash, lastNumber = header, hash, number
	}

	// Skip the slow disk write of all headers if interrupted.
	if hc.procInterrupt() {
		log.Debug("Premature abort during headers import")
		return &headerWriteResult{}, errors.New("aborted")
	}
	// Commit to disk!
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write headers", "error", err)
	}
	batch.Reset()

	var (
		head    = hc.CurrentHeader().Number.Uint64()
		localTD = hc.GetTd(hc.currentHeaderHash, head)
		status  = SideStatTy
	)
	// If the total difficulty is higher than our currently known TD, add the
	// headers to the canonical chain.
	// The second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := newTD.Cmp(localTD) > 0
	if !reorg && newTD.Cmp(localTD) == 0 {
		if lastNumber < head {
			reorg = true
		} else if lastNumber == head {
			reorg = mrand.Float64() < 0.5
		}
	}
	// If the parent of the (first) block is already the canon header,
	// we don't have to go backwards to delete canon blocks, but
	// simply pile them onto the existing chain
	chainAlreadyCanon := headers[0].ParentHash == hc.currentHeaderHash
	if reorg {
		// If the headers can be added to the canonical chain, adjust the
		// header chain markers (canonical indexes and head header flag).
		//
		// Note all markers should be written atomically.
		markerBatch := batch // we can reuse the batch to keep allocs down
		if !chainAlreadyCanon {
			// Delete any canonical number assignments above the new head
			for i := lastNumber + 1; ; i++ {
				hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
				if hash == (common.Hash{}) {
					break
				}
				rawdb.DeleteCanonicalHash(markerBatch, i)
			}
			// Overwrite any stale canonical number assignments, going
			// backwards from the first header in this import
			var (
				headHash   = headers[0].ParentHash          // inserted[0].parent?
				headNumber = headers[0].Number.Uint64() - 1 // inserted[0].num-1 ?
				headHeader = hc.GetHeader(headHash, headNumber)
			)
			for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
				rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber)
				headHash = headHeader.ParentHash
				headNumber = headHeader.Number.Uint64() - 1
				headHeader = hc.GetHeader(headHash, headNumber)
			}
			// If some of the older headers were already known, but obtained canon-status
			// during this import batch, then we need to write that now.
			// Further down, we continue writing the status for the ones that
			// were not already known.
			for i := 0; i < firstInserted; i++ {
				hash := headers[i].Hash()
				num := headers[i].Number.Uint64()
				rawdb.WriteCanonicalHash(markerBatch, hash, num)
				rawdb.WriteHeadHeaderHash(markerBatch, hash)
			}
		}
		// Extend the canonical chain with the new headers
		for _, hn := range inserted {
			rawdb.WriteCanonicalHash(markerBatch, hn.hash, hn.number)
			rawdb.WriteHeadHeaderHash(markerBatch, hn.hash)
		}
		if err := markerBatch.Write(); err != nil {
			log.Crit("Failed to write header markers into disk", "err", err)
		}
		markerBatch.Reset()
		// Last step: update all in-memory head header markers
		hc.currentHeaderHash = lastHash
		hc.currentHeader.Store(types.CopyHeader(lastHeader))
		headHeaderGauge.Update(lastHeader.Number.Int64())

		// Chain status is canonical since this insert was a reorg.
		// Note that all inserts which have higher TD than existing are 'reorg'.
		status = CanonStatTy
	}

	if len(inserted) == 0 {
		status = NonStatTy
	}
	return &headerWriteResult{
		status:     status,
		ignored:    len(headers) - len(inserted),
		imported:   len(inserted),
		lastHash:   lastHash,
		lastHeader: lastHeader,
	}, nil
}

func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		parentHash := chain[i-1].Hash()
		if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 || chain[i].ParentHash != parentHash {
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", chain[i].Hash(),
				"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash)

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x…], item %d is #%d [%x…] (parent [%x…])", i-1, chain[i-1].Number,
				parentHash.Bytes()[:4], i, chain[i].Number, chain[i].Hash().Bytes()[:4], chain[i].ParentHash[:4])
		}
		// If the header is a banned one, straight out abort
		if BadHashes[parentHash] {
			return i - 1, ErrBlacklistedHash
		}
		// If it's the last header in the chunk, we need to check it too
		if i == len(chain)-1 && BadHashes[chain[i].Hash()] {
			return i, ErrBlacklistedHash
		}
	}

	// Generate the list of seal verification requests, and start the parallel verifier
	seals := make([]bool, len(chain))
	if checkFreq != 0 {
		// In case of checkFreq == 0 all seals are left false.
		for i := 0; i < len(seals)/checkFreq; i++ {
			index := i*checkFreq + hc.rand.Intn(checkFreq)
			if index >= len(seals) {
				index = len(seals) - 1
			}
			seals[index] = true
		}
		// Last should always be verified to avoid junk.
		seals[len(seals)-1] = true
	}

	abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
	defer close(abort)

	// Iterate over the headers and ensure they all check out
	for i := range chain {
		// If the chain is terminating, stop processing blocks
		if hc.procInterrupt() {
			log.Debug("Premature abort during headers verification")
			return 0, errors.New("aborted")
		}
		// Otherwise wait for headers checks and ensure they pass
		if err := <-results; err != nil {
			return i, err
		}
	}

	return 0, nil
}

// InsertHeaderChain inserts the given headers.
//
// The validity of the headers is NOT CHECKED by this method, i.e. they need to be
// validated by ValidateHeaderChain before calling InsertHeaderChain.
//
// This insert is all-or-nothing. If this returns an error, no headers were written,
// otherwise they were all processed successfully.
//
// The returned 'write status' says if the inserted headers are part of the canonical chain
// or a side chain.
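//
// A minimal usage sketch (assuming the caller holds whatever locks the
// enclosing chain structure requires; checkFreq is the seal-verification
// sampling rate passed to ValidateHeaderChain):
//
//	if _, err := hc.ValidateHeaderChain(headers, checkFreq); err != nil {
//		return err
//	}
//	status, err := hc.InsertHeaderChain(headers, time.Now())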
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time) (WriteStatus, error) {
	if hc.procInterrupt() {
		return 0, errors.New("aborted")
	}
	res, err := hc.writeHeaders(chain)

	// Report some public statistics so the user has a clue what's going on
	context := []interface{}{
		"count", res.imported,
		"elapsed", common.PrettyDuration(time.Since(start)),
	}
	if err != nil {
		context = append(context, "err", err)
	}
	if last := res.lastHeader; last != nil {
		context = append(context, "number", last.Number, "hash", res.lastHash)
		if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {
			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
		}
	}
	if res.ignored > 0 {
		context = append(context, []interface{}{"ignored", res.ignored}...)
	}
	log.Info("Imported new block headers", context...)
	return res.status, err
}

// GetBlockHashesFromHash retrieves a number of block hashes starting at a given
// hash, fetching towards the genesis block.
func (hc *HeaderChain) GetBlockHashesFromHash(hash common.Hash, max uint64) []common.Hash {
	// Get the origin header from which to fetch
	header := hc.GetHeaderByHash(hash)
	if header == nil {
		return nil
	}
	// Iterate the headers until enough is collected or the genesis reached
	chain := make([]common.Hash, 0, max)
	for i := uint64(0); i < max; i++ {
		next := header.ParentHash
		if header = hc.GetHeader(next, header.Number.Uint64()-1); header == nil {
			break
		}
		chain = append(chain, next)
		if header.Number.Sign() == 0 {
			break
		}
	}
	return chain
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	if ancestor > number {
		return common.Hash{}, 0
	}
	if ancestor == 1 {
		// in this case it is cheaper to just read the header
		if header := hc.GetHeader(hash, number); header != nil {
			return header.ParentHash, number - 1
		}
		return common.Hash{}, 0
	}
	for ancestor != 0 {
		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
			ancestorHash := rawdb.ReadCanonicalHash(hc.chainDb, number-ancestor)
			if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
				number -= ancestor
				return ancestorHash, number
			}
		}
		if *maxNonCanonical == 0 {
			return common.Hash{}, 0
		}
		*maxNonCanonical--
		ancestor--
		header := hc.GetHeader(hash, number)
		if header == nil {
			return common.Hash{}, 0
		}
		hash = header.ParentHash
		number--
	}
	return hash, number
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
	// Short circuit if the td's already in the cache, retrieve otherwise
	if cached, ok := hc.tdCache.Get(hash); ok {
		return cached.(*big.Int)
	}
	td := rawdb.ReadTd(hc.chainDb, hash, number)
	if td == nil {
		return nil
	}
	// Cache the found td for next time and return
	hc.tdCache.Add(hash, td)
	return td
}

// GetTdByHash retrieves a block's total difficulty in the canonical chain from the
// database by hash, caching it if found.
func (hc *HeaderChain) GetTdByHash(hash common.Hash) *big.Int {
	number := hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return hc.GetTd(hash, *number)
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	// Short circuit if the header's already in the cache, retrieve otherwise
	if header, ok := hc.headerCache.Get(hash); ok {
		return header.(*types.Header)
	}
	header := rawdb.ReadHeader(hc.chainDb, hash, number)
	if header == nil {
		return nil
	}
	// Cache the found header for next time and return
	hc.headerCache.Add(hash, header)
	return header
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
	number := hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return hc.GetHeader(hash, *number)
}

// HasHeader checks if a block header is present in the database or not.
// In theory, if the header is present in the database, all related components
// like td and hash->number should be present too.
func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
	if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) {
		return true
	}
	return rawdb.HasHeader(hc.chainDb, hash, number)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
	hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
	if hash == (common.Hash{}) {
		return nil
	}
	return hc.GetHeader(hash, number)
}

func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
	return rawdb.ReadCanonicalHash(hc.chainDb, number)
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (hc *HeaderChain) CurrentHeader() *types.Header {
	return hc.currentHeader.Load().(*types.Header)
}

// SetCurrentHeader sets the in-memory head header marker of the canonical chain
// as the given header.
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
	hc.currentHeader.Store(head)
	hc.currentHeaderHash = head.Hash()
	headHeaderGauge.Update(head.Number.Int64())
}

type (
	// UpdateHeadBlocksCallback is a callback function that is called by SetHead
	// before the head header is updated.
	// The callback returns the block number the head was actually rewound to
	// (e.g. due to missing state) and a flag indicating whether SetHead should
	// keep rewinding forcefully to that number (because ancient limits were exceeded).
	UpdateHeadBlocksCallback func(xcbdb.KeyValueWriter, *types.Header) (uint64, bool)

	// DeleteBlockContentCallback is a callback function that is called by SetHead
	// before each header is deleted.
	DeleteBlockContentCallback func(xcbdb.KeyValueWriter, common.Hash, uint64)
)

// SetHead rewinds the local chain to a new head. Everything above the new head
// will be deleted and the new one set.
func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	var (
		parentHash common.Hash
		batch      = hc.chainDb.NewBatch()
		origin     = true
	)
	for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
		num := hdr.Number.Uint64()

		// Rewind block chain to new head.
		parent := hc.GetHeader(hdr.ParentHash, num-1)
		if parent == nil {
			parent = hc.genesisHeader
		}
		parentHash = hdr.ParentHash

		// Notably, gocore may set the head to a height that is even lower than the
		// ancient head. To ensure that the head is always no higher than the data
		// in the database (ancient store or active store), we need to update the
		// head first and only then remove the related data from the database.
		//
		// Update head first (head fast block, head full block) before deleting the data.
		markerBatch := hc.chainDb.NewBatch()
		if updateFn != nil {
			newHead, force := updateFn(markerBatch, parent)
			if force && newHead < head {
				log.Warn("Force rewinding till ancient limit", "head", newHead)
				head = newHead
			}
		}
		// Update head header then.
		rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
		if err := markerBatch.Write(); err != nil {
			log.Crit("Failed to update chain markers", "error", err)
		}
		hc.currentHeader.Store(parent)
		hc.currentHeaderHash = parentHash
		headHeaderGauge.Update(parent.Number.Int64())

		// If this is the first iteration, wipe any leftover data upwards too so
		// we don't end up with dangling gaps in the database
		var nums []uint64
		if origin {
			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
				nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
			}
			origin = false
		}
		nums = append(nums, num)

		// Remove the related data from the database on all sidechains
		for _, num := range nums {
			// Gather all the side fork hashes
			hashes := rawdb.ReadAllHashes(hc.chainDb, num)
			if len(hashes) == 0 {
				// No hashes in the database whatsoever, probably frozen already
				hashes = append(hashes, hdr.Hash())
			}
			for _, hash := range hashes {
				if delFn != nil {
					delFn(batch, hash, num)
				}
				rawdb.DeleteHeader(batch, hash, num)
				rawdb.DeleteTd(batch, hash, num)
			}
			rawdb.DeleteCanonicalHash(batch, num)
		}
	}
	// Flush all accumulated deletions.
	if err := batch.Write(); err != nil {
		log.Crit("Failed to rewind block", "error", err)
	}
	// Clear out any stale content from the caches
	hc.headerCache.Purge()
	hc.tdCache.Purge()
	hc.numberCache.Purge()
}

// SetGenesis sets a new genesis block header for the chain.
func (hc *HeaderChain) SetGenesis(head *types.Header) {
	hc.genesisHeader = head
}

// Config retrieves the header chain's chain configuration.
func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }

// Engine retrieves the header chain's consensus engine.
func (hc *HeaderChain) Engine() consensus.Engine { return hc.engine }

// GetBlock implements consensus.ChainReader, and returns nil for every input as
// a header chain does not have blocks available for retrieval.
func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	return nil
}