// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package core

import (
	crand "crypto/rand"
	"errors"
	"fmt"
	"math"
	"math/big"
	mrand "math/rand"
	"sort"
	"sync/atomic"
	"time"

	lru "github.com/hashicorp/golang-lru"
	"github.com/ubiq/go-ubiq/v6/common"
	"github.com/ubiq/go-ubiq/v6/consensus"
	"github.com/ubiq/go-ubiq/v6/core/rawdb"
	"github.com/ubiq/go-ubiq/v6/core/types"
	"github.com/ubiq/go-ubiq/v6/ethdb"
	"github.com/ubiq/go-ubiq/v6/log"
	"github.com/ubiq/go-ubiq/v6/params"
)

const (
	headerCacheLimit = 512  // Maximum number of headers kept in the header LRU cache
	tdCacheLimit     = 1024 // Maximum number of total difficulties kept in the TD LRU cache
	numberCacheLimit = 2048 // Maximum number of hash->number entries kept in the number LRU cache
	hashCacheLimit   = 2048 // Maximum number of number->hash entries kept in the hash LRU cache
	medianTimeBlocks = 11   // Window of trailing blocks examined by CalcPastMedianTime
)

// HeaderChain implements the basic block header chain logic that is shared by
// core.BlockChain and light.LightChain. It is not usable in itself, only as
// a part of either structure.
//
// HeaderChain is responsible for maintaining the header chain including the
// header query and updating.
//
// The components maintained by headerchain includes: (1) total difficulty
// (2) header (3) block hash -> number mapping (4) canonical number -> hash mapping
// and (5) head header flag.
//
// It is not thread safe either, the encapsulating chain structures should do
// the necessary mutex locking/unlocking.
type HeaderChain struct {
	config *params.ChainConfig

	chainDb       ethdb.Database // Backing database for headers, TDs and canonical mappings
	genesisHeader *types.Header  // Block zero header, resolved once at construction

	currentHeader     atomic.Value // Current head of the header chain (may be above the block chain!)
	currentHeaderHash common.Hash  // Hash of the current head of the header chain (prevent recomputing all the time)

	headerCache *lru.Cache // Cache for the most recent block headers
	tdCache     *lru.Cache // Cache for the most recent block total difficulties
	numberCache *lru.Cache // Cache for the most recent block numbers
	hashCache   *lru.Cache // Cache of the most recent block hashes

	procInterrupt func() bool // Callback reporting whether the owning chain is shutting down

	rand   *mrand.Rand      // Deterministic PRNG, seeded from crypto/rand at construction
	engine consensus.Engine // Consensus engine used for header seal verification
}

// NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
// to the parent's interrupt semaphore.
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
	// LRU construction only errors on non-positive sizes; the limits are
	// compile-time constants, so the errors are safe to discard here.
	headerCache, _ := lru.New(headerCacheLimit)
	tdCache, _ := lru.New(tdCacheLimit)
	numberCache, _ := lru.New(numberCacheLimit)
	hashCache, _ := lru.New(hashCacheLimit)

	// Seed a fast but crypto originating random generator
	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
	if err != nil {
		return nil, err
	}

	hc := &HeaderChain{
		config:        config,
		chainDb:       chainDb,
		headerCache:   headerCache,
		tdCache:       tdCache,
		numberCache:   numberCache,
		hashCache:     hashCache,
		procInterrupt: procInterrupt,
		rand:          mrand.New(mrand.NewSource(seed.Int64())),
		engine:        engine,
	}

	// The genesis header must already exist in the database; without it the
	// chain has no anchor and cannot be used.
	hc.genesisHeader = hc.GetHeaderByNumber(0)
	if hc.genesisHeader == nil {
		return nil, ErrNoGenesis
	}

	// Start at genesis, then restore the persisted head header if one is
	// available and resolvable.
	hc.currentHeader.Store(hc.genesisHeader)
	if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
		if chead := hc.GetHeaderByHash(head); chead != nil {
			hc.currentHeader.Store(chead)
		}
	}
	hc.currentHeaderHash = hc.CurrentHeader().Hash()
	headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())

	return hc, nil
}

// GetBlockNumber retrieves the block number belonging to the given hash
// from the cache or database. It returns nil when the hash is unknown.
func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
	if cached, ok := hc.numberCache.Get(hash); ok {
		number := cached.(uint64)
		return &number
	}
	number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
	if number != nil {
		hc.numberCache.Add(hash, *number)
	}
	return number
}

// headerWriteResult summarises the outcome of a writeHeaders call.
type headerWriteResult struct {
	status     WriteStatus   // Canonical/side/none status of the written headers
	ignored    int           // Number of headers skipped because they were already known
	imported   int           // Number of headers actually written
	lastHash   common.Hash   // Hash of the last processed header
	lastHeader *types.Header // Last processed header
}

// WriteHeaders writes a chain of headers into the local chain, given that the parents
// are already known.
// If the total difficulty of the newly inserted chain becomes
// greater than the current known TD, the canonical chain is reorged.
//
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (hc *HeaderChain) writeHeaders(headers []*types.Header) (result *headerWriteResult, err error) {
	if len(headers) == 0 {
		return &headerWriteResult{}, nil
	}
	// The parent of the first header must already have a known total
	// difficulty, otherwise the chain cannot be attached anywhere.
	ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1)
	if ptd == nil {
		return &headerWriteResult{}, consensus.ErrUnknownAncestor
	}
	var (
		lastNumber = headers[0].Number.Uint64() - 1 // Last successfully imported number
		lastHash   = headers[0].ParentHash          // Last imported header hash
		newTD      = new(big.Int).Set(ptd)          // Total difficulty of inserted chain

		lastHeader    *types.Header
		inserted      []numberHash // Ephemeral lookup of number/hash for the chain
		firstInserted = -1         // Index of the first non-ignored header
	)

	batch := hc.chainDb.NewBatch()
	parentKnown := true // Set to true to force hc.HasHeader check the first iteration
	for i, header := range headers {
		var hash common.Hash
		// The headers have already been validated at this point, so we already
		// know that it's a contiguous chain, where
		// headers[i].Hash() == headers[i+1].ParentHash
		// Reading the next header's ParentHash avoids recomputing the hash.
		if i < len(headers)-1 {
			hash = headers[i+1].ParentHash
		} else {
			hash = header.Hash()
		}
		number := header.Number.Uint64()
		newTD.Add(newTD, header.Difficulty)

		// If the parent was not present, store it
		// If the header is already known, skip it, otherwise store
		alreadyKnown := parentKnown && hc.HasHeader(hash, number)
		if !alreadyKnown {
			// Irrelevant of the canonical status, write the TD and header to the database.
			rawdb.WriteTd(batch, hash, number, newTD)
			hc.tdCache.Add(hash, new(big.Int).Set(newTD))

			rawdb.WriteHeader(batch, header)
			inserted = append(inserted, numberHash{number, hash})
			hc.headerCache.Add(hash, header)
			hc.numberCache.Add(hash, number)
			if firstInserted < 0 {
				firstInserted = i
			}
		}
		parentKnown = alreadyKnown
		lastHeader, lastHash, lastNumber = header, hash, number
	}

	// Skip the slow disk write of all headers if interrupted.
	if hc.procInterrupt() {
		log.Debug("Premature abort during headers import")
		return &headerWriteResult{}, errors.New("aborted")
	}
	// Commit to disk!
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write headers", "error", err)
	}
	batch.Reset()

	var (
		head    = hc.CurrentHeader().Number.Uint64()
		localTD = hc.GetTd(hc.currentHeaderHash, head)
		status  = SideStatTy
	)
	// If the total difficulty is higher than our known, add it to the canonical chain
	// Second clause in the if statement reduces the vulnerability to selfish mining.
	// Please refer to http://www.cs.cornell.edu/~ie53/publications/btcProcFC.pdf
	reorg := newTD.Cmp(localTD) > 0
	if !reorg && newTD.Cmp(localTD) == 0 {
		if lastNumber < head {
			reorg = true
		} else if lastNumber == head {
			// Equal TD and equal height: pick randomly to split the vote.
			reorg = mrand.Float64() < 0.5
		}
	}
	// If the parent of the (first) block is already the canon header,
	// we don't have to go backwards to delete canon blocks, but
	// simply pile them onto the existing chain
	chainAlreadyCanon := headers[0].ParentHash == hc.currentHeaderHash
	if reorg {
		// If the header can be added into canonical chain, adjust the
		// header chain markers(canonical indexes and head header flag).
		//
		// Note all markers should be written atomically.
		markerBatch := batch // we can reuse the batch to keep allocs down
		if !chainAlreadyCanon {
			// Delete any canonical number assignments above the new head
			for i := lastNumber + 1; ; i++ {
				hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
				if hash == (common.Hash{}) {
					break
				}
				rawdb.DeleteCanonicalHash(markerBatch, i)
			}
			// Overwrite any stale canonical number assignments, going
			// backwards from the first header in this import
			var (
				headHash   = headers[0].ParentHash          // inserted[0].parent?
				headNumber = headers[0].Number.Uint64() - 1 // inserted[0].num-1 ?
				headHeader = hc.GetHeader(headHash, headNumber)
			)
			for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
				rawdb.WriteCanonicalHash(markerBatch, headHash, headNumber)
				headHash = headHeader.ParentHash
				headNumber = headHeader.Number.Uint64() - 1
				headHeader = hc.GetHeader(headHash, headNumber)
			}
			// If some of the older headers were already known, but obtained canon-status
			// during this import batch, then we need to write that now
			// Further down, we continue writing the status for the ones that
			// were not already known
			for i := 0; i < firstInserted; i++ {
				hash := headers[i].Hash()
				num := headers[i].Number.Uint64()
				rawdb.WriteCanonicalHash(markerBatch, hash, num)
				rawdb.WriteHeadHeaderHash(markerBatch, hash)
			}
		}
		// Extend the canonical chain with the new headers
		for _, hn := range inserted {
			rawdb.WriteCanonicalHash(markerBatch, hn.hash, hn.number)
			rawdb.WriteHeadHeaderHash(markerBatch, hn.hash)
		}
		if err := markerBatch.Write(); err != nil {
			log.Crit("Failed to write header markers into disk", "err", err)
		}
		markerBatch.Reset()
		// Last step update all in-memory head header markers
		hc.currentHeaderHash = lastHash
		hc.currentHeader.Store(types.CopyHeader(lastHeader))
		headHeaderGauge.Update(lastHeader.Number.Int64())

		// Chain status is canonical since this insert was a reorg.
		// Note that all inserts which have higher TD than existing are 'reorg'.
		status = CanonStatTy
	}

	if len(inserted) == 0 {
		status = NonStatTy
	}
	return &headerWriteResult{
		status:     status,
		ignored:    len(headers) - len(inserted),
		imported:   len(inserted),
		lastHash:   lastHash,
		lastHeader: lastHeader,
	}, nil
}

// ValidateHeaderChain sanity-checks the ordering/ancestry of the given headers,
// rejects blacklisted hashes, and runs (sampled) seal verification through the
// consensus engine. It returns the index of the first offending header along
// with the error, or (0, nil) on success.
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	if chain == nil {
		log.Error("chain equals nil")
	}
	// NOTE(review): an empty (or nil) chain combined with checkFreq != 0 would
	// reach seals[len(seals)-1] below and panic with an index out of range.
	// Callers presumably never pass an empty chain -- confirm before relying
	// on this.

	for i := 1; i < len(chain); i++ {
		if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 {
			hash := chain[i].Hash()
			parentHash := chain[i-1].Hash()
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", hash,
				"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash)

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, chain[i-1].Number,
				parentHash.Bytes()[:4], i, chain[i].Number, hash.Bytes()[:4], chain[i].ParentHash[:4])
		}
		// If the header is a banned one, straight out abort
		if BadHashes[chain[i].ParentHash] {
			return i - 1, ErrBlacklistedHash
		}
		// If it's the last header in the chunk, we need to check it too
		if i == len(chain)-1 && BadHashes[chain[i].Hash()] {
			return i, ErrBlacklistedHash
		}
	}

	// Generate the list of seal verification requests, and start the parallel verifier
	seals := make([]bool, len(chain))
	if checkFreq != 0 {
		// In case of checkFreq == 0 all seals are left false.
		// Pick roughly one random index per checkFreq-sized window to verify.
		for i := 0; i <= len(seals)/checkFreq; i++ {
			index := i*checkFreq + hc.rand.Intn(checkFreq)
			if index >= len(seals) {
				index = len(seals) - 1
			}
			seals[index] = true
		}
		// Last should always be verified to avoid junk.
		seals[len(seals)-1] = true
	}

	abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
	defer close(abort)

	// Iterate over the headers and ensure they all check out
	for i := range chain {
		// If the chain is terminating, stop processing blocks
		if hc.procInterrupt() {
			log.Debug("Premature abort during headers verification")
			return 0, errors.New("aborted")
		}
		// Otherwise wait for headers checks and ensure they pass
		if err := <-results; err != nil {
			return i, err
		}
	}

	return 0, nil
}

// InsertHeaderChain inserts the given headers.
//
// The validity of the headers is NOT CHECKED by this method, i.e. they need to be
// validated by ValidateHeaderChain before calling InsertHeaderChain.
//
// This insert is all-or-nothing. If this returns an error, no headers were written,
// otherwise they were all processed successfully.
//
// The returned 'write status' says if the inserted headers are part of the canonical chain
// or a side chain.
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time) (WriteStatus, error) {
	if hc.procInterrupt() {
		return 0, errors.New("aborted")
	}
	// writeHeaders always returns a non-nil result, even on error, so the
	// statistics below are safe to read unconditionally.
	res, err := hc.writeHeaders(chain)

	// Report some public statistics so the user has a clue what's going on
	context := []interface{}{
		"count", res.imported,
		"elapsed", common.PrettyDuration(time.Since(start)),
	}
	if err != nil {
		context = append(context, "err", err)
	}
	if last := res.lastHeader; last != nil {
		context = append(context, "number", last.Number, "hash", res.lastHash)
		// Only mention the age for headers older than a minute to keep the
		// log line short during normal (near-head) operation.
		if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {
			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
		}
	}
	if res.ignored > 0 {
		context = append(context, []interface{}{"ignored", res.ignored}...)
	}
	log.Info("Imported new block headers", context...)
	return res.status, err
}

// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	if ancestor > number {
		return common.Hash{}, 0
	}
	if ancestor == 1 {
		// in this case it is cheaper to just read the header
		if header := hc.GetHeader(hash, number); header != nil {
			return header.ParentHash, number - 1
		}
		return common.Hash{}, 0
	}
	for ancestor != 0 {
		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
			ancestorHash := rawdb.ReadCanonicalHash(hc.chainDb, number-ancestor)
			// NOTE(review): the canonical hash is read a second time here,
			// presumably to guard against the canonical chain changing between
			// the two reads -- confirm against upstream before simplifying.
			if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
				number -= ancestor
				return ancestorHash, number
			}
		}
		if *maxNonCanonical == 0 {
			return common.Hash{}, 0
		}
		*maxNonCanonical--
		ancestor--
		// Walk one block back through the non-canonical section.
		header := hc.GetHeader(hash, number)
		if header == nil {
			return common.Hash{}, 0
		}
		hash = header.ParentHash
		number--
	}
	return hash, number
}

// GetTd retrieves a block's total difficulty in the canonical chain from the
// database by hash and number, caching it if found.
func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
	// Short circuit if the td's already in the cache, retrieve otherwise
	if cached, ok := hc.tdCache.Get(hash); ok {
		return cached.(*big.Int)
	}
	td := rawdb.ReadTd(hc.chainDb, hash, number)
	if td == nil {
		return nil
	}
	// Cache the found td for next time and return
	hc.tdCache.Add(hash, td)
	return td
}

// CalcPastMedianTime calculates the median time of the previous few blocks
// prior to, and including, the passed block node. When a non-nil parent is
// given, its timestamp is used in place of the header at `number`.
//
// Modified from btcsuite
func (hc *HeaderChain) CalcPastMedianTime(number uint64, parent *types.Header) *big.Int {

	// Genesis block.
	if number == 0 {
		return big.NewInt(int64(hc.GetHeaderByNumber(0).Time))
	}

	timestamps := make([]*big.Int, medianTimeBlocks)
	numNodes := 0
	// Collect at most medianTimeBlocks timestamps, fewer near genesis.
	limit := uint64(0)
	if number >= medianTimeBlocks {
		limit = number - medianTimeBlocks + 1
	}

	for i := number; i >= limit; i-- {
		if parent != nil && i == number {
			timestamps[numNodes] = big.NewInt(int64(parent.Time))
		} else {
			// NOTE(review): GetHeaderByNumber can return nil for an unknown
			// number, which would panic on header.Time below -- callers
			// presumably only query canonical numbers at or below the head;
			// confirm.
			header := hc.GetHeaderByNumber(i)
			timestamps[numNodes] = big.NewInt(int64(header.Time))
		}
		numNodes++
		// Explicit break prevents uint64 underflow when i-- would wrap at 0.
		if i == 0 {
			break
		}
	}

	// Prune the slice to the actual number of available timestamps which
	// will be fewer than desired near the beginning of the block chain
	// and sort them.
	timestamps = timestamps[:numNodes]
	sort.Sort(BigIntSlice(timestamps))

	medianTimestamp := timestamps[numNodes/2]
	return medianTimestamp
}

// GetHeader retrieves a block header from the database by hash and number,
// caching it if found.
func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
	// Short circuit if the header's already in the cache, retrieve otherwise
	if header, ok := hc.headerCache.Get(hash); ok {
		return header.(*types.Header)
	}
	header := rawdb.ReadHeader(hc.chainDb, hash, number)
	if header == nil {
		return nil
	}
	// Cache the found header for next time and return
	hc.headerCache.Add(hash, header)
	return header
}

// GetHeaderByHash retrieves a block header from the database by hash, caching it if
// found.
func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
	number := hc.GetBlockNumber(hash)
	if number == nil {
		return nil
	}
	return hc.GetHeader(hash, *number)
}

// HasHeader checks if a block header is present in the database or not.
// In theory, if header is present in the database, all relative components
// like td and hash->number should be present too.
func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
	// A cache hit on either hash-keyed cache proves presence without disk I/O.
	if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) {
		return true
	}
	return rawdb.HasHeader(hc.chainDb, hash, number)
}

// GetHeaderByNumber retrieves a block header from the database by number,
// caching it (associated with its hash) if found.
func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
	// check cache
	if cache, ok := hc.hashCache.Get(number); ok {
		return hc.GetHeader(cache.(common.Hash), number)
	}

	hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
	if hash == (common.Hash{}) {
		return nil
	}

	// cache for next time and return
	hc.hashCache.Add(number, hash)
	return hc.GetHeader(hash, number)
}

// GetCanonicalHash returns the hash assigned to the given number on the
// canonical chain (the zero hash if none is stored).
func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
	return rawdb.ReadCanonicalHash(hc.chainDb, number)
}

// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache.
func (hc *HeaderChain) CurrentHeader() *types.Header {
	return hc.currentHeader.Load().(*types.Header)
}

// SetCurrentHeader sets the in-memory head header marker of the canonical chain
// as the given header.
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
	hc.currentHeader.Store(head)
	hc.currentHeaderHash = head.Hash()
	headHeaderGauge.Update(head.Number.Int64())
}

type (
	// UpdateHeadBlocksCallback is a callback function that is called by SetHead
	// before head header is updated. The method will return the actual block it
	// updated the head to (missing state) and a flag if setHead should continue
	// rewinding till that forcefully (exceeded ancient limits)
	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (uint64, bool)

	// DeleteBlockContentCallback is a callback function that is called by SetHead
	// before each header is deleted.
	DeleteBlockContentCallback func(ethdb.KeyValueWriter, common.Hash, uint64)
)

// SetHead rewinds the local chain to a new head. Everything above the new head
// will be deleted and the new one set.
func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	var (
		parentHash common.Hash
		batch      = hc.chainDb.NewBatch()
		origin     = true
	)
	// Unwind the header chain one header at a time until the head is at or
	// below the requested number.
	for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
		num := hdr.Number.Uint64()

		// Rewind block chain to new head.
		parent := hc.GetHeader(hdr.ParentHash, num-1)
		if parent == nil {
			parent = hc.genesisHeader
		}
		parentHash = parent.Hash()

		// Notably, since geth has the possibility for setting the head to a low
		// height which is even lower than ancient head.
		// In order to ensure that the head is always no higher than the data in
		// the database (ancient store or active store), we need to update head
		// first then remove the relative data from the database.
		//
		// Update head first(head fast block, head full block) before deleting the data.
		markerBatch := hc.chainDb.NewBatch()
		if updateFn != nil {
			newHead, force := updateFn(markerBatch, parent)
			if force && newHead < head {
				log.Warn("Force rewinding till ancient limit", "head", newHead)
				head = newHead
			}
		}
		// Update head header then.
		rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
		if err := markerBatch.Write(); err != nil {
			log.Crit("Failed to update chain markers", "error", err)
		}
		hc.currentHeader.Store(parent)
		hc.currentHeaderHash = parentHash
		headHeaderGauge.Update(parent.Number.Int64())

		// If this is the first iteration, wipe any leftover data upwards too so
		// we don't end up with dangling gaps in the database
		var nums []uint64
		if origin {
			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
				nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
			}
			origin = false
		}
		nums = append(nums, num)

		// Remove the related data from the database on all sidechains
		for _, num := range nums {
			// Gather all the side fork hashes
			hashes := rawdb.ReadAllHashes(hc.chainDb, num)
			if len(hashes) == 0 {
				// No hashes in the database whatsoever, probably frozen already
				hashes = append(hashes, hdr.Hash())
			}
			for _, hash := range hashes {
				if delFn != nil {
					delFn(batch, hash, num)
				}
				rawdb.DeleteHeader(batch, hash, num)
				rawdb.DeleteTd(batch, hash, num)
			}
			rawdb.DeleteCanonicalHash(batch, num)
		}
	}
	// Flush all accumulated deletions.
	if err := batch.Write(); err != nil {
		log.Crit("Failed to rewind block", "error", err)
	}
	// Clear out any stale content from the caches
	hc.headerCache.Purge()
	hc.tdCache.Purge()
	hc.numberCache.Purge()
	hc.hashCache.Purge()
}

// SetGenesis sets a new genesis block header for the chain
func (hc *HeaderChain) SetGenesis(head *types.Header) {
	hc.genesisHeader = head
}

// Config retrieves the header chain's chain configuration.
func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }

// Engine retrieves the header chain's consensus engine.
678 func (hc *HeaderChain) Engine() consensus.Engine { return hc.engine } 679 680 // GetBlock implements consensus.ChainReader, and returns nil for every input as 681 // a header chain does not have blocks available for retrieval. 682 func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block { 683 return nil 684 } 685 686 // BigIntSlice attaches the methods of sort.Interface to []*big.Int, sorting in increasing order. (used by CalcPastMedianTime) 687 type BigIntSlice []*big.Int 688 689 func (s BigIntSlice) Len() int { return len(s) } 690 func (s BigIntSlice) Less(i, j int) bool { return s[i].Cmp(s[j]) < 0 } 691 func (s BigIntSlice) Swap(i, j int) { s[i], s[j] = s[j], s[i] }