github.com/palisadeinc/bor@v0.0.0-20230615125219-ab7196213d15/core/headerchain.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package core
    18  
    19  import (
    20  	crand "crypto/rand"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/common"
    30  	"github.com/ethereum/go-ethereum/consensus"
    31  	"github.com/ethereum/go-ethereum/core/rawdb"
    32  	"github.com/ethereum/go-ethereum/core/types"
    33  	"github.com/ethereum/go-ethereum/ethdb"
    34  	"github.com/ethereum/go-ethereum/log"
    35  	"github.com/ethereum/go-ethereum/params"
    36  	"github.com/ethereum/go-ethereum/rlp"
    37  	lru "github.com/hashicorp/golang-lru"
    38  )
    39  
    40  const (
    41  	headerCacheLimit = 512
    42  	tdCacheLimit     = 1024
    43  	numberCacheLimit = 2048
    44  )
    45  
    46  // HeaderChain implements the basic block header chain logic that is shared by
    47  // core.BlockChain and light.LightChain. It is not usable in itself, only as
    48  // a part of either structure.
    49  //
    50  // HeaderChain is responsible for maintaining the header chain including the
    51  // header query and updating.
    52  //
    53  // The components maintained by the headerchain include: (1) total difficulty,
    54  // (2) header, (3) block hash -> number mapping, (4) canonical number -> hash mapping
    55  // and (5) head header flag.
    56  //
    57  // It is not thread safe either; the encapsulating chain structures should do
    58  // the necessary mutex locking/unlocking.
    59  type HeaderChain struct {
    60  	config        *params.ChainConfig
    61  	chainDb       ethdb.Database
    62  	genesisHeader *types.Header
    63  
    64  	currentHeader     atomic.Value // Current head of the header chain (may be above the block chain!)
    65  	currentHeaderHash common.Hash  // Hash of the current head of the header chain (prevent recomputing all the time)
    66  
    67  	headerCache *lru.Cache // Cache for the most recent block headers
    68  	tdCache     *lru.Cache // Cache for the most recent block total difficulties
    69  	numberCache *lru.Cache // Cache for the most recent block numbers
    70  
    71  	procInterrupt func() bool
    72  
    73  	rand   *mrand.Rand
    74  	engine consensus.Engine
    75  }
    76  
    77  // NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
    78  // to the parent's interrupt semaphore.
    79  func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
    80  	headerCache, _ := lru.New(headerCacheLimit)
    81  	tdCache, _ := lru.New(tdCacheLimit)
    82  	numberCache, _ := lru.New(numberCacheLimit)
    83  
    84  	// Seed a fast but crypto-originating random generator
    85  	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
    86  	if err != nil {
    87  		return nil, err
    88  	}
    89  	hc := &HeaderChain{
    90  		config:        config,
    91  		chainDb:       chainDb,
    92  		headerCache:   headerCache,
    93  		tdCache:       tdCache,
    94  		numberCache:   numberCache,
    95  		procInterrupt: procInterrupt,
    96  		rand:          mrand.New(mrand.NewSource(seed.Int64())),
    97  		engine:        engine,
    98  	}
    99  	hc.genesisHeader = hc.GetHeaderByNumber(0)
   100  	if hc.genesisHeader == nil {
   101  		return nil, ErrNoGenesis
   102  	}
   103  	hc.currentHeader.Store(hc.genesisHeader)
   104  	if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
   105  		if chead := hc.GetHeaderByHash(head); chead != nil {
   106  			hc.currentHeader.Store(chead)
   107  		}
   108  	}
   109  	hc.currentHeaderHash = hc.CurrentHeader().Hash()
   110  	headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())
   111  	return hc, nil
   112  }
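
// Illustrative construction sketch (not part of the original file). It assumes an
// in-memory database that already contains a committed genesis header and a faked
// ethash engine; the genesis setup and the engine choice are assumptions made
// purely for the example:
//
//	db := rawdb.NewMemoryDatabase()
//	// ... commit a genesis into db first, e.g. via Genesis.Commit ...
//	hc, err := NewHeaderChain(db, params.TestChainConfig, ethash.NewFaker(), func() bool { return false })
//	if err != nil {
//		return err // ErrNoGenesis if block 0 is missing
//	}
//	head := hc.CurrentHeader() // genesis, unless a head block hash was already stored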
   113  
   114  // GetBlockNumber retrieves the block number belonging to the given hash
   115  // from the cache or database
   116  func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
   117  	if cached, ok := hc.numberCache.Get(hash); ok {
   118  		number := cached.(uint64)
   119  		return &number
   120  	}
   121  	number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
   122  	if number != nil {
   123  		hc.numberCache.Add(hash, *number)
   124  	}
   125  	return number
   126  }
   127  
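// headerWriteResult summarises the outcome of a batch header import: the write
// status, the number of ignored and freshly imported headers, and the hash and
// header of the last element written.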
   128  type headerWriteResult struct {
   129  	status     WriteStatus
   130  	ignored    int
   131  	imported   int
   132  	lastHash   common.Hash
   133  	lastHeader *types.Header
   134  }
   135  
   136  // Reorg reorgs the local canonical chain into the specified chain. The reorg
   137  // can be classified into two cases: (a) extend the local chain (b) switch the
   138  // head to the given header.
   139  func (hc *HeaderChain) Reorg(headers []*types.Header) error {
   140  	// Short circuit if nothing to reorg.
   141  	if len(headers) == 0 {
   142  		return nil
   143  	}
   144  	// If the parent of the (first) block is already the canon header,
   145  	// we don't have to go backwards to delete canon blocks, but simply
   146  	// pile them onto the existing chain. Otherwise, do the necessary
   147  	// reorgs.
   148  	var (
   149  		first = headers[0]
   150  		last  = headers[len(headers)-1]
   151  		batch = hc.chainDb.NewBatch()
   152  	)
   153  	if first.ParentHash != hc.currentHeaderHash {
   154  		// Delete any canonical number assignments above the new head
   155  		for i := last.Number.Uint64() + 1; ; i++ {
   156  			hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
   157  			if hash == (common.Hash{}) {
   158  				break
   159  			}
   160  			rawdb.DeleteCanonicalHash(batch, i)
   161  		}
   162  		// Overwrite any stale canonical number assignments, going
   163  		// backwards from the first header in this import until the
   164  		// cross link between two chains.
   165  		var (
   166  			header     = first
   167  			headNumber = header.Number.Uint64()
   168  			headHash   = header.Hash()
   169  		)
   170  		for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
   171  			rawdb.WriteCanonicalHash(batch, headHash, headNumber)
   172  			if headNumber == 0 {
   173  				break // It shouldn't be reached
   174  			}
   175  			headHash, headNumber = header.ParentHash, header.Number.Uint64()-1
   176  			header = hc.GetHeader(headHash, headNumber)
   177  			if header == nil {
   178  				return fmt.Errorf("missing parent %d %x", headNumber, headHash)
   179  			}
   180  		}
   181  	}
   182  	// Extend the canonical chain with the new headers
   183  	for i := 0; i < len(headers)-1; i++ {
   184  		hash := headers[i+1].ParentHash // Save some extra hashing
   185  		num := headers[i].Number.Uint64()
   186  		rawdb.WriteCanonicalHash(batch, hash, num)
   187  		rawdb.WriteHeadHeaderHash(batch, hash)
   188  	}
   189  	// Write the last header
   190  	hash := headers[len(headers)-1].Hash()
   191  	num := headers[len(headers)-1].Number.Uint64()
   192  	rawdb.WriteCanonicalHash(batch, hash, num)
   193  	rawdb.WriteHeadHeaderHash(batch, hash)
   194  
   195  	if err := batch.Write(); err != nil {
   196  		return err
   197  	}
   198  	// Last step: update all in-memory head header markers
   199  	hc.currentHeaderHash = last.Hash()
   200  	hc.currentHeader.Store(types.CopyHeader(last))
   201  	headHeaderGauge.Update(last.Number.Int64())
   202  	return nil
   203  }
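
// Illustrative sketch (not part of the original file): a caller that has already
// validated a contiguous, parent-linked batch of headers and decided it should
// become canonical can apply it directly; the headers variable is an assumption
// made for the example:
//
//	if err := hc.Reorg(headers); err != nil {
//		return err
//	}
//	// hc.CurrentHeader() now reports headers[len(headers)-1]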
   204  
   205  // WriteHeaders writes a chain of headers into the local chain, given that the
   206  // parents are already known. The chain head header won't be updated in this
   207  // function; the additional setChainHead call is expected in order to finish
   208  // the entire procedure.
   209  func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) {
   210  	if len(headers) == 0 {
   211  		return 0, nil
   212  	}
   213  	ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1)
   214  	if ptd == nil {
   215  		return 0, consensus.ErrUnknownAncestor
   216  	}
   217  	var (
   218  		newTD       = new(big.Int).Set(ptd) // Total difficulty of inserted chain
   219  		inserted    []rawdb.NumberHash      // Ephemeral lookup of number/hash for the chain
   220  		parentKnown = true                  // Set to true to force the hc.HasHeader check on the first iteration
   221  		batch       = hc.chainDb.NewBatch()
   222  	)
   223  	for i, header := range headers {
   224  		var hash common.Hash
   225  		// The headers have already been validated at this point, so we already
   226  		// know that it's a contiguous chain, where
   227  		// headers[i].Hash() == headers[i+1].ParentHash
   228  		if i < len(headers)-1 {
   229  			hash = headers[i+1].ParentHash
   230  		} else {
   231  			hash = header.Hash()
   232  		}
   233  		number := header.Number.Uint64()
   234  		newTD.Add(newTD, header.Difficulty)
   235  
   236  		// If the parent was not present, store it
   237  		// If the header is already known, skip it, otherwise store
   238  		alreadyKnown := parentKnown && hc.HasHeader(hash, number)
   239  		if !alreadyKnown {
   240  			// Regardless of the canonical status, write the TD and header to the database.
   241  			rawdb.WriteTd(batch, hash, number, newTD)
   242  			hc.tdCache.Add(hash, new(big.Int).Set(newTD))
   243  
   244  			rawdb.WriteHeader(batch, header)
   245  			inserted = append(inserted, rawdb.NumberHash{Number: number, Hash: hash})
   246  			hc.headerCache.Add(hash, header)
   247  			hc.numberCache.Add(hash, number)
   248  		}
   249  		parentKnown = alreadyKnown
   250  	}
   251  	// Skip the slow disk write of all headers if interrupted.
   252  	if hc.procInterrupt() {
   253  		log.Debug("Premature abort during headers import")
   254  		return 0, errors.New("aborted")
   255  	}
   256  	// Commit to disk!
   257  	if err := batch.Write(); err != nil {
   258  		log.Crit("Failed to write headers", "error", err)
   259  	}
   260  	return len(inserted), nil
   261  }
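
// Illustrative sketch (not part of the original file): WriteHeaders only persists
// headers and their total difficulties; the head markers stay untouched until a
// follow-up such as writeHeadersAndSetHead (or Reorg) runs. The headers slice is
// an assumed pre-validated, parent-linked batch:
//
//	imported, err := hc.WriteHeaders(headers)
//	if err != nil {
//		return err
//	}
//	log.Debug("Headers persisted", "imported", imported, "head", hc.CurrentHeader().Number)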
   262  
   263  // writeHeadersAndSetHead writes a batch of block headers and applies the last
   264  // header as the chain head if the fork choicer says it's ok to update the chain.
   265  // Note: This method is not concurrent-safe with inserting blocks simultaneously
   266  // into the chain, as side effects caused by reorganisations cannot be emulated
   267  // without the real blocks. Hence, writing headers directly should only be done
   268  // in two scenarios: pure-header mode of operation (light clients), or properly
   269  // separated header/block phases (non-archive clients).
   270  func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *ForkChoice) (*headerWriteResult, error) {
   271  	inserted, err := hc.WriteHeaders(headers)
   272  	if err != nil {
   273  		return nil, err
   274  	}
   275  	var (
   276  		lastHeader = headers[len(headers)-1]
   277  		lastHash   = headers[len(headers)-1].Hash()
   278  		result     = &headerWriteResult{
   279  			status:     NonStatTy,
   280  			ignored:    len(headers) - inserted,
   281  			imported:   inserted,
   282  			lastHash:   lastHash,
   283  			lastHeader: lastHeader,
   284  		}
   285  	)
   286  
   287  	// Ask the fork choicer if the reorg is necessary
   288  	reorg, err := forker.ReorgNeeded(hc.CurrentHeader(), lastHeader)
   289  	if err != nil {
   290  		return nil, err
   291  	} else if !reorg {
   292  		if inserted != 0 {
   293  			result.status = SideStatTy
   294  		}
   295  		return result, nil
   296  	}
   297  
   298  	isValid, err := forker.ValidateReorg(hc.CurrentHeader(), headers)
   299  	if err != nil {
   300  		return nil, err
   301  	} else if !isValid {
   302  		if inserted != 0 {
   303  			result.status = SideStatTy
   304  		}
   305  		return result, nil
   306  	}
   307  	// Special case: all the inserted headers are already on the canonical
   308  	// header chain, skip the reorg operation.
   309  	if hc.GetCanonicalHash(lastHeader.Number.Uint64()) == lastHash && lastHeader.Number.Uint64() <= hc.CurrentHeader().Number.Uint64() {
   310  		return result, nil
   311  	}
   312  	// Apply the reorg operation
   313  	if err := hc.Reorg(headers); err != nil {
   314  		return nil, err
   315  	}
   316  	result.status = CanonStatTy
   317  	return result, nil
   318  }
   319  
   320  func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
   321  	// Do a sanity check that the provided chain is actually ordered and linked
   322  	for i := 1; i < len(chain); i++ {
   323  		if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 {
   324  			hash := chain[i].Hash()
   325  			parentHash := chain[i-1].Hash()
   326  			// Chain broke ancestry, log a message (programming error) and skip insertion
   327  			log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", hash,
   328  				"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash)
   329  
   330  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, chain[i-1].Number,
   331  				parentHash.Bytes()[:4], i, chain[i].Number, hash.Bytes()[:4], chain[i].ParentHash[:4])
   332  		}
   333  		// If the header is a banned one, straight out abort
   334  		if BadHashes[chain[i].ParentHash] {
   335  			return i - 1, ErrBannedHash
   336  		}
   337  		// If it's the last header in the chunk, we need to check it too
   338  		if i == len(chain)-1 && BadHashes[chain[i].Hash()] {
   339  			return i, ErrBannedHash
   340  		}
   341  	}
   342  
   343  	// Generate the list of seal verification requests, and start the parallel verifier
   344  	seals := make([]bool, len(chain))
   345  	if checkFreq != 0 {
   346  		// In case of checkFreq == 0 all seals are left false.
   347  		for i := 0; i <= len(seals)/checkFreq; i++ {
   348  			index := i*checkFreq + hc.rand.Intn(checkFreq)
   349  			if index >= len(seals) {
   350  				index = len(seals) - 1
   351  			}
   352  			seals[index] = true
   353  		}
   354  		// Last should always be verified to avoid junk.
   355  		seals[len(seals)-1] = true
   356  	}
   357  
   358  	abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
   359  	defer close(abort)
   360  
   361  	// Iterate over the headers and ensure they all check out
   362  	for i := range chain {
   363  		// If the chain is terminating, stop processing blocks
   364  		if hc.procInterrupt() {
   365  			log.Debug("Premature abort during headers verification")
   366  			return 0, errors.New("aborted")
   367  		}
   368  		// Otherwise wait for headers checks and ensure they pass
   369  		if err := <-results; err != nil {
   370  			return i, err
   371  		}
   372  	}
   373  
   374  	return 0, nil
   375  }
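
// Illustrative sketch (not part of the original file): with checkFreq == 100 roughly
// one seal per hundred headers (plus always the last one) is verified, while
// checkFreq == 0 skips seal verification entirely; the chain slice is an assumed
// batch of downloaded headers:
//
//	if idx, err := hc.ValidateHeaderChain(chain, 100); err != nil {
//		log.Warn("Header batch rejected", "index", idx, "err", err)
//		return err
//	}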
   376  
   377  // InsertHeaderChain inserts the given headers and does the reorganisations.
   378  //
   379  // The validity of the headers is NOT CHECKED by this method, i.e. they need to be
   380  // validated by ValidateHeaderChain before calling InsertHeaderChain.
   381  //
   382  // This insert is all-or-nothing. If this returns an error, no headers were written,
   383  // otherwise they were all processed successfully.
   384  //
   385  // The returned 'write status' says if the inserted headers are part of the canonical chain
   386  // or a side chain.
   387  func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time, forker *ForkChoice) (WriteStatus, error) {
   388  	if hc.procInterrupt() {
   389  		return 0, errors.New("aborted")
   390  	}
   391  	res, err := hc.writeHeadersAndSetHead(chain, forker)
   392  	if err != nil {
   393  		return 0, err
   394  	}
   395  	// Report some public statistics so the user has a clue what's going on
   396  	context := []interface{}{
   397  		"count", res.imported,
   398  		"elapsed", common.PrettyDuration(time.Since(start)),
   399  	}
   400  	if last := res.lastHeader; last != nil {
   401  		context = append(context, "number", last.Number, "hash", res.lastHash)
   402  		if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {
   403  			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
   404  		}
   405  	}
   406  	if res.ignored > 0 {
   407  		context = append(context, []interface{}{"ignored", res.ignored}...)
   408  	}
   409  	log.Info("Imported new block headers", context...)
   410  	return res.status, err
   411  }
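
// Illustrative sketch (not part of the original file): the expected call order is
// ValidateHeaderChain followed by InsertHeaderChain. The forker value is assumed
// to be the fork choicer owned by the enclosing chain structure:
//
//	if _, err := hc.ValidateHeaderChain(chain, 100); err != nil {
//		return err
//	}
//	status, err := hc.InsertHeaderChain(chain, time.Now(), forker)
//	if err == nil && status == CanonStatTy {
//		// the batch extended (or reorged onto) the canonical header chain
//	}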
   412  
   413  // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
   414  // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
   415  // number of blocks to be individually checked before we reach the canonical chain.
   416  //
   417  // Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
   418  func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
   419  	if ancestor > number {
   420  		return common.Hash{}, 0
   421  	}
   422  	if ancestor == 1 {
   423  		// in this case it is cheaper to just read the header
   424  		if header := hc.GetHeader(hash, number); header != nil {
   425  			return header.ParentHash, number - 1
   426  		}
   427  		return common.Hash{}, 0
   428  	}
   429  	for ancestor != 0 {
   430  		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
   431  			ancestorHash := rawdb.ReadCanonicalHash(hc.chainDb, number-ancestor)
   432  			if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
   433  				number -= ancestor
   434  				return ancestorHash, number
   435  			}
   436  		}
   437  		if *maxNonCanonical == 0 {
   438  			return common.Hash{}, 0
   439  		}
   440  		*maxNonCanonical--
   441  		ancestor--
   442  		header := hc.GetHeader(hash, number)
   443  		if header == nil {
   444  			return common.Hash{}, 0
   445  		}
   446  		hash = header.ParentHash
   447  		number--
   448  	}
   449  	return hash, number
   450  }
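
// Illustrative sketch (not part of the original file): looking up the 16th ancestor
// of the current head, allowing at most 100 header-by-header hops while off the
// canonical chain; the budget of 100 is an assumption made for the example:
//
//	head := hc.CurrentHeader()
//	limit := uint64(100)
//	hash, num := hc.GetAncestor(head.Hash(), head.Number.Uint64(), 16, &limit)
//	if hash != (common.Hash{}) {
//		log.Debug("Found ancestor", "number", num, "hash", hash)
//	}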
   451  
   452  // GetTd retrieves a block's total difficulty in the canonical chain from the
   453  // database by hash and number, caching it if found.
   454  func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
   455  	// Short circuit if the td's already in the cache, retrieve otherwise
   456  	if cached, ok := hc.tdCache.Get(hash); ok {
   457  		return cached.(*big.Int)
   458  	}
   459  	td := rawdb.ReadTd(hc.chainDb, hash, number)
   460  	if td == nil {
   461  		return nil
   462  	}
   463  	// Cache the found td for next time and return
   464  	hc.tdCache.Add(hash, td)
   465  	return td
   466  }
   467  
   468  // GetHeader retrieves a block header from the database by hash and number,
   469  // caching it if found.
   470  func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
   471  	// Short circuit if the header's already in the cache, retrieve otherwise
   472  	if header, ok := hc.headerCache.Get(hash); ok {
   473  		return header.(*types.Header)
   474  	}
   475  	header := rawdb.ReadHeader(hc.chainDb, hash, number)
   476  	if header == nil {
   477  		return nil
   478  	}
   479  	// Cache the found header for next time and return
   480  	hc.headerCache.Add(hash, header)
   481  	return header
   482  }
   483  
   484  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
   485  // found.
   486  func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
   487  	number := hc.GetBlockNumber(hash)
   488  	if number == nil {
   489  		return nil
   490  	}
   491  	return hc.GetHeader(hash, *number)
   492  }
   493  
   494  // HasHeader checks if a block header is present in the database or not.
   495  // In theory, if the header is present in the database, all related components
   496  // like td and hash->number should be present too.
   497  func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
   498  	if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) {
   499  		return true
   500  	}
   501  	return rawdb.HasHeader(hc.chainDb, hash, number)
   502  }
   503  
   504  // GetHeaderByNumber retrieves a block header from the database by number,
   505  // caching it (associated with its hash) if found.
   506  func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
   507  	hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
   508  	if hash == (common.Hash{}) {
   509  		return nil
   510  	}
   511  	return hc.GetHeader(hash, number)
   512  }
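
// Illustrative sketch (not part of the original file): the getters compose, so a
// canonical header can be reached either by number or by hash; block 42 is an
// assumption made for the example:
//
//	if h := hc.GetHeaderByNumber(42); h != nil {
//		same := hc.GetHeaderByHash(h.Hash()) // hash -> number -> header lookup
//		_ = same
//	}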
   513  
   514  // GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going
   515  // backwards from the given number.
   516  // If the 'number' is higher than the highest local header, this method will
   517  // return a best-effort response, containing the headers that we do have.
   518  func (hc *HeaderChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
   519  	// If the request is for future headers, we still return the portion of
   520  	// headers that we are able to serve
   521  	if current := hc.CurrentHeader().Number.Uint64(); current < number {
   522  		if count > number-current {
   523  			count -= number - current
   524  			number = current
   525  		} else {
   526  			return nil
   527  		}
   528  	}
   529  	var headers []rlp.RawValue
   530  	// If we have some of the headers in cache already, use that before going to db.
   531  	hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
   532  	if hash == (common.Hash{}) {
   533  		return nil
   534  	}
   535  	for count > 0 {
   536  		header, ok := hc.headerCache.Get(hash)
   537  		if !ok {
   538  			break
   539  		}
   540  		h := header.(*types.Header)
   541  		rlpData, _ := rlp.EncodeToBytes(h)
   542  		headers = append(headers, rlpData)
   543  		hash = h.ParentHash
   544  		count--
   545  		number--
   546  	}
   547  	// Read remaining from db
   548  	if count > 0 {
   549  		headers = append(headers, rawdb.ReadHeaderRange(hc.chainDb, number, count)...)
   550  	}
   551  	return headers
   552  }
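
// Illustrative sketch (not part of the original file): fetching up to 64 RLP-encoded
// headers walking backwards from block 1_000_000; the concrete numbers are
// assumptions made for the example:
//
//	for _, raw := range hc.GetHeadersFrom(1_000_000, 64) {
//		var h types.Header
//		if err := rlp.DecodeBytes(raw, &h); err == nil {
//			log.Debug("Served header", "number", h.Number)
//		}
//	}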
   553  
   554  func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
   555  	return rawdb.ReadCanonicalHash(hc.chainDb, number)
   556  }
   557  
   558  // CurrentHeader retrieves the current head header of the canonical chain. The
   559  // header is retrieved from the HeaderChain's internal cache.
   560  func (hc *HeaderChain) CurrentHeader() *types.Header {
   561  	return hc.currentHeader.Load().(*types.Header)
   562  }
   563  
   564  // SetCurrentHeader sets the in-memory head header marker of the canonical chain
   565  // as the given header.
   566  func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
   567  	hc.currentHeader.Store(head)
   568  	hc.currentHeaderHash = head.Hash()
   569  	headHeaderGauge.Update(head.Number.Int64())
   570  }
   571  
   572  type (
   573  	// UpdateHeadBlocksCallback is a callback function that is called by SetHead
   574  	// before the head header is updated. It returns the block number the head was
   575  	// actually updated to (e.g. due to missing state) and a flag telling whether
   576  	// SetHead should keep rewinding to that number forcefully (exceeded ancient limits).
   577  	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (uint64, bool)
   578  
   579  	// DeleteBlockContentCallback is a callback function that is called by SetHead
   580  	// before each header is deleted.
   581  	DeleteBlockContentCallback func(ethdb.KeyValueWriter, common.Hash, uint64)
   582  )
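
// Illustrative sketch (not part of the original file): a DeleteBlockContentCallback
// that wipes bodies and receipts alongside the headers removed by SetHead; the exact
// clean-up shown here is an assumption, real callers wire in the enclosing chain's
// own deletion logic:
//
//	delFn := func(db ethdb.KeyValueWriter, hash common.Hash, num uint64) {
//		rawdb.DeleteBody(db, hash, num)
//		rawdb.DeleteReceipts(db, hash, num)
//	}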
   583  
   584  // SetHead rewinds the local chain to a new head. Everything above the new head
   585  // will be deleted and the new one set.
   586  func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
   587  	var (
   588  		parentHash common.Hash
   589  		batch      = hc.chainDb.NewBatch()
   590  		origin     = true
   591  	)
   592  	for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
   593  		num := hdr.Number.Uint64()
   594  
   595  		// Rewind block chain to new head.
   596  		parent := hc.GetHeader(hdr.ParentHash, num-1)
   597  		if parent == nil {
   598  			parent = hc.genesisHeader
   599  		}
   600  		parentHash = parent.Hash()
   601  
   602  		// Notably, geth may set the head to a height which is even lower than the
   603  		// ancient head.
   604  		// In order to ensure that the head is always no higher than the data in
   605  		// the database (ancient store or active store), we need to update the head
   606  		// first and only then remove the related data from the database.
   607  		//
   608  		// Update the head first (head fast block, head full block) before deleting the data.
   609  		markerBatch := hc.chainDb.NewBatch()
   610  		if updateFn != nil {
   611  			newHead, force := updateFn(markerBatch, parent)
   612  			if force && newHead < head {
   613  				log.Warn("Force rewinding till ancient limit", "head", newHead)
   614  				head = newHead
   615  			}
   616  		}
   617  		// Update head header then.
   618  		rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
   619  		if err := markerBatch.Write(); err != nil {
   620  			log.Crit("Failed to update chain markers", "error", err)
   621  		}
   622  		hc.currentHeader.Store(parent)
   623  		hc.currentHeaderHash = parentHash
   624  		headHeaderGauge.Update(parent.Number.Int64())
   625  
   626  		// If this is the first iteration, wipe any leftover data upwards too so
   627  		// we don't end up with dangling gaps in the database
   628  		var nums []uint64
   629  		if origin {
   630  			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
   631  				nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
   632  			}
   633  			origin = false
   634  		}
   635  		nums = append(nums, num)
   636  
   637  		// Remove the related data from the database on all sidechains
   638  		for _, num := range nums {
   639  			// Gather all the side fork hashes
   640  			hashes := rawdb.ReadAllHashes(hc.chainDb, num)
   641  			if len(hashes) == 0 {
   642  				// No hashes in the database whatsoever, probably frozen already
   643  				hashes = append(hashes, hdr.Hash())
   644  			}
   645  			for _, hash := range hashes {
   646  				if delFn != nil {
   647  					delFn(batch, hash, num)
   648  				}
   649  				rawdb.DeleteHeader(batch, hash, num)
   650  				rawdb.DeleteTd(batch, hash, num)
   651  			}
   652  			rawdb.DeleteCanonicalHash(batch, num)
   653  		}
   654  	}
   655  	// Flush all accumulated deletions.
   656  	if err := batch.Write(); err != nil {
   657  		log.Crit("Failed to rewind block", "error", err)
   658  	}
   659  	// Clear out any stale content from the caches
   660  	hc.headerCache.Purge()
   661  	hc.tdCache.Purge()
   662  	hc.numberCache.Purge()
   663  }
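
// Illustrative sketch (not part of the original file): a header-only client can pass
// nil callbacks, dropping every header above the target height; the target number is
// an assumption made for the example:
//
//	hc.SetHead(1_000_000, nil, nil)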
   664  
   665  // SetGenesis sets a new genesis block header for the chain
   666  func (hc *HeaderChain) SetGenesis(head *types.Header) {
   667  	hc.genesisHeader = head
   668  }
   669  
   670  // Config retrieves the header chain's chain configuration.
   671  func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }
   672  
   673  // Engine retrieves the header chain's consensus engine.
   674  func (hc *HeaderChain) Engine() consensus.Engine { return hc.engine }
   675  
   676  // GetBlock implements consensus.ChainReader, and returns nil for every input as
   677  // a header chain does not have blocks available for retrieval.
   678  func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   679  	return nil
   680  }