github.com/Cleverse/go-ethereum@v0.0.0-20220927095127-45113064e7f2/core/headerchain.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package core
    18  
    19  import (
    20  	crand "crypto/rand"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/common"
    30  	"github.com/ethereum/go-ethereum/consensus"
    31  	"github.com/ethereum/go-ethereum/core/rawdb"
    32  	"github.com/ethereum/go-ethereum/core/types"
    33  	"github.com/ethereum/go-ethereum/ethdb"
    34  	"github.com/ethereum/go-ethereum/log"
    35  	"github.com/ethereum/go-ethereum/params"
    36  	"github.com/ethereum/go-ethereum/rlp"
    37  	lru "github.com/hashicorp/golang-lru"
    38  )
    39  
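        // Cache size limits for the LRU caches holding the most recent headers,
        // total difficulties and hash-to-number mappings.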
    40  const (
    41  	headerCacheLimit = 512
    42  	tdCacheLimit     = 1024
    43  	numberCacheLimit = 2048
    44  )
    45  
    46  // HeaderChain implements the basic block header chain logic that is shared by
    47  // core.BlockChain and light.LightChain. It is not usable in itself, only as
    48  // a part of either structure.
    49  //
    50  // HeaderChain is responsible for maintaining the header chain, including header
    51  // queries and updates.
    52  //
    53  // The components maintained by the HeaderChain include: (1) total difficulty,
    54  // (2) header, (3) block hash -> number mapping, (4) canonical number -> hash mapping,
    55  // and (5) head header flag.
    56  //
    57  // It is not thread safe either; the encapsulating chain structures should do
    58  // the necessary mutex locking/unlocking.
    59  type HeaderChain struct {
    60  	config        *params.ChainConfig
    61  	chainDb       ethdb.Database
    62  	genesisHeader *types.Header
    63  
    64  	currentHeader     atomic.Value // Current head of the header chain (may be above the block chain!)
    65  	currentHeaderHash common.Hash  // Hash of the current head of the header chain (prevent recomputing all the time)
    66  
    67  	headerCache *lru.Cache // Cache for the most recent block headers
    68  	tdCache     *lru.Cache // Cache for the most recent block total difficulties
    69  	numberCache *lru.Cache // Cache for the most recent block numbers
    70  
    71  	procInterrupt func() bool
    72  
    73  	rand   *mrand.Rand
    74  	engine consensus.Engine
    75  }
    76  
    77  // NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
    78  // to the parent's interrupt semaphore.
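        //
        // A minimal construction sketch (illustrative only; it assumes an in-memory
        // database whose genesis has already been committed and a placeholder
        // consensus engine such as ethash.NewFaker):
        //
        //	db := rawdb.NewMemoryDatabase()
        //	genesis := core.DefaultGenesisBlock()
        //	genesis.MustCommit(db) // writes the genesis header and chain markers
        //	hc, err := core.NewHeaderChain(db, genesis.Config, ethash.NewFaker(), func() bool { return false })
        //	if err != nil {
        //		// ErrNoGenesis is returned when no genesis header is found in the database
        //	}
        //	head := hc.CurrentHeader() // the genesis header right after construction
        //	_ = head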
    79  func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
    80  	headerCache, _ := lru.New(headerCacheLimit)
    81  	tdCache, _ := lru.New(tdCacheLimit)
    82  	numberCache, _ := lru.New(numberCacheLimit)
    83  
    84  	// Seed a fast but crypto-originating random generator
    85  	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
    86  	if err != nil {
    87  		return nil, err
    88  	}
    89  	hc := &HeaderChain{
    90  		config:        config,
    91  		chainDb:       chainDb,
    92  		headerCache:   headerCache,
    93  		tdCache:       tdCache,
    94  		numberCache:   numberCache,
    95  		procInterrupt: procInterrupt,
    96  		rand:          mrand.New(mrand.NewSource(seed.Int64())),
    97  		engine:        engine,
    98  	}
    99  	hc.genesisHeader = hc.GetHeaderByNumber(0)
   100  	if hc.genesisHeader == nil {
   101  		return nil, ErrNoGenesis
   102  	}
   103  	hc.currentHeader.Store(hc.genesisHeader)
   104  	if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
   105  		if chead := hc.GetHeaderByHash(head); chead != nil {
   106  			hc.currentHeader.Store(chead)
   107  		}
   108  	}
   109  	hc.currentHeaderHash = hc.CurrentHeader().Hash()
   110  	headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())
   111  	return hc, nil
   112  }
   113  
   114  // GetBlockNumber retrieves the block number belonging to the given hash
   115  // from the cache or database.
   116  func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
   117  	if cached, ok := hc.numberCache.Get(hash); ok {
   118  		number := cached.(uint64)
   119  		return &number
   120  	}
   121  	number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
   122  	if number != nil {
   123  		hc.numberCache.Add(hash, *number)
   124  	}
   125  	return number
   126  }
   127  
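        // headerWriteResult wraps the outcome of a batch header write: the resulting
        // canonical/side-chain status, how many headers were ignored (already known)
        // versus imported, and the hash and header of the last element in the batch.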
   128  type headerWriteResult struct {
   129  	status     WriteStatus
   130  	ignored    int
   131  	imported   int
   132  	lastHash   common.Hash
   133  	lastHeader *types.Header
   134  }
   135  
   136  // Reorg reorgs the local canonical chain into the specified chain. The reorg
   137  // can be classified into two cases: (a) extend the local chain (b) switch the
   138  // head to the given header.
   139  func (hc *HeaderChain) Reorg(headers []*types.Header) error {
   140  	// Short circuit if nothing to reorg.
   141  	if len(headers) == 0 {
   142  		return nil
   143  	}
   144  	// If the parent of the (first) block is already the canon header,
   145  	// we don't have to go backwards to delete canon blocks, but simply
   146  	// pile them onto the existing chain. Otherwise, do the necessary
   147  	// reorgs.
   148  	var (
   149  		first = headers[0]
   150  		last  = headers[len(headers)-1]
   151  		batch = hc.chainDb.NewBatch()
   152  	)
   153  	if first.ParentHash != hc.currentHeaderHash {
   154  		// Delete any canonical number assignments above the new head
   155  		for i := last.Number.Uint64() + 1; ; i++ {
   156  			hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
   157  			if hash == (common.Hash{}) {
   158  				break
   159  			}
   160  			rawdb.DeleteCanonicalHash(batch, i)
   161  		}
   162  		// Overwrite any stale canonical number assignments, going
   163  		// backwards from the first header in this import until the
   164  		// cross link between two chains.
   165  		var (
   166  			header     = first
   167  			headNumber = header.Number.Uint64()
   168  			headHash   = header.Hash()
   169  		)
   170  		for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
   171  			rawdb.WriteCanonicalHash(batch, headHash, headNumber)
   172  			if headNumber == 0 {
   173  				break // It shouldn't be reached
   174  			}
   175  			headHash, headNumber = header.ParentHash, header.Number.Uint64()-1
   176  			header = hc.GetHeader(headHash, headNumber)
   177  			if header == nil {
   178  				return fmt.Errorf("missing parent %d %x", headNumber, headHash)
   179  			}
   180  		}
   181  	}
   182  	// Extend the canonical chain with the new headers
   183  	for i := 0; i < len(headers)-1; i++ {
   184  		hash := headers[i+1].ParentHash // Save some extra hashing
   185  		num := headers[i].Number.Uint64()
   186  		rawdb.WriteCanonicalHash(batch, hash, num)
   187  		rawdb.WriteHeadHeaderHash(batch, hash)
   188  	}
   189  	// Write the last header
   190  	hash := headers[len(headers)-1].Hash()
   191  	num := headers[len(headers)-1].Number.Uint64()
   192  	rawdb.WriteCanonicalHash(batch, hash, num)
   193  	rawdb.WriteHeadHeaderHash(batch, hash)
   194  
   195  	if err := batch.Write(); err != nil {
   196  		return err
   197  	}
   198  	// Last step: update all in-memory head header markers
   199  	hc.currentHeaderHash = last.Hash()
   200  	hc.currentHeader.Store(types.CopyHeader(last))
   201  	headHeaderGauge.Update(last.Number.Int64())
   202  	return nil
   203  }
   204  
   205  // WriteHeaders writes a chain of headers into the local chain, given that the
   206  // parents are already known. The chain head header won't be updated in this
   207  // function; an additional SetCanonical call is expected in order to finish the entire
   208  // procedure.
   209  func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) {
   210  	if len(headers) == 0 {
   211  		return 0, nil
   212  	}
   213  	ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1)
   214  	if ptd == nil {
   215  		return 0, consensus.ErrUnknownAncestor
   216  	}
   217  	var (
   218  		newTD       = new(big.Int).Set(ptd) // Total difficulty of inserted chain
   219  		inserted    []rawdb.NumberHash      // Ephemeral lookup of number/hash for the chain
   220  		parentKnown = true                  // Set to true to force hc.HasHeader check the first iteration
   221  		batch       = hc.chainDb.NewBatch()
   222  	)
   223  	for i, header := range headers {
   224  		var hash common.Hash
   225  		// The headers have already been validated at this point, so we already
   226  		// know that it's a contiguous chain, where
   227  		// headers[i].Hash() == headers[i+1].ParentHash
   228  		if i < len(headers)-1 {
   229  			hash = headers[i+1].ParentHash
   230  		} else {
   231  			hash = header.Hash()
   232  		}
   233  		number := header.Number.Uint64()
   234  		newTD.Add(newTD, header.Difficulty)
   235  
   236  		// If the parent was not present, store the header regardless.
   237  		// If the header is already known, skip it, otherwise store it.
   238  		alreadyKnown := parentKnown && hc.HasHeader(hash, number)
   239  		if !alreadyKnown {
   240  			// Irrespective of the canonical status, write the TD and header to the database.
   241  			rawdb.WriteTd(batch, hash, number, newTD)
   242  			hc.tdCache.Add(hash, new(big.Int).Set(newTD))
   243  
   244  			rawdb.WriteHeader(batch, header)
   245  			inserted = append(inserted, rawdb.NumberHash{Number: number, Hash: hash})
   246  			hc.headerCache.Add(hash, header)
   247  			hc.numberCache.Add(hash, number)
   248  		}
   249  		parentKnown = alreadyKnown
   250  	}
   251  	// Skip the slow disk write of all headers if interrupted.
   252  	if hc.procInterrupt() {
   253  		log.Debug("Premature abort during headers import")
   254  		return 0, errors.New("aborted")
   255  	}
   256  	// Commit to disk!
   257  	if err := batch.Write(); err != nil {
   258  		log.Crit("Failed to write headers", "error", err)
   259  	}
   260  	return len(inserted), nil
   261  }
   262  
   263  // writeHeadersAndSetHead writes a batch of block headers and applies the last
   264  // header as the chain head if the fork choicer says it's ok to update the chain.
   265  // Note: This method is not concurrent-safe with inserting blocks simultaneously
   266  // into the chain, as side effects caused by reorganisations cannot be emulated
   267  // without the real blocks. Hence, writing headers directly should only be done
   268  // in two scenarios: pure-header mode of operation (light clients), or properly
   269  // separated header/block phases (non-archive clients).
   270  func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *ForkChoice) (*headerWriteResult, error) {
   271  	inserted, err := hc.WriteHeaders(headers)
   272  	if err != nil {
   273  		return nil, err
   274  	}
   275  	var (
   276  		lastHeader = headers[len(headers)-1]
   277  		lastHash   = headers[len(headers)-1].Hash()
   278  		result     = &headerWriteResult{
   279  			status:     NonStatTy,
   280  			ignored:    len(headers) - inserted,
   281  			imported:   inserted,
   282  			lastHash:   lastHash,
   283  			lastHeader: lastHeader,
   284  		}
   285  	)
   286  	// Ask the fork choicer if the reorg is necessary
   287  	if reorg, err := forker.ReorgNeeded(hc.CurrentHeader(), lastHeader); err != nil {
   288  		return nil, err
   289  	} else if !reorg {
   290  		if inserted != 0 {
   291  			result.status = SideStatTy
   292  		}
   293  		return result, nil
   294  	}
   295  	// Special case: all the inserted headers are already on the canonical
   296  	// header chain, skip the reorg operation.
   297  	if hc.GetCanonicalHash(lastHeader.Number.Uint64()) == lastHash && lastHeader.Number.Uint64() <= hc.CurrentHeader().Number.Uint64() {
   298  		return result, nil
   299  	}
   300  	// Apply the reorg operation
   301  	if err := hc.Reorg(headers); err != nil {
   302  		return nil, err
   303  	}
   304  	result.status = CanonStatTy
   305  	return result, nil
   306  }
   307  
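        // ValidateHeaderChain sanity checks a batch of headers before import: it verifies
        // that the headers are contiguous and correctly linked, aborts on hashes contained
        // in the BadHashes set, and runs the consensus engine's parallel header
        // verification. checkFreq controls seal verification: 0 skips seal checks entirely,
        // otherwise roughly one randomly chosen header out of every checkFreq (plus the
        // last header of the batch) is seal-checked. On failure, the index of the offending
        // header is returned together with the error.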
   308  func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
   309  	// Do a sanity check that the provided chain is actually ordered and linked
   310  	for i := 1; i < len(chain); i++ {
   311  		if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 {
   312  			hash := chain[i].Hash()
   313  			parentHash := chain[i-1].Hash()
   314  			// Chain broke ancestry, log a message (programming error) and skip insertion
   315  			log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", hash,
   316  				"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash)
   317  
   318  			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, chain[i-1].Number,
   319  				parentHash.Bytes()[:4], i, chain[i].Number, hash.Bytes()[:4], chain[i].ParentHash[:4])
   320  		}
   321  		// If the header is a banned one, straight out abort
   322  		if BadHashes[chain[i].ParentHash] {
   323  			return i - 1, ErrBannedHash
   324  		}
   325  		// If it's the last header in the chunk, we need to check it too
   326  		if i == len(chain)-1 && BadHashes[chain[i].Hash()] {
   327  			return i, ErrBannedHash
   328  		}
   329  	}
   330  
   331  	// Generate the list of seal verification requests, and start the parallel verifier
   332  	seals := make([]bool, len(chain))
   333  	if checkFreq != 0 {
   334  		// In case of checkFreq == 0 all seals are left false.
   335  		for i := 0; i <= len(seals)/checkFreq; i++ {
   336  			index := i*checkFreq + hc.rand.Intn(checkFreq)
   337  			if index >= len(seals) {
   338  				index = len(seals) - 1
   339  			}
   340  			seals[index] = true
   341  		}
   342  		// Last should always be verified to avoid junk.
   343  		seals[len(seals)-1] = true
   344  	}
   345  
   346  	abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
   347  	defer close(abort)
   348  
   349  	// Iterate over the headers and ensure they all check out
   350  	for i := range chain {
   351  		// If the chain is terminating, stop processing blocks
   352  		if hc.procInterrupt() {
   353  			log.Debug("Premature abort during headers verification")
   354  			return 0, errors.New("aborted")
   355  		}
   356  		// Otherwise wait for headers checks and ensure they pass
   357  		if err := <-results; err != nil {
   358  			return i, err
   359  		}
   360  	}
   361  
   362  	return 0, nil
   363  }
   364  
   365  // InsertHeaderChain inserts the given headers and does the reorganisations.
   366  //
   367  // The validity of the headers is NOT CHECKED by this method, i.e. they need to be
   368  // validated by ValidateHeaderChain before calling InsertHeaderChain.
   369  //
   370  // This insert is all-or-nothing. If this returns an error, no headers were written,
   371  // otherwise they were all processed successfully.
   372  //
   373  // The returned 'write status' says if the inserted headers are part of the canonical chain
   374  // or a side chain.
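        //
        // A typical call sequence looks roughly as follows (sketch only; forker is
        // assumed to be a previously constructed *ForkChoice and 100 is an arbitrary
        // seal-check frequency):
        //
        //	if n, err := hc.ValidateHeaderChain(headers, 100); err != nil {
        //		log.Error("Invalid header in chain", "index", n, "err", err)
        //	} else if status, err := hc.InsertHeaderChain(headers, time.Now(), forker); err == nil {
        //		_ = status // CanonStatTy or SideStatTy
        //	}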
   375  func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time, forker *ForkChoice) (WriteStatus, error) {
   376  	if hc.procInterrupt() {
   377  		return 0, errors.New("aborted")
   378  	}
   379  	res, err := hc.writeHeadersAndSetHead(chain, forker)
   380  	if err != nil {
   381  		return 0, err
   382  	}
   383  	// Report some public statistics so the user has a clue what's going on
   384  	context := []interface{}{
   385  		"count", res.imported,
   386  		"elapsed", common.PrettyDuration(time.Since(start)),
   387  	}
   388  	if last := res.lastHeader; last != nil {
   389  		context = append(context, "number", last.Number, "hash", res.lastHash)
   390  		if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {
   391  			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
   392  		}
   393  	}
   394  	if res.ignored > 0 {
   395  		context = append(context, []interface{}{"ignored", res.ignored}...)
   396  	}
   397  	log.Info("Imported new block headers", context...)
   398  	return res.status, err
   399  }
   400  
   401  // GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
   402  // a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
   403  // number of blocks to be individually checked before we reach the canonical chain.
   404  //
   405  // Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
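        //
        // For illustration (sketch only): walking 128 blocks back from the current head
        // while tolerating at most 64 individual non-canonical lookups:
        //
        //	limit := uint64(64)
        //	head := hc.CurrentHeader()
        //	ancHash, ancNum := hc.GetAncestor(head.Hash(), head.Number.Uint64(), 128, &limit)
        //	_, _ = ancHash, ancNum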
   406  func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
   407  	if ancestor > number {
   408  		return common.Hash{}, 0
   409  	}
   410  	if ancestor == 1 {
   411  		// in this case it is cheaper to just read the header
   412  		if header := hc.GetHeader(hash, number); header != nil {
   413  			return header.ParentHash, number - 1
   414  		}
   415  		return common.Hash{}, 0
   416  	}
   417  	for ancestor != 0 {
   418  		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
   419  			ancestorHash := rawdb.ReadCanonicalHash(hc.chainDb, number-ancestor)
   420  			if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
   421  				number -= ancestor
   422  				return ancestorHash, number
   423  			}
   424  		}
   425  		if *maxNonCanonical == 0 {
   426  			return common.Hash{}, 0
   427  		}
   428  		*maxNonCanonical--
   429  		ancestor--
   430  		header := hc.GetHeader(hash, number)
   431  		if header == nil {
   432  			return common.Hash{}, 0
   433  		}
   434  		hash = header.ParentHash
   435  		number--
   436  	}
   437  	return hash, number
   438  }
   439  
   440  // GetTd retrieves a block's total difficulty in the canonical chain from the
   441  // database by hash and number, caching it if found.
   442  func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
   443  	// Short circuit if the td's already in the cache, retrieve otherwise
   444  	if cached, ok := hc.tdCache.Get(hash); ok {
   445  		return cached.(*big.Int)
   446  	}
   447  	td := rawdb.ReadTd(hc.chainDb, hash, number)
   448  	if td == nil {
   449  		return nil
   450  	}
   451  	// Cache the found td for next time and return
   452  	hc.tdCache.Add(hash, td)
   453  	return td
   454  }
   455  
   456  // GetHeader retrieves a block header from the database by hash and number,
   457  // caching it if found.
   458  func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
   459  	// Short circuit if the header's already in the cache, retrieve otherwise
   460  	if header, ok := hc.headerCache.Get(hash); ok {
   461  		return header.(*types.Header)
   462  	}
   463  	header := rawdb.ReadHeader(hc.chainDb, hash, number)
   464  	if header == nil {
   465  		return nil
   466  	}
   467  	// Cache the found header for next time and return
   468  	hc.headerCache.Add(hash, header)
   469  	return header
   470  }
   471  
   472  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
   473  // found.
   474  func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
   475  	number := hc.GetBlockNumber(hash)
   476  	if number == nil {
   477  		return nil
   478  	}
   479  	return hc.GetHeader(hash, *number)
   480  }
   481  
   482  // HasHeader checks if a block header is present in the database or not.
   483  // In theory, if the header is present in the database, all related components
   484  // like td and hash->number should be present too.
   485  func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
   486  	if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) {
   487  		return true
   488  	}
   489  	return rawdb.HasHeader(hc.chainDb, hash, number)
   490  }
   491  
   492  // GetHeaderByNumber retrieves a block header from the database by number,
   493  // caching it (associated with its hash) if found.
   494  func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
   495  	hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
   496  	if hash == (common.Hash{}) {
   497  		return nil
   498  	}
   499  	return hc.GetHeader(hash, number)
   500  }
   501  
   502  // GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going
   503  // backwards from the given number.
   504  // If the 'number' is higher than the highest local header, this method will
   505  // return a best-effort response, containing the headers that we do have.
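        //
        // For example (sketch), fetching the RLP encodings of the ten most recent
        // canonical headers, ordered from the head downwards:
        //
        //	head := hc.CurrentHeader().Number.Uint64()
        //	encoded := hc.GetHeadersFrom(head, 10)
        //	_ = encoded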
   506  func (hc *HeaderChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
   507  	// If the request is for future headers, we still return the portion of
   508  	// headers that we are able to serve
   509  	if current := hc.CurrentHeader().Number.Uint64(); current < number {
   510  		if count > number-current {
   511  			count -= number - current
   512  			number = current
   513  		} else {
   514  			return nil
   515  		}
   516  	}
   517  	var headers []rlp.RawValue
   518  	// If we have some of the headers in cache already, use that before going to db.
   519  	hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
   520  	if hash == (common.Hash{}) {
   521  		return nil
   522  	}
   523  	for count > 0 {
   524  		header, ok := hc.headerCache.Get(hash)
   525  		if !ok {
   526  			break
   527  		}
   528  		h := header.(*types.Header)
   529  		rlpData, _ := rlp.EncodeToBytes(h)
   530  		headers = append(headers, rlpData)
   531  		hash = h.ParentHash
   532  		count--
   533  		number--
   534  	}
   535  	// Read remaining from db
   536  	if count > 0 {
   537  		headers = append(headers, rawdb.ReadHeaderRange(hc.chainDb, number, count)...)
   538  	}
   539  	return headers
   540  }
   541  
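        // GetCanonicalHash retrieves the hash assigned to a block number on the canonical
        // chain, or the zero hash if no canonical block exists at that height.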
   542  func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
   543  	return rawdb.ReadCanonicalHash(hc.chainDb, number)
   544  }
   545  
   546  // CurrentHeader retrieves the current head header of the canonical chain. The
   547  // header is retrieved from the HeaderChain's internal cache.
   548  func (hc *HeaderChain) CurrentHeader() *types.Header {
   549  	return hc.currentHeader.Load().(*types.Header)
   550  }
   551  
   552  // SetCurrentHeader sets the in-memory head header marker of the canonical chain
   553  // as the given header.
   554  func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
   555  	hc.currentHeader.Store(head)
   556  	hc.currentHeaderHash = head.Hash()
   557  	headHeaderGauge.Update(head.Number.Int64())
   558  }
   559  
   560  type (
   561  	// UpdateHeadBlocksCallback is a callback function that is called by SetHead
   562  	// before head header is updated. The method will return the actual block it
   563  	// updated the head to (missing state) and a flag whether setHead should continue
   564  	// rewinding to that block forcefully (e.g. when the ancient limit is exceeded).
   565  	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (uint64, bool)
   566  
   567  	// DeleteBlockContentCallback is a callback function that is called by SetHead
   568  	// before each header is deleted.
   569  	DeleteBlockContentCallback func(ethdb.KeyValueWriter, common.Hash, uint64)
   570  )
   571  
   572  // SetHead rewinds the local chain to a new head. Everything above the new head
   573  // will be deleted and the new one set.
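        //
        // A minimal sketch (illustrative only): rewinding the header chain to block
        // 1_000_000 with no callbacks, i.e. without touching block bodies or receipts;
        // real callers supply callbacks that keep the block/fast-block heads and the
        // ancient store consistent:
        //
        //	hc.SetHead(1_000_000, nil, nil)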
   574  func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
   575  	var (
   576  		parentHash common.Hash
   577  		batch      = hc.chainDb.NewBatch()
   578  		origin     = true
   579  	)
   580  	for hdr := hc.CurrentHeader(); hdr != nil && hdr.Number.Uint64() > head; hdr = hc.CurrentHeader() {
   581  		num := hdr.Number.Uint64()
   582  
   583  		// Rewind block chain to new head.
   584  		parent := hc.GetHeader(hdr.ParentHash, num-1)
   585  		if parent == nil {
   586  			parent = hc.genesisHeader
   587  		}
   588  		parentHash = parent.Hash()
   589  
   590  		// Notably, geth can set the head to a height which is even lower than the
   591  		// ancient head. In order to ensure that the head is always no higher than
   592  		// the data in the database (ancient store or active store), we need to
   593  		// update the head first and then remove the related data from the database.
   594  		//
   595  		// Update the head markers first (head fast block, head full block) before
   596  		// deleting the data.
   597  		markerBatch := hc.chainDb.NewBatch()
   598  		if updateFn != nil {
   599  			newHead, force := updateFn(markerBatch, parent)
   600  			if force && newHead < head {
   601  				log.Warn("Force rewinding till ancient limit", "head", newHead)
   602  				head = newHead
   603  			}
   604  		}
   605  		// Update head header then.
   606  		rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
   607  		if err := markerBatch.Write(); err != nil {
   608  			log.Crit("Failed to update chain markers", "error", err)
   609  		}
   610  		hc.currentHeader.Store(parent)
   611  		hc.currentHeaderHash = parentHash
   612  		headHeaderGauge.Update(parent.Number.Int64())
   613  
   614  		// If this is the first iteration, wipe any leftover data upwards too so
   615  		// we don't end up with dangling gaps in the database
   616  		var nums []uint64
   617  		if origin {
   618  			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
   619  				nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
   620  			}
   621  			origin = false
   622  		}
   623  		nums = append(nums, num)
   624  
   625  		// Remove the related data from the database on all sidechains
   626  		for _, num := range nums {
   627  			// Gather all the side fork hashes
   628  			hashes := rawdb.ReadAllHashes(hc.chainDb, num)
   629  			if len(hashes) == 0 {
   630  				// No hashes in the database whatsoever, probably frozen already
   631  				hashes = append(hashes, hdr.Hash())
   632  			}
   633  			for _, hash := range hashes {
   634  				if delFn != nil {
   635  					delFn(batch, hash, num)
   636  				}
   637  				rawdb.DeleteHeader(batch, hash, num)
   638  				rawdb.DeleteTd(batch, hash, num)
   639  			}
   640  			rawdb.DeleteCanonicalHash(batch, num)
   641  		}
   642  	}
   643  	// Flush all accumulated deletions.
   644  	if err := batch.Write(); err != nil {
   645  		log.Crit("Failed to rewind block", "error", err)
   646  	}
   647  	// Clear out any stale content from the caches
   648  	hc.headerCache.Purge()
   649  	hc.tdCache.Purge()
   650  	hc.numberCache.Purge()
   651  }
   652  
   653  // SetGenesis sets a new genesis block header for the chain.
   654  func (hc *HeaderChain) SetGenesis(head *types.Header) {
   655  	hc.genesisHeader = head
   656  }
   657  
   658  // Config retrieves the header chain's chain configuration.
   659  func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }
   660  
   661  // Engine retrieves the header chain's consensus engine.
   662  func (hc *HeaderChain) Engine() consensus.Engine { return hc.engine }
   663  
   664  // GetBlock implements consensus.ChainReader, and returns nil for every input as
   665  // a header chain does not have blocks available for retrieval.
   666  func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block {
   667  	return nil
   668  }