github.com/jimmyx0x/go-ethereum@v1.10.28/core/headerchain.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package core
    18  
    19  import (
    20  	crand "crypto/rand"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/big"
    25  	mrand "math/rand"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/common"
    30  	"github.com/ethereum/go-ethereum/common/lru"
    31  	"github.com/ethereum/go-ethereum/consensus"
    32  	"github.com/ethereum/go-ethereum/core/rawdb"
    33  	"github.com/ethereum/go-ethereum/core/types"
    34  	"github.com/ethereum/go-ethereum/ethdb"
    35  	"github.com/ethereum/go-ethereum/log"
    36  	"github.com/ethereum/go-ethereum/params"
    37  	"github.com/ethereum/go-ethereum/rlp"
    38  )
    39  
const (
	headerCacheLimit = 512  // maximum number of recent headers kept in headerCache
	tdCacheLimit     = 1024 // maximum number of recent total difficulties kept in tdCache
	numberCacheLimit = 2048 // maximum number of recent hash->number mappings kept in numberCache
)
    45  
// HeaderChain implements the basic block header chain logic that is shared by
// core.BlockChain and light.LightChain. It is not usable in itself, only as
// a part of either structure.
//
// HeaderChain is responsible for maintaining the header chain including the
// header query and updating.
//
// The components maintained by headerchain include: (1) total difficulty
// (2) header (3) block hash -> number mapping (4) canonical number -> hash mapping
// and (5) head header flag.
//
// It is not thread safe either, the encapsulating chain structures should do
// the necessary mutex locking/unlocking.
type HeaderChain struct {
	config        *params.ChainConfig // Chain configuration (forks, chain id, ...)
	chainDb       ethdb.Database      // Backing database for headers, TDs and number mappings
	genesisHeader *types.Header       // Header of the genesis block (number 0)

	currentHeader     atomic.Value // Current head of the header chain (may be above the block chain!)
	currentHeaderHash common.Hash  // Hash of the current head of the header chain (prevent recomputing all the time)

	headerCache *lru.Cache[common.Hash, *types.Header]
	tdCache     *lru.Cache[common.Hash, *big.Int] // most recent total difficulties
	numberCache *lru.Cache[common.Hash, uint64]   // most recent block numbers

	procInterrupt func() bool // Callback reporting whether processing should be aborted

	rand   *mrand.Rand      // Deterministic-per-instance randomness for seal sampling
	engine consensus.Engine // Consensus engine used for header verification
}
    76  
// NewHeaderChain creates a new HeaderChain structure. ProcInterrupt points
// to the parent's interrupt semaphore.
//
// The head is initialised to the genesis header and then, if a head block
// hash is stored in the database and resolvable to a local header, advanced
// to that header. ErrNoGenesis is returned if the database contains no
// header for block number 0.
func NewHeaderChain(chainDb ethdb.Database, config *params.ChainConfig, engine consensus.Engine, procInterrupt func() bool) (*HeaderChain, error) {
	// Seed a fast but crypto originating random generator
	seed, err := crand.Int(crand.Reader, big.NewInt(math.MaxInt64))
	if err != nil {
		return nil, err
	}
	hc := &HeaderChain{
		config:        config,
		chainDb:       chainDb,
		headerCache:   lru.NewCache[common.Hash, *types.Header](headerCacheLimit),
		tdCache:       lru.NewCache[common.Hash, *big.Int](tdCacheLimit),
		numberCache:   lru.NewCache[common.Hash, uint64](numberCacheLimit),
		procInterrupt: procInterrupt,
		rand:          mrand.New(mrand.NewSource(seed.Int64())),
		engine:        engine,
	}
	// The genesis header is mandatory; without it the chain is unusable.
	hc.genesisHeader = hc.GetHeaderByNumber(0)
	if hc.genesisHeader == nil {
		return nil, ErrNoGenesis
	}
	// Default the head to genesis, then restore the persisted head block
	// marker if one exists and its header is locally available.
	hc.currentHeader.Store(hc.genesisHeader)
	if head := rawdb.ReadHeadBlockHash(chainDb); head != (common.Hash{}) {
		if chead := hc.GetHeaderByHash(head); chead != nil {
			hc.currentHeader.Store(chead)
		}
	}
	hc.currentHeaderHash = hc.CurrentHeader().Hash()
	headHeaderGauge.Update(hc.CurrentHeader().Number.Int64())
	return hc, nil
}
   109  
   110  // GetBlockNumber retrieves the block number belonging to the given hash
   111  // from the cache or database
   112  func (hc *HeaderChain) GetBlockNumber(hash common.Hash) *uint64 {
   113  	if cached, ok := hc.numberCache.Get(hash); ok {
   114  		return &cached
   115  	}
   116  	number := rawdb.ReadHeaderNumber(hc.chainDb, hash)
   117  	if number != nil {
   118  		hc.numberCache.Add(hash, *number)
   119  	}
   120  	return number
   121  }
   122  
// headerWriteResult summarises the outcome of a header batch write.
type headerWriteResult struct {
	status     WriteStatus   // canonical/side/none status of the written batch
	ignored    int           // number of headers skipped as already known
	imported   int           // number of headers actually written
	lastHash   common.Hash   // hash of the last header in the batch
	lastHeader *types.Header // last header in the batch
}
   130  
// Reorg reorgs the local canonical chain into the specified chain. The reorg
// can be classified into two cases: (a) extend the local chain (b) switch the
// head to the given header.
func (hc *HeaderChain) Reorg(headers []*types.Header) error {
	// Short circuit if nothing to reorg.
	if len(headers) == 0 {
		return nil
	}
	// If the parent of the (first) block is already the canon header,
	// we don't have to go backwards to delete canon blocks, but simply
	// pile them onto the existing chain. Otherwise, do the necessary
	// reorgs.
	var (
		first = headers[0]
		last  = headers[len(headers)-1]
		batch = hc.chainDb.NewBatch()
	)
	if first.ParentHash != hc.currentHeaderHash {
		// Delete any canonical number assignments above the new head,
		// stopping at the first number with no stored canonical hash.
		for i := last.Number.Uint64() + 1; ; i++ {
			hash := rawdb.ReadCanonicalHash(hc.chainDb, i)
			if hash == (common.Hash{}) {
				break
			}
			rawdb.DeleteCanonicalHash(batch, i)
		}
		// Overwrite any stale canonical number assignments, going
		// backwards from the first header in this import until the
		// cross link between two chains.
		var (
			header     = first
			headNumber = header.Number.Uint64()
			headHash   = header.Hash()
		)
		for rawdb.ReadCanonicalHash(hc.chainDb, headNumber) != headHash {
			rawdb.WriteCanonicalHash(batch, headHash, headNumber)
			if headNumber == 0 {
				break // It shouldn't be reached
			}
			// Step down to the parent; it must be present locally or the
			// two chains cannot be cross-linked.
			headHash, headNumber = header.ParentHash, header.Number.Uint64()-1
			header = hc.GetHeader(headHash, headNumber)
			if header == nil {
				return fmt.Errorf("missing parent %d %x", headNumber, headHash)
			}
		}
	}
	// Extend the canonical chain with the new headers
	for i := 0; i < len(headers)-1; i++ {
		hash := headers[i+1].ParentHash // Save some extra hashing
		num := headers[i].Number.Uint64()
		rawdb.WriteCanonicalHash(batch, hash, num)
		rawdb.WriteHeadHeaderHash(batch, hash)
	}
	// Write the last header
	hash := headers[len(headers)-1].Hash()
	num := headers[len(headers)-1].Number.Uint64()
	rawdb.WriteCanonicalHash(batch, hash, num)
	rawdb.WriteHeadHeaderHash(batch, hash)

	if err := batch.Write(); err != nil {
		return err
	}
	// Last step update all in-memory head header markers
	hc.currentHeaderHash = last.Hash()
	hc.currentHeader.Store(types.CopyHeader(last))
	headHeaderGauge.Update(last.Number.Int64())
	return nil
}
   199  
// WriteHeaders writes a chain of headers into the local chain, given that the
// parents are already known. The chain head header won't be updated in this
// function, the additional SetCanonical is expected in order to finish the entire
// procedure.
//
// It returns the number of headers actually written (already-known headers
// are skipped).
func (hc *HeaderChain) WriteHeaders(headers []*types.Header) (int, error) {
	if len(headers) == 0 {
		return 0, nil
	}
	// The parent's total difficulty is needed to accumulate the TDs of the
	// inserted chain; if it's missing the ancestor is unknown.
	ptd := hc.GetTd(headers[0].ParentHash, headers[0].Number.Uint64()-1)
	if ptd == nil {
		return 0, consensus.ErrUnknownAncestor
	}
	var (
		newTD       = new(big.Int).Set(ptd) // Total difficulty of inserted chain
		inserted    []rawdb.NumberHash      // Ephemeral lookup of number/hash for the chain
		parentKnown = true                  // Set to true to force hc.HasHeader check the first iteration
		batch       = hc.chainDb.NewBatch()
	)
	for i, header := range headers {
		var hash common.Hash
		// The headers have already been validated at this point, so we already
		// know that it's a contiguous chain, where
		// headers[i].Hash() == headers[i+1].ParentHash
		if i < len(headers)-1 {
			hash = headers[i+1].ParentHash
		} else {
			hash = header.Hash()
		}
		number := header.Number.Uint64()
		newTD.Add(newTD, header.Difficulty)

		// A header can only be already known if its parent was known too, so
		// the (potentially expensive) HasHeader check is skipped as soon as
		// one unknown header has been seen.
		alreadyKnown := parentKnown && hc.HasHeader(hash, number)
		if !alreadyKnown {
			// Irrelevant of the canonical status, write the TD and header to the database.
			rawdb.WriteTd(batch, hash, number, newTD)
			hc.tdCache.Add(hash, new(big.Int).Set(newTD))

			rawdb.WriteHeader(batch, header)
			inserted = append(inserted, rawdb.NumberHash{Number: number, Hash: hash})
			hc.headerCache.Add(hash, header)
			hc.numberCache.Add(hash, number)
		}
		parentKnown = alreadyKnown
	}
	// Skip the slow disk write of all headers if interrupted.
	if hc.procInterrupt() {
		log.Debug("Premature abort during headers import")
		return 0, errors.New("aborted")
	}
	// Commit to disk!
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write headers", "error", err)
	}
	return len(inserted), nil
}
   257  
// writeHeadersAndSetHead writes a batch of block headers and applies the last
// header as the chain head if the fork choicer says it's ok to update the chain.
// Note: This method is not concurrent-safe with inserting blocks simultaneously
// into the chain, as side effects caused by reorganisations cannot be emulated
// without the real blocks. Hence, writing headers directly should only be done
// in two scenarios: pure-header mode of operation (light clients), or properly
// separated header/block phases (non-archive clients).
func (hc *HeaderChain) writeHeadersAndSetHead(headers []*types.Header, forker *ForkChoice) (*headerWriteResult, error) {
	// Persist the headers themselves first; head updates follow below.
	inserted, err := hc.WriteHeaders(headers)
	if err != nil {
		return nil, err
	}
	var (
		lastHeader = headers[len(headers)-1]
		lastHash   = headers[len(headers)-1].Hash()
		result     = &headerWriteResult{
			status:     NonStatTy,
			ignored:    len(headers) - inserted,
			imported:   inserted,
			lastHash:   lastHash,
			lastHeader: lastHeader,
		}
	)
	// Ask the fork choicer if the reorg is necessary
	if reorg, err := forker.ReorgNeeded(hc.CurrentHeader(), lastHeader); err != nil {
		return nil, err
	} else if !reorg {
		// No reorg wanted: anything newly written becomes a side chain.
		if inserted != 0 {
			result.status = SideStatTy
		}
		return result, nil
	}
	// Special case, all the inserted headers are already on the canonical
	// header chain, skip the reorg operation.
	if hc.GetCanonicalHash(lastHeader.Number.Uint64()) == lastHash && lastHeader.Number.Uint64() <= hc.CurrentHeader().Number.Uint64() {
		return result, nil
	}
	// Apply the reorg operation
	if err := hc.Reorg(headers); err != nil {
		return nil, err
	}
	result.status = CanonStatTy
	return result, nil
}
   302  
// ValidateHeaderChain verifies that the given chain of headers is contiguous,
// contains no banned hashes, and passes the consensus engine's header checks,
// with seals verified for a random sample of roughly one in every checkFreq
// headers (the last header is always seal-checked; checkFreq == 0 disables
// seal verification). On failure it returns the index of the offending header
// and the error.
func (hc *HeaderChain) ValidateHeaderChain(chain []*types.Header, checkFreq int) (int, error) {
	// Do a sanity check that the provided chain is actually ordered and linked
	for i := 1; i < len(chain); i++ {
		if chain[i].Number.Uint64() != chain[i-1].Number.Uint64()+1 {
			hash := chain[i].Hash()
			parentHash := chain[i-1].Hash()
			// Chain broke ancestry, log a message (programming error) and skip insertion
			log.Error("Non contiguous header insert", "number", chain[i].Number, "hash", hash,
				"parent", chain[i].ParentHash, "prevnumber", chain[i-1].Number, "prevhash", parentHash)

			return 0, fmt.Errorf("non contiguous insert: item %d is #%d [%x..], item %d is #%d [%x..] (parent [%x..])", i-1, chain[i-1].Number,
				parentHash.Bytes()[:4], i, chain[i].Number, hash.Bytes()[:4], chain[i].ParentHash[:4])
		}
		// If the header is a banned one, straight out abort
		if BadHashes[chain[i].ParentHash] {
			return i - 1, ErrBannedHash
		}
		// If it's the last header in the chunk, we need to check it too
		if i == len(chain)-1 && BadHashes[chain[i].Hash()] {
			return i, ErrBannedHash
		}
	}

	// Generate the list of seal verification requests, and start the parallel verifier
	seals := make([]bool, len(chain))
	if checkFreq != 0 {
		// In case of checkFreq == 0 all seals are left false.
		for i := 0; i <= len(seals)/checkFreq; i++ {
			// Pick one random index per checkFreq-sized window, clamped to
			// the slice bounds.
			index := i*checkFreq + hc.rand.Intn(checkFreq)
			if index >= len(seals) {
				index = len(seals) - 1
			}
			seals[index] = true
		}
		// Last should always be verified to avoid junk.
		seals[len(seals)-1] = true
	}

	abort, results := hc.engine.VerifyHeaders(hc, chain, seals)
	defer close(abort)

	// Iterate over the headers and ensure they all check out
	for i := range chain {
		// If the chain is terminating, stop processing blocks
		if hc.procInterrupt() {
			log.Debug("Premature abort during headers verification")
			return 0, errors.New("aborted")
		}
		// Otherwise wait for headers checks and ensure they pass
		if err := <-results; err != nil {
			return i, err
		}
	}

	return 0, nil
}
   359  
// InsertHeaderChain inserts the given headers and does the reorganisations.
//
// The validity of the headers is NOT CHECKED by this method, i.e. they need to be
// validated by ValidateHeaderChain before calling InsertHeaderChain.
//
// This insert is all-or-nothing. If this returns an error, no headers were written,
// otherwise they were all processed successfully.
//
// The returned 'write status' says if the inserted headers are part of the canonical chain
// or a side chain.
func (hc *HeaderChain) InsertHeaderChain(chain []*types.Header, start time.Time, forker *ForkChoice) (WriteStatus, error) {
	if hc.procInterrupt() {
		return 0, errors.New("aborted")
	}
	res, err := hc.writeHeadersAndSetHead(chain, forker)
	if err != nil {
		return 0, err
	}
	// Report some public statistics so the user has a clue what's going on
	context := []interface{}{
		"count", res.imported,
		"elapsed", common.PrettyDuration(time.Since(start)),
	}
	if last := res.lastHeader; last != nil {
		context = append(context, "number", last.Number, "hash", res.lastHash)
		// Only report the age for headers more than a minute old to keep
		// the log line short during live sync.
		if timestamp := time.Unix(int64(last.Time), 0); time.Since(timestamp) > time.Minute {
			context = append(context, []interface{}{"age", common.PrettyAge(timestamp)}...)
		}
	}
	if res.ignored > 0 {
		context = append(context, []interface{}{"ignored", res.ignored}...)
	}
	log.Info("Imported new block headers", context...)
	return res.status, err
}
   395  
// GetAncestor retrieves the Nth ancestor of a given block. It assumes that either the given block or
// a close ancestor of it is canonical. maxNonCanonical points to a downwards counter limiting the
// number of blocks to be individually checked before we reach the canonical chain.
//
// Note: ancestor == 0 returns the same block, 1 returns its parent and so on.
func (hc *HeaderChain) GetAncestor(hash common.Hash, number, ancestor uint64, maxNonCanonical *uint64) (common.Hash, uint64) {
	// An ancestor above the block's own height cannot exist.
	if ancestor > number {
		return common.Hash{}, 0
	}
	if ancestor == 1 {
		// in this case it is cheaper to just read the header
		if header := hc.GetHeader(hash, number); header != nil {
			return header.ParentHash, number - 1
		}
		return common.Hash{}, 0
	}
	for ancestor != 0 {
		// Fast path: if the current block is canonical, the ancestor can be
		// resolved directly through the canonical number index.
		if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
			ancestorHash := rawdb.ReadCanonicalHash(hc.chainDb, number-ancestor)
			// NOTE(review): the canonical hash is re-read here after the
			// ancestor lookup — presumably to guard against the mapping
			// changing between the two reads; confirm before simplifying.
			if rawdb.ReadCanonicalHash(hc.chainDb, number) == hash {
				number -= ancestor
				return ancestorHash, number
			}
		}
		// Slow path: walk one parent at a time, bounded by maxNonCanonical.
		if *maxNonCanonical == 0 {
			return common.Hash{}, 0
		}
		*maxNonCanonical--
		ancestor--
		header := hc.GetHeader(hash, number)
		if header == nil {
			return common.Hash{}, 0
		}
		hash = header.ParentHash
		number--
	}
	return hash, number
}
   434  
   435  // GetTd retrieves a block's total difficulty in the canonical chain from the
   436  // database by hash and number, caching it if found.
   437  func (hc *HeaderChain) GetTd(hash common.Hash, number uint64) *big.Int {
   438  	// Short circuit if the td's already in the cache, retrieve otherwise
   439  	if cached, ok := hc.tdCache.Get(hash); ok {
   440  		return cached
   441  	}
   442  	td := rawdb.ReadTd(hc.chainDb, hash, number)
   443  	if td == nil {
   444  		return nil
   445  	}
   446  	// Cache the found body for next time and return
   447  	hc.tdCache.Add(hash, td)
   448  	return td
   449  }
   450  
   451  // GetHeader retrieves a block header from the database by hash and number,
   452  // caching it if found.
   453  func (hc *HeaderChain) GetHeader(hash common.Hash, number uint64) *types.Header {
   454  	// Short circuit if the header's already in the cache, retrieve otherwise
   455  	if header, ok := hc.headerCache.Get(hash); ok {
   456  		return header
   457  	}
   458  	header := rawdb.ReadHeader(hc.chainDb, hash, number)
   459  	if header == nil {
   460  		return nil
   461  	}
   462  	// Cache the found header for next time and return
   463  	hc.headerCache.Add(hash, header)
   464  	return header
   465  }
   466  
   467  // GetHeaderByHash retrieves a block header from the database by hash, caching it if
   468  // found.
   469  func (hc *HeaderChain) GetHeaderByHash(hash common.Hash) *types.Header {
   470  	number := hc.GetBlockNumber(hash)
   471  	if number == nil {
   472  		return nil
   473  	}
   474  	return hc.GetHeader(hash, *number)
   475  }
   476  
   477  // HasHeader checks if a block header is present in the database or not.
   478  // In theory, if header is present in the database, all relative components
   479  // like td and hash->number should be present too.
   480  func (hc *HeaderChain) HasHeader(hash common.Hash, number uint64) bool {
   481  	if hc.numberCache.Contains(hash) || hc.headerCache.Contains(hash) {
   482  		return true
   483  	}
   484  	return rawdb.HasHeader(hc.chainDb, hash, number)
   485  }
   486  
   487  // GetHeaderByNumber retrieves a block header from the database by number,
   488  // caching it (associated with its hash) if found.
   489  func (hc *HeaderChain) GetHeaderByNumber(number uint64) *types.Header {
   490  	hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
   491  	if hash == (common.Hash{}) {
   492  		return nil
   493  	}
   494  	return hc.GetHeader(hash, number)
   495  }
   496  
// GetHeadersFrom returns a contiguous segment of headers, in rlp-form, going
// backwards from the given number.
// If the 'number' is higher than the highest local header, this method will
// return a best-effort response, containing the headers that we do have.
func (hc *HeaderChain) GetHeadersFrom(number, count uint64) []rlp.RawValue {
	// If the request is for future headers, we still return the portion of
	// headers that we are able to serve
	if current := hc.CurrentHeader().Number.Uint64(); current < number {
		if count > number-current {
			// Trim the non-existent upper part of the range off and start
			// serving from the current head instead.
			count -= number - current
			number = current
		} else {
			// The whole requested range lies above the local head.
			return nil
		}
	}
	var headers []rlp.RawValue
	// If we have some of the headers in cache already, use that before going to db.
	hash := rawdb.ReadCanonicalHash(hc.chainDb, number)
	if hash == (common.Hash{}) {
		return nil
	}
	// Walk parent links through the header cache until the first miss.
	for count > 0 {
		header, ok := hc.headerCache.Get(hash)
		if !ok {
			break
		}
		rlpData, _ := rlp.EncodeToBytes(header)
		headers = append(headers, rlpData)
		hash = header.ParentHash
		count--
		number--
	}
	// Read remaining from db
	if count > 0 {
		headers = append(headers, rawdb.ReadHeaderRange(hc.chainDb, number, count)...)
	}
	return headers
}
   535  
// GetCanonicalHash returns the canonical hash for the given block number, or
// the zero hash if no canonical assignment exists.
func (hc *HeaderChain) GetCanonicalHash(number uint64) common.Hash {
	return rawdb.ReadCanonicalHash(hc.chainDb, number)
}
   539  
// CurrentHeader retrieves the current head header of the canonical chain. The
// header is retrieved from the HeaderChain's internal cache (an atomic.Value,
// never nil after construction).
func (hc *HeaderChain) CurrentHeader() *types.Header {
	return hc.currentHeader.Load().(*types.Header)
}
   545  
// SetCurrentHeader sets the in-memory head header marker of the canonical chain
// as the given header. Only in-memory markers and the head gauge are touched;
// nothing is written to the database.
func (hc *HeaderChain) SetCurrentHeader(head *types.Header) {
	hc.currentHeader.Store(head)
	hc.currentHeaderHash = head.Hash()
	headHeaderGauge.Update(head.Number.Int64())
}
   553  
type (
	// UpdateHeadBlocksCallback is a callback function that is called by SetHead
	// before the head header is updated. The method will return the actual block
	// it updated the head to (missing state) and a flag whether setHead should
	// continue rewinding till that forcefully (exceeded ancient limits).
	UpdateHeadBlocksCallback func(ethdb.KeyValueWriter, *types.Header) (*types.Header, bool)

	// DeleteBlockContentCallback is a callback function that is called by SetHead
	// before each header is deleted.
	DeleteBlockContentCallback func(ethdb.KeyValueWriter, common.Hash, uint64)
)
   565  
// SetHead rewinds the local chain to a new head. Everything above the new head
// will be deleted and the new one set. The rewind is delegated to setHead in
// block-number mode (headTime == 0).
func (hc *HeaderChain) SetHead(head uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	hc.setHead(head, 0, updateFn, delFn)
}
   571  
// SetHeadWithTimestamp rewinds the local chain to a new head timestamp. Everything
// above the new head will be deleted and the new one set. The rewind is delegated
// to setHead in timestamp mode (headBlock == 0).
func (hc *HeaderChain) SetHeadWithTimestamp(time uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	hc.setHead(0, time, updateFn, delFn)
}
   577  
// setHead rewinds the local chain to a new head block or a head timestamp.
// Everything above the new head will be deleted and the new one set.
//
// Exactly one of headBlock/headTime is meaningful per call: a non-zero
// headTime selects timestamp mode, otherwise headBlock is the target number.
// updateFn and delFn are optional hooks invoked before the head update and
// before each header deletion respectively.
func (hc *HeaderChain) setHead(headBlock uint64, headTime uint64, updateFn UpdateHeadBlocksCallback, delFn DeleteBlockContentCallback) {
	// Sanity check that there's no attempt to undo the genesis block. This is
	// a fairly synthetic case where someone enables a timestamp based fork
	// below the genesis timestamp. It's nice to not allow that instead of the
	// entire chain getting deleted.
	if headTime > 0 && hc.genesisHeader.Time > headTime {
		// Note, a critical error is quite brutal, but we should really not reach
		// this point. Since pre-timestamp based forks it was impossible to have
		// a fork before block 0, the setHead would always work. With timestamp
		// forks it becomes possible to specify below the genesis. That said, the
		// only time we setHead via timestamp is with chain config changes on the
		// startup, so failing hard there is ok.
		log.Crit("Rejecting genesis rewind via timestamp", "target", headTime, "genesis", hc.genesisHeader.Time)
	}
	var (
		parentHash common.Hash
		batch      = hc.chainDb.NewBatch()
		origin     = true // true only for the first (highest) rewound header
	)
	// done reports whether the given header is at or below the rewind target,
	// in whichever mode (timestamp or block number) is active.
	done := func(header *types.Header) bool {
		if headTime > 0 {
			return header.Time <= headTime
		}
		return header.Number.Uint64() <= headBlock
	}
	for hdr := hc.CurrentHeader(); hdr != nil && !done(hdr); hdr = hc.CurrentHeader() {
		num := hdr.Number.Uint64()

		// Rewind chain to new head
		parent := hc.GetHeader(hdr.ParentHash, num-1)
		if parent == nil {
			parent = hc.genesisHeader
		}
		parentHash = parent.Hash()

		// Notably, since geth has the possibility for setting the head to a low
		// height which is even lower than ancient head.
		// In order to ensure that the head is always no higher than the data in
		// the database (ancient store or active store), we need to update head
		// first then remove the relative data from the database.
		//
		// Update head first(head fast block, head full block) before deleting the data.
		markerBatch := hc.chainDb.NewBatch()
		if updateFn != nil {
			newHead, force := updateFn(markerBatch, parent)
			if force && ((headTime > 0 && newHead.Time < headTime) || (headTime == 0 && newHead.Number.Uint64() < headBlock)) {
				log.Warn("Force rewinding till ancient limit", "head", newHead.Number.Uint64())
				headBlock, headTime = newHead.Number.Uint64(), 0 // Target timestamp passed, continue rewind in block mode (cleaner)
			}
		}
		// Update head header then.
		rawdb.WriteHeadHeaderHash(markerBatch, parentHash)
		if err := markerBatch.Write(); err != nil {
			log.Crit("Failed to update chain markers", "error", err)
		}
		hc.currentHeader.Store(parent)
		hc.currentHeaderHash = parentHash
		headHeaderGauge.Update(parent.Number.Int64())

		// If this is the first iteration, wipe any leftover data upwards too so
		// we don't end up with dangling gaps in the database
		var nums []uint64
		if origin {
			for n := num + 1; len(rawdb.ReadAllHashes(hc.chainDb, n)) > 0; n++ {
				nums = append([]uint64{n}, nums...) // suboptimal, but we don't really expect this path
			}
			origin = false
		}
		nums = append(nums, num)

		// Remove the related data from the database on all sidechains
		for _, num := range nums {
			// Gather all the side fork hashes
			hashes := rawdb.ReadAllHashes(hc.chainDb, num)
			if len(hashes) == 0 {
				// No hashes in the database whatsoever, probably frozen already
				hashes = append(hashes, hdr.Hash())
			}
			for _, hash := range hashes {
				if delFn != nil {
					delFn(batch, hash, num)
				}
				rawdb.DeleteHeader(batch, hash, num)
				rawdb.DeleteTd(batch, hash, num)
			}
			rawdb.DeleteCanonicalHash(batch, num)
		}
	}
	// Flush all accumulated deletions.
	if err := batch.Write(); err != nil {
		log.Crit("Failed to rewind block", "error", err)
	}
	// Clear out any stale content from the caches
	hc.headerCache.Purge()
	hc.tdCache.Purge()
	hc.numberCache.Purge()
}
   677  
// SetGenesis sets a new genesis block header for the chain. Only the in-memory
// genesis pointer is replaced; nothing is written to the database.
func (hc *HeaderChain) SetGenesis(head *types.Header) {
	hc.genesisHeader = head
}
   682  
// Config retrieves the header chain's chain configuration.
// It never returns a copy, callers must not mutate the result.
func (hc *HeaderChain) Config() *params.ChainConfig { return hc.config }
   685  
// Engine retrieves the header chain's consensus engine used for verification.
func (hc *HeaderChain) Engine() consensus.Engine { return hc.engine }
   688  
// GetBlock implements consensus.ChainReader, and returns nil for every input as
// a header chain does not have blocks available for retrieval. It exists purely
// to satisfy the interface.
func (hc *HeaderChain) GetBlock(hash common.Hash, number uint64) *types.Block {
	return nil
}