github.com/phillinzzz/newBsc@v1.1.6/core/rawdb/accessors_chain.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package rawdb
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/binary"
    22  	"math/big"
    23  	"sort"
    24  
    25  	"github.com/phillinzzz/newBsc/common"
    26  	"github.com/phillinzzz/newBsc/core/types"
    27  	"github.com/phillinzzz/newBsc/crypto"
    28  	"github.com/phillinzzz/newBsc/ethdb"
    29  	"github.com/phillinzzz/newBsc/log"
    30  	"github.com/phillinzzz/newBsc/params"
    31  	"github.com/phillinzzz/newBsc/rlp"
    32  )
    33  
    34  // ReadCanonicalHash retrieves the hash assigned to a canonical block number.
    35  func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
    36  	data, _ := db.Ancient(freezerHashTable, number)
    37  	if len(data) == 0 {
    38  		data, _ = db.Get(headerHashKey(number))
     39  		// In the background, the freezer is moving data from leveldb into flat files.
     40  		// So during the first check of the ancient db the data may not be there yet,
     41  		// while by the time we reach into leveldb it may already have been moved.
     42  		// That would result in a spurious not-found error.
    43  		if len(data) == 0 {
    44  			data, _ = db.Ancient(freezerHashTable, number)
    45  		}
    46  	}
    47  	if len(data) == 0 {
    48  		return common.Hash{}
    49  	}
    50  	return common.BytesToHash(data)
    51  }
    52  
    53  // WriteCanonicalHash stores the hash assigned to a canonical block number.
    54  func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
    55  	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
    56  		log.Crit("Failed to store number to hash mapping", "err", err)
    57  	}
    58  }
    59  
    60  // DeleteCanonicalHash removes the number to hash canonical mapping.
    61  func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
    62  	if err := db.Delete(headerHashKey(number)); err != nil {
    63  		log.Crit("Failed to delete number to hash mapping", "err", err)
    64  	}
    65  }
    66  
     67  // ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
    68  // both canonical and reorged forks included.
    69  func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
    70  	prefix := headerKeyPrefix(number)
    71  
    72  	hashes := make([]common.Hash, 0, 1)
    73  	it := db.NewIterator(prefix, nil)
    74  	defer it.Release()
    75  
    76  	for it.Next() {
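         		// Header keys are headerPrefix + num (uint64 big endian) + hash, so the
         		// trailing 32 bytes of a matching key are the block hash.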
    77  		if key := it.Key(); len(key) == len(prefix)+32 {
    78  			hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
    79  		}
    80  	}
    81  	return hashes
    82  }
    83  
     84  // ReadAllCanonicalHashes retrieves all canonical number and hash mappings in the
     85  // given chain range. If the number of accumulated entries reaches the given
     86  // threshold, the iteration is aborted and the partial result is returned.
    87  func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
    88  	// Short circuit if the limit is 0.
    89  	if limit == 0 {
    90  		return nil, nil
    91  	}
    92  	var (
    93  		numbers []uint64
    94  		hashes  []common.Hash
    95  	)
     96  	// Construct the canonical-hash keys marking the start and end of the range.
    97  	start, end := headerHashKey(from), headerHashKey(to)
    98  	it := db.NewIterator(nil, start)
    99  	defer it.Release()
   100  
   101  	for it.Next() {
   102  		if bytes.Compare(it.Key(), end) >= 0 {
   103  			break
   104  		}
   105  		if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
   106  			numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
   107  			hashes = append(hashes, common.BytesToHash(it.Value()))
    108  			// If the number of accumulated entries reaches the limit, abort the iteration.
   109  			if len(numbers) >= limit {
   110  				break
   111  			}
   112  		}
   113  	}
   114  	return numbers, hashes
   115  }
   116  
   117  // ReadHeaderNumber returns the header number assigned to a hash.
   118  func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
   119  	data, _ := db.Get(headerNumberKey(hash))
   120  	if len(data) != 8 {
   121  		return nil
   122  	}
   123  	number := binary.BigEndian.Uint64(data)
   124  	return &number
   125  }
   126  
   127  // WriteHeaderNumber stores the hash->number mapping.
   128  func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   129  	key := headerNumberKey(hash)
   130  	enc := encodeBlockNumber(number)
   131  	if err := db.Put(key, enc); err != nil {
   132  		log.Crit("Failed to store hash to number mapping", "err", err)
   133  	}
   134  }
   135  
   136  // DeleteHeaderNumber removes hash->number mapping.
   137  func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
   138  	if err := db.Delete(headerNumberKey(hash)); err != nil {
   139  		log.Crit("Failed to delete hash to number mapping", "err", err)
   140  	}
   141  }
   142  
   143  // ReadHeadHeaderHash retrieves the hash of the current canonical head header.
   144  func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
   145  	data, _ := db.Get(headHeaderKey)
   146  	if len(data) == 0 {
   147  		return common.Hash{}
   148  	}
   149  	return common.BytesToHash(data)
   150  }
   151  
   152  // WriteHeadHeaderHash stores the hash of the current canonical head header.
   153  func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
   154  	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
   155  		log.Crit("Failed to store last header's hash", "err", err)
   156  	}
   157  }
   158  
   159  // ReadHeadBlockHash retrieves the hash of the current canonical head block.
   160  func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
   161  	data, _ := db.Get(headBlockKey)
   162  	if len(data) == 0 {
   163  		return common.Hash{}
   164  	}
   165  	return common.BytesToHash(data)
   166  }
   167  
   168  // WriteHeadBlockHash stores the head block's hash.
   169  func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
   170  	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
   171  		log.Crit("Failed to store last block's hash", "err", err)
   172  	}
   173  }
   174  
   175  // ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
   176  func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
   177  	data, _ := db.Get(headFastBlockKey)
   178  	if len(data) == 0 {
   179  		return common.Hash{}
   180  	}
   181  	return common.BytesToHash(data)
   182  }
   183  
   184  // WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
   185  func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
   186  	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
   187  		log.Crit("Failed to store last fast block's hash", "err", err)
   188  	}
   189  }
   190  
   191  // ReadLastPivotNumber retrieves the number of the last pivot block. If the node
    192  // is fully synced, the last pivot will always be nil.
   193  func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
   194  	data, _ := db.Get(lastPivotKey)
   195  	if len(data) == 0 {
   196  		return nil
   197  	}
   198  	var pivot uint64
   199  	if err := rlp.DecodeBytes(data, &pivot); err != nil {
   200  		log.Error("Invalid pivot block number in database", "err", err)
   201  		return nil
   202  	}
   203  	return &pivot
   204  }
   205  
   206  // WriteLastPivotNumber stores the number of the last pivot block.
   207  func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
   208  	enc, err := rlp.EncodeToBytes(pivot)
   209  	if err != nil {
   210  		log.Crit("Failed to encode pivot block number", "err", err)
   211  	}
   212  	if err := db.Put(lastPivotKey, enc); err != nil {
   213  		log.Crit("Failed to store pivot block number", "err", err)
   214  	}
   215  }
   216  
    217  // ReadFastTrieProgress retrieves the number of trie nodes fast synced to allow
   218  // reporting correct numbers across restarts.
   219  func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
   220  	data, _ := db.Get(fastTrieProgressKey)
   221  	if len(data) == 0 {
   222  		return 0
   223  	}
   224  	return new(big.Int).SetBytes(data).Uint64()
   225  }
   226  
    227  // WriteFastTrieProgress stores the fast sync trie progress counter to support
   228  // retrieving it across restarts.
   229  func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
   230  	if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
   231  		log.Crit("Failed to store fast sync trie progress", "err", err)
   232  	}
   233  }
   234  
    235  // ReadTxIndexTail retrieves the number of the oldest indexed block
    236  // whose transaction indices have been indexed. If the corresponding entry
    237  // is absent from the database, it means the indexing has finished.
   238  func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
   239  	data, _ := db.Get(txIndexTailKey)
   240  	if len(data) != 8 {
   241  		return nil
   242  	}
   243  	number := binary.BigEndian.Uint64(data)
   244  	return &number
   245  }
   246  
    247  // WriteTxIndexTail stores the number of the oldest indexed block
    248  // into the database.
   249  func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
   250  	if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
   251  		log.Crit("Failed to store the transaction index tail", "err", err)
   252  	}
   253  }
   254  
   255  // ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
   256  func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
   257  	data, _ := db.Get(fastTxLookupLimitKey)
   258  	if len(data) != 8 {
   259  		return nil
   260  	}
   261  	number := binary.BigEndian.Uint64(data)
   262  	return &number
   263  }
   264  
    265  // WriteFastTxLookupLimit stores the tx lookup limit used in fast sync into the database.
   266  func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
   267  	if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
   268  		log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
   269  	}
   270  }
   271  
   272  // ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
   273  func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
    274  	// First try to look up the data in the ancient database. An extra hash
    275  	// comparison is necessary since the ancient database only maintains
   276  	// the canonical data.
   277  	data, _ := db.Ancient(freezerHeaderTable, number)
   278  	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
   279  		return data
   280  	}
   281  	// Then try to look up the data in leveldb.
   282  	data, _ = db.Get(headerKey(number, hash))
   283  	if len(data) > 0 {
   284  		return data
   285  	}
    286  	// In the background, the freezer is moving data from leveldb into flat files.
    287  	// So during the first check of the ancient db the data may not be there yet,
    288  	// while by the time we reach into leveldb it may already have been moved.
    289  	// That would result in a spurious not-found error.
   290  	data, _ = db.Ancient(freezerHeaderTable, number)
   291  	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
   292  		return data
   293  	}
   294  	return nil // Can't find the data anywhere.
   295  }
   296  
   297  // HasHeader verifies the existence of a block header corresponding to the hash.
   298  func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
   299  	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
   300  		return true
   301  	}
   302  	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
   303  		return false
   304  	}
   305  	return true
   306  }
   307  
   308  // ReadHeader retrieves the block header corresponding to the hash.
   309  func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
   310  	data := ReadHeaderRLP(db, hash, number)
   311  	if len(data) == 0 {
   312  		return nil
   313  	}
   314  	header := new(types.Header)
   315  	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
   316  		log.Error("Invalid block header RLP", "hash", hash, "err", err)
   317  		return nil
   318  	}
   319  	return header
   320  }
   321  
   322  // WriteHeader stores a block header into the database and also stores the hash-
   323  // to-number mapping.
   324  func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
   325  	var (
   326  		hash   = header.Hash()
   327  		number = header.Number.Uint64()
   328  	)
   329  	// Write the hash -> number mapping
   330  	WriteHeaderNumber(db, hash, number)
   331  
   332  	// Write the encoded header
   333  	data, err := rlp.EncodeToBytes(header)
   334  	if err != nil {
   335  		log.Crit("Failed to RLP encode header", "err", err)
   336  	}
   337  	key := headerKey(number, hash)
   338  	if err := db.Put(key, data); err != nil {
   339  		log.Crit("Failed to store header", "err", err)
   340  	}
   341  }
   342  
   343  // DeleteHeader removes all block header data associated with a hash.
   344  func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   345  	deleteHeaderWithoutNumber(db, hash, number)
   346  	if err := db.Delete(headerNumberKey(hash)); err != nil {
   347  		log.Crit("Failed to delete hash to number mapping", "err", err)
   348  	}
   349  }
   350  
   351  // deleteHeaderWithoutNumber removes only the block header but does not remove
   352  // the hash to number mapping.
   353  func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   354  	if err := db.Delete(headerKey(number, hash)); err != nil {
   355  		log.Crit("Failed to delete header", "err", err)
   356  	}
   357  }
   358  
   359  // ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
   360  func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
    361  	// First try to look up the data in the ancient database. An extra hash
    362  	// comparison is necessary since the ancient database only maintains
   363  	// the canonical data.
   364  	data, _ := db.Ancient(freezerBodiesTable, number)
   365  	if len(data) > 0 {
   366  		h, _ := db.Ancient(freezerHashTable, number)
   367  		if common.BytesToHash(h) == hash {
   368  			return data
   369  		}
   370  	}
   371  	// Then try to look up the data in leveldb.
   372  	data, _ = db.Get(blockBodyKey(number, hash))
   373  	if len(data) > 0 {
   374  		return data
   375  	}
    376  	// In the background, the freezer is moving data from leveldb into flat files.
    377  	// So during the first check of the ancient db the data may not be there yet,
    378  	// while by the time we reach into leveldb it may already have been moved.
    379  	// That would result in a spurious not-found error.
   380  	data, _ = db.Ancient(freezerBodiesTable, number)
   381  	if len(data) > 0 {
   382  		h, _ := db.Ancient(freezerHashTable, number)
   383  		if common.BytesToHash(h) == hash {
   384  			return data
   385  		}
   386  	}
   387  	return nil // Can't find the data anywhere.
   388  }
   389  
   390  // ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
   391  // block at number, in RLP encoding.
   392  func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
   393  	// If it's an ancient one, we don't need the canonical hash
   394  	data, _ := db.Ancient(freezerBodiesTable, number)
   395  	if len(data) == 0 {
   396  		// Need to get the hash
   397  		data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
    398  		// In the background, the freezer is moving data from leveldb into flat files.
    399  		// So during the first check of the ancient db the data may not be there yet,
    400  		// while by the time we reach into leveldb it may already have been moved.
    401  		// That would result in a spurious not-found error.
   402  		if len(data) == 0 {
   403  			data, _ = db.Ancient(freezerBodiesTable, number)
   404  		}
   405  	}
   406  	return data
   407  }
   408  
   409  // WriteBodyRLP stores an RLP encoded block body into the database.
   410  func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
   411  	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
   412  		log.Crit("Failed to store block body", "err", err)
   413  	}
   414  }
   415  
   416  // HasBody verifies the existence of a block body corresponding to the hash.
   417  func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
   418  	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
   419  		return true
   420  	}
   421  	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
   422  		return false
   423  	}
   424  	return true
   425  }
   426  
   427  // ReadBody retrieves the block body corresponding to the hash.
   428  func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
   429  	data := ReadBodyRLP(db, hash, number)
   430  	if len(data) == 0 {
   431  		return nil
   432  	}
   433  	body := new(types.Body)
   434  	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
   435  		log.Error("Invalid block body RLP", "hash", hash, "err", err)
   436  		return nil
   437  	}
   438  	return body
   439  }
   440  
   441  // WriteBody stores a block body into the database.
   442  func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
   443  	data, err := rlp.EncodeToBytes(body)
   444  	if err != nil {
   445  		log.Crit("Failed to RLP encode body", "err", err)
   446  	}
   447  	WriteBodyRLP(db, hash, number, data)
   448  }
   449  
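         // WriteDiffLayer RLP-encodes the given diff layer and stores it into the database,
         // keyed by the block hash it belongs to.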
   450  func WriteDiffLayer(db ethdb.KeyValueWriter, hash common.Hash, layer *types.DiffLayer) {
   451  	data, err := rlp.EncodeToBytes(layer)
   452  	if err != nil {
   453  		log.Crit("Failed to RLP encode diff layer", "err", err)
   454  	}
   455  	WriteDiffLayerRLP(db, hash, data)
   456  }
   457  
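         // WriteDiffLayerRLP stores an RLP-encoded diff layer into the database, keyed by the block hash.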
   458  func WriteDiffLayerRLP(db ethdb.KeyValueWriter, blockHash common.Hash, rlp rlp.RawValue) {
   459  	if err := db.Put(diffLayerKey(blockHash), rlp); err != nil {
   460  		log.Crit("Failed to store diff layer", "err", err)
   461  	}
   462  }
   463  
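         // ReadDiffLayer retrieves the diff layer corresponding to the block hash, or nil if
         // no diff layer is stored or it cannot be decoded.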
   464  func ReadDiffLayer(db ethdb.KeyValueReader, blockHash common.Hash) *types.DiffLayer {
   465  	data := ReadDiffLayerRLP(db, blockHash)
   466  	if len(data) == 0 {
   467  		return nil
   468  	}
   469  	diff := new(types.DiffLayer)
   470  	if err := rlp.Decode(bytes.NewReader(data), diff); err != nil {
   471  		log.Error("Invalid diff layer RLP", "hash", blockHash, "err", err)
   472  		return nil
   473  	}
   474  	return diff
   475  }
   476  
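         // ReadDiffLayerRLP retrieves the diff layer for the given block hash in its raw RLP database encoding.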
   477  func ReadDiffLayerRLP(db ethdb.KeyValueReader, blockHash common.Hash) rlp.RawValue {
   478  	data, _ := db.Get(diffLayerKey(blockHash))
   479  	return data
   480  }
   481  
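         // DeleteDiffLayer removes the diff layer data associated with a block hash.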
   482  func DeleteDiffLayer(db ethdb.KeyValueWriter, blockHash common.Hash) {
   483  	if err := db.Delete(diffLayerKey(blockHash)); err != nil {
   484  		log.Crit("Failed to delete diffLayer", "err", err)
   485  	}
   486  }
   487  
   488  // DeleteBody removes all block body data associated with a hash.
   489  func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   490  	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
   491  		log.Crit("Failed to delete block body", "err", err)
   492  	}
   493  }
   494  
   495  // ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
   496  func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
    497  	// First try to look up the data in the ancient database. An extra hash
    498  	// comparison is necessary since the ancient database only maintains
   499  	// the canonical data.
   500  	data, _ := db.Ancient(freezerDifficultyTable, number)
   501  	if len(data) > 0 {
   502  		h, _ := db.Ancient(freezerHashTable, number)
   503  		if common.BytesToHash(h) == hash {
   504  			return data
   505  		}
   506  	}
   507  	// Then try to look up the data in leveldb.
   508  	data, _ = db.Get(headerTDKey(number, hash))
   509  	if len(data) > 0 {
   510  		return data
   511  	}
    512  	// In the background, the freezer is moving data from leveldb into flat files.
    513  	// So during the first check of the ancient db the data may not be there yet,
    514  	// while by the time we reach into leveldb it may already have been moved.
    515  	// That would result in a spurious not-found error.
   516  	data, _ = db.Ancient(freezerDifficultyTable, number)
   517  	if len(data) > 0 {
   518  		h, _ := db.Ancient(freezerHashTable, number)
   519  		if common.BytesToHash(h) == hash {
   520  			return data
   521  		}
   522  	}
   523  	return nil // Can't find the data anywhere.
   524  }
   525  
   526  // ReadTd retrieves a block's total difficulty corresponding to the hash.
   527  func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
   528  	data := ReadTdRLP(db, hash, number)
   529  	if len(data) == 0 {
   530  		return nil
   531  	}
   532  	td := new(big.Int)
   533  	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
   534  		log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
   535  		return nil
   536  	}
   537  	return td
   538  }
   539  
   540  // WriteTd stores the total difficulty of a block into the database.
   541  func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
   542  	data, err := rlp.EncodeToBytes(td)
   543  	if err != nil {
   544  		log.Crit("Failed to RLP encode block total difficulty", "err", err)
   545  	}
   546  	if err := db.Put(headerTDKey(number, hash), data); err != nil {
   547  		log.Crit("Failed to store block total difficulty", "err", err)
   548  	}
   549  }
   550  
   551  // DeleteTd removes all block total difficulty data associated with a hash.
   552  func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   553  	if err := db.Delete(headerTDKey(number, hash)); err != nil {
   554  		log.Crit("Failed to delete block total difficulty", "err", err)
   555  	}
   556  }
   557  
   558  // HasReceipts verifies the existence of all the transaction receipts belonging
   559  // to a block.
   560  func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
   561  	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
   562  		return true
   563  	}
   564  	if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
   565  		return false
   566  	}
   567  	return true
   568  }
   569  
   570  // ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
   571  func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
    572  	// First try to look up the data in the ancient database. An extra hash
    573  	// comparison is necessary since the ancient database only maintains
   574  	// the canonical data.
   575  	data, _ := db.Ancient(freezerReceiptTable, number)
   576  	if len(data) > 0 {
   577  		h, _ := db.Ancient(freezerHashTable, number)
   578  		if common.BytesToHash(h) == hash {
   579  			return data
   580  		}
   581  	}
   582  	// Then try to look up the data in leveldb.
   583  	data, _ = db.Get(blockReceiptsKey(number, hash))
   584  	if len(data) > 0 {
   585  		return data
   586  	}
    587  	// In the background, the freezer is moving data from leveldb into flat files.
    588  	// So during the first check of the ancient db the data may not be there yet,
    589  	// while by the time we reach into leveldb it may already have been moved.
    590  	// That would result in a spurious not-found error.
   591  	data, _ = db.Ancient(freezerReceiptTable, number)
   592  	if len(data) > 0 {
   593  		h, _ := db.Ancient(freezerHashTable, number)
   594  		if common.BytesToHash(h) == hash {
   595  			return data
   596  		}
   597  	}
   598  	return nil // Can't find the data anywhere.
   599  }
   600  
   601  // ReadRawReceipts retrieves all the transaction receipts belonging to a block.
   602  // The receipt metadata fields are not guaranteed to be populated, so they
   603  // should not be used. Use ReadReceipts instead if the metadata is needed.
   604  func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
   605  	// Retrieve the flattened receipt slice
   606  	data := ReadReceiptsRLP(db, hash, number)
   607  	if len(data) == 0 {
   608  		return nil
   609  	}
   610  	// Convert the receipts from their storage form to their internal representation
   611  	storageReceipts := []*types.ReceiptForStorage{}
   612  	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
   613  		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
   614  		return nil
   615  	}
   616  	receipts := make(types.Receipts, len(storageReceipts))
   617  	for i, storageReceipt := range storageReceipts {
   618  		receipts[i] = (*types.Receipt)(storageReceipt)
   619  	}
   620  	return receipts
   621  }
   622  
   623  // ReadReceipts retrieves all the transaction receipts belonging to a block, including
    624  // its corresponding metadata fields. If it is unable to populate these metadata
   625  // fields then nil is returned.
   626  //
   627  // The current implementation populates these metadata fields by reading the receipts'
   628  // corresponding block body, so if the block body is not found it will return nil even
   629  // if the receipt itself is stored.
   630  func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
    631  	// We're deriving many fields from the block body, retrieve it beside the receipts
   632  	receipts := ReadRawReceipts(db, hash, number)
   633  	if receipts == nil {
   634  		return nil
   635  	}
   636  	body := ReadBody(db, hash, number)
   637  	if body == nil {
   638  		log.Error("Missing body but have receipt", "hash", hash, "number", number)
   639  		return nil
   640  	}
   641  	if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil {
   642  		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
   643  		return nil
   644  	}
   645  	return receipts
   646  }
   647  
   648  // WriteReceipts stores all the transaction receipts belonging to a block.
   649  func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
   650  	// Convert the receipts into their storage form and serialize them
   651  	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
   652  	for i, receipt := range receipts {
   653  		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
   654  	}
   655  	bytes, err := rlp.EncodeToBytes(storageReceipts)
   656  	if err != nil {
   657  		log.Crit("Failed to encode block receipts", "err", err)
   658  	}
   659  	// Store the flattened receipt slice
   660  	if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
   661  		log.Crit("Failed to store block receipts", "err", err)
   662  	}
   663  }
   664  
   665  // DeleteReceipts removes all receipt data associated with a block hash.
   666  func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   667  	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
   668  		log.Crit("Failed to delete block receipts", "err", err)
   669  	}
   670  }
   671  
   672  // ReadBlock retrieves an entire block corresponding to the hash, assembling it
   673  // back from the stored header and body. If either the header or body could not
    674  // be retrieved, nil is returned.
   675  //
    676  // Note: due to the concurrent download of header and block body, the header (and thus the
    677  // canonical hash) can be stored in the database while the body data is not (yet).
   678  func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
   679  	header := ReadHeader(db, hash, number)
   680  	if header == nil {
   681  		return nil
   682  	}
   683  	body := ReadBody(db, hash, number)
   684  	if body == nil {
   685  		return nil
   686  	}
   687  	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
   688  }
   689  
   690  // WriteBlock serializes a block into the database, header and body separately.
   691  func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
   692  	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
   693  	WriteHeader(db, block.Header())
   694  }
   695  
    696  // WriteAncientBlock writes an entire block's data into the ancient store and returns the total written size.
   697  func WriteAncientBlock(db ethdb.AncientWriter, block *types.Block, receipts types.Receipts, td *big.Int) int {
   698  	// Encode all block components to RLP format.
   699  	headerBlob, err := rlp.EncodeToBytes(block.Header())
   700  	if err != nil {
   701  		log.Crit("Failed to RLP encode block header", "err", err)
   702  	}
   703  	bodyBlob, err := rlp.EncodeToBytes(block.Body())
   704  	if err != nil {
   705  		log.Crit("Failed to RLP encode body", "err", err)
   706  	}
   707  	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
   708  	for i, receipt := range receipts {
   709  		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
   710  	}
   711  	receiptBlob, err := rlp.EncodeToBytes(storageReceipts)
   712  	if err != nil {
   713  		log.Crit("Failed to RLP encode block receipts", "err", err)
   714  	}
   715  	tdBlob, err := rlp.EncodeToBytes(td)
   716  	if err != nil {
   717  		log.Crit("Failed to RLP encode block total difficulty", "err", err)
   718  	}
    719  	// Write all blobs to the flat files.
   720  	err = db.AppendAncient(block.NumberU64(), block.Hash().Bytes(), headerBlob, bodyBlob, receiptBlob, tdBlob)
   721  	if err != nil {
   722  		log.Crit("Failed to write block data to ancient store", "err", err)
   723  	}
   724  	return len(headerBlob) + len(bodyBlob) + len(receiptBlob) + len(tdBlob) + common.HashLength
   725  }
   726  
   727  // DeleteBlock removes all block data associated with a hash.
   728  func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   729  	DeleteReceipts(db, hash, number)
   730  	DeleteHeader(db, hash, number)
   731  	DeleteBody(db, hash, number)
   732  	DeleteTd(db, hash, number)
   733  }
   734  
   735  // DeleteBlockWithoutNumber removes all block data associated with a hash, except
   736  // the hash to number mapping.
   737  func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   738  	DeleteReceipts(db, hash, number)
   739  	deleteHeaderWithoutNumber(db, hash, number)
   740  	DeleteBody(db, hash, number)
   741  	DeleteTd(db, hash, number)
   742  }
   743  
   744  const badBlockToKeep = 10
   745  
   746  type badBlock struct {
   747  	Header *types.Header
   748  	Body   *types.Body
   749  }
   750  
   751  // badBlockList implements the sort interface to allow sorting a list of
    752  // bad blocks by their number in reverse order.
   753  type badBlockList []*badBlock
   754  
   755  func (s badBlockList) Len() int { return len(s) }
   756  func (s badBlockList) Less(i, j int) bool {
   757  	return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64()
   758  }
   759  func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
   760  
   761  // ReadBadBlock retrieves the bad block with the corresponding block hash.
   762  func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
   763  	blob, err := db.Get(badBlockKey)
   764  	if err != nil {
   765  		return nil
   766  	}
   767  	var badBlocks badBlockList
   768  	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
   769  		return nil
   770  	}
   771  	for _, bad := range badBlocks {
   772  		if bad.Header.Hash() == hash {
   773  			return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles)
   774  		}
   775  	}
   776  	return nil
   777  }
   778  
   779  // ReadAllBadBlocks retrieves all the bad blocks in the database.
   780  // All returned blocks are sorted in reverse order by number.
   781  func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
   782  	blob, err := db.Get(badBlockKey)
   783  	if err != nil {
   784  		return nil
   785  	}
   786  	var badBlocks badBlockList
   787  	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
   788  		return nil
   789  	}
   790  	var blocks []*types.Block
   791  	for _, bad := range badBlocks {
   792  		blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles))
   793  	}
   794  	return blocks
   795  }
   796  
    797  // WriteBadBlock serializes the bad block into the database. If the number of
    798  // accumulated bad blocks exceeds the limit, the oldest will be dropped.
   799  func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
   800  	blob, err := db.Get(badBlockKey)
   801  	if err != nil {
   802  		log.Warn("Failed to load old bad blocks", "error", err)
   803  	}
   804  	var badBlocks badBlockList
   805  	if len(blob) > 0 {
   806  		if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
   807  			log.Crit("Failed to decode old bad blocks", "error", err)
   808  		}
   809  	}
   810  	for _, b := range badBlocks {
   811  		if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
   812  			log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
   813  			return
   814  		}
   815  	}
   816  	badBlocks = append(badBlocks, &badBlock{
   817  		Header: block.Header(),
   818  		Body:   block.Body(),
   819  	})
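         	// Keep the list sorted by block number, highest first, and cap it at badBlockToKeep entries.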
   820  	sort.Sort(sort.Reverse(badBlocks))
   821  	if len(badBlocks) > badBlockToKeep {
   822  		badBlocks = badBlocks[:badBlockToKeep]
   823  	}
   824  	data, err := rlp.EncodeToBytes(badBlocks)
   825  	if err != nil {
   826  		log.Crit("Failed to encode bad blocks", "err", err)
   827  	}
   828  	if err := db.Put(badBlockKey, data); err != nil {
   829  		log.Crit("Failed to write bad blocks", "err", err)
   830  	}
   831  }
   832  
    833  // DeleteBadBlocks deletes all the bad blocks from the database.
   834  func DeleteBadBlocks(db ethdb.KeyValueWriter) {
   835  	if err := db.Delete(badBlockKey); err != nil {
   836  		log.Crit("Failed to delete bad blocks", "err", err)
   837  	}
   838  }
   839  
    840  // FindCommonAncestor returns the last common ancestor of two block headers.
   841  func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
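         	// Walk a's chain backwards until it is no higher than b.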
   842  	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
   843  		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
   844  		if a == nil {
   845  			return nil
   846  		}
   847  	}
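         	// Walk b's chain backwards until it is no higher than a.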
   848  	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
   849  		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
   850  		if b == nil {
   851  			return nil
   852  		}
   853  	}
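         	// Both chains are now at the same height; step them back in lockstep until the headers match.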
   854  	for a.Hash() != b.Hash() {
   855  		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
   856  		if a == nil {
   857  			return nil
   858  		}
   859  		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
   860  		if b == nil {
   861  			return nil
   862  		}
   863  	}
   864  	return a
   865  }
   866  
   867  // ReadHeadHeader returns the current canonical head header.
   868  func ReadHeadHeader(db ethdb.Reader) *types.Header {
   869  	headHeaderHash := ReadHeadHeaderHash(db)
   870  	if headHeaderHash == (common.Hash{}) {
   871  		return nil
   872  	}
   873  	headHeaderNumber := ReadHeaderNumber(db, headHeaderHash)
   874  	if headHeaderNumber == nil {
   875  		return nil
   876  	}
   877  	return ReadHeader(db, headHeaderHash, *headHeaderNumber)
   878  }
   879  
   880  // ReadHeadBlock returns the current canonical head block.
   881  func ReadHeadBlock(db ethdb.Reader) *types.Block {
   882  	headBlockHash := ReadHeadBlockHash(db)
   883  	if headBlockHash == (common.Hash{}) {
   884  		return nil
   885  	}
   886  	headBlockNumber := ReadHeaderNumber(db, headBlockHash)
   887  	if headBlockNumber == nil {
   888  		return nil
   889  	}
   890  	return ReadBlock(db, headBlockHash, *headBlockNumber)
   891  }