github.com/gilgames000/kcc-geth@v1.0.6/core/rawdb/accessors_chain.go

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package rawdb
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/binary"
    22  	"math/big"
    23  	"sort"
    24  
    25  	"github.com/ethereum/go-ethereum/common"
    26  	"github.com/ethereum/go-ethereum/core/types"
    27  	"github.com/ethereum/go-ethereum/crypto"
    28  	"github.com/ethereum/go-ethereum/ethdb"
    29  	"github.com/ethereum/go-ethereum/log"
    30  	"github.com/ethereum/go-ethereum/params"
    31  	"github.com/ethereum/go-ethereum/rlp"
    32  )
    33  
    34  // ReadCanonicalHash retrieves the hash assigned to a canonical block number.
    35  func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
    36  	data, _ := db.Ancient(freezerHashTable, number)
    37  	if len(data) == 0 {
    38  		data, _ = db.Get(headerHashKey(number))
    39  		// In the background, the freezer is moving data from leveldb into flat
    40  		// files. It is possible that the entry was not yet in the ancient store
    41  		// when we first checked, but had already been moved out of leveldb by the
    42  		// time we looked there, so check the ancient store once more.
    43  		if len(data) == 0 {
    44  			data, _ = db.Ancient(freezerHashTable, number)
    45  		}
    46  	}
    47  	if len(data) == 0 {
    48  		return common.Hash{}
    49  	}
    50  	return common.BytesToHash(data)
    51  }
    52  
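// Illustrative sketch, not part of the original file: a typical caller pairs
// ReadCanonicalHash with ReadHeader to resolve a canonical height into its
// header. The helper name below is hypothetical and only shows the intended
// call pattern; an empty hash means no canonical block exists at that height.
func readCanonicalHeaderSketch(db ethdb.Reader, number uint64) *types.Header {
	hash := ReadCanonicalHash(db, number)
	if hash == (common.Hash{}) {
		return nil // No canonical mapping stored for this height.
	}
	return ReadHeader(db, hash, number)
}
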
    53  // WriteCanonicalHash stores the hash assigned to a canonical block number.
    54  func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
    55  	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
    56  		log.Crit("Failed to store number to hash mapping", "err", err)
    57  	}
    58  }
    59  
    60  // DeleteCanonicalHash removes the number to hash canonical mapping.
    61  func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
    62  	if err := db.Delete(headerHashKey(number)); err != nil {
    63  		log.Crit("Failed to delete number to hash mapping", "err", err)
    64  	}
    65  }
    66  
    67  // ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
    68  // both canonical and reorged forks included.
    69  func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
    70  	prefix := headerKeyPrefix(number)
    71  
    72  	hashes := make([]common.Hash, 0, 1)
    73  	it := db.NewIterator(prefix, nil)
    74  	defer it.Release()
    75  
    76  	for it.Next() {
    77  		if key := it.Key(); len(key) == len(prefix)+32 {
    78  			hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
    79  		}
    80  	}
    81  	return hashes
    82  }
    83  
    84  // ReadAllCanonicalHashes retrieves all canonical number and hash mappings in the
    85  // given chain range. If the number of accumulated entries reaches the given
    86  // threshold, the iteration is aborted and the partial result is returned.
    87  func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
    88  	// Short circuit if the limit is 0.
    89  	if limit == 0 {
    90  		return nil, nil
    91  	}
    92  	var (
    93  		numbers []uint64
    94  		hashes  []common.Hash
    95  	)
    96  	// Construct the keys for the start and end of the range.
    97  	start, end := headerHashKey(from), headerHashKey(to)
    98  	it := db.NewIterator(nil, start)
    99  	defer it.Release()
   100  
   101  	for it.Next() {
   102  		if bytes.Compare(it.Key(), end) >= 0 {
   103  			break
   104  		}
   105  		if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
   106  			numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
   107  			hashes = append(hashes, common.BytesToHash(it.Value()))
   108  			// If the number of accumulated entries reaches the limit, abort the iteration.
   109  			if len(numbers) >= limit {
   110  				break
   111  			}
   112  		}
   113  	}
   114  	return numbers, hashes
   115  }
   116  
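// Illustrative sketch, not part of the original file: because the iteration
// stops once `limit` entries have been collected, callers can walk a large
// canonical range in bounded batches, resuming from the last returned number.
// The helper below is hypothetical and simply counts the mappings in [from, to);
// the batch size is an assumption, not dictated by the original code.
func countCanonicalMappingsSketch(db ethdb.Iteratee, from, to uint64) int {
	const batch = 1024 // assumed batch size
	total := 0
	for from < to {
		numbers, _ := ReadAllCanonicalHashes(db, from, to, batch)
		if len(numbers) == 0 {
			break
		}
		total += len(numbers)
		from = numbers[len(numbers)-1] + 1 // resume right after the last hit
	}
	return total
}
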
   117  // ReadHeaderNumber returns the header number assigned to a hash.
   118  func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
   119  	data, _ := db.Get(headerNumberKey(hash))
   120  	if len(data) != 8 {
   121  		return nil
   122  	}
   123  	number := binary.BigEndian.Uint64(data)
   124  	return &number
   125  }
   126  
   127  // WriteHeaderNumber stores the hash->number mapping.
   128  func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   129  	key := headerNumberKey(hash)
   130  	enc := encodeBlockNumber(number)
   131  	if err := db.Put(key, enc); err != nil {
   132  		log.Crit("Failed to store hash to number mapping", "err", err)
   133  	}
   134  }
   135  
   136  // DeleteHeaderNumber removes hash->number mapping.
   137  func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
   138  	if err := db.Delete(headerNumberKey(hash)); err != nil {
   139  		log.Crit("Failed to delete hash to number mapping", "err", err)
   140  	}
   141  }
   142  
   143  // ReadHeadHeaderHash retrieves the hash of the current canonical head header.
   144  func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
   145  	data, _ := db.Get(headHeaderKey)
   146  	if len(data) == 0 {
   147  		return common.Hash{}
   148  	}
   149  	return common.BytesToHash(data)
   150  }
   151  
   152  // WriteHeadHeaderHash stores the hash of the current canonical head header.
   153  func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
   154  	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
   155  		log.Crit("Failed to store last header's hash", "err", err)
   156  	}
   157  }
   158  
   159  // ReadHeadBlockHash retrieves the hash of the current canonical head block.
   160  func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
   161  	data, _ := db.Get(headBlockKey)
   162  	if len(data) == 0 {
   163  		return common.Hash{}
   164  	}
   165  	return common.BytesToHash(data)
   166  }
   167  
   168  // WriteHeadBlockHash stores the head block's hash.
   169  func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
   170  	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
   171  		log.Crit("Failed to store last block's hash", "err", err)
   172  	}
   173  }
   174  
   175  // ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
   176  func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
   177  	data, _ := db.Get(headFastBlockKey)
   178  	if len(data) == 0 {
   179  		return common.Hash{}
   180  	}
   181  	return common.BytesToHash(data)
   182  }
   183  
   184  // WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
   185  func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
   186  	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
   187  		log.Crit("Failed to store last fast block's hash", "err", err)
   188  	}
   189  }
   190  
   191  // ReadLastPivotNumber retrieves the number of the last pivot block. If the node
   192  // is fully synced, the last pivot will always be nil.
   193  func ReadLastPivotNumber(db ethdb.KeyValueReader) *uint64 {
   194  	data, _ := db.Get(lastPivotKey)
   195  	if len(data) == 0 {
   196  		return nil
   197  	}
   198  	var pivot uint64
   199  	if err := rlp.DecodeBytes(data, &pivot); err != nil {
   200  		log.Error("Invalid pivot block number in database", "err", err)
   201  		return nil
   202  	}
   203  	return &pivot
   204  }
   205  
   206  // WriteLastPivotNumber stores the number of the last pivot block.
   207  func WriteLastPivotNumber(db ethdb.KeyValueWriter, pivot uint64) {
   208  	enc, err := rlp.EncodeToBytes(pivot)
   209  	if err != nil {
   210  		log.Crit("Failed to encode pivot block number", "err", err)
   211  	}
   212  	if err := db.Put(lastPivotKey, enc); err != nil {
   213  		log.Crit("Failed to store pivot block number", "err", err)
   214  	}
   215  }
   216  
   217  // ReadFastTrieProgress retrieves the number of trie nodes fast synced to allow
   218  // reporting correct numbers across restarts.
   219  func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
   220  	data, _ := db.Get(fastTrieProgressKey)
   221  	if len(data) == 0 {
   222  		return 0
   223  	}
   224  	return new(big.Int).SetBytes(data).Uint64()
   225  }
   226  
   227  // WriteFastTrieProgress stores the fast sync trie progress counter to support
   228  // retrieving it across restarts.
   229  func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
   230  	if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
   231  		log.Crit("Failed to store fast sync trie progress", "err", err)
   232  	}
   233  }
   234  
   235  // ReadTxIndexTail retrieves the number of the oldest block whose transaction
   236  // indices have been indexed. If the corresponding entry does not exist in the
   237  // database, it means the indexing has been finished.
   238  func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
   239  	data, _ := db.Get(txIndexTailKey)
   240  	if len(data) != 8 {
   241  		return nil
   242  	}
   243  	number := binary.BigEndian.Uint64(data)
   244  	return &number
   245  }
   246  
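// Illustrative sketch, not part of the original file: a hypothetical helper
// showing how the tail is interpreted, following the comment on ReadTxIndexTail
// above. A missing tail is treated as "indexing finished"; otherwise only blocks
// at or above the tail have their transaction indices available.
func isBlockTxIndexedSketch(db ethdb.KeyValueReader, number uint64) bool {
	tail := ReadTxIndexTail(db)
	if tail == nil {
		return true // No tail recorded: indexing is considered finished.
	}
	return number >= *tail
}
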
   247  // WriteTxIndexTail stores the number of the oldest indexed block
   248  // into the database.
   249  func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
   250  	if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
   251  		log.Crit("Failed to store the transaction index tail", "err", err)
   252  	}
   253  }
   254  
   255  // ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
   256  func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
   257  	data, _ := db.Get(fastTxLookupLimitKey)
   258  	if len(data) != 8 {
   259  		return nil
   260  	}
   261  	number := binary.BigEndian.Uint64(data)
   262  	return &number
   263  }
   264  
   265  // WriteFastTxLookupLimit stores the transaction lookup limit used in fast sync into the database.
   266  func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
   267  	if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
   268  		log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
   269  	}
   270  }
   271  
   272  // ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
   273  func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
   274  	// First try to look up the data in the ancient database. The extra hash
   275  	// comparison is necessary since the ancient database only maintains
   276  	// canonical data.
   277  	data, _ := db.Ancient(freezerHeaderTable, number)
   278  	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
   279  		return data
   280  	}
   281  	// Then try to look up the data in leveldb.
   282  	data, _ = db.Get(headerKey(number, hash))
   283  	if len(data) > 0 {
   284  		return data
   285  	}
   286  	// In the background, the freezer is moving data from leveldb into flat
   287  	// files. It is possible that the entry was not yet in the ancient store
   288  	// when we first checked, but had already been moved out of leveldb by the
   289  	// time we looked there, so check the ancient store once more.
   290  	data, _ = db.Ancient(freezerHeaderTable, number)
   291  	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
   292  		return data
   293  	}
   294  	return nil // Can't find the data anywhere.
   295  }
   296  
   297  // HasHeader verifies the existence of a block header corresponding to the hash.
   298  func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
   299  	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
   300  		return true
   301  	}
   302  	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
   303  		return false
   304  	}
   305  	return true
   306  }
   307  
   308  // ReadHeader retrieves the block header corresponding to the hash.
   309  func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
   310  	data := ReadHeaderRLP(db, hash, number)
   311  	if len(data) == 0 {
   312  		return nil
   313  	}
   314  	header := new(types.Header)
   315  	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
   316  		log.Error("Invalid block header RLP", "hash", hash, "err", err)
   317  		return nil
   318  	}
   319  	return header
   320  }
   321  
   322  // WriteHeader stores a block header into the database and also stores the hash-
   323  // to-number mapping.
   324  func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
   325  	var (
   326  		hash   = header.Hash()
   327  		number = header.Number.Uint64()
   328  	)
   329  	// Write the hash -> number mapping
   330  	WriteHeaderNumber(db, hash, number)
   331  
   332  	// Write the encoded header
   333  	data, err := rlp.EncodeToBytes(header)
   334  	if err != nil {
   335  		log.Crit("Failed to RLP encode header", "err", err)
   336  	}
   337  	key := headerKey(number, hash)
   338  	if err := db.Put(key, data); err != nil {
   339  		log.Crit("Failed to store header", "err", err)
   340  	}
   341  }
   342  
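// Illustrative sketch, not part of the original file: WriteHeader only stores
// the header and its hash->number mapping; it does not make the header part of
// the canonical chain. A caller that wants the header to be canonical would
// additionally write the number->hash mapping, roughly as below (the helper
// name is hypothetical, shown only to illustrate how the accessors compose).
func writeCanonicalHeaderSketch(db ethdb.KeyValueWriter, header *types.Header) {
	WriteHeader(db, header)
	WriteCanonicalHash(db, header.Hash(), header.Number.Uint64())
}
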
   343  // DeleteHeader removes all block header data associated with a hash.
   344  func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   345  	deleteHeaderWithoutNumber(db, hash, number)
   346  	if err := db.Delete(headerNumberKey(hash)); err != nil {
   347  		log.Crit("Failed to delete hash to number mapping", "err", err)
   348  	}
   349  }
   350  
   351  // deleteHeaderWithoutNumber removes only the block header but does not remove
   352  // the hash to number mapping.
   353  func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   354  	if err := db.Delete(headerKey(number, hash)); err != nil {
   355  		log.Crit("Failed to delete header", "err", err)
   356  	}
   357  }
   358  
   359  // ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
   360  func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
   361  	// First try to look up the data in the ancient database. The extra hash
   362  	// comparison is necessary since the ancient database only maintains
   363  	// canonical data.
   364  	data, _ := db.Ancient(freezerBodiesTable, number)
   365  	if len(data) > 0 {
   366  		h, _ := db.Ancient(freezerHashTable, number)
   367  		if common.BytesToHash(h) == hash {
   368  			return data
   369  		}
   370  	}
   371  	// Then try to look up the data in leveldb.
   372  	data, _ = db.Get(blockBodyKey(number, hash))
   373  	if len(data) > 0 {
   374  		return data
   375  	}
   376  	// In the background, the freezer is moving data from leveldb into flat
   377  	// files. It is possible that the entry was not yet in the ancient store
   378  	// when we first checked, but had already been moved out of leveldb by the
   379  	// time we looked there, so check the ancient store once more.
   380  	data, _ = db.Ancient(freezerBodiesTable, number)
   381  	if len(data) > 0 {
   382  		h, _ := db.Ancient(freezerHashTable, number)
   383  		if common.BytesToHash(h) == hash {
   384  			return data
   385  		}
   386  	}
   387  	return nil // Can't find the data anywhere.
   388  }
   389  
   390  // ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
   391  // block at number, in RLP encoding.
   392  func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
   393  	// If it's an ancient one, we don't need the canonical hash
   394  	data, _ := db.Ancient(freezerBodiesTable, number)
   395  	if len(data) == 0 {
   396  		// Need to get the hash
   397  		data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
   398  		// In the background, the freezer is moving data from leveldb into flat
   399  		// files. It is possible that the entry was not yet in the ancient store
   400  		// when we first checked, but had already been moved out of leveldb by the
   401  		// time we looked there, so check the ancient store once more.
   402  		if len(data) == 0 {
   403  			data, _ = db.Ancient(freezerBodiesTable, number)
   404  		}
   405  	}
   406  	return data
   407  }
   408  
   409  // WriteBodyRLP stores an RLP encoded block body into the database.
   410  func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
   411  	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
   412  		log.Crit("Failed to store block body", "err", err)
   413  	}
   414  }
   415  
   416  // HasBody verifies the existence of a block body corresponding to the hash.
   417  func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
   418  	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
   419  		return true
   420  	}
   421  	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
   422  		return false
   423  	}
   424  	return true
   425  }
   426  
   427  // ReadBody retrieves the block body corresponding to the hash.
   428  func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
   429  	data := ReadBodyRLP(db, hash, number)
   430  	if len(data) == 0 {
   431  		return nil
   432  	}
   433  	body := new(types.Body)
   434  	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
   435  		log.Error("Invalid block body RLP", "hash", hash, "err", err)
   436  		return nil
   437  	}
   438  	return body
   439  }
   440  
   441  // WriteBody stores a block body into the database.
   442  func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
   443  	data, err := rlp.EncodeToBytes(body)
   444  	if err != nil {
   445  		log.Crit("Failed to RLP encode body", "err", err)
   446  	}
   447  	WriteBodyRLP(db, hash, number, data)
   448  }
   449  
   450  // DeleteBody removes all block body data associated with a hash.
   451  func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   452  	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
   453  		log.Crit("Failed to delete block body", "err", err)
   454  	}
   455  }
   456  
   457  // ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
   458  func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
   459  	// First try to look up the data in the ancient database. The extra hash
   460  	// comparison is necessary since the ancient database only maintains
   461  	// canonical data.
   462  	data, _ := db.Ancient(freezerDifficultyTable, number)
   463  	if len(data) > 0 {
   464  		h, _ := db.Ancient(freezerHashTable, number)
   465  		if common.BytesToHash(h) == hash {
   466  			return data
   467  		}
   468  	}
   469  	// Then try to look up the data in leveldb.
   470  	data, _ = db.Get(headerTDKey(number, hash))
   471  	if len(data) > 0 {
   472  		return data
   473  	}
   474  	// In the background, the freezer is moving data from leveldb into flat
   475  	// files. It is possible that the entry was not yet in the ancient store
   476  	// when we first checked, but had already been moved out of leveldb by the
   477  	// time we looked there, so check the ancient store once more.
   478  	data, _ = db.Ancient(freezerDifficultyTable, number)
   479  	if len(data) > 0 {
   480  		h, _ := db.Ancient(freezerHashTable, number)
   481  		if common.BytesToHash(h) == hash {
   482  			return data
   483  		}
   484  	}
   485  	return nil // Can't find the data anywhere.
   486  }
   487  
   488  // ReadTd retrieves a block's total difficulty corresponding to the hash.
   489  func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
   490  	data := ReadTdRLP(db, hash, number)
   491  	if len(data) == 0 {
   492  		return nil
   493  	}
   494  	td := new(big.Int)
   495  	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
   496  		log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
   497  		return nil
   498  	}
   499  	return td
   500  }
   501  
   502  // WriteTd stores the total difficulty of a block into the database.
   503  func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
   504  	data, err := rlp.EncodeToBytes(td)
   505  	if err != nil {
   506  		log.Crit("Failed to RLP encode block total difficulty", "err", err)
   507  	}
   508  	if err := db.Put(headerTDKey(number, hash), data); err != nil {
   509  		log.Crit("Failed to store block total difficulty", "err", err)
   510  	}
   511  }
   512  
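// Illustrative sketch, not part of the original file: the stored value is the
// cumulative chain difficulty, so callers typically derive a block's TD from
// its parent's TD before writing it. The helper below is hypothetical and
// assumes the parent's total difficulty is already present in the database.
func writeChildTdSketch(db ethdb.Database, header *types.Header) *big.Int {
	parentTd := ReadTd(db, header.ParentHash, header.Number.Uint64()-1)
	if parentTd == nil {
		return nil // Parent TD missing; the child's TD cannot be derived.
	}
	td := new(big.Int).Add(parentTd, header.Difficulty)
	WriteTd(db, header.Hash(), header.Number.Uint64(), td)
	return td
}
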
   513  // DeleteTd removes all block total difficulty data associated with a hash.
   514  func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   515  	if err := db.Delete(headerTDKey(number, hash)); err != nil {
   516  		log.Crit("Failed to delete block total difficulty", "err", err)
   517  	}
   518  }
   519  
   520  // HasReceipts verifies the existence of all the transaction receipts belonging
   521  // to a block.
   522  func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
   523  	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
   524  		return true
   525  	}
   526  	if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
   527  		return false
   528  	}
   529  	return true
   530  }
   531  
   532  // ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
   533  func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
   534  	// First try to look up the data in the ancient database. The extra hash
   535  	// comparison is necessary since the ancient database only maintains
   536  	// canonical data.
   537  	data, _ := db.Ancient(freezerReceiptTable, number)
   538  	if len(data) > 0 {
   539  		h, _ := db.Ancient(freezerHashTable, number)
   540  		if common.BytesToHash(h) == hash {
   541  			return data
   542  		}
   543  	}
   544  	// Then try to look up the data in leveldb.
   545  	data, _ = db.Get(blockReceiptsKey(number, hash))
   546  	if len(data) > 0 {
   547  		return data
   548  	}
   549  	// In the background, the freezer is moving data from leveldb into flat
   550  	// files. It is possible that the entry was not yet in the ancient store
   551  	// when we first checked, but had already been moved out of leveldb by the
   552  	// time we looked there, so check the ancient store once more.
   553  	data, _ = db.Ancient(freezerReceiptTable, number)
   554  	if len(data) > 0 {
   555  		h, _ := db.Ancient(freezerHashTable, number)
   556  		if common.BytesToHash(h) == hash {
   557  			return data
   558  		}
   559  	}
   560  	return nil // Can't find the data anywhere.
   561  }
   562  
   563  // ReadRawReceipts retrieves all the transaction receipts belonging to a block.
   564  // The receipt metadata fields are not guaranteed to be populated, so they
   565  // should not be used. Use ReadReceipts instead if the metadata is needed.
   566  func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
   567  	// Retrieve the flattened receipt slice
   568  	data := ReadReceiptsRLP(db, hash, number)
   569  	if len(data) == 0 {
   570  		return nil
   571  	}
   572  	// Convert the receipts from their storage form to their internal representation
   573  	storageReceipts := []*types.ReceiptForStorage{}
   574  	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
   575  		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
   576  		return nil
   577  	}
   578  	receipts := make(types.Receipts, len(storageReceipts))
   579  	for i, storageReceipt := range storageReceipts {
   580  		receipts[i] = (*types.Receipt)(storageReceipt)
   581  	}
   582  	return receipts
   583  }
   584  
   585  // ReadReceipts retrieves all the transaction receipts belonging to a block, including
   586  // its corresponding metadata fields. If it is unable to populate these metadata
   587  // fields then nil is returned.
   588  //
   589  // The current implementation populates these metadata fields by reading the receipts'
   590  // corresponding block body, so if the block body is not found it will return nil even
   591  // if the receipt itself is stored.
   592  func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
   593  	// We're deriving many fields from the block body, retrieve it beside the receipts
   594  	receipts := ReadRawReceipts(db, hash, number)
   595  	if receipts == nil {
   596  		return nil
   597  	}
   598  	body := ReadBody(db, hash, number)
   599  	if body == nil {
   600  		log.Error("Missing body but have receipt", "hash", hash, "number", number)
   601  		return nil
   602  	}
   603  	if err := receipts.DeriveFields(config, hash, number, body.Transactions); err != nil {
   604  		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
   605  		return nil
   606  	}
   607  	return receipts
   608  }
   609  
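// Illustrative sketch, not part of the original file: when the derived fields
// (block hash, transaction hash, gas used, log positions, ...) are not needed,
// callers can avoid the body lookup that ReadReceipts performs by using
// ReadRawReceipts directly. The hypothetical helper below counts a block's logs
// from the raw form, which is sufficient for that purpose.
func countBlockLogsSketch(db ethdb.Reader, hash common.Hash, number uint64) int {
	receipts := ReadRawReceipts(db, hash, number) // raw form is enough for counting
	logs := 0
	for _, receipt := range receipts {
		logs += len(receipt.Logs)
	}
	return logs
}
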
   610  // WriteReceipts stores all the transaction receipts belonging to a block.
   611  func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
   612  	// Convert the receipts into their storage form and serialize them
   613  	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
   614  	for i, receipt := range receipts {
   615  		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
   616  	}
   617  	bytes, err := rlp.EncodeToBytes(storageReceipts)
   618  	if err != nil {
   619  		log.Crit("Failed to encode block receipts", "err", err)
   620  	}
   621  	// Store the flattened receipt slice
   622  	if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
   623  		log.Crit("Failed to store block receipts", "err", err)
   624  	}
   625  }
   626  
   627  // DeleteReceipts removes all receipt data associated with a block hash.
   628  func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   629  	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
   630  		log.Crit("Failed to delete block receipts", "err", err)
   631  	}
   632  }
   633  
   634  // ReadBlock retrieves an entire block corresponding to the hash, assembling it
   635  // back from the stored header and body. If either the header or body could not
   636  // be retrieved, nil is returned.
   637  //
   638  // Note: due to the concurrent download of header and block body, the header (and
   639  // thus the canonical hash) can be stored in the database while the body data is not (yet).
   640  func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
   641  	header := ReadHeader(db, hash, number)
   642  	if header == nil {
   643  		return nil
   644  	}
   645  	body := ReadBody(db, hash, number)
   646  	if body == nil {
   647  		return nil
   648  	}
   649  	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles)
   650  }
   651  
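// Illustrative sketch, not part of the original file: reading the current head
// block combines the head pointer, the hash->number index and ReadBlock. The
// helper name is hypothetical; it returns nil if any piece is missing.
func readHeadBlockSketch(db ethdb.Database) *types.Block {
	hash := ReadHeadBlockHash(db)
	if hash == (common.Hash{}) {
		return nil // No head block pointer stored yet.
	}
	number := ReadHeaderNumber(db, hash)
	if number == nil {
		return nil // Hash->number mapping missing.
	}
	return ReadBlock(db, hash, *number)
}
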
   652  // WriteBlock serializes a block into the database, header and body separately.
   653  func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
   654  	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
   655  	WriteHeader(db, block.Header())
   656  }
   657  
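// Illustrative sketch, not part of the original file: WriteBlock persists only
// the header and body. The outline below shows how the accessors in this file
// are commonly combined to commit a new canonical block; it is not the exact
// sequence used by the blockchain itself and omits batching and error handling.
func writeCanonicalBlockSketch(db ethdb.KeyValueWriter, block *types.Block, receipts types.Receipts, td *big.Int) {
	WriteBlock(db, block)
	WriteReceipts(db, block.Hash(), block.NumberU64(), receipts)
	WriteTd(db, block.Hash(), block.NumberU64(), td)
	WriteCanonicalHash(db, block.Hash(), block.NumberU64())
	WriteHeadHeaderHash(db, block.Hash())
	WriteHeadBlockHash(db, block.Hash())
}
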
   658  // WriteAncientBlock writes an entire block's data into the ancient store and returns the total written size.
   659  func WriteAncientBlock(db ethdb.AncientWriter, block *types.Block, receipts types.Receipts, td *big.Int) int {
   660  	// Encode all block components to RLP format.
   661  	headerBlob, err := rlp.EncodeToBytes(block.Header())
   662  	if err != nil {
   663  		log.Crit("Failed to RLP encode block header", "err", err)
   664  	}
   665  	bodyBlob, err := rlp.EncodeToBytes(block.Body())
   666  	if err != nil {
   667  		log.Crit("Failed to RLP encode body", "err", err)
   668  	}
   669  	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
   670  	for i, receipt := range receipts {
   671  		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
   672  	}
   673  	receiptBlob, err := rlp.EncodeToBytes(storageReceipts)
   674  	if err != nil {
   675  		log.Crit("Failed to RLP encode block receipts", "err", err)
   676  	}
   677  	tdBlob, err := rlp.EncodeToBytes(td)
   678  	if err != nil {
   679  		log.Crit("Failed to RLP encode block total difficulty", "err", err)
   680  	}
   681  	// Write all blobs to the flat files.
   682  	err = db.AppendAncient(block.NumberU64(), block.Hash().Bytes(), headerBlob, bodyBlob, receiptBlob, tdBlob)
   683  	if err != nil {
   684  		log.Crit("Failed to write block data to ancient store", "err", err)
   685  	}
   686  	return len(headerBlob) + len(bodyBlob) + len(receiptBlob) + len(tdBlob) + common.HashLength
   687  }
   688  
   689  // DeleteBlock removes all block data associated with a hash.
   690  func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   691  	DeleteReceipts(db, hash, number)
   692  	DeleteHeader(db, hash, number)
   693  	DeleteBody(db, hash, number)
   694  	DeleteTd(db, hash, number)
   695  }
   696  
   697  // DeleteBlockWithoutNumber removes all block data associated with a hash, except
   698  // the hash to number mapping.
   699  func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   700  	DeleteReceipts(db, hash, number)
   701  	deleteHeaderWithoutNumber(db, hash, number)
   702  	DeleteBody(db, hash, number)
   703  	DeleteTd(db, hash, number)
   704  }
   705  
   706  const badBlockToKeep = 10
   707  
   708  type badBlock struct {
   709  	Header *types.Header
   710  	Body   *types.Body
   711  }
   712  
   713  // badBlockList implements the sort interface to allow sorting a list of
   714  // bad blocks by their number (callers wrap it in sort.Reverse for descending order).
   715  type badBlockList []*badBlock
   716  
   717  func (s badBlockList) Len() int { return len(s) }
   718  func (s badBlockList) Less(i, j int) bool {
   719  	return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64()
   720  }
   721  func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
   722  
   723  // ReadBadBlock retrieves the bad block with the corresponding block hash.
   724  func ReadBadBlock(db ethdb.Reader, hash common.Hash) *types.Block {
   725  	blob, err := db.Get(badBlockKey)
   726  	if err != nil {
   727  		return nil
   728  	}
   729  	var badBlocks badBlockList
   730  	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
   731  		return nil
   732  	}
   733  	for _, bad := range badBlocks {
   734  		if bad.Header.Hash() == hash {
   735  			return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles)
   736  		}
   737  	}
   738  	return nil
   739  }
   740  
   741  // ReadAllBadBlocks retrieves all the bad blocks in the database.
   742  // All returned blocks are sorted in reverse order by number.
   743  func ReadAllBadBlocks(db ethdb.Reader) []*types.Block {
   744  	blob, err := db.Get(badBlockKey)
   745  	if err != nil {
   746  		return nil
   747  	}
   748  	var badBlocks badBlockList
   749  	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
   750  		return nil
   751  	}
   752  	var blocks []*types.Block
   753  	for _, bad := range badBlocks {
   754  		blocks = append(blocks, types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions, bad.Body.Uncles))
   755  	}
   756  	return blocks
   757  }
   758  
   759  // WriteBadBlock serializes the bad block into the database. If the number of
   760  // accumulated bad blocks exceeds the limit, the oldest will be dropped.
   761  func WriteBadBlock(db ethdb.KeyValueStore, block *types.Block) {
   762  	blob, err := db.Get(badBlockKey)
   763  	if err != nil {
   764  		log.Warn("Failed to load old bad blocks", "error", err)
   765  	}
   766  	var badBlocks badBlockList
   767  	if len(blob) > 0 {
   768  		if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
   769  			log.Crit("Failed to decode old bad blocks", "error", err)
   770  		}
   771  	}
   772  	for _, b := range badBlocks {
   773  		if b.Header.Number.Uint64() == block.NumberU64() && b.Header.Hash() == block.Hash() {
   774  			log.Info("Skip duplicated bad block", "number", block.NumberU64(), "hash", block.Hash())
   775  			return
   776  		}
   777  	}
   778  	badBlocks = append(badBlocks, &badBlock{
   779  		Header: block.Header(),
   780  		Body:   block.Body(),
   781  	})
   782  	sort.Sort(sort.Reverse(badBlocks))
   783  	if len(badBlocks) > badBlockToKeep {
   784  		badBlocks = badBlocks[:badBlockToKeep]
   785  	}
   786  	data, err := rlp.EncodeToBytes(badBlocks)
   787  	if err != nil {
   788  		log.Crit("Failed to encode bad blocks", "err", err)
   789  	}
   790  	if err := db.Put(badBlockKey, data); err != nil {
   791  		log.Crit("Failed to write bad blocks", "err", err)
   792  	}
   793  }
   794  
   795  // DeleteBadBlocks deletes all the bad blocks from the database.
   796  func DeleteBadBlocks(db ethdb.KeyValueWriter) {
   797  	if err := db.Delete(badBlockKey); err != nil {
   798  		log.Crit("Failed to delete bad blocks", "err", err)
   799  	}
   800  }
   801  
   802  // FindCommonAncestor returns the last common ancestor of two block headers.
   803  func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
   804  	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
   805  		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
   806  		if a == nil {
   807  			return nil
   808  		}
   809  	}
   810  	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
   811  		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
   812  		if b == nil {
   813  			return nil
   814  		}
   815  	}
   816  	for a.Hash() != b.Hash() {
   817  		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
   818  		if a == nil {
   819  			return nil
   820  		}
   821  		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
   822  		if b == nil {
   823  			return nil
   824  		}
   825  	}
   826  	return a
   827  }
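
// Illustrative sketch, not part of the original file: FindCommonAncestor is the
// primitive used when handling a reorg, locating the fork point between the
// current head and a competing header. The hypothetical helper below reports
// how many blocks would be rewound from the old head.
func reorgDepthSketch(db ethdb.Reader, oldHead, newHead *types.Header) uint64 {
	ancestor := FindCommonAncestor(db, oldHead, newHead)
	if ancestor == nil {
		return 0 // No common ancestor found in the database.
	}
	return oldHead.Number.Uint64() - ancestor.Number.Uint64()
}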