gitlab.com/flarenetwork/coreth@v0.1.1/core/rawdb/accessors_chain.go

     1  // (c) 2019-2020, Ava Labs, Inc.
     2  //
     3  // This file is a derived work, based on the go-ethereum library whose original
     4  // notices appear below.
     5  //
     6  // It is distributed under a license compatible with the licensing terms of the
     7  // original code from which it is derived.
     8  //
     9  // Much love to the original authors for their work.
    10  // **********
    11  // Copyright 2018 The go-ethereum Authors
    12  // This file is part of the go-ethereum library.
    13  //
    14  // The go-ethereum library is free software: you can redistribute it and/or modify
    15  // it under the terms of the GNU Lesser General Public License as published by
    16  // the Free Software Foundation, either version 3 of the License, or
    17  // (at your option) any later version.
    18  //
    19  // The go-ethereum library is distributed in the hope that it will be useful,
    20  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    21  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    22  // GNU Lesser General Public License for more details.
    23  //
    24  // You should have received a copy of the GNU Lesser General Public License
    25  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    26  
    27  package rawdb
    28  
    29  import (
    30  	"bytes"
    31  	"encoding/binary"
    32  	"math/big"
    33  
    34  	"github.com/ethereum/go-ethereum/common"
    35  	"github.com/ethereum/go-ethereum/crypto"
    36  	"github.com/ethereum/go-ethereum/ethdb"
    37  	"github.com/ethereum/go-ethereum/log"
    38  	"github.com/ethereum/go-ethereum/rlp"
    39  	"gitlab.com/flarenetwork/coreth/core/types"
    40  	"gitlab.com/flarenetwork/coreth/params"
    41  )
    42  
    43  // ReadCanonicalHash retrieves the hash assigned to a canonical block number.
    44  func ReadCanonicalHash(db ethdb.Reader, number uint64) common.Hash {
    45  	data, _ := db.Ancient(freezerHashTable, number)
    46  	if len(data) == 0 {
    47  		data, _ = db.Get(headerHashKey(number))
    48  		// The freezer moves data from leveldb to flat files in the background, so
    49  		// the entry may be missing from the ancient store on the first check yet
    50  		// already moved out of leveldb by the time we look there. Re-check the
    51  		// ancient store to avoid a spurious not-found result.
    52  		if len(data) == 0 {
    53  			data, _ = db.Ancient(freezerHashTable, number)
    54  		}
    55  	}
    56  	if len(data) == 0 {
    57  		return common.Hash{}
    58  	}
    59  	return common.BytesToHash(data)
    60  }
    61  
    62  // WriteCanonicalHash stores the hash assigned to a canonical block number.
    63  func WriteCanonicalHash(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
    64  	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
    65  		log.Crit("Failed to store number to hash mapping", "err", err)
    66  	}
    67  }
    68  
    69  // DeleteCanonicalHash removes the number to hash canonical mapping.
    70  func DeleteCanonicalHash(db ethdb.KeyValueWriter, number uint64) {
    71  	if err := db.Delete(headerHashKey(number)); err != nil {
    72  		log.Crit("Failed to delete number to hash mapping", "err", err)
    73  	}
    74  }
    75  
    76  // ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
    77  // including both canonical and reorged forks.
    78  func ReadAllHashes(db ethdb.Iteratee, number uint64) []common.Hash {
    79  	prefix := headerKeyPrefix(number)
    80  
    81  	hashes := make([]common.Hash, 0, 1)
    82  	it := db.NewIterator(prefix, nil)
    83  	defer it.Release()
    84  
    85  	for it.Next() {
    86  		if key := it.Key(); len(key) == len(prefix)+32 {
    87  			hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
    88  		}
    89  	}
    90  	return hashes
    91  }
    92  
    93  // ReadAllCanonicalHashes retrieves all canonical number and hash mappings in the
    94  // given chain range. If the number of accumulated entries reaches the given
    95  // threshold, the iteration is aborted and the partial result is returned.
    96  func ReadAllCanonicalHashes(db ethdb.Iteratee, from uint64, to uint64, limit int) ([]uint64, []common.Hash) {
    97  	// Short circuit if the limit is 0.
    98  	if limit == 0 {
    99  		return nil, nil
   100  	}
   101  	var (
   102  		numbers []uint64
   103  		hashes  []common.Hash
   104  	)
   105  	// Construct the key prefix of the start point.
   106  	start, end := headerHashKey(from), headerHashKey(to)
   107  	it := db.NewIterator(nil, start)
   108  	defer it.Release()
   109  
   110  	for it.Next() {
   111  		if bytes.Compare(it.Key(), end) >= 0 {
   112  			break
   113  		}
   114  		if key := it.Key(); len(key) == len(headerPrefix)+8+1 && bytes.Equal(key[len(key)-1:], headerHashSuffix) {
   115  			numbers = append(numbers, binary.BigEndian.Uint64(key[len(headerPrefix):len(headerPrefix)+8]))
   116  			hashes = append(hashes, common.BytesToHash(it.Value()))
   117  			// If the number of accumulated entries reaches the limit, stop iterating.
   118  			if len(numbers) >= limit {
   119  				break
   120  			}
   121  		}
   122  	}
   123  	return numbers, hashes
   124  }
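
        // exampleDumpCanonicalRange is an illustrative sketch, not part of the original
        // file: it shows how ReadAllCanonicalHashes pairs block numbers with their
        // canonical hashes over a bounded range. The db handle is assumed to be any
        // ethdb.Iteratee, e.g. the chain's key-value store.
        func exampleDumpCanonicalRange(db ethdb.Iteratee, from, to uint64) {
        	// Cap the scan at 1024 entries; the accessor stops early once the limit is hit.
        	numbers, hashes := ReadAllCanonicalHashes(db, from, to, 1024)
        	for i, number := range numbers {
        		log.Info("Canonical mapping", "number", number, "hash", hashes[i])
        	}
        }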
   125  
   126  // ReadHeaderNumber returns the header number assigned to a hash.
   127  func ReadHeaderNumber(db ethdb.KeyValueReader, hash common.Hash) *uint64 {
   128  	data, _ := db.Get(headerNumberKey(hash))
   129  	if len(data) != 8 {
   130  		return nil
   131  	}
   132  	number := binary.BigEndian.Uint64(data)
   133  	return &number
   134  }
   135  
   136  // WriteHeaderNumber stores the hash->number mapping.
   137  func WriteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   138  	key := headerNumberKey(hash)
   139  	enc := encodeBlockNumber(number)
   140  	if err := db.Put(key, enc); err != nil {
   141  		log.Crit("Failed to store hash to number mapping", "err", err)
   142  	}
   143  }
   144  
   145  // DeleteHeaderNumber removes hash->number mapping.
   146  func DeleteHeaderNumber(db ethdb.KeyValueWriter, hash common.Hash) {
   147  	if err := db.Delete(headerNumberKey(hash)); err != nil {
   148  		log.Crit("Failed to delete hash to number mapping", "err", err)
   149  	}
   150  }
   151  
   152  // ReadHeadHeaderHash retrieves the hash of the current canonical head header.
   153  func ReadHeadHeaderHash(db ethdb.KeyValueReader) common.Hash {
   154  	data, _ := db.Get(headHeaderKey)
   155  	if len(data) == 0 {
   156  		return common.Hash{}
   157  	}
   158  	return common.BytesToHash(data)
   159  }
   160  
   161  // WriteHeadHeaderHash stores the hash of the current canonical head header.
   162  func WriteHeadHeaderHash(db ethdb.KeyValueWriter, hash common.Hash) {
   163  	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
   164  		log.Crit("Failed to store last header's hash", "err", err)
   165  	}
   166  }
   167  
   168  // ReadHeadBlockHash retrieves the hash of the current canonical head block.
   169  func ReadHeadBlockHash(db ethdb.KeyValueReader) common.Hash {
   170  	data, _ := db.Get(headBlockKey)
   171  	if len(data) == 0 {
   172  		return common.Hash{}
   173  	}
   174  	return common.BytesToHash(data)
   175  }
   176  
   177  // WriteHeadBlockHash stores the head block's hash.
   178  func WriteHeadBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
   179  	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
   180  		log.Crit("Failed to store last block's hash", "err", err)
   181  	}
   182  }
   183  
   184  // ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
   185  func ReadHeadFastBlockHash(db ethdb.KeyValueReader) common.Hash {
   186  	data, _ := db.Get(headFastBlockKey)
   187  	if len(data) == 0 {
   188  		return common.Hash{}
   189  	}
   190  	return common.BytesToHash(data)
   191  }
   192  
   193  // WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
   194  func WriteHeadFastBlockHash(db ethdb.KeyValueWriter, hash common.Hash) {
   195  	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
   196  		log.Crit("Failed to store last fast block's hash", "err", err)
   197  	}
   198  }
   199  
   200  // ReadFastTrieProgress retrieves the number of trie nodes fast synced to allow
   201  // reporting correct numbers across restarts.
   202  func ReadFastTrieProgress(db ethdb.KeyValueReader) uint64 {
   203  	data, _ := db.Get(fastTrieProgressKey)
   204  	if len(data) == 0 {
   205  		return 0
   206  	}
   207  	return new(big.Int).SetBytes(data).Uint64()
   208  }
   209  
   210  // WriteFastTrieProgress stores the fast sync trie progress counter to support
   211  // retrieving it across restarts.
   212  func WriteFastTrieProgress(db ethdb.KeyValueWriter, count uint64) {
   213  	if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
   214  		log.Crit("Failed to store fast sync trie progress", "err", err)
   215  	}
   216  }
   217  
   218  // ReadTxIndexTail retrieves the number of the oldest block whose transaction
   219  // indices have been indexed. If the corresponding entry does not exist in the
   220  // database, it means the indexing has been finished.
   221  func ReadTxIndexTail(db ethdb.KeyValueReader) *uint64 {
   222  	data, _ := db.Get(txIndexTailKey)
   223  	if len(data) != 8 {
   224  		return nil
   225  	}
   226  	number := binary.BigEndian.Uint64(data)
   227  	return &number
   228  }
   229  
   230  // WriteTxIndexTail stores the number of the oldest indexed block
   231  // into the database.
   232  func WriteTxIndexTail(db ethdb.KeyValueWriter, number uint64) {
   233  	if err := db.Put(txIndexTailKey, encodeBlockNumber(number)); err != nil {
   234  		log.Crit("Failed to store the transaction index tail", "err", err)
   235  	}
   236  }
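
        // exampleTxIndexingDone is an illustrative sketch, not part of the original file:
        // per the accessor's contract above, the tx index tail doubles as a progress
        // marker, so a missing entry means the indexing has been finished, while a
        // present entry names the oldest block whose transaction indices exist.
        func exampleTxIndexingDone(db ethdb.KeyValueReader) bool {
        	return ReadTxIndexTail(db) == nil
        }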
   237  
   238  // ReadFastTxLookupLimit retrieves the tx lookup limit used in fast sync.
   239  func ReadFastTxLookupLimit(db ethdb.KeyValueReader) *uint64 {
   240  	data, _ := db.Get(fastTxLookupLimitKey)
   241  	if len(data) != 8 {
   242  		return nil
   243  	}
   244  	number := binary.BigEndian.Uint64(data)
   245  	return &number
   246  }
   247  
   248  // WriteFastTxLookupLimit stores the tx lookup limit used in fast sync into the database.
   249  func WriteFastTxLookupLimit(db ethdb.KeyValueWriter, number uint64) {
   250  	if err := db.Put(fastTxLookupLimitKey, encodeBlockNumber(number)); err != nil {
   251  		log.Crit("Failed to store transaction lookup limit for fast sync", "err", err)
   252  	}
   253  }
   254  
   255  // ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
   256  func ReadHeaderRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
   257  	// First try to look up the data in the ancient database. Extra hash
   258  	// comparison is necessary since the ancient database only maintains
   259  	// the canonical data.
   260  	data, _ := db.Ancient(freezerHeaderTable, number)
   261  	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
   262  		return data
   263  	}
   264  	// Then try to look up the data in leveldb.
   265  	data, _ = db.Get(headerKey(number, hash))
   266  	if len(data) > 0 {
   267  		return data
   268  	}
   269  	// The freezer moves data from leveldb to flat files in the background, so
   270  	// the entry may be missing from the ancient store on the first check yet
   271  	// already moved out of leveldb by the time we look there. Re-check the
   272  	// ancient store to avoid a spurious not-found result.
   273  	data, _ = db.Ancient(freezerHeaderTable, number)
   274  	if len(data) > 0 && crypto.Keccak256Hash(data) == hash {
   275  		return data
   276  	}
   277  	return nil // Can't find the data anywhere.
   278  }
   279  
   280  // HasHeader verifies the existence of a block header corresponding to the hash.
   281  func HasHeader(db ethdb.Reader, hash common.Hash, number uint64) bool {
   282  	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
   283  		return true
   284  	}
   285  	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
   286  		return false
   287  	}
   288  	return true
   289  }
   290  
   291  // ReadHeader retrieves the block header corresponding to the hash.
   292  func ReadHeader(db ethdb.Reader, hash common.Hash, number uint64) *types.Header {
   293  	data := ReadHeaderRLP(db, hash, number)
   294  	if len(data) == 0 {
   295  		return nil
   296  	}
   297  	header := new(types.Header)
   298  	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
   299  		log.Error("Invalid block header RLP", "hash", hash, "err", err)
   300  		return nil
   301  	}
   302  	return header
   303  }
   304  
   305  // WriteHeader stores a block header into the database and also stores the hash-
   306  // to-number mapping.
   307  func WriteHeader(db ethdb.KeyValueWriter, header *types.Header) {
   308  	var (
   309  		hash   = header.Hash()
   310  		number = header.Number.Uint64()
   311  	)
   312  	// Write the hash -> number mapping
   313  	WriteHeaderNumber(db, hash, number)
   314  
   315  	// Write the encoded header
   316  	data, err := rlp.EncodeToBytes(header)
   317  	if err != nil {
   318  		log.Crit("Failed to RLP encode header", "err", err)
   319  	}
   320  	key := headerKey(number, hash)
   321  	if err := db.Put(key, data); err != nil {
   322  		log.Crit("Failed to store header", "err", err)
   323  	}
   324  }
   325  
   326  // DeleteHeader removes all block header data associated with a hash.
   327  func DeleteHeader(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   328  	deleteHeaderWithoutNumber(db, hash, number)
   329  	if err := db.Delete(headerNumberKey(hash)); err != nil {
   330  		log.Crit("Failed to delete hash to number mapping", "err", err)
   331  	}
   332  }
   333  
   334  // deleteHeaderWithoutNumber removes only the block header but does not remove
   335  // the hash to number mapping.
   336  func deleteHeaderWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   337  	if err := db.Delete(headerKey(number, hash)); err != nil {
   338  		log.Crit("Failed to delete header", "err", err)
   339  	}
   340  }
   341  
   342  // ReadBodyRLP retrieves the block body (transactions and uncles) in RLP encoding.
   343  func ReadBodyRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
   344  	// First try to look up the data in the ancient database. Extra hash
   345  	// comparison is necessary since the ancient database only maintains
   346  	// the canonical data.
   347  	data, _ := db.Ancient(freezerBodiesTable, number)
   348  	if len(data) > 0 {
   349  		h, _ := db.Ancient(freezerHashTable, number)
   350  		if common.BytesToHash(h) == hash {
   351  			return data
   352  		}
   353  	}
   354  	// Then try to look up the data in leveldb.
   355  	data, _ = db.Get(blockBodyKey(number, hash))
   356  	if len(data) > 0 {
   357  		return data
   358  	}
   359  	// The freezer moves data from leveldb to flat files in the background, so
   360  	// the entry may be missing from the ancient store on the first check yet
   361  	// already moved out of leveldb by the time we look there. Re-check the
   362  	// ancient store to avoid a spurious not-found result.
   363  	data, _ = db.Ancient(freezerBodiesTable, number)
   364  	if len(data) > 0 {
   365  		h, _ := db.Ancient(freezerHashTable, number)
   366  		if common.BytesToHash(h) == hash {
   367  			return data
   368  		}
   369  	}
   370  	return nil // Can't find the data anywhere.
   371  }
   372  
   373  // ReadCanonicalBodyRLP retrieves the block body (transactions and uncles) for the canonical
   374  // block at number, in RLP encoding.
   375  func ReadCanonicalBodyRLP(db ethdb.Reader, number uint64) rlp.RawValue {
   376  	// If it's an ancient one, we don't need the canonical hash
   377  	data, _ := db.Ancient(freezerBodiesTable, number)
   378  	if len(data) == 0 {
   379  		// Need to get the hash
   380  		data, _ = db.Get(blockBodyKey(number, ReadCanonicalHash(db, number)))
   381  		// The freezer moves data from leveldb to flat files in the background, so
   382  		// the entry may be missing from the ancient store on the first check yet
   383  		// already moved out of leveldb by the time we look there. Re-check the
   384  		// ancient store to avoid a spurious not-found result.
   385  		if len(data) == 0 {
   386  			data, _ = db.Ancient(freezerBodiesTable, number)
   387  		}
   388  	}
   389  	return data
   390  }
   391  
   392  // WriteBodyRLP stores an RLP encoded block body into the database.
   393  func WriteBodyRLP(db ethdb.KeyValueWriter, hash common.Hash, number uint64, rlp rlp.RawValue) {
   394  	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
   395  		log.Crit("Failed to store block body", "err", err)
   396  	}
   397  }
   398  
   399  // HasBody verifies the existence of a block body corresponding to the hash.
   400  func HasBody(db ethdb.Reader, hash common.Hash, number uint64) bool {
   401  	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
   402  		return true
   403  	}
   404  	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
   405  		return false
   406  	}
   407  	return true
   408  }
   409  
   410  // ReadBody retrieves the block body corresponding to the hash.
   411  func ReadBody(db ethdb.Reader, hash common.Hash, number uint64) *types.Body {
   412  	data := ReadBodyRLP(db, hash, number)
   413  	if len(data) == 0 {
   414  		return nil
   415  	}
   416  	body := new(types.Body)
   417  	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
   418  		log.Error("Invalid block body RLP", "hash", hash, "err", err)
   419  		return nil
   420  	}
   421  	return body
   422  }
   423  
   424  // WriteBody stores a block body into the database.
   425  func WriteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64, body *types.Body) {
   426  	data, err := rlp.EncodeToBytes(body)
   427  	if err != nil {
   428  		log.Crit("Failed to RLP encode body", "err", err)
   429  	}
   430  	WriteBodyRLP(db, hash, number, data)
   431  }
   432  
   433  // DeleteBody removes all block body data associated with a hash.
   434  func DeleteBody(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   435  	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
   436  		log.Crit("Failed to delete block body", "err", err)
   437  	}
   438  }
   439  
   440  // ReadTdRLP retrieves a block's total difficulty corresponding to the hash in RLP encoding.
   441  func ReadTdRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
   442  	// First try to look up the data in the ancient database. Extra hash
   443  	// comparison is necessary since the ancient database only maintains
   444  	// the canonical data.
   445  	data, _ := db.Ancient(freezerDifficultyTable, number)
   446  	if len(data) > 0 {
   447  		h, _ := db.Ancient(freezerHashTable, number)
   448  		if common.BytesToHash(h) == hash {
   449  			return data
   450  		}
   451  	}
   452  	// Then try to look up the data in leveldb.
   453  	data, _ = db.Get(headerTDKey(number, hash))
   454  	if len(data) > 0 {
   455  		return data
   456  	}
   457  	// The freezer moves data from leveldb to flat files in the background, so
   458  	// the entry may be missing from the ancient store on the first check yet
   459  	// already moved out of leveldb by the time we look there. Re-check the
   460  	// ancient store to avoid a spurious not-found result.
   461  	data, _ = db.Ancient(freezerDifficultyTable, number)
   462  	if len(data) > 0 {
   463  		h, _ := db.Ancient(freezerHashTable, number)
   464  		if common.BytesToHash(h) == hash {
   465  			return data
   466  		}
   467  	}
   468  	return nil // Can't find the data anywhere.
   469  }
   470  
   471  // ReadTd retrieves a block's total difficulty corresponding to the hash.
   472  func ReadTd(db ethdb.Reader, hash common.Hash, number uint64) *big.Int {
   473  	data := ReadTdRLP(db, hash, number)
   474  	if len(data) == 0 {
   475  		return nil
   476  	}
   477  	td := new(big.Int)
   478  	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
   479  		log.Error("Invalid block total difficulty RLP", "hash", hash, "err", err)
   480  		return nil
   481  	}
   482  	return td
   483  }
   484  
   485  // WriteTd stores the total difficulty of a block into the database.
   486  func WriteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64, td *big.Int) {
   487  	data, err := rlp.EncodeToBytes(td)
   488  	if err != nil {
   489  		log.Crit("Failed to RLP encode block total difficulty", "err", err)
   490  	}
   491  	if err := db.Put(headerTDKey(number, hash), data); err != nil {
   492  		log.Crit("Failed to store block total difficulty", "err", err)
   493  	}
   494  }
   495  
   496  // DeleteTd removes all block total difficulty data associated with a hash.
   497  func DeleteTd(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   498  	if err := db.Delete(headerTDKey(number, hash)); err != nil {
   499  		log.Crit("Failed to delete block total difficulty", "err", err)
   500  	}
   501  }
   502  
   503  // HasReceipts verifies the existence of all the transaction receipts belonging
   504  // to a block.
   505  func HasReceipts(db ethdb.Reader, hash common.Hash, number uint64) bool {
   506  	if has, err := db.Ancient(freezerHashTable, number); err == nil && common.BytesToHash(has) == hash {
   507  		return true
   508  	}
   509  	if has, err := db.Has(blockReceiptsKey(number, hash)); !has || err != nil {
   510  		return false
   511  	}
   512  	return true
   513  }
   514  
   515  // ReadReceiptsRLP retrieves all the transaction receipts belonging to a block in RLP encoding.
   516  func ReadReceiptsRLP(db ethdb.Reader, hash common.Hash, number uint64) rlp.RawValue {
   517  	// First try to look up the data in the ancient database. Extra hash
   518  	// comparison is necessary since the ancient database only maintains
   519  	// the canonical data.
   520  	data, _ := db.Ancient(freezerReceiptTable, number)
   521  	if len(data) > 0 {
   522  		h, _ := db.Ancient(freezerHashTable, number)
   523  		if common.BytesToHash(h) == hash {
   524  			return data
   525  		}
   526  	}
   527  	// Then try to look up the data in leveldb.
   528  	data, _ = db.Get(blockReceiptsKey(number, hash))
   529  	if len(data) > 0 {
   530  		return data
   531  	}
   532  	// The freezer moves data from leveldb to flat files in the background, so
   533  	// the entry may be missing from the ancient store on the first check yet
   534  	// already moved out of leveldb by the time we look there. Re-check the
   535  	// ancient store to avoid a spurious not-found result.
   536  	data, _ = db.Ancient(freezerReceiptTable, number)
   537  	if len(data) > 0 {
   538  		h, _ := db.Ancient(freezerHashTable, number)
   539  		if common.BytesToHash(h) == hash {
   540  			return data
   541  		}
   542  	}
   543  	return nil // Can't find the data anywhere.
   544  }
   545  
   546  // ReadRawReceipts retrieves all the transaction receipts belonging to a block.
   547  // The receipt metadata fields are not guaranteed to be populated, so they
   548  // should not be used. Use ReadReceipts instead if the metadata is needed.
   549  func ReadRawReceipts(db ethdb.Reader, hash common.Hash, number uint64) types.Receipts {
   550  	// Retrieve the flattened receipt slice
   551  	data := ReadReceiptsRLP(db, hash, number)
   552  	if len(data) == 0 {
   553  		return nil
   554  	}
   555  	// Convert the receipts from their storage form to their internal representation
   556  	storageReceipts := []*types.ReceiptForStorage{}
   557  	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
   558  		log.Error("Invalid receipt array RLP", "hash", hash, "err", err)
   559  		return nil
   560  	}
   561  	receipts := make(types.Receipts, len(storageReceipts))
   562  	for i, storageReceipt := range storageReceipts {
   563  		receipts[i] = (*types.Receipt)(storageReceipt)
   564  	}
   565  	return receipts
   566  }
   567  
   568  // ReadReceipts retrieves all the transaction receipts belonging to a block, including
   569  // its corresponding metadata fields. If it is unable to populate these metadata
   570  // fields then nil is returned.
   571  //
   572  // The current implementation populates these metadata fields by reading the receipts'
   573  // corresponding block body, so if the block body is not found it will return nil even
   574  // if the receipt itself is stored.
   575  func ReadReceipts(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
   576  	// We're deriving many fields from the block body; retrieve it alongside the receipts
   577  	receipts := ReadRawReceipts(db, hash, number)
   578  	if receipts == nil {
   579  		return nil
   580  	}
   581  	header := ReadHeader(db, hash, number)
   582  	if header == nil {
   583  		return nil
   584  	}
   585  	body := ReadBody(db, hash, number)
   586  	if body == nil {
   587  		log.Error("Missing body but have receipt", "hash", hash, "number", number)
   588  		return nil
   589  	}
   590  	if err := receipts.DeriveFields(config, hash, number, header.Time, body.Transactions); err != nil {
   591  		log.Error("Failed to derive block receipts fields", "hash", hash, "number", number, "err", err)
   592  		return nil
   593  	}
   594  	return receipts
   595  }
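
        // exampleReadReceiptsWithFallback is an illustrative sketch, not part of the
        // original file: ReadReceipts needs the chain config plus the block header and
        // body to derive the metadata fields, so it returns nil whenever any of them is
        // missing. The sketch falls back to ReadRawReceipts, whose entries only carry
        // the consensus fields (status, logs, bloom) without the derived metadata.
        func exampleReadReceiptsWithFallback(db ethdb.Reader, hash common.Hash, number uint64, config *params.ChainConfig) types.Receipts {
        	if receipts := ReadReceipts(db, hash, number, config); receipts != nil {
        		return receipts // fully populated, metadata derived from the block body
        	}
        	return ReadRawReceipts(db, hash, number)
        }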
   596  
   597  // WriteReceipts stores all the transaction receipts belonging to a block.
   598  func WriteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts) {
   599  	// Convert the receipts into their storage form and serialize them
   600  	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
   601  	for i, receipt := range receipts {
   602  		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
   603  	}
   604  	bytes, err := rlp.EncodeToBytes(storageReceipts)
   605  	if err != nil {
   606  		log.Crit("Failed to encode block receipts", "err", err)
   607  	}
   608  	// Store the flattened receipt slice
   609  	if err := db.Put(blockReceiptsKey(number, hash), bytes); err != nil {
   610  		log.Crit("Failed to store block receipts", "err", err)
   611  	}
   612  }
   613  
   614  // DeleteReceipts removes all receipt data associated with a block hash.
   615  func DeleteReceipts(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   616  	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
   617  		log.Crit("Failed to delete block receipts", "err", err)
   618  	}
   619  }
   620  
   621  // ReadBlock retrieves an entire block corresponding to the hash, assembling it
   622  // back from the stored header and body. If either the header or body could not
   623  // be retrieved, nil is returned.
   624  //
   625  // Note: due to the concurrent download of header and block body, the header (and
   626  // thus the canonical hash) can be stored in the database while the body data is not (yet).
   627  func ReadBlock(db ethdb.Reader, hash common.Hash, number uint64) *types.Block {
   628  	header := ReadHeader(db, hash, number)
   629  	if header == nil {
   630  		return nil
   631  	}
   632  	body := ReadBody(db, hash, number)
   633  	if body == nil {
   634  		return nil
   635  	}
   636  	return types.NewBlockWithHeader(header).WithBody(body.Transactions, body.Uncles, body.Version, body.ExtData)
   637  }
   638  
   639  // WriteBlock serializes a block into the database, header and body separately.
   640  func WriteBlock(db ethdb.KeyValueWriter, block *types.Block) {
   641  	WriteBody(db, block.Hash(), block.NumberU64(), block.Body())
   642  	WriteHeader(db, block.Header())
   643  }
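
        // exampleWriteCanonicalBlock is an illustrative sketch, not part of the original
        // file: WriteBlock only persists the header and body, so a caller that wants the
        // block to be readable as the canonical chain head also has to write its total
        // difficulty, the canonical number-to-hash mapping and the head pointers.
        func exampleWriteCanonicalBlock(db ethdb.KeyValueWriter, block *types.Block, td *big.Int) {
        	WriteBlock(db, block)
        	WriteTd(db, block.Hash(), block.NumberU64(), td)
        	WriteCanonicalHash(db, block.Hash(), block.NumberU64())
        	WriteHeadHeaderHash(db, block.Hash())
        	WriteHeadBlockHash(db, block.Hash())
        }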
   644  
   645  // WriteAncientBlock writes an entire block's data into the ancient store and returns the total written size.
   646  func WriteAncientBlock(db ethdb.AncientWriter, block *types.Block, receipts types.Receipts, td *big.Int) int {
   647  	// Encode all block components to RLP format.
   648  	headerBlob, err := rlp.EncodeToBytes(block.Header())
   649  	if err != nil {
   650  		log.Crit("Failed to RLP encode block header", "err", err)
   651  	}
   652  	bodyBlob, err := rlp.EncodeToBytes(block.Body())
   653  	if err != nil {
   654  		log.Crit("Failed to RLP encode body", "err", err)
   655  	}
   656  	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
   657  	for i, receipt := range receipts {
   658  		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
   659  	}
   660  	receiptBlob, err := rlp.EncodeToBytes(storageReceipts)
   661  	if err != nil {
   662  		log.Crit("Failed to RLP encode block receipts", "err", err)
   663  	}
   664  	tdBlob, err := rlp.EncodeToBytes(td)
   665  	if err != nil {
   666  		log.Crit("Failed to RLP encode block total difficulty", "err", err)
   667  	}
   668  	// Write all blobs to the flat files.
   669  	err = db.AppendAncient(block.NumberU64(), block.Hash().Bytes(), headerBlob, bodyBlob, receiptBlob, tdBlob)
   670  	if err != nil {
   671  		log.Crit("Failed to write block data to ancient store", "err", err)
   672  	}
   673  	return len(headerBlob) + len(bodyBlob) + len(receiptBlob) + len(tdBlob) + common.HashLength
   674  }
   675  
   676  // DeleteBlock removes all block data associated with a hash.
   677  func DeleteBlock(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   678  	DeleteReceipts(db, hash, number)
   679  	DeleteHeader(db, hash, number)
   680  	DeleteBody(db, hash, number)
   681  	DeleteTd(db, hash, number)
   682  }
   683  
   684  // DeleteBlockWithoutNumber removes all block data associated with a hash, except
   685  // the hash to number mapping.
   686  func DeleteBlockWithoutNumber(db ethdb.KeyValueWriter, hash common.Hash, number uint64) {
   687  	DeleteReceipts(db, hash, number)
   688  	deleteHeaderWithoutNumber(db, hash, number)
   689  	DeleteBody(db, hash, number)
   690  	DeleteTd(db, hash, number)
   691  }
   692  
   693  // FindCommonAncestor returns the last common ancestor of two block headers.
   694  func FindCommonAncestor(db ethdb.Reader, a, b *types.Header) *types.Header {
   695  	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
   696  		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
   697  		if a == nil {
   698  			return nil
   699  		}
   700  	}
   701  	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
   702  		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
   703  		if b == nil {
   704  			return nil
   705  		}
   706  	}
   707  	for a.Hash() != b.Hash() {
   708  		a = ReadHeader(db, a.ParentHash, a.Number.Uint64()-1)
   709  		if a == nil {
   710  			return nil
   711  		}
   712  		b = ReadHeader(db, b.ParentHash, b.Number.Uint64()-1)
   713  		if b == nil {
   714  			return nil
   715  		}
   716  	}
   717  	return a
   718  }
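
        // exampleReorgDepth is an illustrative sketch, not part of the original file:
        // given the old and new chain heads it walks back to their common ancestor via
        // FindCommonAncestor and reports how many blocks the old chain loses in a reorg.
        func exampleReorgDepth(db ethdb.Reader, oldHead, newHead *types.Header) uint64 {
        	ancestor := FindCommonAncestor(db, oldHead, newHead)
        	if ancestor == nil {
        		return 0 // one of the branches is missing from the database
        	}
        	return oldHead.Number.Uint64() - ancestor.Number.Uint64()
        }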
   719  
   720  // ReadHeadBlock returns the current canonical head block.
   721  func ReadHeadBlock(db ethdb.Reader) *types.Block {
   722  	headBlockHash := ReadHeadBlockHash(db)
   723  	if headBlockHash == (common.Hash{}) {
   724  		return nil
   725  	}
   726  	headBlockNumber := ReadHeaderNumber(db, headBlockHash)
   727  	if headBlockNumber == nil {
   728  		return nil
   729  	}
   730  	return ReadBlock(db, headBlockHash, *headBlockNumber)
   731  }
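
        // exampleCurrentHeight is an illustrative sketch, not part of the original file:
        // it resolves the canonical head height by combining the head-block pointer with
        // the hash-to-number mapping, mirroring what ReadHeadBlock does before loading
        // the full block.
        func exampleCurrentHeight(db ethdb.Reader) (uint64, bool) {
        	hash := ReadHeadBlockHash(db)
        	if hash == (common.Hash{}) {
        		return 0, false // no head block recorded yet
        	}
        	number := ReadHeaderNumber(db, hash)
        	if number == nil {
        		return 0, false
        	}
        	return *number, true
        }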