github.com/klaytn/klaytn@v1.12.1/storage/database/db_manager.go (about)

     1  // Copyright 2018 The klaytn Authors
     2  // This file is part of the klaytn library.
     3  //
     4  // The klaytn library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The klaytn library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the klaytn library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package database
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/binary"
    22  	"encoding/json"
    23  	"fmt"
    24  	"math/big"
    25  	"os"
    26  	"path/filepath"
    27  	"sort"
    28  	"strconv"
    29  	"strings"
    30  	"sync"
    31  
    32  	"github.com/dgraph-io/badger"
    33  	"github.com/klaytn/klaytn/blockchain/types"
    34  	"github.com/klaytn/klaytn/common"
    35  	"github.com/klaytn/klaytn/log"
    36  	"github.com/klaytn/klaytn/params"
    37  	"github.com/klaytn/klaytn/rlp"
    38  	"github.com/pkg/errors"
    39  	"github.com/syndtr/goleveldb/leveldb"
    40  )
    41  
var (
	// logger is the module-wide logger for the storage/database package.
	logger = log.NewModuleLogger(log.StorageDatabase)

	// errGovIdxAlreadyExist signals a rejected governance-index write;
	// presumably returned when writing an index not newer than the stored
	// one — confirm at WriteGovernanceIdx.
	errGovIdxAlreadyExist = errors.New("a governance idx of the more recent or the same block exist")

	// HeadBlockQ and FastBlockQ are ring buffers of recent head / fast-sync
	// head block hashes kept as backups (see backupHashQueue).
	HeadBlockQ backupHashQueue
	FastBlockQ backupHashQueue
)
    50  
// DBManager is the top-level interface through which the rest of the node
// accesses persistent storage. It multiplexes a set of logical databases
// (header, body, receipts, state trie, ...), exposes typed accessors for
// chain data, and controls state-trie migration between databases.
type DBManager interface {
	IsParallelDBWrite() bool
	IsSingle() bool
	InMigration() bool
	MigrationBlockNumber() uint64
	getStateTrieMigrationInfo() uint64

	Close()
	NewBatch(dbType DBEntryType) Batch
	getDBDir(dbEntry DBEntryType) string
	setDBDir(dbEntry DBEntryType, newDBDir string)
	setStateTrieMigrationStatus(uint64)
	GetMemDB() *MemDB
	GetDBConfig() *DBConfig
	getDatabase(DBEntryType) Database
	CreateMigrationDBAndSetStatus(blockNum uint64) error
	FinishStateMigration(succeed bool) chan struct{}
	GetStateTrieDB() Database
	GetStateTrieMigrationDB() Database
	GetMiscDB() Database
	GetSnapshotDB() Database

	// from accessors_chain.go
	ReadCanonicalHash(number uint64) common.Hash
	WriteCanonicalHash(hash common.Hash, number uint64)
	DeleteCanonicalHash(number uint64)

	ReadHeadHeaderHash() common.Hash
	WriteHeadHeaderHash(hash common.Hash)

	ReadHeadBlockHash() common.Hash
	ReadHeadBlockBackupHash() common.Hash
	WriteHeadBlockHash(hash common.Hash)

	ReadHeadFastBlockHash() common.Hash
	ReadHeadFastBlockBackupHash() common.Hash
	WriteHeadFastBlockHash(hash common.Hash)

	ReadFastTrieProgress() uint64
	WriteFastTrieProgress(count uint64)

	HasHeader(hash common.Hash, number uint64) bool
	ReadHeader(hash common.Hash, number uint64) *types.Header
	ReadHeaderRLP(hash common.Hash, number uint64) rlp.RawValue
	WriteHeader(header *types.Header)
	DeleteHeader(hash common.Hash, number uint64)
	ReadHeaderNumber(hash common.Hash) *uint64

	HasBody(hash common.Hash, number uint64) bool
	ReadBody(hash common.Hash, number uint64) *types.Body
	ReadBodyInCache(hash common.Hash) *types.Body
	ReadBodyRLP(hash common.Hash, number uint64) rlp.RawValue
	ReadBodyRLPByHash(hash common.Hash) rlp.RawValue
	WriteBody(hash common.Hash, number uint64, body *types.Body)
	PutBodyToBatch(batch Batch, hash common.Hash, number uint64, body *types.Body)
	WriteBodyRLP(hash common.Hash, number uint64, rlp rlp.RawValue)
	DeleteBody(hash common.Hash, number uint64)

	ReadTd(hash common.Hash, number uint64) *big.Int
	WriteTd(hash common.Hash, number uint64, td *big.Int)
	DeleteTd(hash common.Hash, number uint64)

	ReadReceipt(txHash common.Hash) (*types.Receipt, common.Hash, uint64, uint64)
	ReadReceipts(blockHash common.Hash, number uint64) types.Receipts
	ReadReceiptsByBlockHash(hash common.Hash) types.Receipts
	WriteReceipts(hash common.Hash, number uint64, receipts types.Receipts)
	PutReceiptsToBatch(batch Batch, hash common.Hash, number uint64, receipts types.Receipts)
	DeleteReceipts(hash common.Hash, number uint64)

	ReadBlock(hash common.Hash, number uint64) *types.Block
	ReadBlockByHash(hash common.Hash) *types.Block
	ReadBlockByNumber(number uint64) *types.Block
	HasBlock(hash common.Hash, number uint64) bool
	WriteBlock(block *types.Block)
	DeleteBlock(hash common.Hash, number uint64)

	ReadBadBlock(hash common.Hash) *types.Block
	WriteBadBlock(block *types.Block)
	ReadAllBadBlocks() ([]*types.Block, error)
	DeleteBadBlocks()

	FindCommonAncestor(a, b *types.Header) *types.Header

	ReadIstanbulSnapshot(hash common.Hash) ([]byte, error)
	WriteIstanbulSnapshot(hash common.Hash, blob []byte) error
	DeleteIstanbulSnapshot(hash common.Hash)

	WriteMerkleProof(key, value []byte)

	// Bytecodes related operations
	ReadCode(hash common.Hash) []byte
	ReadCodeWithPrefix(hash common.Hash) []byte
	WriteCode(hash common.Hash, code []byte)
	PutCodeToBatch(batch Batch, hash common.Hash, code []byte)
	DeleteCode(hash common.Hash)
	HasCode(hash common.Hash) bool

	// State Trie Database related operations
	ReadTrieNode(hash common.ExtHash) ([]byte, error)
	HasTrieNode(hash common.ExtHash) (bool, error)
	HasCodeWithPrefix(hash common.Hash) bool
	ReadPreimage(hash common.Hash) []byte

	// Read StateTrie from new DB (the migration target database)
	ReadTrieNodeFromNew(hash common.ExtHash) ([]byte, error)
	HasTrieNodeFromNew(hash common.ExtHash) (bool, error)
	HasCodeWithPrefixFromNew(hash common.Hash) bool
	ReadPreimageFromNew(hash common.Hash) []byte

	// Read StateTrie from old DB (the migration source database)
	ReadTrieNodeFromOld(hash common.ExtHash) ([]byte, error)
	HasTrieNodeFromOld(hash common.ExtHash) (bool, error)
	HasCodeWithPrefixFromOld(hash common.Hash) bool
	ReadPreimageFromOld(hash common.Hash) []byte

	// Write StateTrie
	WriteTrieNode(hash common.ExtHash, node []byte)
	PutTrieNodeToBatch(batch Batch, hash common.ExtHash, node []byte)
	DeleteTrieNode(hash common.ExtHash)
	WritePreimages(number uint64, preimages map[common.Hash][]byte)

	// Trie pruning
	ReadPruningEnabled() bool
	WritePruningEnabled()
	DeletePruningEnabled()

	WritePruningMarks(marks []PruningMark)
	ReadPruningMarks(startNumber, endNumber uint64) []PruningMark
	DeletePruningMarks(marks []PruningMark)
	PruneTrieNodes(marks []PruningMark)
	WriteLastPrunedBlockNumber(blockNumber uint64)
	ReadLastPrunedBlockNumber() (uint64, error)

	// from accessors_indexes.go
	ReadTxLookupEntry(hash common.Hash) (common.Hash, uint64, uint64)
	WriteTxLookupEntries(block *types.Block)
	WriteAndCacheTxLookupEntries(block *types.Block) error
	PutTxLookupEntriesToBatch(batch Batch, block *types.Block)
	DeleteTxLookupEntry(hash common.Hash)

	ReadTxAndLookupInfo(hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64)

	NewSenderTxHashToTxHashBatch() Batch
	PutSenderTxHashToTxHashToBatch(batch Batch, senderTxHash, txHash common.Hash) error
	ReadTxHashFromSenderTxHash(senderTxHash common.Hash) common.Hash

	ReadBloomBits(bloomBitsKey []byte) ([]byte, error)
	WriteBloomBits(bloomBitsKey []byte, bits []byte) error

	ReadValidSections() ([]byte, error)
	WriteValidSections(encodedSections []byte)

	ReadSectionHead(encodedSection []byte) ([]byte, error)
	WriteSectionHead(encodedSection []byte, hash common.Hash)
	DeleteSectionHead(encodedSection []byte)

	// from accessors_metadata.go
	ReadDatabaseVersion() *uint64
	WriteDatabaseVersion(version uint64)

	ReadChainConfig(hash common.Hash) *params.ChainConfig
	WriteChainConfig(hash common.Hash, cfg *params.ChainConfig)

	// from accessors_snapshot.go
	ReadSnapshotJournal() []byte
	WriteSnapshotJournal(journal []byte)
	DeleteSnapshotJournal()

	ReadSnapshotGenerator() []byte
	WriteSnapshotGenerator(generator []byte)
	DeleteSnapshotGenerator()

	ReadSnapshotDisabled() bool
	WriteSnapshotDisabled()
	DeleteSnapshotDisabled()

	ReadSnapshotRecoveryNumber() *uint64
	WriteSnapshotRecoveryNumber(number uint64)
	DeleteSnapshotRecoveryNumber()

	ReadSnapshotSyncStatus() []byte
	WriteSnapshotSyncStatus(status []byte)
	DeleteSnapshotSyncStatus()

	ReadSnapshotRoot() common.Hash
	WriteSnapshotRoot(root common.Hash)
	DeleteSnapshotRoot()

	ReadAccountSnapshot(hash common.Hash) []byte
	WriteAccountSnapshot(hash common.Hash, entry []byte)
	DeleteAccountSnapshot(hash common.Hash)

	ReadStorageSnapshot(accountHash, storageHash common.Hash) []byte
	WriteStorageSnapshot(accountHash, storageHash common.Hash, entry []byte)
	DeleteStorageSnapshot(accountHash, storageHash common.Hash)

	NewSnapshotDBIterator(prefix []byte, start []byte) Iterator

	NewSnapshotDBBatch() SnapshotDBBatch

	// below operations are used in parent chain side, not child chain side.
	WriteChildChainTxHash(ccBlockHash common.Hash, ccTxHash common.Hash)
	ConvertChildChainBlockHashToParentChainTxHash(scBlockHash common.Hash) common.Hash

	WriteLastIndexedBlockNumber(blockNum uint64)
	GetLastIndexedBlockNumber() uint64

	// below operations are used in child chain side, not parent chain side.
	WriteAnchoredBlockNumber(blockNum uint64)
	ReadAnchoredBlockNumber() uint64

	WriteReceiptFromParentChain(blockHash common.Hash, receipt *types.Receipt)
	ReadReceiptFromParentChain(blockHash common.Hash) *types.Receipt

	WriteHandleTxHashFromRequestTxHash(rTx, hTx common.Hash)
	ReadHandleTxHashFromRequestTxHash(rTx common.Hash) common.Hash

	WriteParentOperatorFeePayer(feePayer common.Address)
	WriteChildOperatorFeePayer(feePayer common.Address)
	ReadParentOperatorFeePayer() common.Address
	ReadChildOperatorFeePayer() common.Address

	// cacheManager related functions.
	ClearHeaderChainCache()
	ClearBlockChainCache()
	ReadTxAndLookupInfoInCache(hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64)
	ReadBlockReceiptsInCache(blockHash common.Hash) types.Receipts
	ReadTxReceiptInCache(txHash common.Hash) *types.Receipt

	// snapshot in clique(ConsensusClique) consensus
	WriteCliqueSnapshot(snapshotBlockHash common.Hash, encodedSnapshot []byte) error
	ReadCliqueSnapshot(snapshotBlockHash common.Hash) ([]byte, error)

	// Governance related functions
	WriteGovernance(data map[string]interface{}, num uint64) error
	WriteGovernanceIdx(num uint64) error
	ReadGovernance(num uint64) (map[string]interface{}, error)
	ReadRecentGovernanceIdx(count int) ([]uint64, error)
	ReadGovernanceAtNumber(num uint64, epoch uint64) (uint64, map[string]interface{}, error)
	WriteGovernanceState(b []byte) error
	ReadGovernanceState() ([]byte, error)
	DeleteGovernance(num uint64)
	// TODO-Klaytn implement governance DB deletion methods.

	// StakingInfo related functions
	ReadStakingInfo(blockNum uint64) ([]byte, error)
	WriteStakingInfo(blockNum uint64, stakingInfo []byte) error
	HasStakingInfo(blockNum uint64) (bool, error)
	DeleteStakingInfo(blockNum uint64)

	// DB migration related function
	StartDBMigration(DBManager) error

	// ChainDataFetcher checkpoint function
	WriteChainDataFetcherCheckpoint(checkpoint uint64) error
	ReadChainDataFetcherCheckpoint() (uint64, error)

	TryCatchUpWithPrimary() error

	Stat(string) (string, error)
	Compact([]byte, []byte) error
}
   313  
// DBEntryType identifies one logical database partition managed by DBManager.
// It doubles as an index into databaseManager.dbs, dbBaseDirs, and
// dbConfigRatio, so the declaration order below is load-bearing.
type DBEntryType uint8

const (
	MiscDB DBEntryType = iota // Do not move MiscDB which has the path of others DB.
	headerDB
	BodyDB
	ReceiptsDB
	StateTrieDB
	StateTrieMigrationDB
	TxLookUpEntryDB
	bridgeServiceDB
	SnapshotDB
	// databaseEntryTypeSize should be the last item in this list!!
	databaseEntryTypeSize
)
   329  
// backupHashQueue is a fixed-size ring buffer of block hashes. push writes
// at idx and advances it, so idx always points at the oldest retained entry
// (the hash pushed backupHashCnt pushes ago), which pop returns.
type backupHashQueue struct {
	backupHashes [backupHashCnt]common.Hash
	idx          int // next write position == oldest entry; always in [0, backupHashCnt)
}
   334  
   335  func (b *backupHashQueue) push(h common.Hash) {
   336  	b.backupHashes[b.idx%backupHashCnt] = h
   337  	b.idx = (b.idx + 1) % backupHashCnt
   338  }
   339  
   340  func (b *backupHashQueue) pop() common.Hash {
   341  	if b.backupHashes[b.idx] == (common.Hash{}) {
   342  		return common.Hash{}
   343  	}
   344  	return b.backupHashes[b.idx]
   345  }
   346  
// String returns the base directory name associated with the entry type,
// e.g. "header" for headerDB.
func (et DBEntryType) String() string {
	return dbBaseDirs[et]
}
   350  
// notInMigrationFlag / inMigrationFlag are presumably the persisted states
// of the state-trie migration status — TODO confirm at their use sites
// (not visible in this file section). backupHashCnt is the capacity of a
// backupHashQueue ring buffer.
const (
	notInMigrationFlag = 0
	inMigrationFlag    = 1
	backupHashCnt      = 128
)
   356  
// dbBaseDirs maps each DBEntryType to its default on-disk directory name,
// indexed by the entry's iota value. Order must match the DBEntryType consts.
var dbBaseDirs = [databaseEntryTypeSize]string{
	"misc", // do not move misc
	"header",
	"body",
	"receipts",
	"statetrie",
	"statetrie_migrated", // "statetrie_migrated_#N" path will be used. (#N is a migrated block number.)
	"txlookup",
	"bridgeservice",
	"snapshot",
}
   368  
// dbConfigRatio assigns each database entry its percentage share of the
// global cache size and open-file limit (see getDBEntryConfig).
// Sum of dbConfigRatio should be 100.
// Otherwise, logger.Crit will be called at checkDBEntryConfigRatio.
var dbConfigRatio = [databaseEntryTypeSize]int{
	2,  // MiscDB
	5,  // headerDB
	5,  // BodyDB
	5,  // ReceiptsDB
	40, // StateTrieDB
	37, // StateTrieMigrationDB
	2,  // TXLookUpEntryDB
	1,  // bridgeServiceDB
	3,  // SnapshotDB
}
   382  
   383  // checkDBEntryConfigRatio checks if sum of dbConfigRatio is 100.
   384  // If it isn't, logger.Crit is called.
   385  func checkDBEntryConfigRatio() {
   386  	entryConfigRatioSum := 0
   387  	for i := 0; i < int(databaseEntryTypeSize); i++ {
   388  		entryConfigRatioSum += dbConfigRatio[i]
   389  	}
   390  	if entryConfigRatioSum != 100 {
   391  		logger.Crit("Sum of dbConfigRatio elements should be 100", "actual", entryConfigRatioSum)
   392  	}
   393  }
   394  
   395  // getDBEntryConfig returns a new DBConfig with original DBConfig, DBEntryType and dbDir.
   396  // It adjusts configuration according to the ratio specified in dbConfigRatio and dbDirs.
   397  func getDBEntryConfig(originalDBC *DBConfig, i DBEntryType, dbDir string) *DBConfig {
   398  	newDBC := *originalDBC
   399  	ratio := dbConfigRatio[i]
   400  
   401  	newDBC.LevelDBCacheSize = originalDBC.LevelDBCacheSize * ratio / 100
   402  	newDBC.OpenFilesLimit = originalDBC.OpenFilesLimit * ratio / 100
   403  
   404  	// Update dir to each Database specific directory.
   405  	newDBC.Dir = filepath.Join(originalDBC.Dir, dbDir)
   406  	// Update dynmao table name to Database specific name.
   407  	if newDBC.DynamoDBConfig != nil {
   408  		newDynamoDBConfig := *originalDBC.DynamoDBConfig
   409  		newDynamoDBConfig.TableName += "-" + dbDir
   410  		newDBC.DynamoDBConfig = &newDynamoDBConfig
   411  	}
   412  
   413  	if newDBC.RocksDBConfig != nil {
   414  		newRocksDBConfig := *originalDBC.RocksDBConfig
   415  		newRocksDBConfig.CacheSize = originalDBC.RocksDBConfig.CacheSize * uint64(ratio) / 100
   416  		newRocksDBConfig.MaxOpenFiles = originalDBC.RocksDBConfig.MaxOpenFiles * ratio / 100
   417  		newDBC.RocksDBConfig = &newRocksDBConfig
   418  	}
   419  
   420  	return &newDBC
   421  }
   422  
// databaseManager is the concrete DBManager. It owns one Database per
// DBEntryType (indexed by the entry value) plus a cache layer, and tracks
// the state-trie migration status.
type databaseManager struct {
	config *DBConfig
	dbs    []Database    // indexed by DBEntryType; StateTrieMigrationDB slot may be nil
	cm     *cacheManager // read caches for bodies/receipts/tx lookups etc.

	// TODO-Klaytn need to refine below.
	// -merge status variable
	lockInMigration      sync.RWMutex // guards inMigration and the StateTrieDB/StateTrieMigrationDB slots
	inMigration          bool
	migrationBlockNumber uint64 // block number at which the current migration started
}
   434  
   435  func NewMemoryDBManager() DBManager {
   436  	dbc := &DBConfig{DBType: MemoryDB}
   437  
   438  	dbm := databaseManager{
   439  		config: dbc,
   440  		dbs:    make([]Database, 1, 1),
   441  		cm:     newCacheManager(),
   442  	}
   443  	dbm.dbs[0] = NewMemDB()
   444  
   445  	return &dbm
   446  }
   447  
// DBConfig handles database related configurations.
// It is shared by all backend types; backend-specific sections are nil
// when unused.
type DBConfig struct {
	// General configurations for all types of DB.
	Dir                 string
	DBType              DBType
	SingleDB            bool // whether dbs (such as MiscDB, headerDB and etc) share one physical DB
	NumStateTrieShards  uint // the number of shards of state trie db
	ParallelDBWrite     bool
	OpenFilesLimit      int
	EnableDBPerfMetrics bool // If true, read and write performance will be logged

	// LevelDB related configurations.
	LevelDBCacheSize   int // LevelDBCacheSize = BlockCacheCapacity + WriteBuffer
	LevelDBCompression LevelDBCompressionType
	LevelDBBufferPool  bool

	// RocksDB related configurations
	RocksDBConfig *RocksDBConfig

	// DynamoDB related configurations
	DynamoDBConfig *DynamoDBConfig
}
   470  
// dbMetricPrefix is the common prefix for all chain-database metric names.
const dbMetricPrefix = "klay/db/chaindata/"
   472  
   473  // singleDatabaseDBManager returns DBManager which handles one single Database.
   474  // Each Database will share one common Database.
   475  func singleDatabaseDBManager(dbc *DBConfig) (DBManager, error) {
   476  	dbm := newDatabaseManager(dbc)
   477  	db, err := newDatabase(dbc, 0)
   478  	if err != nil {
   479  		return nil, err
   480  	}
   481  
   482  	db.Meter(dbMetricPrefix)
   483  	for i := 0; i < int(databaseEntryTypeSize); i++ {
   484  		dbm.dbs[i] = db
   485  	}
   486  	return dbm, nil
   487  }
   488  
   489  // newMiscDB returns misc DBManager. If not exist, the function create DB before returning.
   490  func newMiscDB(dbc *DBConfig) Database {
   491  	newDBC := getDBEntryConfig(dbc, MiscDB, dbBaseDirs[MiscDB])
   492  	db, err := newDatabase(newDBC, MiscDB)
   493  	if err != nil {
   494  		logger.Crit("Failed while generating a MISC database", "err", err)
   495  	}
   496  
   497  	db.Meter(dbMetricPrefix + dbBaseDirs[MiscDB] + "/")
   498  	return db
   499  }
   500  
// databaseDBManager returns DBManager which handles Databases.
// Each Database will have its own separated Database.
// MiscDB is opened first because it stores the on-disk directory names of
// the other entries (in particular the migrated state trie path).
func databaseDBManager(dbc *DBConfig) (*databaseManager, error) {
	dbm := newDatabaseManager(dbc)
	var db Database
	var err error

	// Create Misc DB first to get the DB directory of stateTrieDB.
	miscDB := newMiscDB(dbc)
	dbm.dbs[MiscDB] = miscDB

	// Create other DBs
	for et := int(MiscDB) + 1; et < int(databaseEntryTypeSize); et++ {
		entryType := DBEntryType(et)
		dir := dbm.getDBDir(entryType)

		switch entryType {
		case StateTrieMigrationDB:
			// getDBDir falls back to the bare base name when miscDB has no
			// recorded path, i.e. when no migration DB was ever created.
			if dir == dbBaseDirs[StateTrieMigrationDB] {
				// If there is no migration DB, skip to set.
				continue
			}
			// A recorded migration DB is opened the same way as StateTrieDB.
			fallthrough
		case StateTrieDB:
			newDBC := getDBEntryConfig(dbc, entryType, dir)
			if dbc.NumStateTrieShards > 1 && !dbc.DBType.selfShardable() { // make non-sharding db if the db is sharding itself
				db, err = newShardedDB(newDBC, entryType, dbc.NumStateTrieShards)
			} else {
				db, err = newDatabase(newDBC, entryType)
			}
		default:
			newDBC := getDBEntryConfig(dbc, entryType, dir)
			db, err = newDatabase(newDBC, entryType)
		}

		if err != nil {
			// logger.Crit aborts the process, so a partially built dbm never escapes.
			logger.Crit("Failed while generating databases", "DBType", dbBaseDirs[et], "err", err)
		}

		dbm.dbs[et] = db
		db.Meter(dbMetricPrefix + dbBaseDirs[et] + "/") // Each database collects metrics independently.
	}
	return dbm, nil
}
   545  
   546  // newDatabase returns Database interface with given DBConfig.
   547  func newDatabase(dbc *DBConfig, entryType DBEntryType) (Database, error) {
   548  	switch dbc.DBType {
   549  	case LevelDB:
   550  		return NewLevelDB(dbc, entryType)
   551  	case RocksDB:
   552  		return NewRocksDB(dbc.Dir, dbc.RocksDBConfig)
   553  	case BadgerDB:
   554  		return NewBadgerDB(dbc.Dir)
   555  	case MemoryDB:
   556  		return NewMemDB(), nil
   557  	case DynamoDB:
   558  		return NewDynamoDB(dbc.DynamoDBConfig)
   559  	default:
   560  		logger.Info("database type is not set, fall back to default LevelDB")
   561  		return NewLevelDB(dbc, 0)
   562  	}
   563  }
   564  
   565  // newDatabaseManager returns the pointer of databaseManager with default configuration.
   566  func newDatabaseManager(dbc *DBConfig) *databaseManager {
   567  	return &databaseManager{
   568  		config: dbc,
   569  		dbs:    make([]Database, databaseEntryTypeSize),
   570  		cm:     newCacheManager(),
   571  	}
   572  }
   573  
   574  // NewDBManager returns DBManager interface.
   575  // If SingleDB is false, each Database will have its own DB.
   576  // If not, each Database will share one common DB.
   577  func NewDBManager(dbc *DBConfig) DBManager {
   578  	if dbc.SingleDB {
   579  		logger.Info("Single database is used for persistent storage", "DBType", dbc.DBType)
   580  		if dbm, err := singleDatabaseDBManager(dbc); err != nil {
   581  			logger.Crit("Failed to create a single database", "DBType", dbc.DBType, "err", err)
   582  		} else {
   583  			return dbm
   584  		}
   585  	} else {
   586  		checkDBEntryConfigRatio()
   587  		logger.Info("Non-single database is used for persistent storage", "DBType", dbc.DBType)
   588  		dbm, err := databaseDBManager(dbc)
   589  		if err != nil {
   590  			logger.Crit("Failed to create databases", "DBType", dbc.DBType, "err", err)
   591  		}
   592  		if migrationBlockNum := dbm.getStateTrieMigrationInfo(); migrationBlockNum > 0 {
   593  			mdb := dbm.getDatabase(StateTrieMigrationDB)
   594  			if mdb == nil {
   595  				logger.Error("Failed to load StateTrieMigrationDB database", "migrationBlockNumber", migrationBlockNum)
   596  			} else {
   597  				dbm.inMigration = true
   598  				dbm.migrationBlockNumber = migrationBlockNum
   599  			}
   600  		}
   601  		return dbm
   602  	}
   603  	logger.Crit("Must not reach here!")
   604  	return nil
   605  }
   606  
   607  func (dbm *databaseManager) IsParallelDBWrite() bool {
   608  	return dbm.config.ParallelDBWrite
   609  }
   610  
   611  func (dbm *databaseManager) IsSingle() bool {
   612  	return dbm.config.SingleDB
   613  }
   614  
// InMigration reports whether a state-trie migration is currently in
// progress. The flag is read under the migration read-lock.
func (dbm *databaseManager) InMigration() bool {
	dbm.lockInMigration.RLock()
	defer dbm.lockInMigration.RUnlock()

	return dbm.inMigration
}
   621  
// MigrationBlockNumber returns the block number at which the current (or
// most recently started) state-trie migration began.
// NOTE(review): this field is read without lockInMigration, unlike
// InMigration — presumably callers tolerate a momentarily stale value;
// confirm before relying on it for synchronization.
func (dbm *databaseManager) MigrationBlockNumber() uint64 {
	return dbm.migrationBlockNumber
}
   625  
   626  func (dbm *databaseManager) NewBatch(dbEntryType DBEntryType) Batch {
   627  	if dbEntryType == StateTrieDB {
   628  		dbm.lockInMigration.RLock()
   629  		defer dbm.lockInMigration.RUnlock()
   630  
   631  		if dbm.inMigration {
   632  			newDBBatch := dbm.getDatabase(StateTrieMigrationDB).NewBatch()
   633  			oldDBBatch := dbm.getDatabase(StateTrieDB).NewBatch()
   634  			return NewStateTrieDBBatch([]Batch{oldDBBatch, newDBBatch})
   635  		}
   636  	} else if dbEntryType == StateTrieMigrationDB {
   637  		return dbm.GetStateTrieMigrationDB().NewBatch()
   638  	}
   639  	return dbm.getDatabase(dbEntryType).NewBatch()
   640  }
   641  
   642  func NewStateTrieDBBatch(batches []Batch) Batch {
   643  	return &stateTrieDBBatch{batches: batches}
   644  }
   645  
// stateTrieDBBatch is a composite Batch that mirrors every operation onto
// each wrapped batch; used while migrating the state trie so writes land in
// both the old and the new database.
type stateTrieDBBatch struct {
	batches []Batch
}
   649  
   650  func (stdBatch *stateTrieDBBatch) Put(key []byte, value []byte) error {
   651  	var errResult error
   652  	for _, batch := range stdBatch.batches {
   653  		if err := batch.Put(key, value); err != nil {
   654  			errResult = err
   655  		}
   656  	}
   657  	return errResult
   658  }
   659  
   660  func (stdBatch *stateTrieDBBatch) Delete(key []byte) error {
   661  	var errResult error
   662  	for _, batch := range stdBatch.batches {
   663  		if err := batch.Delete(key); err != nil {
   664  			errResult = err
   665  		}
   666  	}
   667  	return errResult
   668  }
   669  
   670  // ValueSize is called to determine whether to write batches when it exceeds
   671  // certain limit. stdBatch returns the largest size of its batches to
   672  // write all batches at once when one of batch exceeds the limit.
   673  func (stdBatch *stateTrieDBBatch) ValueSize() int {
   674  	maxSize := 0
   675  	for _, batch := range stdBatch.batches {
   676  		if batch.ValueSize() > maxSize {
   677  			maxSize = batch.ValueSize()
   678  		}
   679  	}
   680  
   681  	return maxSize
   682  }
   683  
   684  // Write passes the list of batch to WriteBatchesParallel for writing batches.
   685  func (stdBatch *stateTrieDBBatch) Write() error {
   686  	_, err := WriteBatchesParallel(stdBatch.batches...)
   687  	return err
   688  }
   689  
   690  func (stdBatch *stateTrieDBBatch) Reset() {
   691  	for _, batch := range stdBatch.batches {
   692  		batch.Reset()
   693  	}
   694  }
   695  
   696  func (stdBatch *stateTrieDBBatch) Release() {
   697  	for _, batch := range stdBatch.batches {
   698  		batch.Release()
   699  	}
   700  }
   701  
   702  func (stdBatch *stateTrieDBBatch) Replay(w KeyValueWriter) error {
   703  	var errResult error
   704  	for _, batch := range stdBatch.batches {
   705  		if err := batch.Replay(w); err != nil {
   706  			errResult = err
   707  		}
   708  	}
   709  	return errResult
   710  }
   711  
   712  func (dbm *databaseManager) getDBDir(dbEntry DBEntryType) string {
   713  	miscDB := dbm.getDatabase(MiscDB)
   714  
   715  	enc, _ := miscDB.Get(databaseDirKey(uint64(dbEntry)))
   716  	if len(enc) == 0 {
   717  		return dbBaseDirs[dbEntry]
   718  	}
   719  	return string(enc)
   720  }
   721  
   722  func (dbm *databaseManager) setDBDir(dbEntry DBEntryType, newDBDir string) {
   723  	miscDB := dbm.getDatabase(MiscDB)
   724  	if err := miscDB.Put(databaseDirKey(uint64(dbEntry)), []byte(newDBDir)); err != nil {
   725  		logger.Crit("Failed to put DB dir", "err", err)
   726  	}
   727  }
   728  
   729  func (dbm *databaseManager) getStateTrieMigrationInfo() uint64 {
   730  	miscDB := dbm.getDatabase(MiscDB)
   731  
   732  	enc, _ := miscDB.Get(migrationStatusKey)
   733  	if len(enc) != 8 {
   734  		return 0
   735  	}
   736  
   737  	blockNum := binary.BigEndian.Uint64(enc)
   738  	return blockNum
   739  }
   740  
   741  func (dbm *databaseManager) setStateTrieMigrationStatus(blockNum uint64) {
   742  	miscDB := dbm.getDatabase(MiscDB)
   743  	if err := miscDB.Put(migrationStatusKey, common.Int64ToByteBigEndian(blockNum)); err != nil {
   744  		logger.Crit("Failed to set state trie migration status", "err", err)
   745  	}
   746  
   747  	if blockNum == 0 {
   748  		dbm.inMigration = false
   749  		return
   750  	}
   751  
   752  	dbm.inMigration, dbm.migrationBlockNumber = true, blockNum
   753  }
   754  
   755  func newStateTrieMigrationDB(dbc *DBConfig, blockNum uint64) (Database, string) {
   756  	dbDir := dbBaseDirs[StateTrieMigrationDB] + "_" + strconv.FormatUint(blockNum, 10)
   757  	newDBConfig := getDBEntryConfig(dbc, StateTrieMigrationDB, dbDir)
   758  	var newDB Database
   759  	var err error
   760  	if newDBConfig.NumStateTrieShards > 1 {
   761  		newDB, err = newShardedDB(newDBConfig, StateTrieMigrationDB, newDBConfig.NumStateTrieShards)
   762  	} else {
   763  		newDB, err = newDatabase(newDBConfig, StateTrieMigrationDB)
   764  	}
   765  	if err != nil {
   766  		logger.Crit("Failed to create a new database for state trie migration", "err", err)
   767  	}
   768  
   769  	newDB.Meter(dbMetricPrefix + dbBaseDirs[StateTrieMigrationDB] + "/") // Each database collects metrics independently.
   770  	logger.Info("Created a new database for state trie migration", "newStateTrieDB", newDBConfig.Dir)
   771  
   772  	return newDB, dbDir
   773  }
   774  
// CreateMigrationDBAndSetStatus create migrationDB and set migration status.
// It refuses to start when a migration is already running or when the
// manager runs in single-DB mode. The new database is created before the
// lock is taken; only the bookkeeping (path, slot, status) happens under it.
func (dbm *databaseManager) CreateMigrationDBAndSetStatus(blockNum uint64) error {
	if dbm.InMigration() {
		logger.Warn("Failed to set a new state trie migration db. Already in migration")
		return errors.New("already in migration")
	}
	if dbm.config.SingleDB {
		logger.Warn("Setting a new database for state trie migration is allowed for non-single database only")
		return errors.New("singleDB does not support state trie migration")
	}

	logger.Info("Start setting a new database for state trie migration", "blockNum", blockNum)

	// Create a new database for migration process.
	newDB, newDBDir := newStateTrieMigrationDB(dbm.config, blockNum)

	// lock to prevent from a conflict of reading state DB and changing state DB
	dbm.lockInMigration.Lock()
	defer dbm.lockInMigration.Unlock()

	// Store migration db path in misc db
	dbm.setDBDir(StateTrieMigrationDB, newDBDir)

	// Set migration db
	dbm.dbs[StateTrieMigrationDB] = newDB

	// Store the migration status
	dbm.setStateTrieMigrationStatus(blockNum)

	return nil
}
   806  
// FinishStateMigration updates stateTrieDB and removes the old one.
// The function should be called only after when state trie migration is finished.
// It returns a channel that closes when removeDB is finished.
// On succeed the migration DB becomes the new StateTrieDB and the old one
// is deleted; on failure the roles are swapped and the half-built migration
// DB is deleted instead. All bookkeeping happens under the migration lock.
func (dbm *databaseManager) FinishStateMigration(succeed bool) chan struct{} {
	// lock to prevent from a conflict of reading state DB and changing state DB
	dbm.lockInMigration.Lock()
	defer dbm.lockInMigration.Unlock()

	dbRemoved := StateTrieDB
	dbUsed := StateTrieMigrationDB

	if !succeed {
		dbRemoved, dbUsed = dbUsed, dbRemoved
	}

	// Capture both databases and their directories before any slot is rewritten.
	dbToBeRemoved := dbm.dbs[dbRemoved]
	dbToBeUsed := dbm.dbs[dbUsed]
	dbDirToBeRemoved := dbm.getDBDir(dbRemoved)
	dbDirToBeUsed := dbm.getDBDir(dbUsed)

	// Replace StateTrieDB with new one
	dbm.setDBDir(StateTrieDB, dbDirToBeUsed)
	dbm.dbs[StateTrieDB] = dbToBeUsed

	// Mark the migration as finished (0 == not migrating).
	dbm.setStateTrieMigrationStatus(0)

	// Clear the migration slot and its recorded path.
	dbm.dbs[StateTrieMigrationDB] = nil
	dbm.setDBDir(StateTrieMigrationDB, "")

	dbPathToBeRemoved := filepath.Join(dbm.config.Dir, dbDirToBeRemoved)
	dbToBeRemoved.Close()

	// Delete the obsolete database asynchronously; endCheck closes when done.
	endCheck := make(chan struct{})
	go removeDB(dbPathToBeRemoved, endCheck)
	return endCheck
}
   843  
   844  func removeDB(dbPath string, endCheck chan struct{}) {
   845  	defer func() {
   846  		if endCheck != nil {
   847  			close(endCheck)
   848  		}
   849  	}()
   850  	if err := os.RemoveAll(dbPath); err != nil {
   851  		logger.Error("Failed to remove the database due to an error", "err", err, "dir", dbPath)
   852  		return
   853  	}
   854  	logger.Info("Successfully removed database", "path", dbPath)
   855  }
   856  
// GetStateTrieDB returns the database assigned to the state trie.
// NOTE(review): unlike GetSnapshotDB, this reads dbm.dbs directly, so it does
// not fall back to the shared database in MemoryDB mode — confirm intended.
func (dbm *databaseManager) GetStateTrieDB() Database {
	return dbm.dbs[StateTrieDB]
}
   860  
// GetStateTrieMigrationDB returns the database used as the destination of a
// state trie migration. It is nil when no migration is in progress.
func (dbm *databaseManager) GetStateTrieMigrationDB() Database {
	return dbm.dbs[StateTrieMigrationDB]
}
   864  
// GetMiscDB returns the database holding miscellaneous data.
func (dbm *databaseManager) GetMiscDB() Database {
	return dbm.dbs[MiscDB]
}
   868  
// GetSnapshotDB returns the snapshot database. It resolves through
// getDatabase, so it also works in MemoryDB mode.
func (dbm *databaseManager) GetSnapshotDB() Database {
	return dbm.getDatabase(SnapshotDB)
}
   872  
   873  func (dbm *databaseManager) TryCatchUpWithPrimary() error {
   874  	for _, db := range dbm.dbs {
   875  		if db != nil {
   876  			if err := db.TryCatchUpWithPrimary(); err != nil {
   877  				return err
   878  			}
   879  		}
   880  	}
   881  	return nil
   882  }
   883  
   884  func (dbm *databaseManager) Stat(property string) (string, error) {
   885  	stats := ""
   886  	errs := ""
   887  	for idx, db := range dbm.dbs {
   888  		if db != nil {
   889  			stat, err := db.Stat(property)
   890  			headInfo := fmt.Sprintf(" [%s:%s]\n", DBEntryType(idx), db.Type())
   891  			if err == nil {
   892  				stats += headInfo + stat
   893  			} else {
   894  				errs += headInfo + err.Error()
   895  			}
   896  		}
   897  	}
   898  	if errs == "" {
   899  		return stats, nil
   900  	} else {
   901  		return stats, errors.New(errs)
   902  	}
   903  }
   904  
   905  func (dbm *databaseManager) Compact(start []byte, limit []byte) error {
   906  	errs := ""
   907  	for idx, db := range dbm.dbs {
   908  		if db != nil {
   909  			if err := db.Compact(start, limit); err != nil {
   910  				headInfo := fmt.Sprintf(" [%s:%s]\n", DBEntryType(idx), db.Type())
   911  				errs = headInfo + err.Error()
   912  			}
   913  		}
   914  	}
   915  	if errs == "" {
   916  		return nil
   917  	} else {
   918  		return errors.New(errs)
   919  	}
   920  }
   921  
   922  func (dbm *databaseManager) GetMemDB() *MemDB {
   923  	if dbm.config.DBType == MemoryDB {
   924  		if memDB, ok := dbm.dbs[0].(*MemDB); ok {
   925  			return memDB
   926  		} else {
   927  			logger.Error("DBManager is set as memory DBManager, but actual value is not set as memory DBManager.")
   928  			return nil
   929  		}
   930  	}
   931  	logger.Error("GetMemDB() call to non memory DBManager object.")
   932  	return nil
   933  }
   934  
// GetDBConfig returns the DBConfig this DB manager was created with.
func (dbm *databaseManager) GetDBConfig() *DBConfig {
	return dbm.config
}
   939  
   940  func (dbm *databaseManager) getDatabase(dbEntryType DBEntryType) Database {
   941  	if dbm.config.DBType == MemoryDB {
   942  		return dbm.dbs[0]
   943  	} else {
   944  		return dbm.dbs[dbEntryType]
   945  	}
   946  }
   947  
   948  func (dbm *databaseManager) Close() {
   949  	// If single DB, only close the first database.
   950  	if dbm.config.SingleDB {
   951  		dbm.dbs[0].Close()
   952  		return
   953  	}
   954  
   955  	// If not single DB, close all databases.
   956  	for _, db := range dbm.dbs {
   957  		if db != nil {
   958  			db.Close()
   959  		}
   960  	}
   961  }
   962  
   963  // TODO-Klaytn Some of below need to be invisible outside database package
   964  // Canonical Hash operations.
   965  // ReadCanonicalHash retrieves the hash assigned to a canonical block number.
   966  func (dbm *databaseManager) ReadCanonicalHash(number uint64) common.Hash {
   967  	if cached := dbm.cm.readCanonicalHashCache(number); !common.EmptyHash(cached) {
   968  		return cached
   969  	}
   970  
   971  	db := dbm.getDatabase(headerDB)
   972  	data, _ := db.Get(headerHashKey(number))
   973  	if len(data) == 0 {
   974  		return common.Hash{}
   975  	}
   976  
   977  	hash := common.BytesToHash(data)
   978  	dbm.cm.writeCanonicalHashCache(number, hash)
   979  	return hash
   980  }
   981  
   982  // WriteCanonicalHash stores the hash assigned to a canonical block number.
   983  func (dbm *databaseManager) WriteCanonicalHash(hash common.Hash, number uint64) {
   984  	db := dbm.getDatabase(headerDB)
   985  	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
   986  		logger.Crit("Failed to store number to hash mapping", "err", err)
   987  	}
   988  	dbm.cm.writeCanonicalHashCache(number, hash)
   989  }
   990  
// DeleteCanonicalHash removes the number to hash canonical mapping.
// A database error is fatal (logger.Crit).
func (dbm *databaseManager) DeleteCanonicalHash(number uint64) {
	db := dbm.getDatabase(headerDB)
	if err := db.Delete(headerHashKey(number)); err != nil {
		logger.Crit("Failed to delete number to hash mapping", "err", err)
	}
	// Overwrite the cache entry with the empty hash so readers see the removal.
	dbm.cm.writeCanonicalHashCache(number, common.Hash{})
}
   999  
  1000  // Head Header Hash operations.
  1001  // ReadHeadHeaderHash retrieves the hash of the current canonical head header.
  1002  func (dbm *databaseManager) ReadHeadHeaderHash() common.Hash {
  1003  	db := dbm.getDatabase(headerDB)
  1004  	data, _ := db.Get(headHeaderKey)
  1005  	if len(data) == 0 {
  1006  		return common.Hash{}
  1007  	}
  1008  	return common.BytesToHash(data)
  1009  }
  1010  
// WriteHeadHeaderHash stores the hash of the current canonical head header.
// A database error is fatal (logger.Crit).
func (dbm *databaseManager) WriteHeadHeaderHash(hash common.Hash) {
	db := dbm.getDatabase(headerDB)
	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
		logger.Crit("Failed to store last header's hash", "err", err)
	}
}
  1018  
  1019  // Block Hash operations.
  1020  func (dbm *databaseManager) ReadHeadBlockHash() common.Hash {
  1021  	db := dbm.getDatabase(headerDB)
  1022  	data, _ := db.Get(headBlockKey)
  1023  	if len(data) == 0 {
  1024  		return common.Hash{}
  1025  	}
  1026  	return common.BytesToHash(data)
  1027  }
  1028  
  1029  // Block Backup Hash operations.
  1030  func (dbm *databaseManager) ReadHeadBlockBackupHash() common.Hash {
  1031  	db := dbm.getDatabase(headerDB)
  1032  	data, _ := db.Get(headBlockBackupKey)
  1033  	if len(data) == 0 {
  1034  		return common.Hash{}
  1035  	}
  1036  	return common.BytesToHash(data)
  1037  }
  1038  
// WriteHeadBlockHash stores the head block's hash. The new hash is pushed
// onto the global HeadBlockQ and an older hash popped from the queue is
// persisted under headBlockBackupKey (presumably a delayed rollback point —
// confirm with backupHashQueue semantics).
func (dbm *databaseManager) WriteHeadBlockHash(hash common.Hash) {
	HeadBlockQ.push(hash)

	db := dbm.getDatabase(headerDB)
	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
		logger.Crit("Failed to store last block's hash", "err", err)
	}

	// An empty popped hash means the queue has not filled yet; nothing to back up.
	backupHash := HeadBlockQ.pop()
	if backupHash == (common.Hash{}) {
		return
	}
	if err := db.Put(headBlockBackupKey, backupHash.Bytes()); err != nil {
		logger.Crit("Failed to store last block's backup hash", "err", err)
	}
}
  1056  
  1057  // Head Fast Block Hash operations.
  1058  // ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
  1059  func (dbm *databaseManager) ReadHeadFastBlockHash() common.Hash {
  1060  	db := dbm.getDatabase(headerDB)
  1061  	data, _ := db.Get(headFastBlockKey)
  1062  	if len(data) == 0 {
  1063  		return common.Hash{}
  1064  	}
  1065  	return common.BytesToHash(data)
  1066  }
  1067  
  1068  // Head Fast Block Backup Hash operations.
  1069  // ReadHeadFastBlockBackupHash retrieves the hash of the current fast-sync head block.
  1070  func (dbm *databaseManager) ReadHeadFastBlockBackupHash() common.Hash {
  1071  	db := dbm.getDatabase(headerDB)
  1072  	data, _ := db.Get(headFastBlockBackupKey)
  1073  	if len(data) == 0 {
  1074  		return common.Hash{}
  1075  	}
  1076  	return common.BytesToHash(data)
  1077  }
  1078  
// WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
// The new hash is pushed onto the global FastBlockQ and an older hash popped
// from the queue is persisted under headFastBlockBackupKey (presumably a
// delayed rollback point — confirm with backupHashQueue semantics).
func (dbm *databaseManager) WriteHeadFastBlockHash(hash common.Hash) {
	FastBlockQ.push(hash)

	db := dbm.getDatabase(headerDB)
	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
		logger.Crit("Failed to store last fast block's hash", "err", err)
	}

	// An empty popped hash means the queue has not filled yet; nothing to back up.
	backupHash := FastBlockQ.pop()
	if backupHash == (common.Hash{}) {
		return
	}
	if err := db.Put(headFastBlockBackupKey, backupHash.Bytes()); err != nil {
		logger.Crit("Failed to store last fast block's backup hash", "err", err)
	}
}
  1096  
  1097  // Fast Trie Progress operations.
  1098  // ReadFastTrieProgress retrieves the number of tries nodes fast synced to allow
  1099  // reporting correct numbers across restarts.
  1100  func (dbm *databaseManager) ReadFastTrieProgress() uint64 {
  1101  	db := dbm.getDatabase(MiscDB)
  1102  	data, _ := db.Get(fastTrieProgressKey)
  1103  	if len(data) == 0 {
  1104  		return 0
  1105  	}
  1106  	return new(big.Int).SetBytes(data).Uint64()
  1107  }
  1108  
  1109  // WriteFastTrieProgress stores the fast sync trie process counter to support
  1110  // retrieving it across restarts.
  1111  func (dbm *databaseManager) WriteFastTrieProgress(count uint64) {
  1112  	db := dbm.getDatabase(MiscDB)
  1113  	if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
  1114  		logger.Crit("Failed to store fast sync trie progress", "err", err)
  1115  	}
  1116  }
  1117  
  1118  // (Block)Header operations.
  1119  // HasHeader verifies the existence of a block header corresponding to the hash.
  1120  func (dbm *databaseManager) HasHeader(hash common.Hash, number uint64) bool {
  1121  	if dbm.cm.hasHeaderInCache(hash) {
  1122  		return true
  1123  	}
  1124  
  1125  	db := dbm.getDatabase(headerDB)
  1126  	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
  1127  		return false
  1128  	}
  1129  	return true
  1130  }
  1131  
  1132  // ReadHeader retrieves the block header corresponding to the hash.
  1133  func (dbm *databaseManager) ReadHeader(hash common.Hash, number uint64) *types.Header {
  1134  	if cachedHeader := dbm.cm.readHeaderCache(hash); cachedHeader != nil {
  1135  		return cachedHeader
  1136  	}
  1137  
  1138  	data := dbm.ReadHeaderRLP(hash, number)
  1139  	if len(data) == 0 {
  1140  		return nil
  1141  	}
  1142  	header := new(types.Header)
  1143  	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
  1144  		logger.Error("Invalid block header RLP", "hash", hash, "err", err)
  1145  		return nil
  1146  	}
  1147  
  1148  	// Write to cache before returning found value.
  1149  	dbm.cm.writeHeaderCache(hash, header)
  1150  	return header
  1151  }
  1152  
// ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
// A read failure is indistinguishable from a missing entry (both yield nil).
func (dbm *databaseManager) ReadHeaderRLP(hash common.Hash, number uint64) rlp.RawValue {
	db := dbm.getDatabase(headerDB)
	data, _ := db.Get(headerKey(number, hash))
	return data
}
  1159  
  1160  // WriteHeader stores a block header into the database and also stores the hash-
  1161  // to-number mapping.
  1162  func (dbm *databaseManager) WriteHeader(header *types.Header) {
  1163  	db := dbm.getDatabase(headerDB)
  1164  	// Write the hash -> number mapping
  1165  	var (
  1166  		hash    = header.Hash()
  1167  		number  = header.Number.Uint64()
  1168  		encoded = common.Int64ToByteBigEndian(number)
  1169  	)
  1170  	key := headerNumberKey(hash)
  1171  	if err := db.Put(key, encoded); err != nil {
  1172  		logger.Crit("Failed to store hash to number mapping", "err", err)
  1173  	}
  1174  	// Write the encoded header
  1175  	data, err := rlp.EncodeToBytes(header)
  1176  	if err != nil {
  1177  		logger.Crit("Failed to RLP encode header", "err", err)
  1178  	}
  1179  	key = headerKey(number, hash)
  1180  	if err := db.Put(key, data); err != nil {
  1181  		logger.Crit("Failed to store header", "err", err)
  1182  	}
  1183  
  1184  	// Write to cache at the end of successful write.
  1185  	dbm.cm.writeHeaderCache(hash, header)
  1186  	dbm.cm.writeBlockNumberCache(hash, number)
  1187  }
  1188  
// DeleteHeader removes all block header data associated with a hash:
// the header itself and the hash-to-number mapping, plus both cache entries.
// Any database error is fatal (logger.Crit).
func (dbm *databaseManager) DeleteHeader(hash common.Hash, number uint64) {
	db := dbm.getDatabase(headerDB)
	if err := db.Delete(headerKey(number, hash)); err != nil {
		logger.Crit("Failed to delete header", "err", err)
	}
	if err := db.Delete(headerNumberKey(hash)); err != nil {
		logger.Crit("Failed to delete hash to number mapping", "err", err)
	}

	// Delete cache at the end of successful delete.
	dbm.cm.deleteHeaderCache(hash)
	dbm.cm.deleteBlockNumberCache(hash)
}
  1203  
  1204  // Head Number operations.
  1205  // ReadHeaderNumber returns the header number assigned to a hash.
  1206  func (dbm *databaseManager) ReadHeaderNumber(hash common.Hash) *uint64 {
  1207  	if cachedHeaderNumber := dbm.cm.readBlockNumberCache(hash); cachedHeaderNumber != nil {
  1208  		return cachedHeaderNumber
  1209  	}
  1210  
  1211  	db := dbm.getDatabase(headerDB)
  1212  	data, _ := db.Get(headerNumberKey(hash))
  1213  	if len(data) != 8 {
  1214  		return nil
  1215  	}
  1216  	number := binary.BigEndian.Uint64(data)
  1217  
  1218  	// Write to cache before returning found value.
  1219  	dbm.cm.writeBlockNumberCache(hash, number)
  1220  	return &number
  1221  }
  1222  
  1223  // (Block)Body operations.
  1224  // HasBody verifies the existence of a block body corresponding to the hash.
  1225  func (dbm *databaseManager) HasBody(hash common.Hash, number uint64) bool {
  1226  	db := dbm.getDatabase(BodyDB)
  1227  	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
  1228  		return false
  1229  	}
  1230  	return true
  1231  }
  1232  
  1233  // ReadBody retrieves the block body corresponding to the hash.
  1234  func (dbm *databaseManager) ReadBody(hash common.Hash, number uint64) *types.Body {
  1235  	if cachedBody := dbm.cm.readBodyCache(hash); cachedBody != nil {
  1236  		return cachedBody
  1237  	}
  1238  
  1239  	data := dbm.ReadBodyRLP(hash, number)
  1240  	if len(data) == 0 {
  1241  		return nil
  1242  	}
  1243  	body := new(types.Body)
  1244  	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
  1245  		logger.Error("Invalid block body RLP", "hash", hash, "err", err)
  1246  		return nil
  1247  	}
  1248  
  1249  	// Write to cache at the end of successful read.
  1250  	dbm.cm.writeBodyCache(hash, body)
  1251  	return body
  1252  }
  1253  
// ReadBodyInCache retrieves the block body in bodyCache.
// It only searches cache; nil means a cache miss, not necessarily absence
// from the database.
func (dbm *databaseManager) ReadBodyInCache(hash common.Hash) *types.Body {
	return dbm.cm.readBodyCache(hash)
}
  1259  
  1260  // ReadBodyRLP retrieves the block body (transactions) in RLP encoding.
  1261  func (dbm *databaseManager) ReadBodyRLP(hash common.Hash, number uint64) rlp.RawValue {
  1262  	// Short circuit if the rlp encoded body's already in the cache, retrieve otherwise
  1263  	if cachedBodyRLP := dbm.readBodyRLPInCache(hash); cachedBodyRLP != nil {
  1264  		return cachedBodyRLP
  1265  	}
  1266  
  1267  	// find cached body and encode it to return
  1268  	if cachedBody := dbm.ReadBodyInCache(hash); cachedBody != nil {
  1269  		if bodyRLP, err := rlp.EncodeToBytes(cachedBody); err != nil {
  1270  			dbm.cm.writeBodyRLPCache(hash, bodyRLP)
  1271  			return bodyRLP
  1272  		}
  1273  	}
  1274  
  1275  	// not found in cache, find body in database
  1276  	db := dbm.getDatabase(BodyDB)
  1277  	data, _ := db.Get(blockBodyKey(number, hash))
  1278  
  1279  	// Write to cache at the end of successful read.
  1280  	dbm.cm.writeBodyRLPCache(hash, data)
  1281  	return data
  1282  }
  1283  
  1284  // ReadBodyRLPByHash retrieves the block body (transactions) in RLP encoding.
  1285  func (dbm *databaseManager) ReadBodyRLPByHash(hash common.Hash) rlp.RawValue {
  1286  	// Short circuit if the rlp encoded body's already in the cache, retrieve otherwise
  1287  	if cachedBodyRLP := dbm.readBodyRLPInCache(hash); cachedBodyRLP != nil {
  1288  		return cachedBodyRLP
  1289  	}
  1290  
  1291  	// find cached body and encode it to return
  1292  	if cachedBody := dbm.ReadBodyInCache(hash); cachedBody != nil {
  1293  		if bodyRLP, err := rlp.EncodeToBytes(cachedBody); err != nil {
  1294  			dbm.cm.writeBodyRLPCache(hash, bodyRLP)
  1295  			return bodyRLP
  1296  		}
  1297  	}
  1298  
  1299  	// not found in cache, find body in database
  1300  	number := dbm.ReadHeaderNumber(hash)
  1301  	if number == nil {
  1302  		return nil
  1303  	}
  1304  
  1305  	db := dbm.getDatabase(BodyDB)
  1306  	data, _ := db.Get(blockBodyKey(*number, hash))
  1307  
  1308  	// Write to cache at the end of successful read.
  1309  	dbm.cm.writeBodyRLPCache(hash, data)
  1310  	return data
  1311  }
  1312  
// readBodyRLPInCache retrieves the block body (transactions) in RLP encoding
// in bodyRLPCache. It only searches cache; nil means a cache miss.
func (dbm *databaseManager) readBodyRLPInCache(hash common.Hash) rlp.RawValue {
	return dbm.cm.readBodyRLPCache(hash)
}
  1318  
  1319  // WriteBody storea a block body into the database.
  1320  func (dbm *databaseManager) WriteBody(hash common.Hash, number uint64, body *types.Body) {
  1321  	data, err := rlp.EncodeToBytes(body)
  1322  	if err != nil {
  1323  		logger.Crit("Failed to RLP encode body", "err", err)
  1324  	}
  1325  	dbm.WriteBodyRLP(hash, number, data)
  1326  }
  1327  
  1328  func (dbm *databaseManager) PutBodyToBatch(batch Batch, hash common.Hash, number uint64, body *types.Body) {
  1329  	data, err := rlp.EncodeToBytes(body)
  1330  	if err != nil {
  1331  		logger.Crit("Failed to RLP encode body", "err", err)
  1332  	}
  1333  
  1334  	if err := batch.Put(blockBodyKey(number, hash), data); err != nil {
  1335  		logger.Crit("Failed to store block body", "err", err)
  1336  	}
  1337  }
  1338  
  1339  // WriteBodyRLP stores an RLP encoded block body into the database.
  1340  func (dbm *databaseManager) WriteBodyRLP(hash common.Hash, number uint64, rlp rlp.RawValue) {
  1341  	dbm.cm.writeBodyRLPCache(hash, rlp)
  1342  
  1343  	db := dbm.getDatabase(BodyDB)
  1344  	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
  1345  		logger.Crit("Failed to store block body", "err", err)
  1346  	}
  1347  }
  1348  
// DeleteBody removes all block body data associated with a hash, plus its
// cache entry. A database error is fatal (logger.Crit).
func (dbm *databaseManager) DeleteBody(hash common.Hash, number uint64) {
	db := dbm.getDatabase(BodyDB)
	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
		logger.Crit("Failed to delete block body", "err", err)
	}
	dbm.cm.deleteBodyCache(hash)
}
  1357  
  1358  // TotalDifficulty operations.
  1359  // ReadTd retrieves a block's total blockscore corresponding to the hash.
  1360  func (dbm *databaseManager) ReadTd(hash common.Hash, number uint64) *big.Int {
  1361  	if cachedTd := dbm.cm.readTdCache(hash); cachedTd != nil {
  1362  		return cachedTd
  1363  	}
  1364  
  1365  	db := dbm.getDatabase(MiscDB)
  1366  	data, _ := db.Get(headerTDKey(number, hash))
  1367  	if len(data) == 0 {
  1368  		return nil
  1369  	}
  1370  	td := new(big.Int)
  1371  	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
  1372  		logger.Error("Invalid block total blockscore RLP", "hash", hash, "err", err)
  1373  		return nil
  1374  	}
  1375  
  1376  	// Write to cache before returning found value.
  1377  	dbm.cm.writeTdCache(hash, td)
  1378  	return td
  1379  }
  1380  
  1381  // WriteTd stores the total blockscore of a block into the database.
  1382  func (dbm *databaseManager) WriteTd(hash common.Hash, number uint64, td *big.Int) {
  1383  	db := dbm.getDatabase(MiscDB)
  1384  	data, err := rlp.EncodeToBytes(td)
  1385  	if err != nil {
  1386  		logger.Crit("Failed to RLP encode block total blockscore", "err", err)
  1387  	}
  1388  	if err := db.Put(headerTDKey(number, hash), data); err != nil {
  1389  		logger.Crit("Failed to store block total blockscore", "err", err)
  1390  	}
  1391  
  1392  	// Write to cache at the end of successful write.
  1393  	dbm.cm.writeTdCache(hash, td)
  1394  }
  1395  
// DeleteTd removes all block total blockscore data associated with a hash,
// plus its cache entry. A database error is fatal (logger.Crit).
func (dbm *databaseManager) DeleteTd(hash common.Hash, number uint64) {
	db := dbm.getDatabase(MiscDB)
	if err := db.Delete(headerTDKey(number, hash)); err != nil {
		logger.Crit("Failed to delete block total blockscore", "err", err)
	}
	// Delete cache at the end of successful delete.
	dbm.cm.deleteTdCache(hash)
}
  1405  
  1406  // Receipts operations.
  1407  // ReadReceipt retrieves a receipt, blockHash, blockNumber and receiptIndex found by the given txHash.
  1408  func (dbm *databaseManager) ReadReceipt(txHash common.Hash) (*types.Receipt, common.Hash, uint64, uint64) {
  1409  	blockHash, blockNumber, receiptIndex := dbm.ReadTxLookupEntry(txHash)
  1410  	if blockHash == (common.Hash{}) {
  1411  		return nil, common.Hash{}, 0, 0
  1412  	}
  1413  	receipts := dbm.ReadReceipts(blockHash, blockNumber)
  1414  	if len(receipts) <= int(receiptIndex) {
  1415  		logger.Error("Receipt refereced missing", "number", blockNumber, "txHash", blockHash, "index", receiptIndex)
  1416  		return nil, common.Hash{}, 0, 0
  1417  	}
  1418  	return receipts[receiptIndex], blockHash, blockNumber, receiptIndex
  1419  }
  1420  
  1421  // ReadReceipts retrieves all the transaction receipts belonging to a block.
  1422  func (dbm *databaseManager) ReadReceipts(blockHash common.Hash, number uint64) types.Receipts {
  1423  	db := dbm.getDatabase(ReceiptsDB)
  1424  	// Retrieve the flattened receipt slice
  1425  	data, _ := db.Get(blockReceiptsKey(number, blockHash))
  1426  	if len(data) == 0 {
  1427  		return nil
  1428  	}
  1429  	// Convert the revceipts from their database form to their internal representation
  1430  	storageReceipts := []*types.ReceiptForStorage{}
  1431  	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
  1432  		logger.Error("Invalid receipt array RLP", "blockHash", blockHash, "err", err)
  1433  		return nil
  1434  	}
  1435  	receipts := make(types.Receipts, len(storageReceipts))
  1436  	for i, receipt := range storageReceipts {
  1437  		receipts[i] = (*types.Receipt)(receipt)
  1438  	}
  1439  	return receipts
  1440  }
  1441  
  1442  func (dbm *databaseManager) ReadReceiptsByBlockHash(hash common.Hash) types.Receipts {
  1443  	receipts := dbm.ReadBlockReceiptsInCache(hash)
  1444  	if receipts != nil {
  1445  		return receipts
  1446  	}
  1447  	number := dbm.ReadHeaderNumber(hash)
  1448  	if number == nil {
  1449  		return nil
  1450  	}
  1451  	return dbm.ReadReceipts(hash, *number)
  1452  }
  1453  
// WriteReceipts stores all the transaction receipts belonging to a block.
// The whole block's receipts are cached before being persisted.
func (dbm *databaseManager) WriteReceipts(hash common.Hash, number uint64, receipts types.Receipts) {
	dbm.cm.writeBlockReceiptsCache(hash, receipts)

	db := dbm.getDatabase(ReceiptsDB)
	// When putReceiptsToPutter is called from WriteReceipts, txReceipt is cached.
	dbm.putReceiptsToPutter(db, hash, number, receipts, true)
}
  1462  
// PutReceiptsToBatch queues a block's receipts on the given batch without
// touching the per-transaction receipt cache.
func (dbm *databaseManager) PutReceiptsToBatch(batch Batch, hash common.Hash, number uint64, receipts types.Receipts) {
	// When putReceiptsToPutter is called from PutReceiptsToBatch, txReceipt is not cached.
	dbm.putReceiptsToPutter(batch, hash, number, receipts, false)
}
  1467  
  1468  func (dbm *databaseManager) putReceiptsToPutter(putter KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts, addToCache bool) {
  1469  	// Convert the receipts into their database form and serialize them
  1470  	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
  1471  	for i, receipt := range receipts {
  1472  		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
  1473  
  1474  		if addToCache {
  1475  			dbm.cm.writeTxReceiptCache(receipt.TxHash, receipt)
  1476  		}
  1477  	}
  1478  	bytes, err := rlp.EncodeToBytes(storageReceipts)
  1479  	if err != nil {
  1480  		logger.Crit("Failed to encode block receipts", "err", err)
  1481  	}
  1482  	// Store the flattened receipt slice
  1483  	if err := putter.Put(blockReceiptsKey(number, hash), bytes); err != nil {
  1484  		logger.Crit("Failed to store block receipts", "err", err)
  1485  	}
  1486  }
  1487  
  1488  // DeleteReceipts removes all receipt data associated with a block hash.
  1489  func (dbm *databaseManager) DeleteReceipts(hash common.Hash, number uint64) {
  1490  	receipts := dbm.ReadReceipts(hash, number)
  1491  
  1492  	db := dbm.getDatabase(ReceiptsDB)
  1493  	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
  1494  		logger.Crit("Failed to delete block receipts", "err", err)
  1495  	}
  1496  
  1497  	// Delete blockReceiptsCache and txReceiptCache.
  1498  	dbm.cm.deleteBlockReceiptsCache(hash)
  1499  	if receipts != nil {
  1500  		for _, receipt := range receipts {
  1501  			dbm.cm.deleteTxReceiptCache(receipt.TxHash)
  1502  		}
  1503  	}
  1504  }
  1505  
  1506  // Block operations.
  1507  // ReadBlock retrieves an entire block corresponding to the hash, assembling it
  1508  // back from the stored header and body. If either the header or body could not
  1509  // be retrieved nil is returned.
  1510  //
  1511  // Note, due to concurrent download of header and block body the header and thus
  1512  // canonical hash can be stored in the database but the body data not (yet).
  1513  func (dbm *databaseManager) ReadBlock(hash common.Hash, number uint64) *types.Block {
  1514  	if cachedBlock := dbm.cm.readBlockCache(hash); cachedBlock != nil {
  1515  		return cachedBlock
  1516  	}
  1517  
  1518  	header := dbm.ReadHeader(hash, number)
  1519  	if header == nil {
  1520  		return nil
  1521  	}
  1522  
  1523  	body := dbm.ReadBody(hash, number)
  1524  	if body == nil {
  1525  		return nil
  1526  	}
  1527  
  1528  	block := types.NewBlockWithHeader(header).WithBody(body.Transactions)
  1529  
  1530  	// Write to cache at the end of successful write.
  1531  	dbm.cm.writeBlockCache(hash, block)
  1532  	return block
  1533  }
  1534  
  1535  func (dbm *databaseManager) ReadBlockByHash(hash common.Hash) *types.Block {
  1536  	if cachedBlock := dbm.cm.readBlockCache(hash); cachedBlock != nil {
  1537  		return cachedBlock
  1538  	}
  1539  
  1540  	number := dbm.ReadHeaderNumber(hash)
  1541  	if number == nil {
  1542  		return nil
  1543  	}
  1544  
  1545  	header := dbm.ReadHeader(hash, *number)
  1546  	if header == nil {
  1547  		return nil
  1548  	}
  1549  
  1550  	body := dbm.ReadBody(hash, *number)
  1551  	if body == nil {
  1552  		return nil
  1553  	}
  1554  
  1555  	block := types.NewBlockWithHeader(header).WithBody(body.Transactions)
  1556  
  1557  	// Write to cache at the end of successful write.
  1558  	dbm.cm.writeBlockCache(hash, block)
  1559  	return block
  1560  }
  1561  
  1562  func (dbm *databaseManager) ReadBlockByNumber(number uint64) *types.Block {
  1563  	hash := dbm.ReadCanonicalHash(number)
  1564  	if hash == (common.Hash{}) {
  1565  		return nil
  1566  	}
  1567  	return dbm.ReadBlock(hash, number)
  1568  }
  1569  
  1570  func (dbm *databaseManager) HasBlock(hash common.Hash, number uint64) bool {
  1571  	if dbm.cm.hasBlockInCache(hash) {
  1572  		return true
  1573  	}
  1574  	return dbm.HasBody(hash, number)
  1575  }
  1576  
  1577  func (dbm *databaseManager) WriteBlock(block *types.Block) {
  1578  	dbm.cm.writeBodyCache(block.Hash(), block.Body())
  1579  	dbm.cm.blockCache.Add(block.Hash(), block)
  1580  
  1581  	dbm.WriteBody(block.Hash(), block.NumberU64(), block.Body())
  1582  	dbm.WriteHeader(block.Header())
  1583  }
  1584  
// DeleteBlock removes every piece of block data (receipts, header, body,
// total blockscore) associated with a hash, plus the block cache entry.
func (dbm *databaseManager) DeleteBlock(hash common.Hash, number uint64) {
	dbm.DeleteReceipts(hash, number)
	dbm.DeleteHeader(hash, number)
	dbm.DeleteBody(hash, number)
	dbm.DeleteTd(hash, number)
	dbm.cm.deleteBlockCache(hash)
}
  1592  
// badBlockToKeep caps how many bad blocks are retained in the database.
const badBlockToKeep = 100

// badBlock pairs a header with its body for persisting rejected blocks.
type badBlock struct {
	Header *types.Header
	Body   *types.Body
}

// badBlockList implements sort.Interface ordering bad blocks by ascending
// block number; callers wrap it in sort.Reverse to get newest-first order.
type badBlockList []*badBlock

func (s badBlockList) Len() int { return len(s) }
func (s badBlockList) Less(i, j int) bool {
	return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64()
}
func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
  1609  
  1610  // ReadBadBlock retrieves the bad block with the corresponding block hash.
  1611  func (dbm *databaseManager) ReadBadBlock(hash common.Hash) *types.Block {
  1612  	db := dbm.getDatabase(MiscDB)
  1613  	blob, err := db.Get(badBlockKey)
  1614  	if err != nil {
  1615  		return nil
  1616  	}
  1617  	var badBlocks badBlockList
  1618  	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
  1619  		return nil
  1620  	}
  1621  	for _, bad := range badBlocks {
  1622  		if bad.Header.Hash() == hash {
  1623  			return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions)
  1624  		}
  1625  	}
  1626  	return nil
  1627  }
  1628  
  1629  // ReadAllBadBlocks retrieves all the bad blocks in the database.
  1630  // All returned blocks are sorted in reverse order by number.
  1631  func (dbm *databaseManager) ReadAllBadBlocks() ([]*types.Block, error) {
  1632  	var badBlocks badBlockList
  1633  	db := dbm.getDatabase(MiscDB)
  1634  	blob, err := db.Get(badBlockKey)
  1635  	if err != nil {
  1636  		return nil, err
  1637  	}
  1638  	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
  1639  		return nil, err
  1640  	}
  1641  	blocks := make([]*types.Block, len(badBlocks))
  1642  	for i, bad := range badBlocks {
  1643  		blocks[i] = types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions)
  1644  	}
  1645  	return blocks, nil
  1646  }
  1647  
// WriteBadBlock serializes the bad block into the database. If the cumulated
// bad blocks exceed the capacity, the oldest will be dropped.
func (dbm *databaseManager) WriteBadBlock(block *types.Block) {
	db := dbm.getDatabase(MiscDB)
	blob, err := db.Get(badBlockKey)
	if err != nil {
		// Best-effort read: a failure only loses previously stored entries.
		logger.Warn("Failed to load old bad blocks", "error", err)
	}
	var badBlocks badBlockList
	if len(blob) > 0 {
		if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
			logger.Error("failed to decode old bad blocks")
			return
		}
	}

	// Skip writing if this exact block is already recorded.
	for _, badblock := range badBlocks {
		if badblock.Header.Hash() == block.Hash() && badblock.Header.Number.Uint64() == block.NumberU64() {
			logger.Info("There is already corresponding badblock in db.", "badblock number", block.NumberU64())
			return
		}
	}

	badBlocks = append(badBlocks, &badBlock{
		Header: block.Header(),
		Body:   block.Body(),
	})
	// Keep only the newest badBlockToKeep entries (descending block number).
	sort.Sort(sort.Reverse(badBlocks))
	if len(badBlocks) > badBlockToKeep {
		badBlocks = badBlocks[:badBlockToKeep]
	}
	data, err := rlp.EncodeToBytes(badBlocks)
	if err != nil {
		logger.Crit("Failed to encode bad blocks", "err", err)
		return
	}
	if err := db.Put(badBlockKey, data); err != nil {
		logger.Crit("Failed to write bad blocks", "err", err)
		return
	}
}
  1689  
  1690  func (dbm *databaseManager) DeleteBadBlocks() {
  1691  	db := dbm.getDatabase(MiscDB)
  1692  	if err := db.Delete(badBlockKey); err != nil {
  1693  		logger.Error("Failed to delete bad blocks", "err", err)
  1694  	}
  1695  }
  1696  
// Find Common Ancestor operation
// FindCommonAncestor returns the last common ancestor of two block headers.
// It returns nil if any required header is missing from the database.
func (dbm *databaseManager) FindCommonAncestor(a, b *types.Header) *types.Header {
	// Walk a back along its parent links until it is no higher than b.
	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
		a = dbm.ReadHeader(a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
	}
	// Walk b back until it is no higher than a.
	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
		b = dbm.ReadHeader(b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	// Both sides are now at the same height; step them back in lockstep
	// until the two chains converge on the same header.
	for a.Hash() != b.Hash() {
		a = dbm.ReadHeader(a.ParentHash, a.Number.Uint64()-1)
		if a == nil {
			return nil
		}
		b = dbm.ReadHeader(b.ParentHash, b.Number.Uint64()-1)
		if b == nil {
			return nil
		}
	}
	return a
}
  1724  
  1725  // Istanbul Snapshot operations.
  1726  func (dbm *databaseManager) ReadIstanbulSnapshot(hash common.Hash) ([]byte, error) {
  1727  	db := dbm.getDatabase(MiscDB)
  1728  	return db.Get(snapshotKey(hash))
  1729  }
  1730  
  1731  func (dbm *databaseManager) WriteIstanbulSnapshot(hash common.Hash, blob []byte) error {
  1732  	db := dbm.getDatabase(MiscDB)
  1733  	return db.Put(snapshotKey(hash), blob)
  1734  }
  1735  
  1736  func (dbm *databaseManager) DeleteIstanbulSnapshot(hash common.Hash) {
  1737  	db := dbm.getDatabase(MiscDB)
  1738  	if err := db.Delete(snapshotKey(hash)); err != nil {
  1739  		logger.Crit("Failed to delete snpahost", "err", err)
  1740  	}
  1741  }
  1742  
  1743  // Merkle Proof operation.
  1744  func (dbm *databaseManager) WriteMerkleProof(key, value []byte) {
  1745  	db := dbm.getDatabase(MiscDB)
  1746  	if err := db.Put(key, value); err != nil {
  1747  		logger.Crit("Failed to write merkle proof", "err", err)
  1748  	}
  1749  }
  1750  
  1751  // ReadCode retrieves the contract code of the provided code hash.
  1752  func (dbm *databaseManager) ReadCode(hash common.Hash) []byte {
  1753  	// Try with the legacy code scheme first, if not then try with current
  1754  	// scheme. Since most of the code will be found with legacy scheme.
  1755  	//
  1756  	// TODO-Klaytn-Snapsync change the order when we forcibly upgrade the code scheme with snapshot.
  1757  	db := dbm.getDatabase(StateTrieDB)
  1758  	if data, _ := db.Get(hash[:]); len(data) > 0 {
  1759  		return data
  1760  	}
  1761  
  1762  	return dbm.ReadCodeWithPrefix(hash)
  1763  }
  1764  
  1765  // ReadCodeWithPrefix retrieves the contract code of the provided code hash.
  1766  // The main difference between this function and ReadCode is this function
  1767  // will only check the existence with latest scheme(with prefix).
  1768  func (dbm *databaseManager) ReadCodeWithPrefix(hash common.Hash) []byte {
  1769  	db := dbm.getDatabase(StateTrieDB)
  1770  	data, _ := db.Get(CodeKey(hash))
  1771  	return data
  1772  }
  1773  
  1774  // HasCode checks if the contract code corresponding to the
  1775  // provided code hash is present in the db.
  1776  func (dbm *databaseManager) HasCode(hash common.Hash) bool {
  1777  	// Try with the prefixed code scheme first, if not then try with legacy
  1778  	// scheme.
  1779  	//
  1780  	// TODO-Klaytn-Snapsync change the order when we forcibly upgrade the code scheme with snapshot.
  1781  	db := dbm.getDatabase(StateTrieDB)
  1782  	if ok, _ := db.Has(hash.Bytes()); ok {
  1783  		return true
  1784  	}
  1785  	return dbm.HasCodeWithPrefix(hash)
  1786  }
  1787  
  1788  // HasCodeWithPrefix checks if the contract code corresponding to the
  1789  // provided code hash is present in the db. This function will only check
  1790  // presence using the prefix-scheme.
  1791  func (dbm *databaseManager) HasCodeWithPrefix(hash common.Hash) bool {
  1792  	db := dbm.getDatabase(StateTrieDB)
  1793  	ok, _ := db.Has(CodeKey(hash))
  1794  	return ok
  1795  }
  1796  
  1797  // WriteCode writes the provided contract code database.
  1798  func (dbm *databaseManager) WriteCode(hash common.Hash, code []byte) {
  1799  	dbm.lockInMigration.RLock()
  1800  	defer dbm.lockInMigration.RUnlock()
  1801  
  1802  	dbs := make([]Database, 0, 2)
  1803  	if dbm.inMigration {
  1804  		dbs = append(dbs, dbm.getDatabase(StateTrieMigrationDB))
  1805  	}
  1806  	dbs = append(dbs, dbm.getDatabase(StateTrieDB))
  1807  	for _, db := range dbs {
  1808  		if err := db.Put(CodeKey(hash), code); err != nil {
  1809  			logger.Crit("Failed to store contract code", "err", err)
  1810  		}
  1811  	}
  1812  }
  1813  
  1814  func (dbm *databaseManager) PutCodeToBatch(batch Batch, hash common.Hash, code []byte) {
  1815  	if err := batch.Put(CodeKey(hash), code); err != nil {
  1816  		logger.Crit("Failed to store contract code", "err", err)
  1817  	}
  1818  }
  1819  
  1820  // DeleteCode deletes the specified contract code from the database.
  1821  func (dbm *databaseManager) DeleteCode(hash common.Hash) {
  1822  	db := dbm.getDatabase(StateTrieDB)
  1823  	if err := db.Delete(CodeKey(hash)); err != nil {
  1824  		logger.Crit("Failed to delete contract code", "err", err)
  1825  	}
  1826  }
  1827  
// ReadTrieNode retrieves a trie node by its extended hash. While a state
// migration is in progress, the migration database is consulted first and
// the old state trie database is used as the fallback.
func (dbm *databaseManager) ReadTrieNode(hash common.ExtHash) ([]byte, error) {
	dbm.lockInMigration.RLock()
	defer dbm.lockInMigration.RUnlock()

	if dbm.inMigration {
		if val, err := dbm.ReadTrieNodeFromNew(hash); err == nil {
			return val, nil
		} else if err != dataNotFoundErr {
			// A real read failure (not just "missing") is logged but the old
			// database is still tried.
			// TODO-Klaytn-Database Need to be properly handled
			logger.Error("Unexpected error while reading cached trie node from state migration database", "err", err)
		}
	}
	val, err := dbm.ReadTrieNodeFromOld(hash)
	if err != nil && err != dataNotFoundErr {
		// TODO-Klaytn-Database Need to be properly handled
		logger.Error("Unexpected error while reading cached trie node", "err", err)
	}
	return val, err
}
  1847  
  1848  func (dbm *databaseManager) HasTrieNode(hash common.ExtHash) (bool, error) {
  1849  	val, err := dbm.ReadTrieNode(hash)
  1850  	if val == nil || err != nil {
  1851  		return false, err
  1852  	} else {
  1853  		return true, nil
  1854  	}
  1855  }
  1856  
  1857  // ReadPreimage retrieves a single preimage of the provided hash.
  1858  func (dbm *databaseManager) ReadPreimage(hash common.Hash) []byte {
  1859  	dbm.lockInMigration.RLock()
  1860  	defer dbm.lockInMigration.RUnlock()
  1861  
  1862  	if dbm.inMigration {
  1863  		if val, err := dbm.GetStateTrieMigrationDB().Get(preimageKey(hash)); err == nil {
  1864  			return val
  1865  		}
  1866  	}
  1867  	return dbm.ReadPreimageFromOld(hash)
  1868  }
  1869  
  1870  func (dbm *databaseManager) ReadTrieNodeFromNew(hash common.ExtHash) ([]byte, error) {
  1871  	return dbm.GetStateTrieMigrationDB().Get(TrieNodeKey(hash))
  1872  }
  1873  
  1874  func (dbm *databaseManager) HasTrieNodeFromNew(hash common.ExtHash) (bool, error) {
  1875  	val, err := dbm.ReadTrieNodeFromNew(hash)
  1876  	if val == nil || err != nil {
  1877  		return false, err
  1878  	} else {
  1879  		return true, nil
  1880  	}
  1881  }
  1882  
  1883  func (dbm *databaseManager) HasCodeWithPrefixFromNew(hash common.Hash) bool {
  1884  	db := dbm.GetStateTrieMigrationDB()
  1885  	ok, _ := db.Has(CodeKey(hash))
  1886  	return ok
  1887  }
  1888  
  1889  // ReadPreimage retrieves a single preimage of the provided hash.
  1890  func (dbm *databaseManager) ReadPreimageFromNew(hash common.Hash) []byte {
  1891  	data, _ := dbm.GetStateTrieMigrationDB().Get(preimageKey(hash))
  1892  	return data
  1893  }
  1894  
  1895  func (dbm *databaseManager) ReadTrieNodeFromOld(hash common.ExtHash) ([]byte, error) {
  1896  	db := dbm.getDatabase(StateTrieDB)
  1897  	return db.Get(TrieNodeKey(hash))
  1898  }
  1899  
  1900  func (dbm *databaseManager) HasTrieNodeFromOld(hash common.ExtHash) (bool, error) {
  1901  	val, err := dbm.ReadTrieNodeFromOld(hash)
  1902  	if val == nil || err != nil {
  1903  		return false, err
  1904  	} else {
  1905  		return true, nil
  1906  	}
  1907  }
  1908  
  1909  func (dbm *databaseManager) HasCodeWithPrefixFromOld(hash common.Hash) bool {
  1910  	db := dbm.getDatabase(StateTrieDB)
  1911  	ok, _ := db.Has(CodeKey(hash))
  1912  	return ok
  1913  }
  1914  
  1915  // ReadPreimage retrieves a single preimage of the provided hash.
  1916  func (dbm *databaseManager) ReadPreimageFromOld(hash common.Hash) []byte {
  1917  	db := dbm.getDatabase(StateTrieDB)
  1918  	data, _ := db.Get(preimageKey(hash))
  1919  	return data
  1920  }
  1921  
  1922  func (dbm *databaseManager) WriteTrieNode(hash common.ExtHash, node []byte) {
  1923  	dbm.lockInMigration.RLock()
  1924  	defer dbm.lockInMigration.RUnlock()
  1925  
  1926  	if dbm.inMigration {
  1927  		if err := dbm.getDatabase(StateTrieMigrationDB).Put(TrieNodeKey(hash), node); err != nil {
  1928  			logger.Crit("Failed to store trie node", "err", err)
  1929  		}
  1930  	}
  1931  	if err := dbm.getDatabase(StateTrieDB).Put(TrieNodeKey(hash), node); err != nil {
  1932  		logger.Crit("Failed to store trie node", "err", err)
  1933  	}
  1934  }
  1935  
  1936  func (dbm *databaseManager) PutTrieNodeToBatch(batch Batch, hash common.ExtHash, node []byte) {
  1937  	if err := batch.Put(TrieNodeKey(hash), node); err != nil {
  1938  		logger.Crit("Failed to store trie node", "err", err)
  1939  	}
  1940  }
  1941  
  1942  // DeleteTrieNode deletes a trie node having a specific hash. It is used only for testing.
  1943  func (dbm *databaseManager) DeleteTrieNode(hash common.ExtHash) {
  1944  	if err := dbm.getDatabase(StateTrieDB).Delete(TrieNodeKey(hash)); err != nil {
  1945  		logger.Crit("Failed to delete trie node", "err", err)
  1946  	}
  1947  }
  1948  
// WritePreimages writes the provided set of preimages to the database. `number` is the
// current block number, and is used for debug messages only.
func (dbm *databaseManager) WritePreimages(number uint64, preimages map[common.Hash][]byte) {
	batch := dbm.NewBatch(StateTrieDB)
	defer batch.Release()
	for hash, preimage := range preimages {
		if err := batch.Put(preimageKey(hash), preimage); err != nil {
			logger.Crit("Failed to store trie preimage", "err", err)
		}
		// Flush intermediate batches once they grow past the ideal size to
		// bound memory usage.
		if _, err := WriteBatchesOverThreshold(batch); err != nil {
			logger.Crit("Failed to store trie preimage", "err", err)
		}
	}
	if err := batch.Write(); err != nil {
		logger.Crit("Failed to batch write trie preimage", "err", err, "blockNumber", number)
	}
	// NOTE(review): both counters advance by the full map size here — confirm
	// whether preimageHitCounter is intended to track something different.
	preimageCounter.Inc(int64(len(preimages)))
	preimageHitCounter.Inc(int64(len(preimages)))
}
  1968  
  1969  // ReadPruningEnabled reads if the live pruning flag is stored in database.
  1970  func (dbm *databaseManager) ReadPruningEnabled() bool {
  1971  	ok, _ := dbm.getDatabase(MiscDB).Has(pruningEnabledKey)
  1972  	return ok
  1973  }
  1974  
  1975  // WritePruningEnabled writes the live pruning flag to the database.
  1976  func (dbm *databaseManager) WritePruningEnabled() {
  1977  	if err := dbm.getDatabase(MiscDB).Put(pruningEnabledKey, []byte("42")); err != nil {
  1978  		logger.Crit("Failed to store pruning enabled flag", "err", err)
  1979  	}
  1980  }
  1981  
  1982  // DeletePruningEnabled deletes the live pruning flag. It is used only for testing.
  1983  func (dbm *databaseManager) DeletePruningEnabled() {
  1984  	if err := dbm.getDatabase(MiscDB).Delete(pruningEnabledKey); err != nil {
  1985  		logger.Crit("Failed to remove pruning enabled flag", "err", err)
  1986  	}
  1987  }
  1988  
  1989  // WritePruningMarks writes the provided set of pruning marks to the database.
  1990  func (dbm *databaseManager) WritePruningMarks(marks []PruningMark) {
  1991  	batch := dbm.NewBatch(MiscDB)
  1992  	defer batch.Release()
  1993  	for _, mark := range marks {
  1994  		if err := batch.Put(pruningMarkKey(mark), pruningMarkValue); err != nil {
  1995  			logger.Crit("Failed to store trie pruning mark", "err", err)
  1996  		}
  1997  		if _, err := WriteBatchesOverThreshold(batch); err != nil {
  1998  			logger.Crit("Failed to store trie pruning mark", "err", err)
  1999  		}
  2000  	}
  2001  	if err := batch.Write(); err != nil {
  2002  		logger.Crit("Failed to batch write pruning mark", "err", err)
  2003  	}
  2004  }
  2005  
// ReadPruningMarks reads the pruning marks in the block number range [startNumber, endNumber).
// An endNumber of 0 means no upper bound.
func (dbm *databaseManager) ReadPruningMarks(startNumber, endNumber uint64) []PruningMark {
	prefix := pruningMarkPrefix
	startKey := pruningMarkKey(PruningMark{startNumber, common.ExtHash{}})
	// Iterate all keys carrying the pruning-mark prefix, starting from the
	// key for startNumber (the prefix itself is stripped from the seek key).
	it := dbm.getDatabase(MiscDB).NewIterator(prefix, startKey[len(prefix):])
	defer it.Release()

	var marks []PruningMark
	for it.Next() {
		mark := parsePruningMarkKey(it.Key())
		// The early break relies on iteration visiting marks in ascending
		// block-number order — presumably the key encoding guarantees this.
		if endNumber != 0 && mark.Number >= endNumber {
			break
		}
		marks = append(marks, mark)
	}
	return marks
}
  2023  
  2024  // DeletePruningMarks deletes the provided set of pruning marks from the database.
  2025  // Note that trie nodes are not deleted by this function. To prune trie nodes, use
  2026  // the PruneTrieNodes or DeleteTrieNode functions.
  2027  func (dbm *databaseManager) DeletePruningMarks(marks []PruningMark) {
  2028  	batch := dbm.NewBatch(MiscDB)
  2029  	defer batch.Release()
  2030  	for _, mark := range marks {
  2031  		if err := batch.Delete(pruningMarkKey(mark)); err != nil {
  2032  			logger.Crit("Failed to delete trie pruning mark", "err", err)
  2033  		}
  2034  		if _, err := WriteBatchesOverThreshold(batch); err != nil {
  2035  			logger.Crit("Failed to delete trie pruning mark", "err", err)
  2036  		}
  2037  	}
  2038  	if err := batch.Write(); err != nil {
  2039  		logger.Crit("Failed to batch delete pruning mark", "err", err)
  2040  	}
  2041  }
  2042  
  2043  // PruneTrieNodes deletes the trie nodes according to the provided set of pruning marks.
  2044  func (dbm *databaseManager) PruneTrieNodes(marks []PruningMark) {
  2045  	batch := dbm.NewBatch(StateTrieDB)
  2046  	defer batch.Release()
  2047  	for _, mark := range marks {
  2048  		if err := batch.Delete(TrieNodeKey(mark.Hash)); err != nil {
  2049  			logger.Crit("Failed to prune trie node", "err", err)
  2050  		}
  2051  		if _, err := WriteBatchesOverThreshold(batch); err != nil {
  2052  			logger.Crit("Failed to prune trie node", "err", err)
  2053  		}
  2054  	}
  2055  	if err := batch.Write(); err != nil {
  2056  		logger.Crit("Failed to batch prune trie node", "err", err)
  2057  	}
  2058  }
  2059  
  2060  // WriteLastPrunedBlockNumber records a block number of the most recent pruning block
  2061  func (dbm *databaseManager) WriteLastPrunedBlockNumber(blockNumber uint64) {
  2062  	db := dbm.getDatabase(MiscDB)
  2063  	if err := db.Put(lastPrunedBlockNumberKey, common.Int64ToByteLittleEndian(blockNumber)); err != nil {
  2064  		logger.Crit("Failed to store the last pruned block number", "err", err)
  2065  	}
  2066  }
  2067  
  2068  // ReadLastPrunedBlockNumber reads a block number of the most recent pruning block
  2069  func (dbm *databaseManager) ReadLastPrunedBlockNumber() (uint64, error) {
  2070  	db := dbm.getDatabase(MiscDB)
  2071  	lastPruned, err := db.Get(lastPrunedBlockNumberKey)
  2072  	if err != nil {
  2073  		return 0, err
  2074  	}
  2075  	return binary.LittleEndian.Uint64(lastPruned), nil
  2076  }
  2077  
  2078  // ReadTxLookupEntry retrieves the positional metadata associated with a transaction
  2079  // hash to allow retrieving the transaction or receipt by hash.
  2080  func (dbm *databaseManager) ReadTxLookupEntry(hash common.Hash) (common.Hash, uint64, uint64) {
  2081  	db := dbm.getDatabase(TxLookUpEntryDB)
  2082  	data, _ := db.Get(TxLookupKey(hash))
  2083  	if len(data) == 0 {
  2084  		return common.Hash{}, 0, 0
  2085  	}
  2086  	var entry TxLookupEntry
  2087  	if err := rlp.DecodeBytes(data, &entry); err != nil {
  2088  		logger.Error("Invalid transaction lookup entry RLP", "hash", hash, "err", err)
  2089  		return common.Hash{}, 0, 0
  2090  	}
  2091  	return entry.BlockHash, entry.BlockIndex, entry.Index
  2092  }
  2093  
  2094  // WriteTxLookupEntries stores a positional metadata for every transaction from
  2095  // a block, enabling hash based transaction and receipt lookups.
  2096  func (dbm *databaseManager) WriteTxLookupEntries(block *types.Block) {
  2097  	db := dbm.getDatabase(TxLookUpEntryDB)
  2098  	putTxLookupEntriesToPutter(db, block)
  2099  }
  2100  
// WriteAndCacheTxLookupEntries writes a lookup entry for every transaction
// in the block and mirrors each entry into the in-memory cache. The batch
// write error, if any, is returned to the caller.
func (dbm *databaseManager) WriteAndCacheTxLookupEntries(block *types.Block) error {
	batch := dbm.NewBatch(TxLookUpEntryDB)
	defer batch.Release()
	for i, tx := range block.Transactions() {
		entry := TxLookupEntry{
			BlockHash:  block.Hash(),
			BlockIndex: block.NumberU64(),
			Index:      uint64(i),
		}
		data, err := rlp.EncodeToBytes(entry)
		if err != nil {
			logger.Crit("Failed to encode transaction lookup entry", "err", err)
		}
		if err := batch.Put(TxLookupKey(tx.Hash()), data); err != nil {
			logger.Crit("Failed to store transaction lookup entry", "err", err)
		}

		// Write to cache at the end of successful Put.
		dbm.cm.writeTxAndLookupInfoCache(tx.Hash(), &TransactionLookup{tx, &entry})
	}
	if err := batch.Write(); err != nil {
		logger.Error("Failed to write TxLookupEntries in batch", "err", err, "blockNumber", block.Number())
		return err
	}
	return nil
}
  2127  
// PutTxLookupEntriesToBatch queues a lookup entry for every transaction in
// the block into the given batch without writing it.
func (dbm *databaseManager) PutTxLookupEntriesToBatch(batch Batch, block *types.Block) {
	putTxLookupEntriesToPutter(batch, block)
}
  2131  
  2132  func putTxLookupEntriesToPutter(putter KeyValueWriter, block *types.Block) {
  2133  	for i, tx := range block.Transactions() {
  2134  		entry := TxLookupEntry{
  2135  			BlockHash:  block.Hash(),
  2136  			BlockIndex: block.NumberU64(),
  2137  			Index:      uint64(i),
  2138  		}
  2139  		data, err := rlp.EncodeToBytes(entry)
  2140  		if err != nil {
  2141  			logger.Crit("Failed to encode transaction lookup entry", "err", err)
  2142  		}
  2143  		if err := putter.Put(TxLookupKey(tx.Hash()), data); err != nil {
  2144  			logger.Crit("Failed to store transaction lookup entry", "err", err)
  2145  		}
  2146  	}
  2147  }
  2148  
  2149  // DeleteTxLookupEntry removes all transaction data associated with a hash.
  2150  func (dbm *databaseManager) DeleteTxLookupEntry(hash common.Hash) {
  2151  	db := dbm.getDatabase(TxLookUpEntryDB)
  2152  	db.Delete(TxLookupKey(hash))
  2153  }
  2154  
// ReadTxAndLookupInfo retrieves a specific transaction from the database, along with
// its added positional metadata (block hash, block number, index in block).
// All zero values are returned when the lookup entry or the body is missing.
func (dbm *databaseManager) ReadTxAndLookupInfo(hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
	blockHash, blockNumber, txIndex := dbm.ReadTxLookupEntry(hash)
	if blockHash == (common.Hash{}) {
		return nil, common.Hash{}, 0, 0
	}
	// The lookup entry only points into a block body; the body must still
	// exist and be long enough for the recorded index.
	body := dbm.ReadBody(blockHash, blockNumber)
	if body == nil || len(body.Transactions) <= int(txIndex) {
		logger.Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash, "index", txIndex)
		return nil, common.Hash{}, 0, 0
	}
	return body.Transactions[txIndex], blockHash, blockNumber, txIndex
}
  2169  
// NewSenderTxHashToTxHashBatch returns a batch to write senderTxHash to txHash
// mapping information. The mapping lives in MiscDB.
func (dbm *databaseManager) NewSenderTxHashToTxHashBatch() Batch {
	return dbm.NewBatch(MiscDB) // batch.Release should be called from caller
}
  2174  
  2175  // PutSenderTxHashToTxHashToBatch 1) puts the given senderTxHash and txHash to the given batch and
  2176  // 2) writes the information to the cache.
  2177  func (dbm *databaseManager) PutSenderTxHashToTxHashToBatch(batch Batch, senderTxHash, txHash common.Hash) error {
  2178  	if err := batch.Put(SenderTxHashToTxHashKey(senderTxHash), txHash.Bytes()); err != nil {
  2179  		return err
  2180  	}
  2181  
  2182  	dbm.cm.writeSenderTxHashToTxHashCache(senderTxHash, txHash)
  2183  
  2184  	if batch.ValueSize() > IdealBatchSize {
  2185  		batch.Write()
  2186  		batch.Reset()
  2187  	}
  2188  
  2189  	return nil
  2190  }
  2191  
  2192  // ReadTxHashFromSenderTxHash retrieves a txHash corresponding to the given senderTxHash.
  2193  func (dbm *databaseManager) ReadTxHashFromSenderTxHash(senderTxHash common.Hash) common.Hash {
  2194  	if txHash := dbm.cm.readSenderTxHashToTxHashCache(senderTxHash); !common.EmptyHash(txHash) {
  2195  		return txHash
  2196  	}
  2197  
  2198  	data, _ := dbm.getDatabase(MiscDB).Get(SenderTxHashToTxHashKey(senderTxHash))
  2199  	if len(data) == 0 {
  2200  		return common.Hash{}
  2201  	}
  2202  
  2203  	txHash := common.BytesToHash(data)
  2204  	dbm.cm.writeSenderTxHashToTxHashCache(senderTxHash, txHash)
  2205  	return txHash
  2206  }
  2207  
  2208  // BloomBits operations.
  2209  // ReadBloomBits retrieves the compressed bloom bit vector belonging to the given
  2210  // section and bit index from the.
  2211  func (dbm *databaseManager) ReadBloomBits(bloomBitsKey []byte) ([]byte, error) {
  2212  	db := dbm.getDatabase(MiscDB)
  2213  	return db.Get(bloomBitsKey)
  2214  }
  2215  
  2216  // WriteBloomBits stores the compressed bloom bits vector belonging to the given
  2217  // section and bit index.
  2218  func (dbm *databaseManager) WriteBloomBits(bloomBitsKey, bits []byte) error {
  2219  	db := dbm.getDatabase(MiscDB)
  2220  	return db.Put(bloomBitsKey, bits)
  2221  }
  2222  
  2223  // ValidSections operation.
  2224  func (dbm *databaseManager) ReadValidSections() ([]byte, error) {
  2225  	db := dbm.getDatabase(MiscDB)
  2226  	return db.Get(validSectionKey)
  2227  }
  2228  
  2229  func (dbm *databaseManager) WriteValidSections(encodedSections []byte) {
  2230  	db := dbm.getDatabase(MiscDB)
  2231  	db.Put(validSectionKey, encodedSections)
  2232  }
  2233  
  2234  // SectionHead operation.
  2235  func (dbm *databaseManager) ReadSectionHead(encodedSection []byte) ([]byte, error) {
  2236  	db := dbm.getDatabase(MiscDB)
  2237  	return db.Get(sectionHeadKey(encodedSection))
  2238  }
  2239  
  2240  func (dbm *databaseManager) WriteSectionHead(encodedSection []byte, hash common.Hash) {
  2241  	db := dbm.getDatabase(MiscDB)
  2242  	db.Put(sectionHeadKey(encodedSection), hash.Bytes())
  2243  }
  2244  
  2245  func (dbm *databaseManager) DeleteSectionHead(encodedSection []byte) {
  2246  	db := dbm.getDatabase(MiscDB)
  2247  	db.Delete(sectionHeadKey(encodedSection))
  2248  }
  2249  
  2250  // ReadDatabaseVersion retrieves the version number of the database.
  2251  func (dbm *databaseManager) ReadDatabaseVersion() *uint64 {
  2252  	db := dbm.getDatabase(MiscDB)
  2253  	var version uint64
  2254  
  2255  	enc, _ := db.Get(databaseVerisionKey)
  2256  	if len(enc) == 0 {
  2257  		return nil
  2258  	}
  2259  
  2260  	if err := rlp.DecodeBytes(enc, &version); err != nil {
  2261  		logger.Error("Failed to decode database version", "err", err)
  2262  		return nil
  2263  	}
  2264  
  2265  	return &version
  2266  }
  2267  
  2268  // WriteDatabaseVersion stores the version number of the database
  2269  func (dbm *databaseManager) WriteDatabaseVersion(version uint64) {
  2270  	db := dbm.getDatabase(MiscDB)
  2271  	enc, err := rlp.EncodeToBytes(version)
  2272  	if err != nil {
  2273  		logger.Crit("Failed to encode database version", "err", err)
  2274  	}
  2275  	if err := db.Put(databaseVerisionKey, enc); err != nil {
  2276  		logger.Crit("Failed to store the database version", "err", err)
  2277  	}
  2278  }
  2279  
  2280  // ReadChainConfig retrieves the consensus settings based on the given genesis hash.
  2281  func (dbm *databaseManager) ReadChainConfig(hash common.Hash) *params.ChainConfig {
  2282  	db := dbm.getDatabase(MiscDB)
  2283  	data, _ := db.Get(configKey(hash))
  2284  	if len(data) == 0 {
  2285  		return nil
  2286  	}
  2287  	var config params.ChainConfig
  2288  	if err := json.Unmarshal(data, &config); err != nil {
  2289  		logger.Error("Invalid chain config JSON", "hash", hash, "err", err)
  2290  		return nil
  2291  	}
  2292  	return &config
  2293  }
  2294  
  2295  func (dbm *databaseManager) WriteChainConfig(hash common.Hash, cfg *params.ChainConfig) {
  2296  	db := dbm.getDatabase(MiscDB)
  2297  	if cfg == nil {
  2298  		return
  2299  	}
  2300  	data, err := json.Marshal(cfg)
  2301  	if err != nil {
  2302  		logger.Crit("Failed to JSON encode chain config", "err", err)
  2303  	}
  2304  	if err := db.Put(configKey(hash), data); err != nil {
  2305  		logger.Crit("Failed to store chain config", "err", err)
  2306  	}
  2307  }
  2308  
  2309  // ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at
  2310  // the last shutdown. The blob is expected to be max a few 10s of megabytes.
  2311  func (dbm *databaseManager) ReadSnapshotJournal() []byte {
  2312  	db := dbm.getDatabase(SnapshotDB)
  2313  	data, _ := db.Get(snapshotJournalKey)
  2314  	return data
  2315  }
  2316  
  2317  // WriteSnapshotJournal stores the serialized in-memory diff layers to save at
  2318  // shutdown. The blob is expected to be max a few 10s of megabytes.
  2319  func (dbm *databaseManager) WriteSnapshotJournal(journal []byte) {
  2320  	db := dbm.getDatabase(SnapshotDB)
  2321  	if err := db.Put(snapshotJournalKey, journal); err != nil {
  2322  		logger.Crit("Failed to store snapshot journal", "err", err)
  2323  	}
  2324  }
  2325  
  2326  // DeleteSnapshotJournal deletes the serialized in-memory diff layers saved at
  2327  // the last shutdown
  2328  func (dbm *databaseManager) DeleteSnapshotJournal() {
  2329  	db := dbm.getDatabase(SnapshotDB)
  2330  	if err := db.Delete(snapshotJournalKey); err != nil {
  2331  		logger.Crit("Failed to remove snapshot journal", "err", err)
  2332  	}
  2333  }
  2334  
  2335  // ReadSnapshotGenerator retrieves the serialized snapshot generator saved at
  2336  // the last shutdown.
  2337  func (dbm *databaseManager) ReadSnapshotGenerator() []byte {
  2338  	db := dbm.getDatabase(SnapshotDB)
  2339  	data, _ := db.Get(SnapshotGeneratorKey)
  2340  	return data
  2341  }
  2342  
  2343  // WriteSnapshotGenerator stores the serialized snapshot generator to save at
  2344  // shutdown.
  2345  func (dbm *databaseManager) WriteSnapshotGenerator(generator []byte) {
  2346  	db := dbm.getDatabase(SnapshotDB)
  2347  	if err := db.Put(SnapshotGeneratorKey, generator); err != nil {
  2348  		logger.Crit("Failed to store snapshot generator", "err", err)
  2349  	}
  2350  }
  2351  
  2352  // DeleteSnapshotGenerator deletes the serialized snapshot generator saved at
  2353  // the last shutdown
  2354  func (dbm *databaseManager) DeleteSnapshotGenerator() {
  2355  	db := dbm.getDatabase(SnapshotDB)
  2356  	if err := db.Delete(SnapshotGeneratorKey); err != nil {
  2357  		logger.Crit("Failed to remove snapshot generator", "err", err)
  2358  	}
  2359  }
  2360  
  2361  // ReadSnapshotDisabled retrieves if the snapshot maintenance is disabled.
  2362  func (dbm *databaseManager) ReadSnapshotDisabled() bool {
  2363  	db := dbm.getDatabase(SnapshotDB)
  2364  	disabled, _ := db.Has(snapshotDisabledKey)
  2365  	return disabled
  2366  }
  2367  
  2368  // WriteSnapshotDisabled stores the snapshot pause flag.
  2369  func (dbm *databaseManager) WriteSnapshotDisabled() {
  2370  	db := dbm.getDatabase(SnapshotDB)
  2371  	if err := db.Put(snapshotDisabledKey, []byte("42")); err != nil {
  2372  		logger.Crit("Failed to store snapshot disabled flag", "err", err)
  2373  	}
  2374  }
  2375  
  2376  // DeleteSnapshotDisabled deletes the flag keeping the snapshot maintenance disabled.
  2377  func (dbm *databaseManager) DeleteSnapshotDisabled() {
  2378  	db := dbm.getDatabase(SnapshotDB)
  2379  	if err := db.Delete(snapshotDisabledKey); err != nil {
  2380  		logger.Crit("Failed to remove snapshot disabled flag", "err", err)
  2381  	}
  2382  }
  2383  
  2384  // ReadSnapshotRecoveryNumber retrieves the block number of the last persisted
  2385  // snapshot layer.
  2386  func (dbm *databaseManager) ReadSnapshotRecoveryNumber() *uint64 {
  2387  	db := dbm.getDatabase(SnapshotDB)
  2388  	data, _ := db.Get(snapshotRecoveryKey)
  2389  	if len(data) == 0 {
  2390  		return nil
  2391  	}
  2392  	if len(data) != 8 {
  2393  		return nil
  2394  	}
  2395  	number := binary.BigEndian.Uint64(data)
  2396  	return &number
  2397  }
  2398  
  2399  // WriteSnapshotRecoveryNumber stores the block number of the last persisted
  2400  // snapshot layer.
  2401  func (dbm *databaseManager) WriteSnapshotRecoveryNumber(number uint64) {
  2402  	db := dbm.getDatabase(SnapshotDB)
  2403  	var buf [8]byte
  2404  	binary.BigEndian.PutUint64(buf[:], number)
  2405  	if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil {
  2406  		logger.Crit("Failed to store snapshot recovery number", "err", err)
  2407  	}
  2408  }
  2409  
  2410  // DeleteSnapshotRecoveryNumber deletes the block number of the last persisted
  2411  // snapshot layer.
  2412  func (dbm *databaseManager) DeleteSnapshotRecoveryNumber() {
  2413  	db := dbm.getDatabase(SnapshotDB)
  2414  	if err := db.Delete(snapshotRecoveryKey); err != nil {
  2415  		logger.Crit("Failed to remove snapshot recovery number", "err", err)
  2416  	}
  2417  }
  2418  
  2419  // ReadSnapshotSyncStatus retrieves the serialized sync status saved at shutdown.
  2420  func (dbm *databaseManager) ReadSnapshotSyncStatus() []byte {
  2421  	db := dbm.getDatabase(SnapshotDB)
  2422  	data, _ := db.Get(snapshotSyncStatusKey)
  2423  	return data
  2424  }
  2425  
  2426  // WriteSnapshotSyncStatus stores the serialized sync status to save at shutdown.
  2427  func (dbm *databaseManager) WriteSnapshotSyncStatus(status []byte) {
  2428  	db := dbm.getDatabase(SnapshotDB)
  2429  	if err := db.Put(snapshotSyncStatusKey, status); err != nil {
  2430  		logger.Crit("Failed to store snapshot sync status", "err", err)
  2431  	}
  2432  }
  2433  
  2434  // DeleteSnapshotSyncStatus deletes the serialized sync status saved at the last
  2435  // shutdown
  2436  func (dbm *databaseManager) DeleteSnapshotSyncStatus() {
  2437  	db := dbm.getDatabase(SnapshotDB)
  2438  	if err := db.Delete(snapshotSyncStatusKey); err != nil {
  2439  		logger.Crit("Failed to remove snapshot sync status", "err", err)
  2440  	}
  2441  }
  2442  
  2443  // ReadSnapshotRoot retrieves the root of the block whose state is contained in
  2444  // the persisted snapshot.
  2445  func (dbm *databaseManager) ReadSnapshotRoot() common.Hash {
  2446  	db := dbm.getDatabase(SnapshotDB)
  2447  	data, _ := db.Get(snapshotRootKey)
  2448  	if len(data) != common.HashLength {
  2449  		return common.Hash{}
  2450  	}
  2451  	return common.BytesToHash(data)
  2452  }
  2453  
  2454  // WriteSnapshotRoot stores the root of the block whose state is contained in
  2455  // the persisted snapshot.
  2456  func (dbm *databaseManager) WriteSnapshotRoot(root common.Hash) {
  2457  	db := dbm.getDatabase(SnapshotDB)
  2458  	if err := db.Put(snapshotRootKey, root[:]); err != nil {
  2459  		logger.Crit("Failed to store snapshot root", "err", err)
  2460  	}
  2461  }
  2462  
  2463  // DeleteSnapshotRoot deletes the hash of the block whose state is contained in
  2464  // the persisted snapshot. Since snapshots are not immutable, this  method can
  2465  // be used during updates, so a crash or failure will mark the entire snapshot
  2466  // invalid.
  2467  func (dbm *databaseManager) DeleteSnapshotRoot() {
  2468  	db := dbm.getDatabase(SnapshotDB)
  2469  	if err := db.Delete(snapshotRootKey); err != nil {
  2470  		logger.Crit("Failed to remove snapshot root", "err", err)
  2471  	}
  2472  }
  2473  
  2474  // ReadAccountSnapshot retrieves the snapshot entry of an account trie leaf.
  2475  func (dbm *databaseManager) ReadAccountSnapshot(hash common.Hash) []byte {
  2476  	db := dbm.getDatabase(SnapshotDB)
  2477  	data, _ := db.Get(AccountSnapshotKey(hash))
  2478  	return data
  2479  }
  2480  
  2481  // WriteAccountSnapshot stores the snapshot entry of an account trie leaf.
  2482  func (dbm *databaseManager) WriteAccountSnapshot(hash common.Hash, entry []byte) {
  2483  	db := dbm.getDatabase(SnapshotDB)
  2484  	writeAccountSnapshot(db, hash, entry)
  2485  }
  2486  
  2487  // DeleteAccountSnapshot removes the snapshot entry of an account trie leaf.
  2488  func (dbm *databaseManager) DeleteAccountSnapshot(hash common.Hash) {
  2489  	db := dbm.getDatabase(SnapshotDB)
  2490  	deleteAccountSnapshot(db, hash)
  2491  }
  2492  
  2493  // ReadStorageSnapshot retrieves the snapshot entry of an storage trie leaf.
  2494  func (dbm *databaseManager) ReadStorageSnapshot(accountHash, storageHash common.Hash) []byte {
  2495  	db := dbm.getDatabase(SnapshotDB)
  2496  	data, _ := db.Get(StorageSnapshotKey(accountHash, storageHash))
  2497  	return data
  2498  }
  2499  
  2500  // WriteStorageSnapshot stores the snapshot entry of an storage trie leaf.
  2501  func (dbm *databaseManager) WriteStorageSnapshot(accountHash, storageHash common.Hash, entry []byte) {
  2502  	db := dbm.getDatabase(SnapshotDB)
  2503  	writeStorageSnapshot(db, accountHash, storageHash, entry)
  2504  }
  2505  
  2506  // DeleteStorageSnapshot removes the snapshot entry of an storage trie leaf.
  2507  func (dbm *databaseManager) DeleteStorageSnapshot(accountHash, storageHash common.Hash) {
  2508  	db := dbm.getDatabase(SnapshotDB)
  2509  	deleteStorageSnapshot(db, accountHash, storageHash)
  2510  }
  2511  
  2512  func (dbm *databaseManager) NewSnapshotDBIterator(prefix []byte, start []byte) Iterator {
  2513  	db := dbm.getDatabase(SnapshotDB)
  2514  	return db.NewIterator(prefix, start)
  2515  }
  2516  
  2517  // WriteChildChainTxHash writes stores a transaction hash of a transaction which contains
  2518  // AnchoringData, with the key made with given child chain block hash.
  2519  func (dbm *databaseManager) WriteChildChainTxHash(ccBlockHash common.Hash, ccTxHash common.Hash) {
  2520  	key := childChainTxHashKey(ccBlockHash)
  2521  	db := dbm.getDatabase(bridgeServiceDB)
  2522  	if err := db.Put(key, ccTxHash.Bytes()); err != nil {
  2523  		logger.Crit("Failed to store ChildChainTxHash", "ccBlockHash", ccBlockHash.String(), "ccTxHash", ccTxHash.String(), "err", err)
  2524  	}
  2525  }
  2526  
  2527  // ConvertChildChainBlockHashToParentChainTxHash returns a transaction hash of a transaction which contains
  2528  // AnchoringData, with the key made with given child chain block hash.
  2529  func (dbm *databaseManager) ConvertChildChainBlockHashToParentChainTxHash(scBlockHash common.Hash) common.Hash {
  2530  	key := childChainTxHashKey(scBlockHash)
  2531  	db := dbm.getDatabase(bridgeServiceDB)
  2532  	data, _ := db.Get(key)
  2533  	if len(data) == 0 {
  2534  		return common.Hash{}
  2535  	}
  2536  	return common.BytesToHash(data)
  2537  }
  2538  
  2539  // WriteLastIndexedBlockNumber writes the block number which is indexed lastly.
  2540  func (dbm *databaseManager) WriteLastIndexedBlockNumber(blockNum uint64) {
  2541  	key := lastIndexedBlockKey
  2542  	db := dbm.getDatabase(bridgeServiceDB)
  2543  	if err := db.Put(key, common.Int64ToByteBigEndian(blockNum)); err != nil {
  2544  		logger.Crit("Failed to store LastIndexedBlockNumber", "blockNumber", blockNum, "err", err)
  2545  	}
  2546  }
  2547  
  2548  // GetLastIndexedBlockNumber returns the last block number which is indexed.
  2549  func (dbm *databaseManager) GetLastIndexedBlockNumber() uint64 {
  2550  	key := lastIndexedBlockKey
  2551  	db := dbm.getDatabase(bridgeServiceDB)
  2552  	data, _ := db.Get(key)
  2553  	if len(data) != 8 {
  2554  		return 0
  2555  	}
  2556  	return binary.BigEndian.Uint64(data)
  2557  }
  2558  
  2559  // WriteAnchoredBlockNumber writes the block number whose data has been anchored to the parent chain.
  2560  func (dbm *databaseManager) WriteAnchoredBlockNumber(blockNum uint64) {
  2561  	key := lastServiceChainTxReceiptKey
  2562  	db := dbm.getDatabase(bridgeServiceDB)
  2563  	if err := db.Put(key, common.Int64ToByteBigEndian(blockNum)); err != nil {
  2564  		logger.Crit("Failed to store LatestServiceChainBlockNum", "blockNumber", blockNum, "err", err)
  2565  	}
  2566  }
  2567  
  2568  // ReadAnchoredBlockNumber returns the latest block number whose data has been anchored to the parent chain.
  2569  func (dbm *databaseManager) ReadAnchoredBlockNumber() uint64 {
  2570  	key := lastServiceChainTxReceiptKey
  2571  	db := dbm.getDatabase(bridgeServiceDB)
  2572  	data, _ := db.Get(key)
  2573  	if len(data) != 8 {
  2574  		return 0
  2575  	}
  2576  	return binary.BigEndian.Uint64(data)
  2577  }
  2578  
  2579  // WriteHandleTxHashFromRequestTxHash writes handle value transfer tx hash
  2580  // with corresponding request value transfer tx hash.
  2581  func (dbm *databaseManager) WriteHandleTxHashFromRequestTxHash(rTx, hTx common.Hash) {
  2582  	db := dbm.getDatabase(bridgeServiceDB)
  2583  	key := valueTransferTxHashKey(rTx)
  2584  	if err := db.Put(key, hTx.Bytes()); err != nil {
  2585  		logger.Crit("Failed to store handle value transfer tx hash", "request tx hash", rTx.String(), "handle tx hash", hTx.String(), "err", err)
  2586  	}
  2587  }
  2588  
  2589  // ReadHandleTxHashFromRequestTxHash returns handle value transfer tx hash
  2590  // with corresponding the given request value transfer tx hash.
  2591  func (dbm *databaseManager) ReadHandleTxHashFromRequestTxHash(rTx common.Hash) common.Hash {
  2592  	key := valueTransferTxHashKey(rTx)
  2593  	db := dbm.getDatabase(bridgeServiceDB)
  2594  	data, _ := db.Get(key)
  2595  	if len(data) == 0 {
  2596  		return common.Hash{}
  2597  	}
  2598  	return common.BytesToHash(data)
  2599  }
  2600  
  2601  // WriteReceiptFromParentChain writes a receipt received from parent chain to child chain
  2602  // with corresponding block hash. It assumes that a child chain has only one parent chain.
  2603  func (dbm *databaseManager) WriteReceiptFromParentChain(blockHash common.Hash, receipt *types.Receipt) {
  2604  	receiptForStorage := (*types.ReceiptForStorage)(receipt)
  2605  	db := dbm.getDatabase(bridgeServiceDB)
  2606  	byte, err := rlp.EncodeToBytes(receiptForStorage)
  2607  	if err != nil {
  2608  		logger.Crit("Failed to RLP encode receipt received from parent chain", "receipt.TxHash", receipt.TxHash, "err", err)
  2609  	}
  2610  	key := receiptFromParentChainKey(blockHash)
  2611  	if err = db.Put(key, byte); err != nil {
  2612  		logger.Crit("Failed to store receipt received from parent chain", "receipt.TxHash", receipt.TxHash, "err", err)
  2613  	}
  2614  }
  2615  
  2616  // ReadReceiptFromParentChain returns a receipt received from parent chain to child chain
  2617  // with corresponding block hash. It assumes that a child chain has only one parent chain.
  2618  func (dbm *databaseManager) ReadReceiptFromParentChain(blockHash common.Hash) *types.Receipt {
  2619  	db := dbm.getDatabase(bridgeServiceDB)
  2620  	key := receiptFromParentChainKey(blockHash)
  2621  	data, _ := db.Get(key)
  2622  	if data == nil || len(data) == 0 {
  2623  		return nil
  2624  	}
  2625  	serviceChainTxReceipt := new(types.ReceiptForStorage)
  2626  	if err := rlp.Decode(bytes.NewReader(data), serviceChainTxReceipt); err != nil {
  2627  		logger.Error("Invalid Receipt RLP received from parent chain", "err", err)
  2628  		return nil
  2629  	}
  2630  	return (*types.Receipt)(serviceChainTxReceipt)
  2631  }
  2632  
  2633  // WriteParentOperatorFeePayer writes a fee payer of parent operator.
  2634  func (dbm *databaseManager) WriteParentOperatorFeePayer(feePayer common.Address) {
  2635  	key := parentOperatorFeePayerPrefix
  2636  	db := dbm.getDatabase(bridgeServiceDB)
  2637  
  2638  	if err := db.Put(key, feePayer.Bytes()); err != nil {
  2639  		logger.Crit("Failed to store parent operator fee payer", "feePayer", feePayer.String(), "err", err)
  2640  	}
  2641  }
  2642  
  2643  // ReadParentOperatorFeePayer returns a fee payer of parent operator.
  2644  func (dbm *databaseManager) ReadParentOperatorFeePayer() common.Address {
  2645  	key := parentOperatorFeePayerPrefix
  2646  	db := dbm.getDatabase(bridgeServiceDB)
  2647  	data, _ := db.Get(key)
  2648  	if data == nil || len(data) == 0 {
  2649  		return common.Address{}
  2650  	}
  2651  	return common.BytesToAddress(data)
  2652  }
  2653  
  2654  // WriteChildOperatorFeePayer writes a fee payer of child operator.
  2655  func (dbm *databaseManager) WriteChildOperatorFeePayer(feePayer common.Address) {
  2656  	key := childOperatorFeePayerPrefix
  2657  	db := dbm.getDatabase(bridgeServiceDB)
  2658  
  2659  	if err := db.Put(key, feePayer.Bytes()); err != nil {
  2660  		logger.Crit("Failed to store parent operator fee payer", "feePayer", feePayer.String(), "err", err)
  2661  	}
  2662  }
  2663  
  2664  // ReadChildOperatorFeePayer returns a fee payer of child operator.
  2665  func (dbm *databaseManager) ReadChildOperatorFeePayer() common.Address {
  2666  	key := childOperatorFeePayerPrefix
  2667  	db := dbm.getDatabase(bridgeServiceDB)
  2668  	data, _ := db.Get(key)
  2669  	if data == nil || len(data) == 0 {
  2670  		return common.Address{}
  2671  	}
  2672  	return common.BytesToAddress(data)
  2673  }
  2674  
// ClearHeaderChainCache calls cacheManager.clearHeaderChainCache to flush out caches of HeaderChain.
// It is a thin delegate; the actual cache bookkeeping lives in the cache manager.
func (dbm *databaseManager) ClearHeaderChainCache() {
	dbm.cm.clearHeaderChainCache()
}
  2679  
// ClearBlockChainCache calls cacheManager.clearBlockChainCache to flush out caches of BlockChain.
// It is a thin delegate; the actual cache bookkeeping lives in the cache manager.
func (dbm *databaseManager) ClearBlockChainCache() {
	dbm.cm.clearBlockChainCache()
}
  2684  
// ReadTxAndLookupInfoInCache returns a cached transaction together with its
// lookup info via the cache manager. The extra return values appear to be the
// containing block's hash and two indices — confirm semantics in cacheManager.
func (dbm *databaseManager) ReadTxAndLookupInfoInCache(hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
	return dbm.cm.readTxAndLookupInfoInCache(hash)
}
  2688  
// ReadBlockReceiptsInCache returns the receipts cached for the given block
// hash via the cache manager.
func (dbm *databaseManager) ReadBlockReceiptsInCache(blockHash common.Hash) types.Receipts {
	return dbm.cm.readBlockReceiptsInCache(blockHash)
}
  2692  
// ReadTxReceiptInCache returns the receipt cached for the given transaction
// hash via the cache manager.
func (dbm *databaseManager) ReadTxReceiptInCache(txHash common.Hash) *types.Receipt {
	return dbm.cm.readTxReceiptInCache(txHash)
}
  2696  
// WriteCliqueSnapshot stores an encoded clique snapshot in MiscDB under a key
// derived from the snapshot's block hash.
func (dbm *databaseManager) WriteCliqueSnapshot(snapshotBlockHash common.Hash, encodedSnapshot []byte) error {
	db := dbm.getDatabase(MiscDB)
	return db.Put(snapshotKey(snapshotBlockHash), encodedSnapshot)
}
  2701  
// ReadCliqueSnapshot retrieves the encoded clique snapshot stored for the
// given block hash from MiscDB; the database's not-found error is propagated.
func (dbm *databaseManager) ReadCliqueSnapshot(snapshotBlockHash common.Hash) ([]byte, error) {
	db := dbm.getDatabase(MiscDB)
	return db.Get(snapshotKey(snapshotBlockHash))
}
  2706  
  2707  func (dbm *databaseManager) WriteGovernance(data map[string]interface{}, num uint64) error {
  2708  	db := dbm.getDatabase(MiscDB)
  2709  	b, err := json.Marshal(data)
  2710  	if err != nil {
  2711  		return err
  2712  	}
  2713  	if err := dbm.WriteGovernanceIdx(num); err != nil {
  2714  		if err == errGovIdxAlreadyExist {
  2715  			// Overwriting existing data is not allowed, but the attempt is not considered as a failure.
  2716  			return nil
  2717  		}
  2718  		return err
  2719  	}
  2720  	return db.Put(makeKey(governancePrefix, num), b)
  2721  }
  2722  
  2723  func (dbm *databaseManager) DeleteGovernance(num uint64) {
  2724  	db := dbm.getDatabase(MiscDB)
  2725  	if err := dbm.deleteLastGovernance(num); err != nil {
  2726  		logger.Crit("Failed to delete Governance index", "err", err)
  2727  	}
  2728  	if err := db.Delete(makeKey(governancePrefix, num)); err != nil {
  2729  		logger.Crit("Failed to delete Governance", "err", err)
  2730  	}
  2731  }
  2732  
  2733  func (dbm *databaseManager) WriteGovernanceIdx(num uint64) error {
  2734  	db := dbm.getDatabase(MiscDB)
  2735  	newSlice := make([]uint64, 0)
  2736  
  2737  	if data, err := db.Get(governanceHistoryKey); err == nil {
  2738  		if err = json.Unmarshal(data, &newSlice); err != nil {
  2739  			return err
  2740  		}
  2741  	}
  2742  
  2743  	if len(newSlice) > 0 && num <= newSlice[len(newSlice)-1] {
  2744  		logger.Error("The same or more recent governance index exist. Skip writing governance index",
  2745  			"newIdx", num, "govIdxes", newSlice)
  2746  		return errGovIdxAlreadyExist
  2747  	}
  2748  
  2749  	newSlice = append(newSlice, num)
  2750  
  2751  	data, err := json.Marshal(newSlice)
  2752  	if err != nil {
  2753  		return err
  2754  	}
  2755  	return db.Put(governanceHistoryKey, data)
  2756  }
  2757  
  2758  // deleteLastGovernance deletes the last governanceIdx only if it is equal to `num`
  2759  func (dbm *databaseManager) deleteLastGovernance(num uint64) error {
  2760  	db := dbm.getDatabase(MiscDB)
  2761  	idxHistory, err := dbm.ReadRecentGovernanceIdx(0)
  2762  	if err != nil {
  2763  		return nil // Do nothing and return nil if no recent index found
  2764  	}
  2765  	end := len(idxHistory)
  2766  	if idxHistory[end-1] != num {
  2767  		return nil // Do nothing and return nil if the target number does not match the tip number
  2768  	}
  2769  	data, err := json.Marshal(idxHistory[0 : end-1])
  2770  	if err != nil {
  2771  		return err
  2772  	}
  2773  	return db.Put(governanceHistoryKey, data)
  2774  }
  2775  
  2776  func (dbm *databaseManager) ReadGovernance(num uint64) (map[string]interface{}, error) {
  2777  	db := dbm.getDatabase(MiscDB)
  2778  
  2779  	if data, err := db.Get(makeKey(governancePrefix, num)); err != nil {
  2780  		return nil, err
  2781  	} else {
  2782  		result := make(map[string]interface{})
  2783  		if e := json.Unmarshal(data, &result); e != nil {
  2784  			return nil, e
  2785  		}
  2786  		return result, nil
  2787  	}
  2788  }
  2789  
  2790  // ReadRecentGovernanceIdx returns latest `count` number of indices. If `count` is 0, it returns all indices.
  2791  func (dbm *databaseManager) ReadRecentGovernanceIdx(count int) ([]uint64, error) {
  2792  	db := dbm.getDatabase(MiscDB)
  2793  
  2794  	if history, err := db.Get(governanceHistoryKey); err != nil {
  2795  		return nil, err
  2796  	} else {
  2797  		idxHistory := make([]uint64, 0)
  2798  		if e := json.Unmarshal(history, &idxHistory); e != nil {
  2799  			return nil, e
  2800  		}
  2801  
  2802  		// Make sure idxHistory should be in ascending order
  2803  		sort.Slice(idxHistory, func(i, j int) bool {
  2804  			return idxHistory[i] < idxHistory[j]
  2805  		})
  2806  
  2807  		max := 0
  2808  		leng := len(idxHistory)
  2809  		if leng < count || count == 0 {
  2810  			max = leng
  2811  		} else {
  2812  			max = count
  2813  		}
  2814  		if count > 0 {
  2815  			return idxHistory[leng-max:], nil
  2816  		}
  2817  		return idxHistory, nil
  2818  	}
  2819  }
  2820  
  2821  // ReadGovernanceAtNumber returns the block number and governance information which to be used for the block `num`
  2822  func (dbm *databaseManager) ReadGovernanceAtNumber(num uint64, epoch uint64) (uint64, map[string]interface{}, error) {
  2823  	minimum := num - (num % epoch)
  2824  	if minimum >= epoch {
  2825  		minimum -= epoch
  2826  	}
  2827  	totalIdx, _ := dbm.ReadRecentGovernanceIdx(0)
  2828  	for i := len(totalIdx) - 1; i >= 0; i-- {
  2829  		if totalIdx[i] <= minimum {
  2830  			result, err := dbm.ReadGovernance(totalIdx[i])
  2831  			return totalIdx[i], result, err
  2832  		}
  2833  	}
  2834  	return 0, nil, errors.New("No governance data found")
  2835  }
  2836  
// WriteGovernanceState stores the raw governance state bytes under
// governanceStateKey in MiscDB.
func (dbm *databaseManager) WriteGovernanceState(b []byte) error {
	db := dbm.getDatabase(MiscDB)
	return db.Put(governanceStateKey, b)
}
  2841  
// ReadGovernanceState retrieves the raw governance state bytes stored under
// governanceStateKey in MiscDB; the database's not-found error is propagated.
func (dbm *databaseManager) ReadGovernanceState() ([]byte, error) {
	db := dbm.getDatabase(MiscDB)
	return db.Get(governanceStateKey)
}
  2846  
// WriteChainDataFetcherCheckpoint stores the chaindata-fetcher checkpoint
// block number as an 8-byte big-endian value in MiscDB.
func (dbm *databaseManager) WriteChainDataFetcherCheckpoint(checkpoint uint64) error {
	db := dbm.getDatabase(MiscDB)
	return db.Put(chaindatafetcherCheckpointKey, common.Int64ToByteBigEndian(checkpoint))
}
  2851  
  2852  func (dbm *databaseManager) ReadChainDataFetcherCheckpoint() (uint64, error) {
  2853  	db := dbm.getDatabase(MiscDB)
  2854  	data, err := db.Get(chaindatafetcherCheckpointKey)
  2855  	if err != nil {
  2856  		// if the key is not in the database, 0 is returned as the checkpoint
  2857  		if err == leveldb.ErrNotFound || err == badger.ErrKeyNotFound ||
  2858  			strings.Contains(err.Error(), "not found") { // memoryDB
  2859  			return 0, nil
  2860  		}
  2861  		return 0, err
  2862  	}
  2863  	// in case that error is nil, but the data does not exist
  2864  	if len(data) != 8 {
  2865  		logger.Warn("the returned error is nil, but the data is wrong", "len(data)", len(data))
  2866  		return 0, nil
  2867  	}
  2868  	return binary.BigEndian.Uint64(data), nil
  2869  }
  2870  
// NewSnapshotDBBatch returns a write batch bound to the snapshot database,
// wrapped with the snapshot-specific helper methods of SnapshotDBBatch.
func (dbm *databaseManager) NewSnapshotDBBatch() SnapshotDBBatch {
	return &snapshotDBBatch{dbm.NewBatch(SnapshotDB)}
}
  2874  
// SnapshotDBBatch is a write batch over the snapshot database that exposes
// the same snapshot write/delete operations as DBManager, buffered in the
// underlying Batch until it is written.
type SnapshotDBBatch interface {
	Batch

	WriteSnapshotRoot(root common.Hash)
	DeleteSnapshotRoot()

	WriteAccountSnapshot(hash common.Hash, entry []byte)
	DeleteAccountSnapshot(hash common.Hash)

	WriteStorageSnapshot(accountHash, storageHash common.Hash, entry []byte)
	DeleteStorageSnapshot(accountHash, storageHash common.Hash)

	WriteSnapshotJournal(journal []byte)
	DeleteSnapshotJournal()

	WriteSnapshotGenerator(generator []byte)
	DeleteSnapshotGenerator()

	WriteSnapshotDisabled()
	DeleteSnapshotDisabled()

	WriteSnapshotRecoveryNumber(number uint64)
	DeleteSnapshotRecoveryNumber()
}
  2899  
// snapshotDBBatch implements SnapshotDBBatch by embedding a Batch that is
// expected to be bound to the snapshot database (see NewSnapshotDBBatch).
type snapshotDBBatch struct {
	Batch
}
  2903  
// WriteSnapshotRoot stores the snapshot root hash through the batch.
func (batch *snapshotDBBatch) WriteSnapshotRoot(root common.Hash) {
	writeSnapshotRoot(batch, root)
}

// DeleteSnapshotRoot removes the snapshot root hash through the batch.
func (batch *snapshotDBBatch) DeleteSnapshotRoot() {
	deleteSnapshotRoot(batch)
}

// WriteAccountSnapshot stores an account snapshot entry through the batch.
func (batch *snapshotDBBatch) WriteAccountSnapshot(hash common.Hash, entry []byte) {
	writeAccountSnapshot(batch, hash, entry)
}

// DeleteAccountSnapshot removes an account snapshot entry through the batch.
func (batch *snapshotDBBatch) DeleteAccountSnapshot(hash common.Hash) {
	deleteAccountSnapshot(batch, hash)
}

// WriteStorageSnapshot stores a storage snapshot entry through the batch.
func (batch *snapshotDBBatch) WriteStorageSnapshot(accountHash, storageHash common.Hash, entry []byte) {
	writeStorageSnapshot(batch, accountHash, storageHash, entry)
}

// DeleteStorageSnapshot removes a storage snapshot entry through the batch.
func (batch *snapshotDBBatch) DeleteStorageSnapshot(accountHash, storageHash common.Hash) {
	deleteStorageSnapshot(batch, accountHash, storageHash)
}

// WriteSnapshotJournal stores the serialized snapshot journal through the batch.
func (batch *snapshotDBBatch) WriteSnapshotJournal(journal []byte) {
	writeSnapshotJournal(batch, journal)
}

// DeleteSnapshotJournal removes the snapshot journal through the batch.
func (batch *snapshotDBBatch) DeleteSnapshotJournal() {
	deleteSnapshotJournal(batch)
}

// WriteSnapshotGenerator stores the snapshot generator state through the batch.
func (batch *snapshotDBBatch) WriteSnapshotGenerator(generator []byte) {
	writeSnapshotGenerator(batch, generator)
}

// DeleteSnapshotGenerator removes the snapshot generator state through the batch.
func (batch *snapshotDBBatch) DeleteSnapshotGenerator() {
	deleteSnapshotGenerator(batch)
}

// WriteSnapshotDisabled stores the snapshot-disabled flag through the batch.
func (batch *snapshotDBBatch) WriteSnapshotDisabled() {
	writeSnapshotDisabled(batch)
}

// DeleteSnapshotDisabled removes the snapshot-disabled flag through the batch.
func (batch *snapshotDBBatch) DeleteSnapshotDisabled() {
	deleteSnapshotDisabled(batch)
}

// WriteSnapshotRecoveryNumber stores the snapshot recovery block number through the batch.
func (batch *snapshotDBBatch) WriteSnapshotRecoveryNumber(number uint64) {
	writeSnapshotRecoveryNumber(batch, number)
}

// DeleteSnapshotRecoveryNumber removes the snapshot recovery block number through the batch.
func (batch *snapshotDBBatch) DeleteSnapshotRecoveryNumber() {
	deleteSnapshotRecoveryNumber(batch)
}
  2959  
// writeSnapshotRoot stores the snapshot root hash; shared by the manager and batch paths.
func writeSnapshotRoot(db KeyValueWriter, root common.Hash) {
	if err := db.Put(snapshotRootKey, root[:]); err != nil {
		logger.Crit("Failed to store snapshot root", "err", err)
	}
}

// deleteSnapshotRoot removes the snapshot root hash.
func deleteSnapshotRoot(db KeyValueWriter) {
	if err := db.Delete(snapshotRootKey); err != nil {
		logger.Crit("Failed to remove snapshot root", "err", err)
	}
}

// writeAccountSnapshot stores an account snapshot entry keyed by the account hash.
func writeAccountSnapshot(db KeyValueWriter, hash common.Hash, entry []byte) {
	if err := db.Put(AccountSnapshotKey(hash), entry); err != nil {
		logger.Crit("Failed to store account snapshot", "err", err)
	}
}

// deleteAccountSnapshot removes an account snapshot entry.
func deleteAccountSnapshot(db KeyValueWriter, hash common.Hash) {
	if err := db.Delete(AccountSnapshotKey(hash)); err != nil {
		logger.Crit("Failed to delete account snapshot", "err", err)
	}
}

// writeStorageSnapshot stores a storage snapshot entry keyed by account and storage hashes.
func writeStorageSnapshot(db KeyValueWriter, accountHash, storageHash common.Hash, entry []byte) {
	if err := db.Put(StorageSnapshotKey(accountHash, storageHash), entry); err != nil {
		logger.Crit("Failed to store storage snapshot", "err", err)
	}
}

// deleteStorageSnapshot removes a storage snapshot entry.
func deleteStorageSnapshot(db KeyValueWriter, accountHash, storageHash common.Hash) {
	if err := db.Delete(StorageSnapshotKey(accountHash, storageHash)); err != nil {
		logger.Crit("Failed to delete storage snapshot", "err", err)
	}
}

// writeSnapshotJournal stores the serialized snapshot journal.
func writeSnapshotJournal(db KeyValueWriter, journal []byte) {
	if err := db.Put(snapshotJournalKey, journal); err != nil {
		logger.Crit("Failed to store snapshot journal", "err", err)
	}
}

// deleteSnapshotJournal removes the snapshot journal.
func deleteSnapshotJournal(db KeyValueWriter) {
	if err := db.Delete(snapshotJournalKey); err != nil {
		logger.Crit("Failed to remove snapshot journal", "err", err)
	}
}

// writeSnapshotGenerator stores the serialized snapshot generator state.
func writeSnapshotGenerator(db KeyValueWriter, generator []byte) {
	if err := db.Put(SnapshotGeneratorKey, generator); err != nil {
		logger.Crit("Failed to store snapshot generator", "err", err)
	}
}

// deleteSnapshotGenerator removes the snapshot generator state.
func deleteSnapshotGenerator(db KeyValueWriter) {
	if err := db.Delete(SnapshotGeneratorKey); err != nil {
		logger.Crit("Failed to remove snapshot generator", "err", err)
	}
}

// writeSnapshotDisabled stores the snapshot-disabled marker (the value is arbitrary).
func writeSnapshotDisabled(db KeyValueWriter) {
	if err := db.Put(snapshotDisabledKey, []byte("42")); err != nil {
		logger.Crit("Failed to store snapshot disabled flag", "err", err)
	}
}

// deleteSnapshotDisabled removes the snapshot-disabled marker.
func deleteSnapshotDisabled(db KeyValueWriter) {
	if err := db.Delete(snapshotDisabledKey); err != nil {
		logger.Crit("Failed to remove snapshot disabled flag", "err", err)
	}
}

// writeSnapshotRecoveryNumber stores the recovery block number as an 8-byte big-endian value.
func writeSnapshotRecoveryNumber(db KeyValueWriter, number uint64) {
	var buf [8]byte
	binary.BigEndian.PutUint64(buf[:], number)
	if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil {
		logger.Crit("Failed to store snapshot recovery number", "err", err)
	}
}

// deleteSnapshotRecoveryNumber removes the recovery block number.
func deleteSnapshotRecoveryNumber(db KeyValueWriter) {
	if err := db.Delete(snapshotRecoveryKey); err != nil {
		logger.Crit("Failed to remove snapshot recovery number", "err", err)
	}
}