github.com/klaytn/klaytn@v1.10.2/storage/database/db_manager.go

     1  // Copyright 2018 The klaytn Authors
     2  // This file is part of the klaytn library.
     3  //
     4  // The klaytn library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The klaytn library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the klaytn library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package database
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/binary"
    22  	"encoding/json"
    23  	"math/big"
    24  	"os"
    25  	"path/filepath"
    26  	"sort"
    27  	"strconv"
    28  	"strings"
    29  	"sync"
    30  
    31  	"github.com/dgraph-io/badger"
    32  	"github.com/klaytn/klaytn/blockchain/types"
    33  	"github.com/klaytn/klaytn/common"
    34  	"github.com/klaytn/klaytn/log"
    35  	"github.com/klaytn/klaytn/params"
    36  	"github.com/klaytn/klaytn/rlp"
    37  	"github.com/pkg/errors"
    38  	"github.com/syndtr/goleveldb/leveldb"
    39  )
    40  
    41  var (
    42  	logger = log.NewModuleLogger(log.StorageDatabase)
    43  
    44  	errGovIdxAlreadyExist = errors.New("a governance idx of a more recent or the same block already exists")
    45  
    46  	HeadBlockQ backupHashQueue
    47  	FastBlockQ backupHashQueue
    48  )
    49  
    50  type DBManager interface {
    51  	IsParallelDBWrite() bool
    52  	IsSingle() bool
    53  	InMigration() bool
    54  	MigrationBlockNumber() uint64
    55  	getStateTrieMigrationInfo() uint64
    56  
    57  	Close()
    58  	NewBatch(dbType DBEntryType) Batch
    59  	getDBDir(dbEntry DBEntryType) string
    60  	setDBDir(dbEntry DBEntryType, newDBDir string)
    61  	setStateTrieMigrationStatus(uint64)
    62  	GetMemDB() *MemDB
    63  	GetDBConfig() *DBConfig
    64  	getDatabase(DBEntryType) Database
    65  	CreateMigrationDBAndSetStatus(blockNum uint64) error
    66  	FinishStateMigration(succeed bool) chan struct{}
    67  	GetStateTrieDB() Database
    68  	GetStateTrieMigrationDB() Database
    69  	GetMiscDB() Database
    70  	GetSnapshotDB() Database
    71  
    72  	// from accessors_chain.go
    73  	ReadCanonicalHash(number uint64) common.Hash
    74  	WriteCanonicalHash(hash common.Hash, number uint64)
    75  	DeleteCanonicalHash(number uint64)
    76  
    77  	ReadAllHashes(number uint64) []common.Hash
    78  	ReadHeadHeaderHash() common.Hash
    79  	WriteHeadHeaderHash(hash common.Hash)
    80  
    81  	ReadHeadBlockHash() common.Hash
    82  	ReadHeadBlockBackupHash() common.Hash
    83  	WriteHeadBlockHash(hash common.Hash)
    84  
    85  	ReadHeadFastBlockHash() common.Hash
    86  	ReadHeadFastBlockBackupHash() common.Hash
    87  	WriteHeadFastBlockHash(hash common.Hash)
    88  
    89  	ReadFastTrieProgress() uint64
    90  	WriteFastTrieProgress(count uint64)
    91  
    92  	HasHeader(hash common.Hash, number uint64) bool
    93  	ReadHeader(hash common.Hash, number uint64) *types.Header
    94  	ReadHeaderRLP(hash common.Hash, number uint64) rlp.RawValue
    95  	WriteHeader(header *types.Header)
    96  	DeleteHeader(hash common.Hash, number uint64)
    97  	ReadHeaderNumber(hash common.Hash) *uint64
    98  
    99  	HasBody(hash common.Hash, number uint64) bool
   100  	ReadBody(hash common.Hash, number uint64) *types.Body
   101  	ReadBodyInCache(hash common.Hash) *types.Body
   102  	ReadBodyRLP(hash common.Hash, number uint64) rlp.RawValue
   103  	ReadBodyRLPByHash(hash common.Hash) rlp.RawValue
   104  	WriteBody(hash common.Hash, number uint64, body *types.Body)
   105  	PutBodyToBatch(batch Batch, hash common.Hash, number uint64, body *types.Body)
   106  	WriteBodyRLP(hash common.Hash, number uint64, rlp rlp.RawValue)
   107  	DeleteBody(hash common.Hash, number uint64)
   108  
   109  	ReadTd(hash common.Hash, number uint64) *big.Int
   110  	WriteTd(hash common.Hash, number uint64, td *big.Int)
   111  	DeleteTd(hash common.Hash, number uint64)
   112  
   113  	ReadReceipt(txHash common.Hash) (*types.Receipt, common.Hash, uint64, uint64)
   114  	ReadReceipts(blockHash common.Hash, number uint64) types.Receipts
   115  	ReadReceiptsByBlockHash(hash common.Hash) types.Receipts
   116  	WriteReceipts(hash common.Hash, number uint64, receipts types.Receipts)
   117  	PutReceiptsToBatch(batch Batch, hash common.Hash, number uint64, receipts types.Receipts)
   118  	DeleteReceipts(hash common.Hash, number uint64)
   119  
   120  	ReadBlock(hash common.Hash, number uint64) *types.Block
   121  	ReadBlockByHash(hash common.Hash) *types.Block
   122  	ReadBlockByNumber(number uint64) *types.Block
   123  	HasBlock(hash common.Hash, number uint64) bool
   124  	WriteBlock(block *types.Block)
   125  	DeleteBlock(hash common.Hash, number uint64)
   126  
   127  	ReadBadBlock(hash common.Hash) *types.Block
   128  	WriteBadBlock(block *types.Block)
   129  	ReadAllBadBlocks() ([]*types.Block, error)
   130  	DeleteBadBlocks()
   131  
   132  	FindCommonAncestor(a, b *types.Header) *types.Header
   133  
   134  	ReadIstanbulSnapshot(hash common.Hash) ([]byte, error)
   135  	WriteIstanbulSnapshot(hash common.Hash, blob []byte) error
   136  
   137  	WriteMerkleProof(key, value []byte)
   138  
   139  	// Bytecodes related operations
   140  	ReadCode(hash common.Hash) []byte
   141  	ReadCodeWithPrefix(hash common.Hash) []byte
   142  	WriteCode(hash common.Hash, code []byte)
   143  	DeleteCode(hash common.Hash)
   144  	HasCode(hash common.Hash) bool
   145  
   146  	// State Trie Database related operations
   147  	ReadCachedTrieNode(hash common.Hash) ([]byte, error)
   148  	ReadCachedTrieNodePreimage(secureKey []byte) ([]byte, error)
   149  	ReadStateTrieNode(key []byte) ([]byte, error)
   150  	HasStateTrieNode(key []byte) (bool, error)
   151  	HasCodeWithPrefix(hash common.Hash) bool
   152  	ReadPreimage(hash common.Hash) []byte
   153  
   154  	// Read StateTrie from new DB
   155  	ReadCachedTrieNodeFromNew(hash common.Hash) ([]byte, error)
   156  	ReadCachedTrieNodePreimageFromNew(secureKey []byte) ([]byte, error)
   157  	ReadStateTrieNodeFromNew(key []byte) ([]byte, error)
   158  	HasStateTrieNodeFromNew(key []byte) (bool, error)
   159  	HasCodeWithPrefixFromNew(hash common.Hash) bool
   160  	ReadPreimageFromNew(hash common.Hash) []byte
   161  
   162  	// Read StateTrie from old DB
   163  	ReadCachedTrieNodeFromOld(hash common.Hash) ([]byte, error)
   164  	ReadCachedTrieNodePreimageFromOld(secureKey []byte) ([]byte, error)
   165  	ReadStateTrieNodeFromOld(key []byte) ([]byte, error)
   166  	HasStateTrieNodeFromOld(key []byte) (bool, error)
   167  	HasCodeWithPrefixFromOld(hash common.Hash) bool
   168  	ReadPreimageFromOld(hash common.Hash) []byte
   169  
   170  	WritePreimages(number uint64, preimages map[common.Hash][]byte)
   171  
   172  	// from accessors_indexes.go
   173  	ReadTxLookupEntry(hash common.Hash) (common.Hash, uint64, uint64)
   174  	WriteTxLookupEntries(block *types.Block)
   175  	WriteAndCacheTxLookupEntries(block *types.Block) error
   176  	PutTxLookupEntriesToBatch(batch Batch, block *types.Block)
   177  	DeleteTxLookupEntry(hash common.Hash)
   178  
   179  	ReadTxAndLookupInfo(hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64)
   180  
   181  	NewSenderTxHashToTxHashBatch() Batch
   182  	PutSenderTxHashToTxHashToBatch(batch Batch, senderTxHash, txHash common.Hash) error
   183  	ReadTxHashFromSenderTxHash(senderTxHash common.Hash) common.Hash
   184  
   185  	ReadBloomBits(bloomBitsKey []byte) ([]byte, error)
   186  	WriteBloomBits(bloomBitsKey []byte, bits []byte) error
   187  
   188  	ReadValidSections() ([]byte, error)
   189  	WriteValidSections(encodedSections []byte)
   190  
   191  	ReadSectionHead(encodedSection []byte) ([]byte, error)
   192  	WriteSectionHead(encodedSection []byte, hash common.Hash)
   193  	DeleteSectionHead(encodedSection []byte)
   194  
   195  	// from accessors_metadata.go
   196  	ReadDatabaseVersion() *uint64
   197  	WriteDatabaseVersion(version uint64)
   198  
   199  	ReadChainConfig(hash common.Hash) *params.ChainConfig
   200  	WriteChainConfig(hash common.Hash, cfg *params.ChainConfig)
   201  
   202  	// from accessors_snapshot.go
   203  	ReadSnapshotJournal() []byte
   204  	WriteSnapshotJournal(journal []byte)
   205  	DeleteSnapshotJournal()
   206  
   207  	ReadSnapshotGenerator() []byte
   208  	WriteSnapshotGenerator(generator []byte)
   209  	DeleteSnapshotGenerator()
   210  
   211  	ReadSnapshotDisabled() bool
   212  	WriteSnapshotDisabled()
   213  	DeleteSnapshotDisabled()
   214  
   215  	ReadSnapshotRecoveryNumber() *uint64
   216  	WriteSnapshotRecoveryNumber(number uint64)
   217  	DeleteSnapshotRecoveryNumber()
   218  
   219  	ReadSnapshotSyncStatus() []byte
   220  	WriteSnapshotSyncStatus(status []byte)
   221  	DeleteSnapshotSyncStatus()
   222  
   223  	ReadSnapshotRoot() common.Hash
   224  	WriteSnapshotRoot(root common.Hash)
   225  	DeleteSnapshotRoot()
   226  
   227  	ReadAccountSnapshot(hash common.Hash) []byte
   228  	WriteAccountSnapshot(hash common.Hash, entry []byte)
   229  	DeleteAccountSnapshot(hash common.Hash)
   230  
   231  	ReadStorageSnapshot(accountHash, storageHash common.Hash) []byte
   232  	WriteStorageSnapshot(accountHash, storageHash common.Hash, entry []byte)
   233  	DeleteStorageSnapshot(accountHash, storageHash common.Hash)
   234  
   235  	NewSnapshotDBIterator(prefix []byte, start []byte) Iterator
   236  
   237  	NewSnapshotDBBatch() SnapshotDBBatch
   238  
   239  	// below operations are used in parent chain side, not child chain side.
   240  	WriteChildChainTxHash(ccBlockHash common.Hash, ccTxHash common.Hash)
   241  	ConvertChildChainBlockHashToParentChainTxHash(scBlockHash common.Hash) common.Hash
   242  
   243  	WriteLastIndexedBlockNumber(blockNum uint64)
   244  	GetLastIndexedBlockNumber() uint64
   245  
   246  	// below operations are used in child chain side, not parent chain side.
   247  	WriteAnchoredBlockNumber(blockNum uint64)
   248  	ReadAnchoredBlockNumber() uint64
   249  
   250  	WriteReceiptFromParentChain(blockHash common.Hash, receipt *types.Receipt)
   251  	ReadReceiptFromParentChain(blockHash common.Hash) *types.Receipt
   252  
   253  	WriteHandleTxHashFromRequestTxHash(rTx, hTx common.Hash)
   254  	ReadHandleTxHashFromRequestTxHash(rTx common.Hash) common.Hash
   255  
   256  	WriteParentOperatorFeePayer(feePayer common.Address)
   257  	WriteChildOperatorFeePayer(feePayer common.Address)
   258  	ReadParentOperatorFeePayer() common.Address
   259  	ReadChildOperatorFeePayer() common.Address
   260  
   261  	// cacheManager related functions.
   262  	ClearHeaderChainCache()
   263  	ClearBlockChainCache()
   264  	ReadTxAndLookupInfoInCache(hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64)
   265  	ReadBlockReceiptsInCache(blockHash common.Hash) types.Receipts
   266  	ReadTxReceiptInCache(txHash common.Hash) *types.Receipt
   267  
   268  	// snapshot in clique(ConsensusClique) consensus
   269  	WriteCliqueSnapshot(snapshotBlockHash common.Hash, encodedSnapshot []byte) error
   270  	ReadCliqueSnapshot(snapshotBlockHash common.Hash) ([]byte, error)
   271  
   272  	// Governance related functions
   273  	WriteGovernance(data map[string]interface{}, num uint64) error
   274  	WriteGovernanceIdx(num uint64) error
   275  	ReadGovernance(num uint64) (map[string]interface{}, error)
   276  	ReadRecentGovernanceIdx(count int) ([]uint64, error)
   277  	ReadGovernanceAtNumber(num uint64, epoch uint64) (uint64, map[string]interface{}, error)
   278  	WriteGovernanceState(b []byte) error
   279  	ReadGovernanceState() ([]byte, error)
   280  	// TODO-Klaytn implement governance DB deletion methods.
   281  
   282  	// StakingInfo related functions
   283  	ReadStakingInfo(blockNum uint64) ([]byte, error)
   284  	WriteStakingInfo(blockNum uint64, stakingInfo []byte) error
   285  	HasStakingInfo(blockNum uint64) (bool, error)
   286  
   287  	// DB migration related function
   288  	StartDBMigration(DBManager) error
   289  
   290  	// ChainDataFetcher checkpoint function
   291  	WriteChainDataFetcherCheckpoint(checkpoint uint64) error
   292  	ReadChainDataFetcherCheckpoint() (uint64, error)
   293  }
   294  
   295  type DBEntryType uint8
   296  
   297  const (
   298  	MiscDB DBEntryType = iota // Do not move MiscDB, which holds the paths of the other DBs.
   299  	headerDB
   300  	BodyDB
   301  	ReceiptsDB
   302  	StateTrieDB
   303  	StateTrieMigrationDB
   304  	TxLookUpEntryDB
   305  	bridgeServiceDB
   306  	SnapshotDB
   307  	// databaseEntryTypeSize should be the last item in this list!!
   308  	databaseEntryTypeSize
   309  )
   310  
   311  type backupHashQueue struct {
   312  	backupHashes [backupHashCnt]common.Hash
   313  	idx          int
   314  }
   315  
   316  func (b *backupHashQueue) push(h common.Hash) {
   317  	b.backupHashes[b.idx%backupHashCnt] = h
   318  	b.idx = (b.idx + 1) % backupHashCnt
   319  }
   320  
   321  func (b *backupHashQueue) pop() common.Hash {
   322  	if b.backupHashes[b.idx] == (common.Hash{}) {
   323  		return common.Hash{}
   324  	}
   325  	return b.backupHashes[b.idx]
   326  }
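// The two methods above form a fixed-size ring: push overwrites the slot at idx and
// advances idx modulo backupHashCnt, so pop returns the hash that was pushed
// backupHashCnt (128) pushes earlier, or an empty hash until the ring has wrapped.
// A minimal behavioural sketch (illustrative only, not part of the package's tests):
//
//	var q backupHashQueue
//	for i := 0; i < backupHashCnt; i++ {
//		if q.pop() != (common.Hash{}) { /* unexpected before the ring has wrapped */ }
//		q.push(common.BytesToHash([]byte{byte(i)}))
//	}
//	// After 128 pushes, pop returns the oldest entry, i.e. the hash pushed first.
//	_ = q.pop()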
   327  
   328  func (et DBEntryType) String() string {
   329  	return dbBaseDirs[et]
   330  }
   331  
   332  const (
   333  	notInMigrationFlag = 0
   334  	inMigrationFlag    = 1
   335  	backupHashCnt      = 128
   336  )
   337  
   338  var dbBaseDirs = [databaseEntryTypeSize]string{
   339  	"misc", // do not move misc
   340  	"header",
   341  	"body",
   342  	"receipts",
   343  	"statetrie",
   344  	"statetrie_migrated", // "statetrie_migrated_#N" path will be used. (#N is a migrated block number.)
   345  	"txlookup",
   346  	"bridgeservice",
   347  	"snapshot",
   348  }
   349  
   350  // Sum of dbConfigRatio should be 100.
   351  // Otherwise, logger.Crit will be called at checkDBEntryConfigRatio.
   352  var dbConfigRatio = [databaseEntryTypeSize]int{
   353  	2,  // MiscDB
   354  	5,  // headerDB
   355  	5,  // BodyDB
   356  	5,  // ReceiptsDB
   357  	40, // StateTrieDB
   358  	37, // StateTrieMigrationDB
   359  	2,  // TxLookUpEntryDB
   360  	1,  // bridgeServiceDB
   361  	3,  // SnapshotDB
   362  }
   363  
   364  // checkDBEntryConfigRatio checks if sum of dbConfigRatio is 100.
   365  // If it isn't, logger.Crit is called.
   366  func checkDBEntryConfigRatio() {
   367  	entryConfigRatioSum := 0
   368  	for i := 0; i < int(databaseEntryTypeSize); i++ {
   369  		entryConfigRatioSum += dbConfigRatio[i]
   370  	}
   371  	if entryConfigRatioSum != 100 {
   372  		logger.Crit("Sum of dbConfigRatio elements should be 100", "actual", entryConfigRatioSum)
   373  	}
   374  }
   375  
   376  // getDBEntryConfig returns a new DBConfig derived from the original DBConfig for the given DBEntryType and dbDir.
   377  // It adjusts the configuration according to the ratio specified in dbConfigRatio and the given dbDir.
   378  func getDBEntryConfig(originalDBC *DBConfig, i DBEntryType, dbDir string) *DBConfig {
   379  	newDBC := *originalDBC
   380  	ratio := dbConfigRatio[i]
   381  
   382  	newDBC.LevelDBCacheSize = originalDBC.LevelDBCacheSize * ratio / 100
   383  	newDBC.OpenFilesLimit = originalDBC.OpenFilesLimit * ratio / 100
   384  
   385  	// Update dir to each Database specific directory.
   386  	newDBC.Dir = filepath.Join(originalDBC.Dir, dbDir)
   387  	// Update the DynamoDB table name to a Database-specific name.
   388  	if newDBC.DynamoDBConfig != nil {
   389  		newDynamoDBConfig := *originalDBC.DynamoDBConfig
   390  		newDynamoDBConfig.TableName += "-" + dbDir
   391  		newDBC.DynamoDBConfig = &newDynamoDBConfig
   392  	}
   393  
   394  	return &newDBC
   395  }
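// For example (an illustrative sketch with made-up numbers, not recommended values):
// with an original LevelDBCacheSize of 1024 and OpenFilesLimit of 1000, the StateTrieDB
// entry (ratio 40) receives 1024*40/100 = 409 of cache and 1000*40/100 = 400 open files,
// and its directory becomes <Dir>/statetrie.
//
//	original := &DBConfig{Dir: "chaindata", LevelDBCacheSize: 1024, OpenFilesLimit: 1000}
//	stateTrieDBC := getDBEntryConfig(original, StateTrieDB, dbBaseDirs[StateTrieDB])
//	// stateTrieDBC.LevelDBCacheSize == 409, stateTrieDBC.OpenFilesLimit == 400
//	// stateTrieDBC.Dir == filepath.Join("chaindata", "statetrie")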
   396  
   397  type databaseManager struct {
   398  	config *DBConfig
   399  	dbs    []Database
   400  	cm     *cacheManager
   401  
   402  	// TODO-Klaytn need to refine below.
   403  	// -merge status variable
   404  	lockInMigration      sync.RWMutex
   405  	inMigration          bool
   406  	migrationBlockNumber uint64
   407  }
   408  
   409  func NewMemoryDBManager() DBManager {
   410  	dbc := &DBConfig{DBType: MemoryDB}
   411  
   412  	dbm := databaseManager{
   413  		config: dbc,
   414  		dbs:    make([]Database, 1, 1),
   415  		cm:     newCacheManager(),
   416  	}
   417  	dbm.dbs[0] = NewMemDB()
   418  
   419  	return &dbm
   420  }
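// NewMemoryDBManager is mainly handy in unit tests, where a throwaway in-memory backend
// is enough. A minimal usage sketch (the surrounding test code is assumed, not part of
// this package):
//
//	dbm := NewMemoryDBManager()
//	defer dbm.Close()
//	dbm.WriteCanonicalHash(common.HexToHash("0x01"), 1)
//	_ = dbm.ReadCanonicalHash(1) // returns the hash written above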
   421  
   422  // DBConfig handles database related configurations.
   423  type DBConfig struct {
   424  	// General configurations for all types of DB.
   425  	Dir                 string
   426  	DBType              DBType
   427  	SingleDB            bool // whether dbs (such as MiscDB, headerDB and etc) share one physical DB
   428  	NumStateTrieShards  uint // the number of shards of state trie db
   429  	ParallelDBWrite     bool
   430  	OpenFilesLimit      int
   431  	EnableDBPerfMetrics bool // If true, read and write performance will be logged
   432  
   433  	// LevelDB related configurations.
   434  	LevelDBCacheSize   int // LevelDBCacheSize = BlockCacheCapacity + WriteBuffer
   435  	LevelDBCompression LevelDBCompressionType
   436  	LevelDBBufferPool  bool
   437  
   438  	// DynamoDB related configurations
   439  	DynamoDBConfig *DynamoDBConfig
   440  }
   441  
   442  const dbMetricPrefix = "klay/db/chaindata/"
   443  
   444  // singleDatabaseDBManager returns a DBManager in which all DB entries share
   445  // one single physical Database.
   446  func singleDatabaseDBManager(dbc *DBConfig) (DBManager, error) {
   447  	dbm := newDatabaseManager(dbc)
   448  	db, err := newDatabase(dbc, 0)
   449  	if err != nil {
   450  		return nil, err
   451  	}
   452  
   453  	db.Meter(dbMetricPrefix)
   454  	for i := 0; i < int(databaseEntryTypeSize); i++ {
   455  		dbm.dbs[i] = db
   456  	}
   457  	return dbm, nil
   458  }
   459  
   460  // newMiscDB returns the misc Database. If it does not exist, the function creates it before returning.
   461  func newMiscDB(dbc *DBConfig) Database {
   462  	newDBC := getDBEntryConfig(dbc, MiscDB, dbBaseDirs[MiscDB])
   463  	db, err := newDatabase(newDBC, MiscDB)
   464  	if err != nil {
   465  		logger.Crit("Failed while generating a MISC database", "err", err)
   466  	}
   467  
   468  	db.Meter(dbMetricPrefix + dbBaseDirs[MiscDB] + "/")
   469  	return db
   470  }
   471  
   472  // databaseDBManager returns a DBManager which handles multiple Databases.
   473  // Each DB entry gets its own separate Database.
   474  func databaseDBManager(dbc *DBConfig) (*databaseManager, error) {
   475  	dbm := newDatabaseManager(dbc)
   476  	var db Database
   477  	var err error
   478  
   479  	// Create Misc DB first to get the DB directory of stateTrieDB.
   480  	miscDB := newMiscDB(dbc)
   481  	dbm.dbs[MiscDB] = miscDB
   482  
   483  	// Create other DBs
   484  	for et := int(MiscDB) + 1; et < int(databaseEntryTypeSize); et++ {
   485  		entryType := DBEntryType(et)
   486  		dir := dbm.getDBDir(entryType)
   487  
   488  		switch entryType {
   489  		case StateTrieMigrationDB:
   490  			if dir == dbBaseDirs[StateTrieMigrationDB] {
   491  				// If there is no migration DB, skip setting it up.
   492  				continue
   493  			}
   494  			fallthrough
   495  		case StateTrieDB:
   496  			newDBC := getDBEntryConfig(dbc, entryType, dir)
   497  			if dbc.NumStateTrieShards > 1 && !dbc.DBType.selfShardable() { // create a sharded DB only if the DB type cannot shard itself
   498  				db, err = newShardedDB(newDBC, entryType, dbc.NumStateTrieShards)
   499  			} else {
   500  				db, err = newDatabase(newDBC, entryType)
   501  			}
   502  		default:
   503  			newDBC := getDBEntryConfig(dbc, entryType, dir)
   504  			db, err = newDatabase(newDBC, entryType)
   505  		}
   506  
   507  		if err != nil {
   508  			logger.Crit("Failed while generating databases", "DBType", dbBaseDirs[et], "err", err)
   509  		}
   510  
   511  		dbm.dbs[et] = db
   512  		db.Meter(dbMetricPrefix + dbBaseDirs[et] + "/") // Each database collects metrics independently.
   513  	}
   514  	return dbm, nil
   515  }
   516  
   517  // newDatabase returns a Database implementation for the given DBConfig and DBEntryType.
   518  func newDatabase(dbc *DBConfig, entryType DBEntryType) (Database, error) {
   519  	switch dbc.DBType {
   520  	case LevelDB:
   521  		return NewLevelDB(dbc, entryType)
   522  	case BadgerDB:
   523  		return NewBadgerDB(dbc.Dir)
   524  	case MemoryDB:
   525  		return NewMemDB(), nil
   526  	case DynamoDB:
   527  		return NewDynamoDB(dbc.DynamoDBConfig)
   528  	default:
   529  		logger.Info("database type is not set, fall back to default LevelDB")
   530  		return NewLevelDB(dbc, 0)
   531  	}
   532  }
   533  
   534  // newDatabaseManager returns a pointer to a databaseManager initialized with the given configuration.
   535  func newDatabaseManager(dbc *DBConfig) *databaseManager {
   536  	return &databaseManager{
   537  		config: dbc,
   538  		dbs:    make([]Database, databaseEntryTypeSize),
   539  		cm:     newCacheManager(),
   540  	}
   541  }
   542  
   543  // NewDBManager returns a DBManager interface.
   544  // If SingleDB is false, each DB entry has its own physical DB;
   545  // otherwise, all DB entries share one common DB.
   546  func NewDBManager(dbc *DBConfig) DBManager {
   547  	if dbc.SingleDB {
   548  		logger.Info("Single database is used for persistent storage", "DBType", dbc.DBType)
   549  		if dbm, err := singleDatabaseDBManager(dbc); err != nil {
   550  			logger.Crit("Failed to create a single database", "DBType", dbc.DBType, "err", err)
   551  		} else {
   552  			return dbm
   553  		}
   554  	} else {
   555  		checkDBEntryConfigRatio()
   556  		logger.Info("Non-single database is used for persistent storage", "DBType", dbc.DBType)
   557  		dbm, err := databaseDBManager(dbc)
   558  		if err != nil {
   559  			logger.Crit("Failed to create databases", "DBType", dbc.DBType, "err", err)
   560  		}
   561  		if migrationBlockNum := dbm.getStateTrieMigrationInfo(); migrationBlockNum > 0 {
   562  			mdb := dbm.getDatabase(StateTrieMigrationDB)
   563  			if mdb == nil {
   564  				logger.Error("Failed to load StateTrieMigrationDB database", "migrationBlockNumber", migrationBlockNum)
   565  			} else {
   566  				dbm.inMigration = true
   567  				dbm.migrationBlockNumber = migrationBlockNum
   568  			}
   569  		}
   570  		return dbm
   571  	}
   572  	logger.Crit("Must not reach here!")
   573  	return nil
   574  }
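// A minimal construction sketch for the non-single case (the concrete field values
// below are illustrative assumptions, not recommended defaults):
//
//	dbc := &DBConfig{
//		Dir:                "chaindata",
//		DBType:             LevelDB,
//		SingleDB:           false,
//		NumStateTrieShards: 4,
//		ParallelDBWrite:    true,
//		OpenFilesLimit:     1024,
//		LevelDBCacheSize:   768,
//		LevelDBBufferPool:  true,
//	}
//	dbm := NewDBManager(dbc)
//	defer dbm.Close()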
   575  
   576  func (dbm *databaseManager) IsParallelDBWrite() bool {
   577  	return dbm.config.ParallelDBWrite
   578  }
   579  
   580  func (dbm *databaseManager) IsSingle() bool {
   581  	return dbm.config.SingleDB
   582  }
   583  
   584  func (dbm *databaseManager) InMigration() bool {
   585  	dbm.lockInMigration.RLock()
   586  	defer dbm.lockInMigration.RUnlock()
   587  
   588  	return dbm.inMigration
   589  }
   590  
   591  func (dbm *databaseManager) MigrationBlockNumber() uint64 {
   592  	return dbm.migrationBlockNumber
   593  }
   594  
   595  func (dbm *databaseManager) NewBatch(dbEntryType DBEntryType) Batch {
   596  	if dbEntryType == StateTrieDB {
   597  		dbm.lockInMigration.RLock()
   598  		defer dbm.lockInMigration.RUnlock()
   599  
   600  		if dbm.inMigration {
   601  			newDBBatch := dbm.getDatabase(StateTrieMigrationDB).NewBatch()
   602  			oldDBBatch := dbm.getDatabase(StateTrieDB).NewBatch()
   603  			return NewStateTrieDBBatch([]Batch{oldDBBatch, newDBBatch})
   604  		}
   605  	} else if dbEntryType == StateTrieMigrationDB {
   606  		return dbm.GetStateTrieMigrationDB().NewBatch()
   607  	}
   608  	return dbm.getDatabase(dbEntryType).NewBatch()
   609  }
   610  
   611  func NewStateTrieDBBatch(batches []Batch) Batch {
   612  	return &stateTrieDBBatch{batches: batches}
   613  }
   614  
   615  type stateTrieDBBatch struct {
   616  	batches []Batch
   617  }
   618  
   619  func (stdBatch *stateTrieDBBatch) Put(key []byte, value []byte) error {
   620  	var errResult error
   621  	for _, batch := range stdBatch.batches {
   622  		if err := batch.Put(key, value); err != nil {
   623  			errResult = err
   624  		}
   625  	}
   626  	return errResult
   627  }
   628  
   629  func (stdBatch *stateTrieDBBatch) Delete(key []byte) error {
   630  	var errResult error
   631  	for _, batch := range stdBatch.batches {
   632  		if err := batch.Delete(key); err != nil {
   633  			errResult = err
   634  		}
   635  	}
   636  	return errResult
   637  }
   638  
   639  // ValueSize is called to determine whether to flush the batches once they exceed
   640  // a certain limit. stdBatch returns the largest size among its batches so that
   641  // all batches are written at once as soon as any one of them exceeds the limit.
   642  func (stdBatch *stateTrieDBBatch) ValueSize() int {
   643  	maxSize := 0
   644  	for _, batch := range stdBatch.batches {
   645  		if batch.ValueSize() > maxSize {
   646  			maxSize = batch.ValueSize()
   647  		}
   648  	}
   649  
   650  	return maxSize
   651  }
   652  
   653  // Write passes the list of batches to WriteBatchesParallel, which writes them in parallel.
   654  func (stdBatch *stateTrieDBBatch) Write() error {
   655  	_, err := WriteBatchesParallel(stdBatch.batches...)
   656  	return err
   657  }
   658  
   659  func (stdBatch *stateTrieDBBatch) Reset() {
   660  	for _, batch := range stdBatch.batches {
   661  		batch.Reset()
   662  	}
   663  }
   664  
   665  func (stdBatch *stateTrieDBBatch) Replay(w KeyValueWriter) error {
   666  	var errResult error
   667  	for _, batch := range stdBatch.batches {
   668  		if err := batch.Replay(w); err != nil {
   669  			errResult = err
   670  		}
   671  	}
   672  	return errResult
   673  }
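// During a live state migration, NewBatch(StateTrieDB) returns a stateTrieDBBatch so
// that every trie node is written to both the old and the new state trie database.
// A usage sketch (key, value and flushThreshold are caller-supplied and only assumed here):
//
//	batch := dbm.NewBatch(StateTrieDB)       // fans out to old and new DBs while in migration
//	_ = batch.Put(key, value)                // buffered in both underlying batches
//	if batch.ValueSize() >= flushThreshold { // flushThreshold is a caller-chosen limit
//		_ = batch.Write()                // writes both batches in parallel
//		batch.Reset()
//	}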
   674  
   675  func (dbm *databaseManager) getDBDir(dbEntry DBEntryType) string {
   676  	miscDB := dbm.getDatabase(MiscDB)
   677  
   678  	enc, _ := miscDB.Get(databaseDirKey(uint64(dbEntry)))
   679  	if len(enc) == 0 {
   680  		return dbBaseDirs[dbEntry]
   681  	}
   682  	return string(enc)
   683  }
   684  
   685  func (dbm *databaseManager) setDBDir(dbEntry DBEntryType, newDBDir string) {
   686  	miscDB := dbm.getDatabase(MiscDB)
   687  	if err := miscDB.Put(databaseDirKey(uint64(dbEntry)), []byte(newDBDir)); err != nil {
   688  		logger.Crit("Failed to put DB dir", "err", err)
   689  	}
   690  }
   691  
   692  func (dbm *databaseManager) getStateTrieMigrationInfo() uint64 {
   693  	miscDB := dbm.getDatabase(MiscDB)
   694  
   695  	enc, _ := miscDB.Get(migrationStatusKey)
   696  	if len(enc) != 8 {
   697  		return 0
   698  	}
   699  
   700  	blockNum := binary.BigEndian.Uint64(enc)
   701  	return blockNum
   702  }
   703  
   704  func (dbm *databaseManager) setStateTrieMigrationStatus(blockNum uint64) {
   705  	miscDB := dbm.getDatabase(MiscDB)
   706  	if err := miscDB.Put(migrationStatusKey, common.Int64ToByteBigEndian(blockNum)); err != nil {
   707  		logger.Crit("Failed to set state trie migration status", "err", err)
   708  	}
   709  
   710  	if blockNum == 0 {
   711  		dbm.inMigration = false
   712  		return
   713  	}
   714  
   715  	dbm.inMigration, dbm.migrationBlockNumber = true, blockNum
   716  }
   717  
   718  func newStateTrieMigrationDB(dbc *DBConfig, blockNum uint64) (Database, string) {
   719  	dbDir := dbBaseDirs[StateTrieMigrationDB] + "_" + strconv.FormatUint(blockNum, 10)
   720  	newDBConfig := getDBEntryConfig(dbc, StateTrieMigrationDB, dbDir)
   721  	var newDB Database
   722  	var err error
   723  	if newDBConfig.NumStateTrieShards > 1 {
   724  		newDB, err = newShardedDB(newDBConfig, StateTrieMigrationDB, newDBConfig.NumStateTrieShards)
   725  	} else {
   726  		newDB, err = newDatabase(newDBConfig, StateTrieMigrationDB)
   727  	}
   728  	if err != nil {
   729  		logger.Crit("Failed to create a new database for state trie migration", "err", err)
   730  	}
   731  
   732  	newDB.Meter(dbMetricPrefix + dbBaseDirs[StateTrieMigrationDB] + "/") // Each database collects metrics independently.
   733  	logger.Info("Created a new database for state trie migration", "newStateTrieDB", newDBConfig.Dir)
   734  
   735  	return newDB, dbDir
   736  }
   737  
   738  // CreateMigrationDBAndSetStatus creates a migration DB and sets the migration status.
   739  func (dbm *databaseManager) CreateMigrationDBAndSetStatus(blockNum uint64) error {
   740  	if dbm.InMigration() {
   741  		logger.Warn("Failed to set a new state trie migration db. Already in migration")
   742  		return errors.New("already in migration")
   743  	}
   744  	if dbm.config.SingleDB {
   745  		logger.Warn("Setting a new database for state trie migration is allowed for non-single database only")
   746  		return errors.New("singleDB does not support state trie migration")
   747  	}
   748  
   749  	logger.Info("Start setting a new database for state trie migration", "blockNum", blockNum)
   750  
   751  	// Create a new database for migration process.
   752  	newDB, newDBDir := newStateTrieMigrationDB(dbm.config, blockNum)
   753  
   754  	// lock to prevent a conflict between reading and changing the state DB
   755  	dbm.lockInMigration.Lock()
   756  	defer dbm.lockInMigration.Unlock()
   757  
   758  	// Store migration db path in misc db
   759  	dbm.setDBDir(StateTrieMigrationDB, newDBDir)
   760  
   761  	// Set migration db
   762  	dbm.dbs[StateTrieMigrationDB] = newDB
   763  
   764  	// Store the migration status
   765  	dbm.setStateTrieMigrationStatus(blockNum)
   766  
   767  	return nil
   768  }
   769  
   770  // FinishStateMigration updates stateTrieDB and removes the old one.
   771  // The function should be called only after the state trie migration is finished.
   772  // It returns a channel that is closed when removeDB is finished.
   773  func (dbm *databaseManager) FinishStateMigration(succeed bool) chan struct{} {
   774  	// lock to prevent a conflict between reading and changing the state DB
   775  	dbm.lockInMigration.Lock()
   776  	defer dbm.lockInMigration.Unlock()
   777  
   778  	dbRemoved := StateTrieDB
   779  	dbUsed := StateTrieMigrationDB
   780  
   781  	if !succeed {
   782  		dbRemoved, dbUsed = dbUsed, dbRemoved
   783  	}
   784  
   785  	dbToBeRemoved := dbm.dbs[dbRemoved]
   786  	dbToBeUsed := dbm.dbs[dbUsed]
   787  	dbDirToBeRemoved := dbm.getDBDir(dbRemoved)
   788  	dbDirToBeUsed := dbm.getDBDir(dbUsed)
   789  
   790  	// Replace StateTrieDB with new one
   791  	dbm.setDBDir(StateTrieDB, dbDirToBeUsed)
   792  	dbm.dbs[StateTrieDB] = dbToBeUsed
   793  
   794  	dbm.setStateTrieMigrationStatus(0)
   795  
   796  	dbm.dbs[StateTrieMigrationDB] = nil
   797  	dbm.setDBDir(StateTrieMigrationDB, "")
   798  
   799  	dbPathToBeRemoved := filepath.Join(dbm.config.Dir, dbDirToBeRemoved)
   800  	dbToBeRemoved.Close()
   801  
   802  	endCheck := make(chan struct{})
   803  	go removeDB(dbPathToBeRemoved, endCheck)
   804  	return endCheck
   805  }
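// The two methods above frame the migration life cycle. A condensed sketch of how a
// caller is expected to drive it (the trie-copy step is only indicated; that logic
// lives outside this package):
//
//	if err := dbm.CreateMigrationDBAndSetStatus(blockNum); err != nil {
//		return err
//	}
//	// ... iterate the state trie at blockNum and copy every node into
//	// dbm.GetStateTrieMigrationDB() (or via dbm.NewBatch(StateTrieMigrationDB)) ...
//	done := dbm.FinishStateMigration(true) // true: keep the new DB, drop the old one
//	<-done                                 // closed once the old directory is removed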
   806  
   807  func removeDB(dbPath string, endCheck chan struct{}) {
   808  	defer func() {
   809  		if endCheck != nil {
   810  			close(endCheck)
   811  		}
   812  	}()
   813  	if err := os.RemoveAll(dbPath); err != nil {
   814  		logger.Error("Failed to remove the database due to an error", "err", err, "dir", dbPath)
   815  		return
   816  	}
   817  	logger.Info("Successfully removed database", "path", dbPath)
   818  }
   819  
   820  func (dbm *databaseManager) GetStateTrieDB() Database {
   821  	return dbm.dbs[StateTrieDB]
   822  }
   823  
   824  func (dbm *databaseManager) GetStateTrieMigrationDB() Database {
   825  	return dbm.dbs[StateTrieMigrationDB]
   826  }
   827  
   828  func (dbm *databaseManager) GetMiscDB() Database {
   829  	return dbm.dbs[MiscDB]
   830  }
   831  
   832  func (dbm *databaseManager) GetSnapshotDB() Database {
   833  	return dbm.getDatabase(SnapshotDB)
   834  }
   835  
   836  func (dbm *databaseManager) GetMemDB() *MemDB {
   837  	if dbm.config.DBType == MemoryDB {
   838  		if memDB, ok := dbm.dbs[0].(*MemDB); ok {
   839  			return memDB
   840  		} else {
   841  			logger.Error("DBManager is set as memory DBManager, but actual value is not set as memory DBManager.")
   842  			return nil
   843  		}
   844  	}
   845  	logger.Error("GetMemDB() call to non memory DBManager object.")
   846  	return nil
   847  }
   848  
   849  // GetDBConfig returns DBConfig of the DB manager.
   850  func (dbm *databaseManager) GetDBConfig() *DBConfig {
   851  	return dbm.config
   852  }
   853  
   854  func (dbm *databaseManager) getDatabase(dbEntryType DBEntryType) Database {
   855  	if dbm.config.DBType == MemoryDB {
   856  		return dbm.dbs[0]
   857  	} else {
   858  		return dbm.dbs[dbEntryType]
   859  	}
   860  }
   861  
   862  func (dbm *databaseManager) Close() {
   863  	// If single DB, only close the first database.
   864  	if dbm.config.SingleDB {
   865  		dbm.dbs[0].Close()
   866  		return
   867  	}
   868  
   869  	// If not single DB, close all databases.
   870  	for _, db := range dbm.dbs {
   871  		if db != nil {
   872  			db.Close()
   873  		}
   874  	}
   875  }
   876  
   877  // TODO-Klaytn Some of below need to be invisible outside database package
   878  // Canonical Hash operations.
   879  // ReadCanonicalHash retrieves the hash assigned to a canonical block number.
   880  func (dbm *databaseManager) ReadCanonicalHash(number uint64) common.Hash {
   881  	if cached := dbm.cm.readCanonicalHashCache(number); !common.EmptyHash(cached) {
   882  		return cached
   883  	}
   884  
   885  	db := dbm.getDatabase(headerDB)
   886  	data, _ := db.Get(headerHashKey(number))
   887  	if len(data) == 0 {
   888  		return common.Hash{}
   889  	}
   890  
   891  	hash := common.BytesToHash(data)
   892  	dbm.cm.writeCanonicalHashCache(number, hash)
   893  	return hash
   894  }
   895  
   896  // WriteCanonicalHash stores the hash assigned to a canonical block number.
   897  func (dbm *databaseManager) WriteCanonicalHash(hash common.Hash, number uint64) {
   898  	db := dbm.getDatabase(headerDB)
   899  	if err := db.Put(headerHashKey(number), hash.Bytes()); err != nil {
   900  		logger.Crit("Failed to store number to hash mapping", "err", err)
   901  	}
   902  	dbm.cm.writeCanonicalHashCache(number, hash)
   903  }
   904  
   905  // DeleteCanonicalHash removes the number to hash canonical mapping.
   906  func (dbm *databaseManager) DeleteCanonicalHash(number uint64) {
   907  	db := dbm.getDatabase(headerDB)
   908  	if err := db.Delete(headerHashKey(number)); err != nil {
   909  		logger.Crit("Failed to delete number to hash mapping", "err", err)
   910  	}
   911  	dbm.cm.writeCanonicalHashCache(number, common.Hash{})
   912  }
   913  
   914  // ReadAllHashes retrieves all the hashes assigned to blocks at a certain height,
   915  // both canonical and reorged forks included.
   916  func (dbm *databaseManager) ReadAllHashes(number uint64) []common.Hash {
   917  	db := dbm.getDatabase(headerDB)
   918  	prefix := headerKeyPrefix(number)
   919  
   920  	hashes := make([]common.Hash, 0, 1)
   921  	it := db.NewIterator(prefix, nil)
   922  	defer it.Release()
   923  
   924  	for it.Next() {
   925  		if key := it.Key(); len(key) == len(prefix)+32 {
   926  			hashes = append(hashes, common.BytesToHash(key[len(key)-32:]))
   927  		}
   928  	}
   929  	return hashes
   930  }
   931  
   932  // Head Header Hash operations.
   933  // ReadHeadHeaderHash retrieves the hash of the current canonical head header.
   934  func (dbm *databaseManager) ReadHeadHeaderHash() common.Hash {
   935  	db := dbm.getDatabase(headerDB)
   936  	data, _ := db.Get(headHeaderKey)
   937  	if len(data) == 0 {
   938  		return common.Hash{}
   939  	}
   940  	return common.BytesToHash(data)
   941  }
   942  
   943  // WriteHeadHeaderHash stores the hash of the current canonical head header.
   944  func (dbm *databaseManager) WriteHeadHeaderHash(hash common.Hash) {
   945  	db := dbm.getDatabase(headerDB)
   946  	if err := db.Put(headHeaderKey, hash.Bytes()); err != nil {
   947  		logger.Crit("Failed to store last header's hash", "err", err)
   948  	}
   949  }
   950  
   951  // Block Hash operations.
   952  func (dbm *databaseManager) ReadHeadBlockHash() common.Hash {
   953  	db := dbm.getDatabase(headerDB)
   954  	data, _ := db.Get(headBlockKey)
   955  	if len(data) == 0 {
   956  		return common.Hash{}
   957  	}
   958  	return common.BytesToHash(data)
   959  }
   960  
   961  // Block Backup Hash operations.
   962  func (dbm *databaseManager) ReadHeadBlockBackupHash() common.Hash {
   963  	db := dbm.getDatabase(headerDB)
   964  	data, _ := db.Get(headBlockBackupKey)
   965  	if len(data) == 0 {
   966  		return common.Hash{}
   967  	}
   968  	return common.BytesToHash(data)
   969  }
   970  
   971  // WriteHeadBlockHash stores the head block's hash.
   972  func (dbm *databaseManager) WriteHeadBlockHash(hash common.Hash) {
   973  	HeadBlockQ.push(hash)
   974  
   975  	db := dbm.getDatabase(headerDB)
   976  	if err := db.Put(headBlockKey, hash.Bytes()); err != nil {
   977  		logger.Crit("Failed to store last block's hash", "err", err)
   978  	}
   979  
   980  	backupHash := HeadBlockQ.pop()
   981  	if backupHash == (common.Hash{}) {
   982  		return
   983  	}
   984  	if err := db.Put(headBlockBackupKey, backupHash.Bytes()); err != nil {
   985  		logger.Crit("Failed to store last block's backup hash", "err", err)
   986  	}
   987  }
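// The backup key intentionally lags behind: because HeadBlockQ holds backupHashCnt
// (128) entries, headBlockBackupKey holds the head block hash written 128 updates
// earlier (once the ring has filled), giving the node an older, presumably safe head
// to fall back to. A read-side sketch (illustrative; the actual recovery policy lives
// outside this file):
//
//	head := dbm.ReadHeadBlockHash()
//	if dbm.ReadBlockByHash(head) == nil { // head block data is missing or corrupted
//		head = dbm.ReadHeadBlockBackupHash() // roughly 128 head updates older
//	}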
   988  
   989  // Head Fast Block Hash operations.
   990  // ReadHeadFastBlockHash retrieves the hash of the current fast-sync head block.
   991  func (dbm *databaseManager) ReadHeadFastBlockHash() common.Hash {
   992  	db := dbm.getDatabase(headerDB)
   993  	data, _ := db.Get(headFastBlockKey)
   994  	if len(data) == 0 {
   995  		return common.Hash{}
   996  	}
   997  	return common.BytesToHash(data)
   998  }
   999  
  1000  // Head Fast Block Backup Hash operations.
  1001  // ReadHeadFastBlockBackupHash retrieves the backup hash of the current fast-sync head block.
  1002  func (dbm *databaseManager) ReadHeadFastBlockBackupHash() common.Hash {
  1003  	db := dbm.getDatabase(headerDB)
  1004  	data, _ := db.Get(headFastBlockBackupKey)
  1005  	if len(data) == 0 {
  1006  		return common.Hash{}
  1007  	}
  1008  	return common.BytesToHash(data)
  1009  }
  1010  
  1011  // WriteHeadFastBlockHash stores the hash of the current fast-sync head block.
  1012  func (dbm *databaseManager) WriteHeadFastBlockHash(hash common.Hash) {
  1013  	FastBlockQ.push(hash)
  1014  
  1015  	db := dbm.getDatabase(headerDB)
  1016  	if err := db.Put(headFastBlockKey, hash.Bytes()); err != nil {
  1017  		logger.Crit("Failed to store last fast block's hash", "err", err)
  1018  	}
  1019  
  1020  	backupHash := FastBlockQ.pop()
  1021  	if backupHash == (common.Hash{}) {
  1022  		return
  1023  	}
  1024  	if err := db.Put(headFastBlockBackupKey, backupHash.Bytes()); err != nil {
  1025  		logger.Crit("Failed to store last fast block's backup hash", "err", err)
  1026  	}
  1027  }
  1028  
  1029  // Fast Trie Progress operations.
  1030  // ReadFastTrieProgress retrieves the number of trie nodes fast synced to allow
  1031  // reporting correct numbers across restarts.
  1032  func (dbm *databaseManager) ReadFastTrieProgress() uint64 {
  1033  	db := dbm.getDatabase(MiscDB)
  1034  	data, _ := db.Get(fastTrieProgressKey)
  1035  	if len(data) == 0 {
  1036  		return 0
  1037  	}
  1038  	return new(big.Int).SetBytes(data).Uint64()
  1039  }
  1040  
  1041  // WriteFastTrieProgress stores the fast sync trie process counter to support
  1042  // retrieving it across restarts.
  1043  func (dbm *databaseManager) WriteFastTrieProgress(count uint64) {
  1044  	db := dbm.getDatabase(MiscDB)
  1045  	if err := db.Put(fastTrieProgressKey, new(big.Int).SetUint64(count).Bytes()); err != nil {
  1046  		logger.Crit("Failed to store fast sync trie progress", "err", err)
  1047  	}
  1048  }
  1049  
  1050  // (Block)Header operations.
  1051  // HasHeader verifies the existence of a block header corresponding to the hash.
  1052  func (dbm *databaseManager) HasHeader(hash common.Hash, number uint64) bool {
  1053  	if dbm.cm.hasHeaderInCache(hash) {
  1054  		return true
  1055  	}
  1056  
  1057  	db := dbm.getDatabase(headerDB)
  1058  	if has, err := db.Has(headerKey(number, hash)); !has || err != nil {
  1059  		return false
  1060  	}
  1061  	return true
  1062  }
  1063  
  1064  // ReadHeader retrieves the block header corresponding to the hash.
  1065  func (dbm *databaseManager) ReadHeader(hash common.Hash, number uint64) *types.Header {
  1066  	if cachedHeader := dbm.cm.readHeaderCache(hash); cachedHeader != nil {
  1067  		return cachedHeader
  1068  	}
  1069  
  1070  	data := dbm.ReadHeaderRLP(hash, number)
  1071  	if len(data) == 0 {
  1072  		return nil
  1073  	}
  1074  	header := new(types.Header)
  1075  	if err := rlp.Decode(bytes.NewReader(data), header); err != nil {
  1076  		logger.Error("Invalid block header RLP", "hash", hash, "err", err)
  1077  		return nil
  1078  	}
  1079  
  1080  	// Write to cache before returning found value.
  1081  	dbm.cm.writeHeaderCache(hash, header)
  1082  	return header
  1083  }
  1084  
  1085  // ReadHeaderRLP retrieves a block header in its raw RLP database encoding.
  1086  func (dbm *databaseManager) ReadHeaderRLP(hash common.Hash, number uint64) rlp.RawValue {
  1087  	db := dbm.getDatabase(headerDB)
  1088  	data, _ := db.Get(headerKey(number, hash))
  1089  	return data
  1090  }
  1091  
  1092  // WriteHeader stores a block header into the database and also stores the hash-
  1093  // to-number mapping.
  1094  func (dbm *databaseManager) WriteHeader(header *types.Header) {
  1095  	db := dbm.getDatabase(headerDB)
  1096  	// Write the hash -> number mapping
  1097  	var (
  1098  		hash    = header.Hash()
  1099  		number  = header.Number.Uint64()
  1100  		encoded = common.Int64ToByteBigEndian(number)
  1101  	)
  1102  	key := headerNumberKey(hash)
  1103  	if err := db.Put(key, encoded); err != nil {
  1104  		logger.Crit("Failed to store hash to number mapping", "err", err)
  1105  	}
  1106  	// Write the encoded header
  1107  	data, err := rlp.EncodeToBytes(header)
  1108  	if err != nil {
  1109  		logger.Crit("Failed to RLP encode header", "err", err)
  1110  	}
  1111  	key = headerKey(number, hash)
  1112  	if err := db.Put(key, data); err != nil {
  1113  		logger.Crit("Failed to store header", "err", err)
  1114  	}
  1115  
  1116  	// Write to cache at the end of successful write.
  1117  	dbm.cm.writeHeaderCache(hash, header)
  1118  	dbm.cm.writeBlockNumberCache(hash, number)
  1119  }
  1120  
  1121  // DeleteHeader removes all block header data associated with a hash.
  1122  func (dbm *databaseManager) DeleteHeader(hash common.Hash, number uint64) {
  1123  	db := dbm.getDatabase(headerDB)
  1124  	if err := db.Delete(headerKey(number, hash)); err != nil {
  1125  		logger.Crit("Failed to delete header", "err", err)
  1126  	}
  1127  	if err := db.Delete(headerNumberKey(hash)); err != nil {
  1128  		logger.Crit("Failed to delete hash to number mapping", "err", err)
  1129  	}
  1130  
  1131  	// Delete cache at the end of successful delete.
  1132  	dbm.cm.deleteHeaderCache(hash)
  1133  	dbm.cm.deleteBlockNumberCache(hash)
  1134  }
  1135  
  1136  // Header Number operations.
  1137  // ReadHeaderNumber returns the header number assigned to a hash.
  1138  func (dbm *databaseManager) ReadHeaderNumber(hash common.Hash) *uint64 {
  1139  	if cachedHeaderNumber := dbm.cm.readBlockNumberCache(hash); cachedHeaderNumber != nil {
  1140  		return cachedHeaderNumber
  1141  	}
  1142  
  1143  	db := dbm.getDatabase(headerDB)
  1144  	data, _ := db.Get(headerNumberKey(hash))
  1145  	if len(data) != 8 {
  1146  		return nil
  1147  	}
  1148  	number := binary.BigEndian.Uint64(data)
  1149  
  1150  	// Write to cache before returning found value.
  1151  	dbm.cm.writeBlockNumberCache(hash, number)
  1152  	return &number
  1153  }
  1154  
  1155  // (Block)Body operations.
  1156  // HasBody verifies the existence of a block body corresponding to the hash.
  1157  func (dbm *databaseManager) HasBody(hash common.Hash, number uint64) bool {
  1158  	db := dbm.getDatabase(BodyDB)
  1159  	if has, err := db.Has(blockBodyKey(number, hash)); !has || err != nil {
  1160  		return false
  1161  	}
  1162  	return true
  1163  }
  1164  
  1165  // ReadBody retrieves the block body corresponding to the hash.
  1166  func (dbm *databaseManager) ReadBody(hash common.Hash, number uint64) *types.Body {
  1167  	if cachedBody := dbm.cm.readBodyCache(hash); cachedBody != nil {
  1168  		return cachedBody
  1169  	}
  1170  
  1171  	data := dbm.ReadBodyRLP(hash, number)
  1172  	if len(data) == 0 {
  1173  		return nil
  1174  	}
  1175  	body := new(types.Body)
  1176  	if err := rlp.Decode(bytes.NewReader(data), body); err != nil {
  1177  		logger.Error("Invalid block body RLP", "hash", hash, "err", err)
  1178  		return nil
  1179  	}
  1180  
  1181  	// Write to cache at the end of successful read.
  1182  	dbm.cm.writeBodyCache(hash, body)
  1183  	return body
  1184  }
  1185  
  1186  // ReadBodyInCache retrieves the block body in bodyCache.
  1187  // It only searches cache.
  1188  func (dbm *databaseManager) ReadBodyInCache(hash common.Hash) *types.Body {
  1189  	return dbm.cm.readBodyCache(hash)
  1190  }
  1191  
  1192  // ReadBodyRLP retrieves the block body (transactions) in RLP encoding.
  1193  func (dbm *databaseManager) ReadBodyRLP(hash common.Hash, number uint64) rlp.RawValue {
  1194  	// Short circuit if the rlp encoded body's already in the cache, retrieve otherwise
  1195  	if cachedBodyRLP := dbm.readBodyRLPInCache(hash); cachedBodyRLP != nil {
  1196  		return cachedBodyRLP
  1197  	}
  1198  
  1199  	// find cached body and encode it to return
  1200  	if cachedBody := dbm.ReadBodyInCache(hash); cachedBody != nil {
  1201  		if bodyRLP, err := rlp.EncodeToBytes(cachedBody); err == nil {
  1202  			dbm.cm.writeBodyRLPCache(hash, bodyRLP)
  1203  			return bodyRLP
  1204  		}
  1205  	}
  1206  
  1207  	// not found in cache, find body in database
  1208  	db := dbm.getDatabase(BodyDB)
  1209  	data, _ := db.Get(blockBodyKey(number, hash))
  1210  
  1211  	// Write to cache at the end of successful read.
  1212  	dbm.cm.writeBodyRLPCache(hash, data)
  1213  	return data
  1214  }
  1215  
  1216  // ReadBodyRLPByHash retrieves the block body (transactions) in RLP encoding.
  1217  func (dbm *databaseManager) ReadBodyRLPByHash(hash common.Hash) rlp.RawValue {
  1218  	// Short circuit if the rlp encoded body's already in the cache, retrieve otherwise
  1219  	if cachedBodyRLP := dbm.readBodyRLPInCache(hash); cachedBodyRLP != nil {
  1220  		return cachedBodyRLP
  1221  	}
  1222  
  1223  	// find cached body and encode it to return
  1224  	if cachedBody := dbm.ReadBodyInCache(hash); cachedBody != nil {
  1225  		if bodyRLP, err := rlp.EncodeToBytes(cachedBody); err == nil {
  1226  			dbm.cm.writeBodyRLPCache(hash, bodyRLP)
  1227  			return bodyRLP
  1228  		}
  1229  	}
  1230  
  1231  	// not found in cache, find body in database
  1232  	number := dbm.ReadHeaderNumber(hash)
  1233  	if number == nil {
  1234  		return nil
  1235  	}
  1236  
  1237  	db := dbm.getDatabase(BodyDB)
  1238  	data, _ := db.Get(blockBodyKey(*number, hash))
  1239  
  1240  	// Write to cache at the end of successful read.
  1241  	dbm.cm.writeBodyRLPCache(hash, data)
  1242  	return data
  1243  }
  1244  
  1245  // readBodyRLPInCache retrieves the block body (transactions) in RLP encoding
  1246  // in bodyRLPCache. It only searches cache.
  1247  func (dbm *databaseManager) readBodyRLPInCache(hash common.Hash) rlp.RawValue {
  1248  	return dbm.cm.readBodyRLPCache(hash)
  1249  }
  1250  
  1251  // WriteBody stores a block body into the database.
  1252  func (dbm *databaseManager) WriteBody(hash common.Hash, number uint64, body *types.Body) {
  1253  	data, err := rlp.EncodeToBytes(body)
  1254  	if err != nil {
  1255  		logger.Crit("Failed to RLP encode body", "err", err)
  1256  	}
  1257  	dbm.WriteBodyRLP(hash, number, data)
  1258  }
  1259  
  1260  func (dbm *databaseManager) PutBodyToBatch(batch Batch, hash common.Hash, number uint64, body *types.Body) {
  1261  	data, err := rlp.EncodeToBytes(body)
  1262  	if err != nil {
  1263  		logger.Crit("Failed to RLP encode body", "err", err)
  1264  	}
  1265  
  1266  	if err := batch.Put(blockBodyKey(number, hash), data); err != nil {
  1267  		logger.Crit("Failed to store block body", "err", err)
  1268  	}
  1269  }
  1270  
  1271  // WriteBodyRLP stores an RLP encoded block body into the database.
  1272  func (dbm *databaseManager) WriteBodyRLP(hash common.Hash, number uint64, rlp rlp.RawValue) {
  1273  	dbm.cm.writeBodyRLPCache(hash, rlp)
  1274  
  1275  	db := dbm.getDatabase(BodyDB)
  1276  	if err := db.Put(blockBodyKey(number, hash), rlp); err != nil {
  1277  		logger.Crit("Failed to store block body", "err", err)
  1278  	}
  1279  }
  1280  
  1281  // DeleteBody removes all block body data associated with a hash.
  1282  func (dbm *databaseManager) DeleteBody(hash common.Hash, number uint64) {
  1283  	db := dbm.getDatabase(BodyDB)
  1284  	if err := db.Delete(blockBodyKey(number, hash)); err != nil {
  1285  		logger.Crit("Failed to delete block body", "err", err)
  1286  	}
  1287  	dbm.cm.deleteBodyCache(hash)
  1288  }
  1289  
  1290  // TotalDifficulty operations.
  1291  // ReadTd retrieves a block's total blockscore corresponding to the hash.
  1292  func (dbm *databaseManager) ReadTd(hash common.Hash, number uint64) *big.Int {
  1293  	if cachedTd := dbm.cm.readTdCache(hash); cachedTd != nil {
  1294  		return cachedTd
  1295  	}
  1296  
  1297  	db := dbm.getDatabase(MiscDB)
  1298  	data, _ := db.Get(headerTDKey(number, hash))
  1299  	if len(data) == 0 {
  1300  		return nil
  1301  	}
  1302  	td := new(big.Int)
  1303  	if err := rlp.Decode(bytes.NewReader(data), td); err != nil {
  1304  		logger.Error("Invalid block total blockscore RLP", "hash", hash, "err", err)
  1305  		return nil
  1306  	}
  1307  
  1308  	// Write to cache before returning found value.
  1309  	dbm.cm.writeTdCache(hash, td)
  1310  	return td
  1311  }
  1312  
  1313  // WriteTd stores the total blockscore of a block into the database.
  1314  func (dbm *databaseManager) WriteTd(hash common.Hash, number uint64, td *big.Int) {
  1315  	db := dbm.getDatabase(MiscDB)
  1316  	data, err := rlp.EncodeToBytes(td)
  1317  	if err != nil {
  1318  		logger.Crit("Failed to RLP encode block total blockscore", "err", err)
  1319  	}
  1320  	if err := db.Put(headerTDKey(number, hash), data); err != nil {
  1321  		logger.Crit("Failed to store block total blockscore", "err", err)
  1322  	}
  1323  
  1324  	// Write to cache at the end of successful write.
  1325  	dbm.cm.writeTdCache(hash, td)
  1326  }
  1327  
  1328  // DeleteTd removes all block total blockscore data associated with a hash.
  1329  func (dbm *databaseManager) DeleteTd(hash common.Hash, number uint64) {
  1330  	db := dbm.getDatabase(MiscDB)
  1331  	if err := db.Delete(headerTDKey(number, hash)); err != nil {
  1332  		logger.Crit("Failed to delete block total blockscore", "err", err)
  1333  	}
  1334  	// Delete cache at the end of successful delete.
  1335  	dbm.cm.deleteTdCache(hash)
  1336  }
  1337  
  1338  // Receipts operations.
  1339  // ReadReceipt retrieves a receipt, blockHash, blockNumber and receiptIndex found by the given txHash.
  1340  func (dbm *databaseManager) ReadReceipt(txHash common.Hash) (*types.Receipt, common.Hash, uint64, uint64) {
  1341  	blockHash, blockNumber, receiptIndex := dbm.ReadTxLookupEntry(txHash)
  1342  	if blockHash == (common.Hash{}) {
  1343  		return nil, common.Hash{}, 0, 0
  1344  	}
  1345  	receipts := dbm.ReadReceipts(blockHash, blockNumber)
  1346  	if len(receipts) <= int(receiptIndex) {
  1347  		logger.Error("Receipt referenced missing", "number", blockNumber, "blockHash", blockHash, "index", receiptIndex)
  1348  		return nil, common.Hash{}, 0, 0
  1349  	}
  1350  	return receipts[receiptIndex], blockHash, blockNumber, receiptIndex
  1351  }
  1352  
  1353  // ReadReceipts retrieves all the transaction receipts belonging to a block.
  1354  func (dbm *databaseManager) ReadReceipts(blockHash common.Hash, number uint64) types.Receipts {
  1355  	db := dbm.getDatabase(ReceiptsDB)
  1356  	// Retrieve the flattened receipt slice
  1357  	data, _ := db.Get(blockReceiptsKey(number, blockHash))
  1358  	if len(data) == 0 {
  1359  		return nil
  1360  	}
  1361  	// Convert the receipts from their database form to their internal representation
  1362  	storageReceipts := []*types.ReceiptForStorage{}
  1363  	if err := rlp.DecodeBytes(data, &storageReceipts); err != nil {
  1364  		logger.Error("Invalid receipt array RLP", "blockHash", blockHash, "err", err)
  1365  		return nil
  1366  	}
  1367  	receipts := make(types.Receipts, len(storageReceipts))
  1368  	for i, receipt := range storageReceipts {
  1369  		receipts[i] = (*types.Receipt)(receipt)
  1370  	}
  1371  	return receipts
  1372  }
  1373  
  1374  func (dbm *databaseManager) ReadReceiptsByBlockHash(hash common.Hash) types.Receipts {
  1375  	receipts := dbm.ReadBlockReceiptsInCache(hash)
  1376  	if receipts != nil {
  1377  		return receipts
  1378  	}
  1379  	number := dbm.ReadHeaderNumber(hash)
  1380  	if number == nil {
  1381  		return nil
  1382  	}
  1383  	return dbm.ReadReceipts(hash, *number)
  1384  }
  1385  
  1386  // WriteReceipts stores all the transaction receipts belonging to a block.
  1387  func (dbm *databaseManager) WriteReceipts(hash common.Hash, number uint64, receipts types.Receipts) {
  1388  	dbm.cm.writeBlockReceiptsCache(hash, receipts)
  1389  
  1390  	db := dbm.getDatabase(ReceiptsDB)
  1391  	// When putReceiptsToPutter is called from WriteReceipts, txReceipt is cached.
  1392  	dbm.putReceiptsToPutter(db, hash, number, receipts, true)
  1393  }
  1394  
  1395  func (dbm *databaseManager) PutReceiptsToBatch(batch Batch, hash common.Hash, number uint64, receipts types.Receipts) {
  1396  	// When putReceiptsToPutter is called from PutReceiptsToBatch, txReceipt is not cached.
  1397  	dbm.putReceiptsToPutter(batch, hash, number, receipts, false)
  1398  }
  1399  
  1400  func (dbm *databaseManager) putReceiptsToPutter(putter KeyValueWriter, hash common.Hash, number uint64, receipts types.Receipts, addToCache bool) {
  1401  	// Convert the receipts into their database form and serialize them
  1402  	storageReceipts := make([]*types.ReceiptForStorage, len(receipts))
  1403  	for i, receipt := range receipts {
  1404  		storageReceipts[i] = (*types.ReceiptForStorage)(receipt)
  1405  
  1406  		if addToCache {
  1407  			dbm.cm.writeTxReceiptCache(receipt.TxHash, receipt)
  1408  		}
  1409  	}
  1410  	bytes, err := rlp.EncodeToBytes(storageReceipts)
  1411  	if err != nil {
  1412  		logger.Crit("Failed to encode block receipts", "err", err)
  1413  	}
  1414  	// Store the flattened receipt slice
  1415  	if err := putter.Put(blockReceiptsKey(number, hash), bytes); err != nil {
  1416  		logger.Crit("Failed to store block receipts", "err", err)
  1417  	}
  1418  }
  1419  
  1420  // DeleteReceipts removes all receipt data associated with a block hash.
  1421  func (dbm *databaseManager) DeleteReceipts(hash common.Hash, number uint64) {
  1422  	receipts := dbm.ReadReceipts(hash, number)
  1423  
  1424  	db := dbm.getDatabase(ReceiptsDB)
  1425  	if err := db.Delete(blockReceiptsKey(number, hash)); err != nil {
  1426  		logger.Crit("Failed to delete block receipts", "err", err)
  1427  	}
  1428  
  1429  	// Delete blockReceiptsCache and txReceiptCache.
  1430  	dbm.cm.deleteBlockReceiptsCache(hash)
  1431  	if receipts != nil {
  1432  		for _, receipt := range receipts {
  1433  			dbm.cm.deleteTxReceiptCache(receipt.TxHash)
  1434  		}
  1435  	}
  1436  }
  1437  
  1438  // Block operations.
  1439  // ReadBlock retrieves an entire block corresponding to the hash, assembling it
  1440  // back from the stored header and body. If either the header or the body could
  1441  // not be retrieved, nil is returned.
  1442  //
  1443  // Note that, because the header and the block body are downloaded concurrently,
  1444  // the header (and thus the canonical hash) may already be stored while the body data is not (yet).
  1445  func (dbm *databaseManager) ReadBlock(hash common.Hash, number uint64) *types.Block {
  1446  	if cachedBlock := dbm.cm.readBlockCache(hash); cachedBlock != nil {
  1447  		return cachedBlock
  1448  	}
  1449  
  1450  	header := dbm.ReadHeader(hash, number)
  1451  	if header == nil {
  1452  		return nil
  1453  	}
  1454  
  1455  	body := dbm.ReadBody(hash, number)
  1456  	if body == nil {
  1457  		return nil
  1458  	}
  1459  
  1460  	block := types.NewBlockWithHeader(header).WithBody(body.Transactions)
  1461  
  1462  	// Write to cache at the end of successful write.
  1463  	dbm.cm.writeBlockCache(hash, block)
  1464  	return block
  1465  }
  1466  
  1467  func (dbm *databaseManager) ReadBlockByHash(hash common.Hash) *types.Block {
  1468  	if cachedBlock := dbm.cm.readBlockCache(hash); cachedBlock != nil {
  1469  		return cachedBlock
  1470  	}
  1471  
  1472  	number := dbm.ReadHeaderNumber(hash)
  1473  	if number == nil {
  1474  		return nil
  1475  	}
  1476  
  1477  	header := dbm.ReadHeader(hash, *number)
  1478  	if header == nil {
  1479  		return nil
  1480  	}
  1481  
  1482  	body := dbm.ReadBody(hash, *number)
  1483  	if body == nil {
  1484  		return nil
  1485  	}
  1486  
  1487  	block := types.NewBlockWithHeader(header).WithBody(body.Transactions)
  1488  
  1489  	// Write to cache at the end of successful write.
  1490  	dbm.cm.writeBlockCache(hash, block)
  1491  	return block
  1492  }
  1493  
  1494  func (dbm *databaseManager) ReadBlockByNumber(number uint64) *types.Block {
  1495  	hash := dbm.ReadCanonicalHash(number)
  1496  	if hash == (common.Hash{}) {
  1497  		return nil
  1498  	}
  1499  	return dbm.ReadBlock(hash, number)
  1500  }
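// exampleReadBlockWithReceipts is a hypothetical, illustrative sketch (not part
// of the original code) combining the block readers above with the receipt
// reader: a nil block means the canonical hash, header, or body is not (yet)
// stored, so callers treat nil as "not available" rather than as an error.
func exampleReadBlockWithReceipts(dbm *databaseManager, number uint64) (*types.Block, types.Receipts) {
	block := dbm.ReadBlockByNumber(number)
	if block == nil {
		return nil, nil
	}
	return block, dbm.ReadReceipts(block.Hash(), number)
}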
  1501  
  1502  func (dbm *databaseManager) HasBlock(hash common.Hash, number uint64) bool {
  1503  	if dbm.cm.hasBlockInCache(hash) {
  1504  		return true
  1505  	}
  1506  	return dbm.HasBody(hash, number)
  1507  }
  1508  
  1509  func (dbm *databaseManager) WriteBlock(block *types.Block) {
  1510  	dbm.cm.writeBodyCache(block.Hash(), block.Body())
  1511  	dbm.cm.blockCache.Add(block.Hash(), block)
  1512  
  1513  	dbm.WriteBody(block.Hash(), block.NumberU64(), block.Body())
  1514  	dbm.WriteHeader(block.Header())
  1515  }
  1516  
  1517  func (dbm *databaseManager) DeleteBlock(hash common.Hash, number uint64) {
  1518  	dbm.DeleteReceipts(hash, number)
  1519  	dbm.DeleteHeader(hash, number)
  1520  	dbm.DeleteBody(hash, number)
  1521  	dbm.DeleteTd(hash, number)
  1522  	dbm.cm.deleteBlockCache(hash)
  1523  }
  1524  
  1525  const badBlockToKeep = 100
  1526  
  1527  type badBlock struct {
  1528  	Header *types.Header
  1529  	Body   *types.Body
  1530  }
  1531  
  1532  // badBlockList implements the sort interface so that a list of bad blocks can be
  1533  // sorted by block number (ascending; WriteBadBlock reverses it with sort.Reverse).
  1534  type badBlockList []*badBlock
  1535  
  1536  func (s badBlockList) Len() int { return len(s) }
  1537  func (s badBlockList) Less(i, j int) bool {
  1538  	return s[i].Header.Number.Uint64() < s[j].Header.Number.Uint64()
  1539  }
  1540  func (s badBlockList) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
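// Illustrative note (not part of the original code): badBlockList sorts in
// ascending block-number order, so WriteBadBlock below wraps it in sort.Reverse
// to keep the newest bad blocks first. A minimal sketch:
//
//	list := badBlockList{
//		{Header: &types.Header{Number: big.NewInt(7)}},
//		{Header: &types.Header{Number: big.NewInt(42)}},
//	}
//	sort.Sort(sort.Reverse(list))
//	// list[0] now holds block 42 and list[1] holds block 7.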
  1541  
  1542  // ReadBadBlock retrieves the bad block with the corresponding block hash.
  1543  func (dbm *databaseManager) ReadBadBlock(hash common.Hash) *types.Block {
  1544  	db := dbm.getDatabase(MiscDB)
  1545  	blob, err := db.Get(badBlockKey)
  1546  	if err != nil {
  1547  		return nil
  1548  	}
  1549  	var badBlocks badBlockList
  1550  	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
  1551  		return nil
  1552  	}
  1553  	for _, bad := range badBlocks {
  1554  		if bad.Header.Hash() == hash {
  1555  			return types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions)
  1556  		}
  1557  	}
  1558  	return nil
  1559  }
  1560  
  1561  // ReadAllBadBlocks retrieves all the bad blocks in the database.
  1562  // All returned blocks are sorted in reverse order by number.
  1563  func (dbm *databaseManager) ReadAllBadBlocks() ([]*types.Block, error) {
  1564  	var badBlocks badBlockList
  1565  	db := dbm.getDatabase(MiscDB)
  1566  	blob, err := db.Get(badBlockKey)
  1567  	if err != nil {
  1568  		return nil, err
  1569  	}
  1570  	if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
  1571  		return nil, err
  1572  	}
  1573  	blocks := make([]*types.Block, len(badBlocks))
  1574  	for i, bad := range badBlocks {
  1575  		blocks[i] = types.NewBlockWithHeader(bad.Header).WithBody(bad.Body.Transactions)
  1576  	}
  1577  	return blocks, nil
  1578  }
  1579  
  1580  // WriteBadBlock serializes the bad block into the database. If the accumulated
  1581  // bad blocks exceed the capacity, the oldest will be dropped.
  1582  func (dbm *databaseManager) WriteBadBlock(block *types.Block) {
  1583  	db := dbm.getDatabase(MiscDB)
  1584  	blob, err := db.Get(badBlockKey)
  1585  	if err != nil {
  1586  		logger.Warn("Failed to load old bad blocks", "error", err)
  1587  	}
  1588  	var badBlocks badBlockList
  1589  	if len(blob) > 0 {
  1590  		if err := rlp.DecodeBytes(blob, &badBlocks); err != nil {
  1591  			logger.Error("Failed to decode old bad blocks", "err", err)
  1592  			return
  1593  		}
  1594  	}
  1595  
  1596  	for _, badblock := range badBlocks {
  1597  		if badblock.Header.Hash() == block.Hash() && badblock.Header.Number.Uint64() == block.NumberU64() {
  1598  			logger.Info("A corresponding bad block already exists in the database", "blockNumber", block.NumberU64())
  1599  			return
  1600  		}
  1601  	}
  1602  
  1603  	badBlocks = append(badBlocks, &badBlock{
  1604  		Header: block.Header(),
  1605  		Body:   block.Body(),
  1606  	})
  1607  	sort.Sort(sort.Reverse(badBlocks))
  1608  	if len(badBlocks) > badBlockToKeep {
  1609  		badBlocks = badBlocks[:badBlockToKeep]
  1610  	}
  1611  	data, err := rlp.EncodeToBytes(badBlocks)
  1612  	if err != nil {
  1613  		logger.Crit("Failed to encode bad blocks", "err", err)
  1614  		return
  1615  	}
  1616  	if err := db.Put(badBlockKey, data); err != nil {
  1617  		logger.Crit("Failed to write bad blocks", "err", err)
  1618  		return
  1619  	}
  1620  }
  1621  
  1622  func (dbm *databaseManager) DeleteBadBlocks() {
  1623  	db := dbm.getDatabase(MiscDB)
  1624  	if err := db.Delete(badBlockKey); err != nil {
  1625  		logger.Error("Failed to delete bad blocks", "err", err)
  1626  	}
  1627  }
  1628  
  1629  // Find Common Ancestor operation
  1630  // FindCommonAncestor returns the last common ancestor of two block headers
  1631  func (dbm *databaseManager) FindCommonAncestor(a, b *types.Header) *types.Header {
  1632  	for bn := b.Number.Uint64(); a.Number.Uint64() > bn; {
  1633  		a = dbm.ReadHeader(a.ParentHash, a.Number.Uint64()-1)
  1634  		if a == nil {
  1635  			return nil
  1636  		}
  1637  	}
  1638  	for an := a.Number.Uint64(); an < b.Number.Uint64(); {
  1639  		b = dbm.ReadHeader(b.ParentHash, b.Number.Uint64()-1)
  1640  		if b == nil {
  1641  			return nil
  1642  		}
  1643  	}
  1644  	for a.Hash() != b.Hash() {
  1645  		a = dbm.ReadHeader(a.ParentHash, a.Number.Uint64()-1)
  1646  		if a == nil {
  1647  			return nil
  1648  		}
  1649  		b = dbm.ReadHeader(b.ParentHash, b.Number.Uint64()-1)
  1650  		if b == nil {
  1651  			return nil
  1652  		}
  1653  	}
  1654  	return a
  1655  }
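// exampleCommonAncestorDepth is a hypothetical, illustrative sketch (not part
// of the original code) using FindCommonAncestor above to measure how far two
// chain branches diverge; a nil ancestor means one of the branches is missing
// headers in the database.
func exampleCommonAncestorDepth(dbm *databaseManager, a, b *types.Header) (uint64, bool) {
	ancestor := dbm.FindCommonAncestor(a, b)
	if ancestor == nil {
		return 0, false
	}
	return a.Number.Uint64() - ancestor.Number.Uint64(), true
}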
  1656  
  1657  // Istanbul Snapshot operations.
  1658  func (dbm *databaseManager) ReadIstanbulSnapshot(hash common.Hash) ([]byte, error) {
  1659  	db := dbm.getDatabase(MiscDB)
  1660  	return db.Get(snapshotKey(hash))
  1661  }
  1662  
  1663  func (dbm *databaseManager) WriteIstanbulSnapshot(hash common.Hash, blob []byte) error {
  1664  	db := dbm.getDatabase(MiscDB)
  1665  	return db.Put(snapshotKey(hash), blob)
  1666  }
  1667  
  1668  // Merkle Proof operation.
  1669  func (dbm *databaseManager) WriteMerkleProof(key, value []byte) {
  1670  	db := dbm.getDatabase(MiscDB)
  1671  	if err := db.Put(key, value); err != nil {
  1672  		logger.Crit("Failed to write merkle proof", "err", err)
  1673  	}
  1674  }
  1675  
  1676  // ReadCode retrieves the contract code of the provided code hash.
  1677  func (dbm *databaseManager) ReadCode(hash common.Hash) []byte {
  1678  	// Try the legacy code scheme first; if the code is not found, fall back to the
  1679  	// current scheme, since most of the code will still be found under the legacy scheme.
  1680  	//
  1681  	// TODO-Klaytn-Snapsync change the order when we forcibly upgrade the code scheme with snapshot.
  1682  	db := dbm.getDatabase(StateTrieDB)
  1683  	if data, _ := db.Get(hash[:]); len(data) > 0 {
  1684  		return data
  1685  	}
  1686  
  1687  	return dbm.ReadCodeWithPrefix(hash)
  1688  }
  1689  
  1690  // ReadCodeWithPrefix retrieves the contract code of the provided code hash.
  1691  // The main difference between this function and ReadCode is that this function
  1692  // only checks for existence under the latest scheme (with prefix).
  1693  func (dbm *databaseManager) ReadCodeWithPrefix(hash common.Hash) []byte {
  1694  	db := dbm.getDatabase(StateTrieDB)
  1695  	data, _ := db.Get(CodeKey(hash))
  1696  	return data
  1697  }
  1698  
  1699  // HasCode checks if the contract code corresponding to the
  1700  // provided code hash is present in the db.
  1701  func (dbm *databaseManager) HasCode(hash common.Hash) bool {
  1702  	// Try the prefixed code scheme first; if the code is not found, fall back to
  1703  	// the legacy scheme.
  1704  	//
  1705  	// TODO-Klaytn-Snapsync change the order when we forcibly upgrade the code scheme with snapshot.
  1706  	db := dbm.getDatabase(StateTrieDB)
  1707  	if ok, _ := db.Has(hash.Bytes()); ok {
  1708  		return true
  1709  	}
  1710  	return dbm.HasCodeWithPrefix(hash)
  1711  }
  1712  
  1713  // HasCodeWithPrefix checks if the contract code corresponding to the
  1714  // provided code hash is present in the db. This function will only check
  1715  // presence using the prefix-scheme.
  1716  func (dbm *databaseManager) HasCodeWithPrefix(hash common.Hash) bool {
  1717  	db := dbm.getDatabase(StateTrieDB)
  1718  	ok, _ := db.Has(CodeKey(hash))
  1719  	return ok
  1720  }
  1721  
  1722  // WriteCode writes the provided contract code to the database.
  1723  func (dbm *databaseManager) WriteCode(hash common.Hash, code []byte) {
  1724  	dbm.lockInMigration.RLock()
  1725  	defer dbm.lockInMigration.RUnlock()
  1726  
  1727  	dbs := make([]Database, 0, 2)
  1728  	if dbm.inMigration {
  1729  		dbs = append(dbs, dbm.getDatabase(StateTrieMigrationDB))
  1730  	}
  1731  	dbs = append(dbs, dbm.getDatabase(StateTrieDB))
  1732  	for _, db := range dbs {
  1733  		if err := db.Put(CodeKey(hash), code); err != nil {
  1734  			logger.Crit("Failed to store contract code", "err", err)
  1735  		}
  1736  	}
  1737  }
  1738  
  1739  // DeleteCode deletes the specified contract code from the database.
  1740  func (dbm *databaseManager) DeleteCode(hash common.Hash) {
  1741  	db := dbm.getDatabase(StateTrieDB)
  1742  	if err := db.Delete(CodeKey(hash)); err != nil {
  1743  		logger.Crit("Failed to delete contract code", "err", err)
  1744  	}
  1745  }
  1746  
  1747  // Cached Trie Node operation.
  1748  func (dbm *databaseManager) ReadCachedTrieNode(hash common.Hash) ([]byte, error) {
  1749  	dbm.lockInMigration.RLock()
  1750  	defer dbm.lockInMigration.RUnlock()
  1751  
  1752  	if dbm.inMigration {
  1753  		if val, err := dbm.GetStateTrieMigrationDB().Get(hash[:]); err == nil {
  1754  			return val, nil
  1755  		} else if err != dataNotFoundErr {
  1756  			// TODO-Klaytn-Database Need to be properly handled
  1757  			logger.Error("Unexpected error while reading cached trie node from state migration database", "err", err)
  1758  		}
  1759  	}
  1760  	val, err := dbm.ReadCachedTrieNodeFromOld(hash)
  1761  	if err != nil && err != dataNotFoundErr {
  1762  		// TODO-Klaytn-Database Need to be properly handled
  1763  		logger.Error("Unexpected error while reading cached trie node", "err", err)
  1764  	}
  1765  	return val, err
  1766  }
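// exampleReadTrieNode is a hypothetical, illustrative sketch (not part of the
// original code) showing how a caller of ReadCachedTrieNode above can tell a
// genuinely missing node (dataNotFoundErr) apart from an unexpected database error.
func exampleReadTrieNode(dbm *databaseManager, hash common.Hash) []byte {
	val, err := dbm.ReadCachedTrieNode(hash)
	if err == dataNotFoundErr {
		return nil // the node is simply not stored
	}
	if err != nil {
		logger.Error("Unexpected error while reading trie node", "hash", hash, "err", err)
		return nil
	}
	return val
}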
  1767  
  1768  // Cached Trie Node Preimage operation.
  1769  func (dbm *databaseManager) ReadCachedTrieNodePreimage(secureKey []byte) ([]byte, error) {
  1770  	dbm.lockInMigration.RLock()
  1771  	defer dbm.lockInMigration.RUnlock()
  1772  
  1773  	if dbm.inMigration {
  1774  		if val, err := dbm.GetStateTrieMigrationDB().Get(secureKey); err == nil {
  1775  			return val, nil
  1776  		}
  1777  	}
  1778  	return dbm.ReadCachedTrieNodePreimageFromOld(secureKey)
  1779  }
  1780  
  1781  // State Trie Related operations.
  1782  func (dbm *databaseManager) ReadStateTrieNode(key []byte) ([]byte, error) {
  1783  	dbm.lockInMigration.RLock()
  1784  	defer dbm.lockInMigration.RUnlock()
  1785  
  1786  	if dbm.inMigration {
  1787  		if val, err := dbm.GetStateTrieMigrationDB().Get(key); err == nil {
  1788  			return val, nil
  1789  		}
  1790  	}
  1791  	return dbm.ReadStateTrieNodeFromOld(key)
  1792  }
  1793  
  1794  func (dbm *databaseManager) HasStateTrieNode(key []byte) (bool, error) {
  1795  	val, err := dbm.ReadStateTrieNode(key)
  1796  	if val == nil || err != nil {
  1797  		return false, err
  1798  	}
  1799  	return true, nil
  1800  }
  1801  
  1802  // ReadPreimage retrieves a single preimage of the provided hash.
  1803  func (dbm *databaseManager) ReadPreimage(hash common.Hash) []byte {
  1804  	dbm.lockInMigration.RLock()
  1805  	defer dbm.lockInMigration.RUnlock()
  1806  
  1807  	if dbm.inMigration {
  1808  		if val, err := dbm.GetStateTrieMigrationDB().Get(preimageKey(hash)); err == nil {
  1809  			return val
  1810  		}
  1811  	}
  1812  	return dbm.ReadPreimageFromOld(hash)
  1813  }
  1814  
  1815  // Cached Trie Node operation.
  1816  func (dbm *databaseManager) ReadCachedTrieNodeFromNew(hash common.Hash) ([]byte, error) {
  1817  	return dbm.GetStateTrieMigrationDB().Get(hash[:])
  1818  }
  1819  
  1820  // Cached Trie Node Preimage operation.
  1821  func (dbm *databaseManager) ReadCachedTrieNodePreimageFromNew(secureKey []byte) ([]byte, error) {
  1822  	return dbm.GetStateTrieMigrationDB().Get(secureKey)
  1823  }
  1824  
  1825  // State Trie Related operations.
  1826  func (dbm *databaseManager) ReadStateTrieNodeFromNew(key []byte) ([]byte, error) {
  1827  	return dbm.GetStateTrieMigrationDB().Get(key)
  1828  }
  1829  
  1830  func (dbm *databaseManager) HasStateTrieNodeFromNew(key []byte) (bool, error) {
  1831  	val, err := dbm.GetStateTrieMigrationDB().Get(key)
  1832  	if val == nil || err != nil {
  1833  		return false, err
  1834  	}
  1835  	return true, nil
  1836  }
  1837  
  1838  func (dbm *databaseManager) HasCodeWithPrefixFromNew(hash common.Hash) bool {
  1839  	db := dbm.GetStateTrieMigrationDB()
  1840  	ok, _ := db.Has(CodeKey(hash))
  1841  	return ok
  1842  }
  1843  
  1844  // ReadPreimage retrieves a single preimage of the provided hash.
  1845  func (dbm *databaseManager) ReadPreimageFromNew(hash common.Hash) []byte {
  1846  	data, _ := dbm.GetStateTrieMigrationDB().Get(preimageKey(hash))
  1847  	return data
  1848  }
  1849  
  1850  func (dbm *databaseManager) ReadCachedTrieNodeFromOld(hash common.Hash) ([]byte, error) {
  1851  	db := dbm.getDatabase(StateTrieDB)
  1852  	return db.Get(hash[:])
  1853  }
  1854  
  1855  // Cached Trie Node Preimage operation.
  1856  func (dbm *databaseManager) ReadCachedTrieNodePreimageFromOld(secureKey []byte) ([]byte, error) {
  1857  	db := dbm.getDatabase(StateTrieDB)
  1858  	return db.Get(secureKey)
  1859  }
  1860  
  1861  // State Trie Related operations.
  1862  func (dbm *databaseManager) ReadStateTrieNodeFromOld(key []byte) ([]byte, error) {
  1863  	db := dbm.getDatabase(StateTrieDB)
  1864  	return db.Get(key)
  1865  }
  1866  
  1867  func (dbm *databaseManager) HasStateTrieNodeFromOld(key []byte) (bool, error) {
  1868  	val, err := dbm.ReadStateTrieNodeFromOld(key)
  1869  	if val == nil || err != nil {
  1870  		return false, err
  1871  	}
  1872  	return true, nil
  1873  }
  1874  
  1875  func (dbm *databaseManager) HasCodeWithPrefixFromOld(hash common.Hash) bool {
  1876  	db := dbm.getDatabase(StateTrieDB)
  1877  	ok, _ := db.Has(CodeKey(hash))
  1878  	return ok
  1879  }
  1880  
  1881  // ReadPreimage retrieves a single preimage of the provided hash.
  1882  func (dbm *databaseManager) ReadPreimageFromOld(hash common.Hash) []byte {
  1883  	db := dbm.getDatabase(StateTrieDB)
  1884  	data, _ := db.Get(preimageKey(hash))
  1885  	return data
  1886  }
  1887  
  1888  // WritePreimages writes the provided set of preimages to the database. `number` is the
  1889  // current block number, and is used for debug messages only.
  1890  func (dbm *databaseManager) WritePreimages(number uint64, preimages map[common.Hash][]byte) {
  1891  	batch := dbm.NewBatch(StateTrieDB)
  1892  	for hash, preimage := range preimages {
  1893  		if err := batch.Put(preimageKey(hash), preimage); err != nil {
  1894  			logger.Crit("Failed to store trie preimage", "err", err)
  1895  		}
  1896  	}
  1897  	if err := batch.Write(); err != nil {
  1898  		logger.Crit("Failed to batch write trie preimage", "err", err, "blockNumber", number)
  1899  	}
  1900  	preimageCounter.Inc(int64(len(preimages)))
  1901  	preimageHitCounter.Inc(int64(len(preimages)))
  1902  }
  1903  
  1904  // ReadTxLookupEntry retrieves the positional metadata associated with a transaction
  1905  // hash to allow retrieving the transaction or receipt by hash.
  1906  func (dbm *databaseManager) ReadTxLookupEntry(hash common.Hash) (common.Hash, uint64, uint64) {
  1907  	db := dbm.getDatabase(TxLookUpEntryDB)
  1908  	data, _ := db.Get(TxLookupKey(hash))
  1909  	if len(data) == 0 {
  1910  		return common.Hash{}, 0, 0
  1911  	}
  1912  	var entry TxLookupEntry
  1913  	if err := rlp.DecodeBytes(data, &entry); err != nil {
  1914  		logger.Error("Invalid transaction lookup entry RLP", "hash", hash, "err", err)
  1915  		return common.Hash{}, 0, 0
  1916  	}
  1917  	return entry.BlockHash, entry.BlockIndex, entry.Index
  1918  }
  1919  
  1920  // WriteTxLookupEntries stores positional metadata for every transaction from
  1921  // a block, enabling hash based transaction and receipt lookups.
  1922  func (dbm *databaseManager) WriteTxLookupEntries(block *types.Block) {
  1923  	db := dbm.getDatabase(TxLookUpEntryDB)
  1924  	putTxLookupEntriesToPutter(db, block)
  1925  }
  1926  
  1927  func (dbm *databaseManager) WriteAndCacheTxLookupEntries(block *types.Block) error {
  1928  	batch := dbm.NewBatch(TxLookUpEntryDB)
  1929  	for i, tx := range block.Transactions() {
  1930  		entry := TxLookupEntry{
  1931  			BlockHash:  block.Hash(),
  1932  			BlockIndex: block.NumberU64(),
  1933  			Index:      uint64(i),
  1934  		}
  1935  		data, err := rlp.EncodeToBytes(entry)
  1936  		if err != nil {
  1937  			logger.Crit("Failed to encode transaction lookup entry", "err", err)
  1938  		}
  1939  		if err := batch.Put(TxLookupKey(tx.Hash()), data); err != nil {
  1940  			logger.Crit("Failed to store transaction lookup entry", "err", err)
  1941  		}
  1942  
  1943  		// Write to cache at the end of successful Put.
  1944  		dbm.cm.writeTxAndLookupInfoCache(tx.Hash(), &TransactionLookup{tx, &entry})
  1945  	}
  1946  	if err := batch.Write(); err != nil {
  1947  		logger.Error("Failed to write TxLookupEntries in batch", "err", err, "blockNumber", block.Number())
  1948  		return err
  1949  	}
  1950  	return nil
  1951  }
  1952  
  1953  func (dbm *databaseManager) PutTxLookupEntriesToBatch(batch Batch, block *types.Block) {
  1954  	putTxLookupEntriesToPutter(batch, block)
  1955  }
  1956  
  1957  func putTxLookupEntriesToPutter(putter KeyValueWriter, block *types.Block) {
  1958  	for i, tx := range block.Transactions() {
  1959  		entry := TxLookupEntry{
  1960  			BlockHash:  block.Hash(),
  1961  			BlockIndex: block.NumberU64(),
  1962  			Index:      uint64(i),
  1963  		}
  1964  		data, err := rlp.EncodeToBytes(entry)
  1965  		if err != nil {
  1966  			logger.Crit("Failed to encode transaction lookup entry", "err", err)
  1967  		}
  1968  		if err := putter.Put(TxLookupKey(tx.Hash()), data); err != nil {
  1969  			logger.Crit("Failed to store transaction lookup entry", "err", err)
  1970  		}
  1971  	}
  1972  }
  1973  
  1974  // DeleteTxLookupEntry removes all transaction data associated with a hash.
  1975  func (dbm *databaseManager) DeleteTxLookupEntry(hash common.Hash) {
  1976  	db := dbm.getDatabase(TxLookUpEntryDB)
  1977  	db.Delete(TxLookupKey(hash))
  1978  }
  1979  
  1980  // ReadTxAndLookupInfo retrieves a specific transaction from the database, along with
  1981  // its added positional metadata.
  1982  func (dbm *databaseManager) ReadTxAndLookupInfo(hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
  1983  	blockHash, blockNumber, txIndex := dbm.ReadTxLookupEntry(hash)
  1984  	if blockHash == (common.Hash{}) {
  1985  		return nil, common.Hash{}, 0, 0
  1986  	}
  1987  	body := dbm.ReadBody(blockHash, blockNumber)
  1988  	if body == nil || len(body.Transactions) <= int(txIndex) {
  1989  		logger.Error("Transaction referenced missing", "number", blockNumber, "hash", blockHash, "index", txIndex)
  1990  		return nil, common.Hash{}, 0, 0
  1991  	}
  1992  	return body.Transactions[txIndex], blockHash, blockNumber, txIndex
  1993  }
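// exampleLookupTxAndReceipt is a hypothetical, illustrative sketch (not part of
// the original code) chaining the lookup-entry readers above: the positional
// metadata locates the transaction inside its block, and the same index is used
// to pick the matching receipt out of the block's receipt list.
func exampleLookupTxAndReceipt(dbm *databaseManager, txHash common.Hash) (*types.Transaction, *types.Receipt) {
	tx, blockHash, blockNumber, txIndex := dbm.ReadTxAndLookupInfo(txHash)
	if tx == nil {
		return nil, nil
	}
	receipts := dbm.ReadReceipts(blockHash, blockNumber)
	if uint64(len(receipts)) <= txIndex {
		return tx, nil
	}
	return tx, receipts[txIndex]
}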
  1994  
  1995  // NewSenderTxHashToTxHashBatch returns a batch to write senderTxHash to txHash mapping information.
  1996  func (dbm *databaseManager) NewSenderTxHashToTxHashBatch() Batch {
  1997  	return dbm.NewBatch(MiscDB)
  1998  }
  1999  
  2000  // PutSenderTxHashToTxHashToBatch 1) puts the given senderTxHash and txHash to the given batch and
  2001  // 2) writes the information to the cache.
  2002  func (dbm *databaseManager) PutSenderTxHashToTxHashToBatch(batch Batch, senderTxHash, txHash common.Hash) error {
  2003  	if err := batch.Put(SenderTxHashToTxHashKey(senderTxHash), txHash.Bytes()); err != nil {
  2004  		return err
  2005  	}
  2006  
  2007  	dbm.cm.writeSenderTxHashToTxHashCache(senderTxHash, txHash)
  2008  
  2009  	if batch.ValueSize() > IdealBatchSize {
  2010  		batch.Write()
  2011  		batch.Reset()
  2012  	}
  2013  
  2014  	return nil
  2015  }
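// exampleIndexSenderTxHashes is a hypothetical, illustrative sketch (not part
// of the original code) of the intended call pattern for the two functions
// above: entries are staged into the batch, which flushes itself whenever it
// grows past IdealBatchSize, and a final Write persists whatever remains.
func exampleIndexSenderTxHashes(dbm *databaseManager, pairs map[common.Hash]common.Hash) error {
	batch := dbm.NewSenderTxHashToTxHashBatch()
	for senderTxHash, txHash := range pairs {
		if err := dbm.PutSenderTxHashToTxHashToBatch(batch, senderTxHash, txHash); err != nil {
			return err
		}
	}
	return batch.Write()
}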
  2016  
  2017  // ReadTxHashFromSenderTxHash retrieves a txHash corresponding to the given senderTxHash.
  2018  func (dbm *databaseManager) ReadTxHashFromSenderTxHash(senderTxHash common.Hash) common.Hash {
  2019  	if txHash := dbm.cm.readSenderTxHashToTxHashCache(senderTxHash); !common.EmptyHash(txHash) {
  2020  		return txHash
  2021  	}
  2022  
  2023  	data, _ := dbm.getDatabase(MiscDB).Get(SenderTxHashToTxHashKey(senderTxHash))
  2024  	if len(data) == 0 {
  2025  		return common.Hash{}
  2026  	}
  2027  
  2028  	txHash := common.BytesToHash(data)
  2029  	dbm.cm.writeSenderTxHashToTxHashCache(senderTxHash, txHash)
  2030  	return txHash
  2031  }
  2032  
  2033  // BloomBits operations.
  2034  // ReadBloomBits retrieves the compressed bloom bit vector belonging to the given
  2035  // section and bit index from the database.
  2036  func (dbm *databaseManager) ReadBloomBits(bloomBitsKey []byte) ([]byte, error) {
  2037  	db := dbm.getDatabase(MiscDB)
  2038  	return db.Get(bloomBitsKey)
  2039  }
  2040  
  2041  // WriteBloomBits stores the compressed bloom bits vector belonging to the given
  2042  // section and bit index.
  2043  func (dbm *databaseManager) WriteBloomBits(bloomBitsKey, bits []byte) error {
  2044  	db := dbm.getDatabase(MiscDB)
  2045  	return db.Put(bloomBitsKey, bits)
  2046  }
  2047  
  2048  // ValidSections operation.
  2049  func (dbm *databaseManager) ReadValidSections() ([]byte, error) {
  2050  	db := dbm.getDatabase(MiscDB)
  2051  	return db.Get(validSectionKey)
  2052  }
  2053  
  2054  func (dbm *databaseManager) WriteValidSections(encodedSections []byte) {
  2055  	db := dbm.getDatabase(MiscDB)
  2056  	db.Put(validSectionKey, encodedSections)
  2057  }
  2058  
  2059  // SectionHead operation.
  2060  func (dbm *databaseManager) ReadSectionHead(encodedSection []byte) ([]byte, error) {
  2061  	db := dbm.getDatabase(MiscDB)
  2062  	return db.Get(sectionHeadKey(encodedSection))
  2063  }
  2064  
  2065  func (dbm *databaseManager) WriteSectionHead(encodedSection []byte, hash common.Hash) {
  2066  	db := dbm.getDatabase(MiscDB)
  2067  	db.Put(sectionHeadKey(encodedSection), hash.Bytes())
  2068  }
  2069  
  2070  func (dbm *databaseManager) DeleteSectionHead(encodedSection []byte) {
  2071  	db := dbm.getDatabase(MiscDB)
  2072  	db.Delete(sectionHeadKey(encodedSection))
  2073  }
  2074  
  2075  // ReadDatabaseVersion retrieves the version number of the database.
  2076  func (dbm *databaseManager) ReadDatabaseVersion() *uint64 {
  2077  	db := dbm.getDatabase(MiscDB)
  2078  	var version uint64
  2079  
  2080  	enc, _ := db.Get(databaseVerisionKey)
  2081  	if len(enc) == 0 {
  2082  		return nil
  2083  	}
  2084  
  2085  	if err := rlp.DecodeBytes(enc, &version); err != nil {
  2086  		logger.Error("Failed to decode database version", "err", err)
  2087  		return nil
  2088  	}
  2089  
  2090  	return &version
  2091  }
  2092  
  2093  // WriteDatabaseVersion stores the version number of the database
  2094  func (dbm *databaseManager) WriteDatabaseVersion(version uint64) {
  2095  	db := dbm.getDatabase(MiscDB)
  2096  	enc, err := rlp.EncodeToBytes(version)
  2097  	if err != nil {
  2098  		logger.Crit("Failed to encode database version", "err", err)
  2099  	}
  2100  	if err := db.Put(databaseVerisionKey, enc); err != nil {
  2101  		logger.Crit("Failed to store the database version", "err", err)
  2102  	}
  2103  }
  2104  
  2105  // ReadChainConfig retrieves the consensus settings based on the given genesis hash.
  2106  func (dbm *databaseManager) ReadChainConfig(hash common.Hash) *params.ChainConfig {
  2107  	db := dbm.getDatabase(MiscDB)
  2108  	data, _ := db.Get(configKey(hash))
  2109  	if len(data) == 0 {
  2110  		return nil
  2111  	}
  2112  	var config params.ChainConfig
  2113  	if err := json.Unmarshal(data, &config); err != nil {
  2114  		logger.Error("Invalid chain config JSON", "hash", hash, "err", err)
  2115  		return nil
  2116  	}
  2117  	return &config
  2118  }
  2119  
  2120  func (dbm *databaseManager) WriteChainConfig(hash common.Hash, cfg *params.ChainConfig) {
  2121  	db := dbm.getDatabase(MiscDB)
  2122  	if cfg == nil {
  2123  		return
  2124  	}
  2125  	data, err := json.Marshal(cfg)
  2126  	if err != nil {
  2127  		logger.Crit("Failed to JSON encode chain config", "err", err)
  2128  	}
  2129  	if err := db.Put(configKey(hash), data); err != nil {
  2130  		logger.Crit("Failed to store chain config", "err", err)
  2131  	}
  2132  }
  2133  
  2134  // ReadSnapshotJournal retrieves the serialized in-memory diff layers saved at
  2135  // the last shutdown. The blob is expected to be at most a few tens of megabytes.
  2136  func (dbm *databaseManager) ReadSnapshotJournal() []byte {
  2137  	db := dbm.getDatabase(SnapshotDB)
  2138  	data, _ := db.Get(snapshotJournalKey)
  2139  	return data
  2140  }
  2141  
  2142  // WriteSnapshotJournal stores the serialized in-memory diff layers to save at
  2143  // shutdown. The blob is expected to be at most a few tens of megabytes.
  2144  func (dbm *databaseManager) WriteSnapshotJournal(journal []byte) {
  2145  	db := dbm.getDatabase(SnapshotDB)
  2146  	if err := db.Put(snapshotJournalKey, journal); err != nil {
  2147  		logger.Crit("Failed to store snapshot journal", "err", err)
  2148  	}
  2149  }
  2150  
  2151  // DeleteSnapshotJournal deletes the serialized in-memory diff layers saved at
  2152  // the last shutdown
  2153  func (dbm *databaseManager) DeleteSnapshotJournal() {
  2154  	db := dbm.getDatabase(SnapshotDB)
  2155  	if err := db.Delete(snapshotJournalKey); err != nil {
  2156  		logger.Crit("Failed to remove snapshot journal", "err", err)
  2157  	}
  2158  }
  2159  
  2160  // ReadSnapshotGenerator retrieves the serialized snapshot generator saved at
  2161  // the last shutdown.
  2162  func (dbm *databaseManager) ReadSnapshotGenerator() []byte {
  2163  	db := dbm.getDatabase(SnapshotDB)
  2164  	data, _ := db.Get(SnapshotGeneratorKey)
  2165  	return data
  2166  }
  2167  
  2168  // WriteSnapshotGenerator stores the serialized snapshot generator to save at
  2169  // shutdown.
  2170  func (dbm *databaseManager) WriteSnapshotGenerator(generator []byte) {
  2171  	db := dbm.getDatabase(SnapshotDB)
  2172  	if err := db.Put(SnapshotGeneratorKey, generator); err != nil {
  2173  		logger.Crit("Failed to store snapshot generator", "err", err)
  2174  	}
  2175  }
  2176  
  2177  // DeleteSnapshotGenerator deletes the serialized snapshot generator saved at
  2178  // the last shutdown
  2179  func (dbm *databaseManager) DeleteSnapshotGenerator() {
  2180  	db := dbm.getDatabase(SnapshotDB)
  2181  	if err := db.Delete(SnapshotGeneratorKey); err != nil {
  2182  		logger.Crit("Failed to remove snapshot generator", "err", err)
  2183  	}
  2184  }
  2185  
  2186  // ReadSnapshotDisabled retrieves whether snapshot maintenance is disabled.
  2187  func (dbm *databaseManager) ReadSnapshotDisabled() bool {
  2188  	db := dbm.getDatabase(SnapshotDB)
  2189  	disabled, _ := db.Has(snapshotDisabledKey)
  2190  	return disabled
  2191  }
  2192  
  2193  // WriteSnapshotDisabled stores the snapshot pause flag.
  2194  func (dbm *databaseManager) WriteSnapshotDisabled() {
  2195  	db := dbm.getDatabase(SnapshotDB)
  2196  	if err := db.Put(snapshotDisabledKey, []byte("42")); err != nil {
  2197  		logger.Crit("Failed to store snapshot disabled flag", "err", err)
  2198  	}
  2199  }
  2200  
  2201  // DeleteSnapshotDisabled deletes the flag keeping the snapshot maintenance disabled.
  2202  func (dbm *databaseManager) DeleteSnapshotDisabled() {
  2203  	db := dbm.getDatabase(SnapshotDB)
  2204  	if err := db.Delete(snapshotDisabledKey); err != nil {
  2205  		logger.Crit("Failed to remove snapshot disabled flag", "err", err)
  2206  	}
  2207  }
  2208  
  2209  // ReadSnapshotRecoveryNumber retrieves the block number of the last persisted
  2210  // snapshot layer.
  2211  func (dbm *databaseManager) ReadSnapshotRecoveryNumber() *uint64 {
  2212  	db := dbm.getDatabase(SnapshotDB)
  2213  	data, _ := db.Get(snapshotRecoveryKey)
  2214  	if len(data) == 0 {
  2215  		return nil
  2216  	}
  2217  	if len(data) != 8 {
  2218  		return nil
  2219  	}
  2220  	number := binary.BigEndian.Uint64(data)
  2221  	return &number
  2222  }
  2223  
  2224  // WriteSnapshotRecoveryNumber stores the block number of the last persisted
  2225  // snapshot layer.
  2226  func (dbm *databaseManager) WriteSnapshotRecoveryNumber(number uint64) {
  2227  	db := dbm.getDatabase(SnapshotDB)
  2228  	var buf [8]byte
  2229  	binary.BigEndian.PutUint64(buf[:], number)
  2230  	if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil {
  2231  		logger.Crit("Failed to store snapshot recovery number", "err", err)
  2232  	}
  2233  }
  2234  
  2235  // DeleteSnapshotRecoveryNumber deletes the block number of the last persisted
  2236  // snapshot layer.
  2237  func (dbm *databaseManager) DeleteSnapshotRecoveryNumber() {
  2238  	db := dbm.getDatabase(SnapshotDB)
  2239  	if err := db.Delete(snapshotRecoveryKey); err != nil {
  2240  		logger.Crit("Failed to remove snapshot recovery number", "err", err)
  2241  	}
  2242  }
  2243  
  2244  // ReadSnapshotSyncStatus retrieves the serialized sync status saved at shutdown.
  2245  func (dbm *databaseManager) ReadSnapshotSyncStatus() []byte {
  2246  	db := dbm.getDatabase(SnapshotDB)
  2247  	data, _ := db.Get(snapshotSyncStatusKey)
  2248  	return data
  2249  }
  2250  
  2251  // WriteSnapshotSyncStatus stores the serialized sync status to save at shutdown.
  2252  func (dbm *databaseManager) WriteSnapshotSyncStatus(status []byte) {
  2253  	db := dbm.getDatabase(SnapshotDB)
  2254  	if err := db.Put(snapshotSyncStatusKey, status); err != nil {
  2255  		logger.Crit("Failed to store snapshot sync status", "err", err)
  2256  	}
  2257  }
  2258  
  2259  // DeleteSnapshotSyncStatus deletes the serialized sync status saved at the last
  2260  // shutdown
  2261  func (dbm *databaseManager) DeleteSnapshotSyncStatus() {
  2262  	db := dbm.getDatabase(SnapshotDB)
  2263  	if err := db.Delete(snapshotSyncStatusKey); err != nil {
  2264  		logger.Crit("Failed to remove snapshot sync status", "err", err)
  2265  	}
  2266  }
  2267  
  2268  // ReadSnapshotRoot retrieves the root of the block whose state is contained in
  2269  // the persisted snapshot.
  2270  func (dbm *databaseManager) ReadSnapshotRoot() common.Hash {
  2271  	db := dbm.getDatabase(SnapshotDB)
  2272  	data, _ := db.Get(snapshotRootKey)
  2273  	if len(data) != common.HashLength {
  2274  		return common.Hash{}
  2275  	}
  2276  	return common.BytesToHash(data)
  2277  }
  2278  
  2279  // WriteSnapshotRoot stores the root of the block whose state is contained in
  2280  // the persisted snapshot.
  2281  func (dbm *databaseManager) WriteSnapshotRoot(root common.Hash) {
  2282  	db := dbm.getDatabase(SnapshotDB)
  2283  	if err := db.Put(snapshotRootKey, root[:]); err != nil {
  2284  		logger.Crit("Failed to store snapshot root", "err", err)
  2285  	}
  2286  }
  2287  
  2288  // DeleteSnapshotRoot deletes the hash of the block whose state is contained in
  2289  // the persisted snapshot. Since snapshots are not immutable, this method can
  2290  // be used during updates, so a crash or failure will mark the entire snapshot
  2291  // invalid.
  2292  func (dbm *databaseManager) DeleteSnapshotRoot() {
  2293  	db := dbm.getDatabase(SnapshotDB)
  2294  	if err := db.Delete(snapshotRootKey); err != nil {
  2295  		logger.Crit("Failed to remove snapshot root", "err", err)
  2296  	}
  2297  }
  2298  
  2299  // ReadAccountSnapshot retrieves the snapshot entry of an account trie leaf.
  2300  func (dbm *databaseManager) ReadAccountSnapshot(hash common.Hash) []byte {
  2301  	db := dbm.getDatabase(SnapshotDB)
  2302  	data, _ := db.Get(AccountSnapshotKey(hash))
  2303  	return data
  2304  }
  2305  
  2306  // WriteAccountSnapshot stores the snapshot entry of an account trie leaf.
  2307  func (dbm *databaseManager) WriteAccountSnapshot(hash common.Hash, entry []byte) {
  2308  	db := dbm.getDatabase(SnapshotDB)
  2309  	writeAccountSnapshot(db, hash, entry)
  2310  }
  2311  
  2312  // DeleteAccountSnapshot removes the snapshot entry of an account trie leaf.
  2313  func (dbm *databaseManager) DeleteAccountSnapshot(hash common.Hash) {
  2314  	db := dbm.getDatabase(SnapshotDB)
  2315  	deleteAccountSnapshot(db, hash)
  2316  }
  2317  
  2318  // ReadStorageSnapshot retrieves the snapshot entry of a storage trie leaf.
  2319  func (dbm *databaseManager) ReadStorageSnapshot(accountHash, storageHash common.Hash) []byte {
  2320  	db := dbm.getDatabase(SnapshotDB)
  2321  	data, _ := db.Get(StorageSnapshotKey(accountHash, storageHash))
  2322  	return data
  2323  }
  2324  
  2325  // WriteStorageSnapshot stores the snapshot entry of a storage trie leaf.
  2326  func (dbm *databaseManager) WriteStorageSnapshot(accountHash, storageHash common.Hash, entry []byte) {
  2327  	db := dbm.getDatabase(SnapshotDB)
  2328  	writeStorageSnapshot(db, accountHash, storageHash, entry)
  2329  }
  2330  
  2331  // DeleteStorageSnapshot removes the snapshot entry of a storage trie leaf.
  2332  func (dbm *databaseManager) DeleteStorageSnapshot(accountHash, storageHash common.Hash) {
  2333  	db := dbm.getDatabase(SnapshotDB)
  2334  	deleteStorageSnapshot(db, accountHash, storageHash)
  2335  }
  2336  
  2337  func (dbm *databaseManager) NewSnapshotDBIterator(prefix []byte, start []byte) Iterator {
  2338  	db := dbm.getDatabase(SnapshotDB)
  2339  	return db.NewIterator(prefix, start)
  2340  }
  2341  
  2342  // WriteChildChainTxHash stores the hash of a transaction containing AnchoringData,
  2343  // keyed by the given child chain block hash.
  2344  func (dbm *databaseManager) WriteChildChainTxHash(ccBlockHash common.Hash, ccTxHash common.Hash) {
  2345  	key := childChainTxHashKey(ccBlockHash)
  2346  	db := dbm.getDatabase(bridgeServiceDB)
  2347  	if err := db.Put(key, ccTxHash.Bytes()); err != nil {
  2348  		logger.Crit("Failed to store ChildChainTxHash", "ccBlockHash", ccBlockHash.String(), "ccTxHash", ccTxHash.String(), "err", err)
  2349  	}
  2350  }
  2351  
  2352  // ConvertChildChainBlockHashToParentChainTxHash returns the hash of a transaction
  2353  // containing AnchoringData, looked up by the given child chain block hash.
  2354  func (dbm *databaseManager) ConvertChildChainBlockHashToParentChainTxHash(scBlockHash common.Hash) common.Hash {
  2355  	key := childChainTxHashKey(scBlockHash)
  2356  	db := dbm.getDatabase(bridgeServiceDB)
  2357  	data, _ := db.Get(key)
  2358  	if len(data) == 0 {
  2359  		return common.Hash{}
  2360  	}
  2361  	return common.BytesToHash(data)
  2362  }
  2363  
  2364  // WriteLastIndexedBlockNumber writes the most recently indexed block number.
  2365  func (dbm *databaseManager) WriteLastIndexedBlockNumber(blockNum uint64) {
  2366  	key := lastIndexedBlockKey
  2367  	db := dbm.getDatabase(bridgeServiceDB)
  2368  	if err := db.Put(key, common.Int64ToByteBigEndian(blockNum)); err != nil {
  2369  		logger.Crit("Failed to store LastIndexedBlockNumber", "blockNumber", blockNum, "err", err)
  2370  	}
  2371  }
  2372  
  2373  // GetLastIndexedBlockNumber returns the most recently indexed block number.
  2374  func (dbm *databaseManager) GetLastIndexedBlockNumber() uint64 {
  2375  	key := lastIndexedBlockKey
  2376  	db := dbm.getDatabase(bridgeServiceDB)
  2377  	data, _ := db.Get(key)
  2378  	if len(data) != 8 {
  2379  		return 0
  2380  	}
  2381  	return binary.BigEndian.Uint64(data)
  2382  }
  2383  
  2384  // WriteAnchoredBlockNumber writes the block number whose data has been anchored to the parent chain.
  2385  func (dbm *databaseManager) WriteAnchoredBlockNumber(blockNum uint64) {
  2386  	key := lastServiceChainTxReceiptKey
  2387  	db := dbm.getDatabase(bridgeServiceDB)
  2388  	if err := db.Put(key, common.Int64ToByteBigEndian(blockNum)); err != nil {
  2389  		logger.Crit("Failed to store LatestServiceChainBlockNum", "blockNumber", blockNum, "err", err)
  2390  	}
  2391  }
  2392  
  2393  // ReadAnchoredBlockNumber returns the latest block number whose data has been anchored to the parent chain.
  2394  func (dbm *databaseManager) ReadAnchoredBlockNumber() uint64 {
  2395  	key := lastServiceChainTxReceiptKey
  2396  	db := dbm.getDatabase(bridgeServiceDB)
  2397  	data, _ := db.Get(key)
  2398  	if len(data) != 8 {
  2399  		return 0
  2400  	}
  2401  	return binary.BigEndian.Uint64(data)
  2402  }
  2403  
  2404  // WriteHandleTxHashFromRequestTxHash stores the handle value transfer tx hash
  2405  // keyed by the corresponding request value transfer tx hash.
  2406  func (dbm *databaseManager) WriteHandleTxHashFromRequestTxHash(rTx, hTx common.Hash) {
  2407  	db := dbm.getDatabase(bridgeServiceDB)
  2408  	key := valueTransferTxHashKey(rTx)
  2409  	if err := db.Put(key, hTx.Bytes()); err != nil {
  2410  		logger.Crit("Failed to store handle value transfer tx hash", "request tx hash", rTx.String(), "handle tx hash", hTx.String(), "err", err)
  2411  	}
  2412  }
  2413  
  2414  // ReadHandleTxHashFromRequestTxHash returns the handle value transfer tx hash
  2415  // corresponding to the given request value transfer tx hash.
  2416  func (dbm *databaseManager) ReadHandleTxHashFromRequestTxHash(rTx common.Hash) common.Hash {
  2417  	key := valueTransferTxHashKey(rTx)
  2418  	db := dbm.getDatabase(bridgeServiceDB)
  2419  	data, _ := db.Get(key)
  2420  	if len(data) == 0 {
  2421  		return common.Hash{}
  2422  	}
  2423  	return common.BytesToHash(data)
  2424  }
  2425  
  2426  // WriteReceiptFromParentChain writes a receipt received from parent chain to child chain
  2427  // with corresponding block hash. It assumes that a child chain has only one parent chain.
  2428  func (dbm *databaseManager) WriteReceiptFromParentChain(blockHash common.Hash, receipt *types.Receipt) {
  2429  	receiptForStorage := (*types.ReceiptForStorage)(receipt)
  2430  	db := dbm.getDatabase(bridgeServiceDB)
  2431  	data, err := rlp.EncodeToBytes(receiptForStorage)
  2432  	if err != nil {
  2433  		logger.Crit("Failed to RLP encode receipt received from parent chain", "receipt.TxHash", receipt.TxHash, "err", err)
  2434  	}
  2435  	key := receiptFromParentChainKey(blockHash)
  2436  	if err = db.Put(key, data); err != nil {
  2437  		logger.Crit("Failed to store receipt received from parent chain", "receipt.TxHash", receipt.TxHash, "err", err)
  2438  	}
  2439  }
  2440  
  2441  // ReadReceiptFromParentChain returns a receipt received from parent chain to child chain
  2442  // with corresponding block hash. It assumes that a child chain has only one parent chain.
  2443  func (dbm *databaseManager) ReadReceiptFromParentChain(blockHash common.Hash) *types.Receipt {
  2444  	db := dbm.getDatabase(bridgeServiceDB)
  2445  	key := receiptFromParentChainKey(blockHash)
  2446  	data, _ := db.Get(key)
  2447  	if len(data) == 0 {
  2448  		return nil
  2449  	}
  2450  	serviceChainTxReceipt := new(types.ReceiptForStorage)
  2451  	if err := rlp.Decode(bytes.NewReader(data), serviceChainTxReceipt); err != nil {
  2452  		logger.Error("Invalid Receipt RLP received from parent chain", "err", err)
  2453  		return nil
  2454  	}
  2455  	return (*types.Receipt)(serviceChainTxReceipt)
  2456  }
  2457  
  2458  // WriteParentOperatorFeePayer writes a fee payer of parent operator.
  2459  func (dbm *databaseManager) WriteParentOperatorFeePayer(feePayer common.Address) {
  2460  	key := parentOperatorFeePayerPrefix
  2461  	db := dbm.getDatabase(bridgeServiceDB)
  2462  
  2463  	if err := db.Put(key, feePayer.Bytes()); err != nil {
  2464  		logger.Crit("Failed to store parent operator fee payer", "feePayer", feePayer.String(), "err", err)
  2465  	}
  2466  }
  2467  
  2468  // ReadParentOperatorFeePayer returns a fee payer of parent operator.
  2469  func (dbm *databaseManager) ReadParentOperatorFeePayer() common.Address {
  2470  	key := parentOperatorFeePayerPrefix
  2471  	db := dbm.getDatabase(bridgeServiceDB)
  2472  	data, _ := db.Get(key)
  2473  	if len(data) == 0 {
  2474  		return common.Address{}
  2475  	}
  2476  	return common.BytesToAddress(data)
  2477  }
  2478  
  2479  // WriteChildOperatorFeePayer writes a fee payer of child operator.
  2480  func (dbm *databaseManager) WriteChildOperatorFeePayer(feePayer common.Address) {
  2481  	key := childOperatorFeePayerPrefix
  2482  	db := dbm.getDatabase(bridgeServiceDB)
  2483  
  2484  	if err := db.Put(key, feePayer.Bytes()); err != nil {
  2485  		logger.Crit("Failed to store child operator fee payer", "feePayer", feePayer.String(), "err", err)
  2486  	}
  2487  }
  2488  
  2489  // ReadChildOperatorFeePayer returns a fee payer of child operator.
  2490  func (dbm *databaseManager) ReadChildOperatorFeePayer() common.Address {
  2491  	key := childOperatorFeePayerPrefix
  2492  	db := dbm.getDatabase(bridgeServiceDB)
  2493  	data, _ := db.Get(key)
  2494  	if len(data) == 0 {
  2495  		return common.Address{}
  2496  	}
  2497  	return common.BytesToAddress(data)
  2498  }
  2499  
  2500  // ClearHeaderChainCache calls cacheManager.clearHeaderChainCache to flush out caches of HeaderChain.
  2501  func (dbm *databaseManager) ClearHeaderChainCache() {
  2502  	dbm.cm.clearHeaderChainCache()
  2503  }
  2504  
  2505  // ClearBlockChainCache calls cacheManager.clearBlockChainCache to flush out caches of BlockChain.
  2506  func (dbm *databaseManager) ClearBlockChainCache() {
  2507  	dbm.cm.clearBlockChainCache()
  2508  }
  2509  
  2510  func (dbm *databaseManager) ReadTxAndLookupInfoInCache(hash common.Hash) (*types.Transaction, common.Hash, uint64, uint64) {
  2511  	return dbm.cm.readTxAndLookupInfoInCache(hash)
  2512  }
  2513  
  2514  func (dbm *databaseManager) ReadBlockReceiptsInCache(blockHash common.Hash) types.Receipts {
  2515  	return dbm.cm.readBlockReceiptsInCache(blockHash)
  2516  }
  2517  
  2518  func (dbm *databaseManager) ReadTxReceiptInCache(txHash common.Hash) *types.Receipt {
  2519  	return dbm.cm.readTxReceiptInCache(txHash)
  2520  }
  2521  
  2522  func (dbm *databaseManager) WriteCliqueSnapshot(snapshotBlockHash common.Hash, encodedSnapshot []byte) error {
  2523  	db := dbm.getDatabase(MiscDB)
  2524  	return db.Put(snapshotKey(snapshotBlockHash), encodedSnapshot)
  2525  }
  2526  
  2527  func (dbm *databaseManager) ReadCliqueSnapshot(snapshotBlockHash common.Hash) ([]byte, error) {
  2528  	db := dbm.getDatabase(MiscDB)
  2529  	return db.Get(snapshotKey(snapshotBlockHash))
  2530  }
  2531  
  2532  func (dbm *databaseManager) WriteGovernance(data map[string]interface{}, num uint64) error {
  2533  	db := dbm.getDatabase(MiscDB)
  2534  	b, err := json.Marshal(data)
  2535  	if err != nil {
  2536  		return err
  2537  	}
  2538  	if err := dbm.WriteGovernanceIdx(num); err != nil {
  2539  		if err == errGovIdxAlreadyExist {
  2540  			// Overwriting existing data is not allowed, but the attempt is not considered a failure.
  2541  			return nil
  2542  		}
  2543  		return err
  2544  	}
  2545  	return db.Put(makeKey(governancePrefix, num), b)
  2546  }
  2547  
  2548  func (dbm *databaseManager) WriteGovernanceIdx(num uint64) error {
  2549  	db := dbm.getDatabase(MiscDB)
  2550  	newSlice := make([]uint64, 0)
  2551  
  2552  	if data, err := db.Get(governanceHistoryKey); err == nil {
  2553  		if err = json.Unmarshal(data, &newSlice); err != nil {
  2554  			return err
  2555  		}
  2556  	}
  2557  
  2558  	if len(newSlice) > 0 && num <= newSlice[len(newSlice)-1] {
  2559  		logger.Error("A governance index for the same or a more recent block already exists; skip writing governance index",
  2560  			"newIdx", num, "govIdxes", newSlice)
  2561  		return errGovIdxAlreadyExist
  2562  	}
  2563  
  2564  	newSlice = append(newSlice, num)
  2565  
  2566  	data, err := json.Marshal(newSlice)
  2567  	if err != nil {
  2568  		return err
  2569  	}
  2570  	return db.Put(governanceHistoryKey, data)
  2571  }
  2572  
  2573  func (dbm *databaseManager) ReadGovernance(num uint64) (map[string]interface{}, error) {
  2574  	db := dbm.getDatabase(MiscDB)
  2575  
  2576  	if data, err := db.Get(makeKey(governancePrefix, num)); err != nil {
  2577  		return nil, err
  2578  	} else {
  2579  		result := make(map[string]interface{})
  2580  		if e := json.Unmarshal(data, &result); e != nil {
  2581  			return nil, e
  2582  		}
  2583  		return result, nil
  2584  	}
  2585  }
  2586  
  2587  // ReadRecentGovernanceIdx returns the latest `count` governance indices. If `count` is 0, it returns all indices.
  2588  func (dbm *databaseManager) ReadRecentGovernanceIdx(count int) ([]uint64, error) {
  2589  	db := dbm.getDatabase(MiscDB)
  2590  
  2591  	if history, err := db.Get(governanceHistoryKey); err != nil {
  2592  		return nil, err
  2593  	} else {
  2594  		idxHistory := make([]uint64, 0)
  2595  		if e := json.Unmarshal(history, &idxHistory); e != nil {
  2596  			return nil, e
  2597  		}
  2598  
  2599  		// Make sure idxHistory is in ascending order
  2600  		sort.Slice(idxHistory, func(i, j int) bool {
  2601  			return idxHistory[i] < idxHistory[j]
  2602  		})
  2603  
  2604  		max := 0
  2605  		leng := len(idxHistory)
  2606  		if leng < count || count == 0 {
  2607  			max = leng
  2608  		} else {
  2609  			max = count
  2610  		}
  2611  		if count > 0 {
  2612  			return idxHistory[leng-max:], nil
  2613  		}
  2614  		return idxHistory, nil
  2615  	}
  2616  }
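// Worked example (illustrative, not part of the original code) for
// ReadRecentGovernanceIdx above: with stored indices {10, 40, 70},
// ReadRecentGovernanceIdx(2) returns [40, 70], while ReadRecentGovernanceIdx(0)
// returns all of [10, 40, 70].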
  2617  
  2618  // ReadGovernanceAtNumber returns the block number and governance information to be used for block `num`
  2619  func (dbm *databaseManager) ReadGovernanceAtNumber(num uint64, epoch uint64) (uint64, map[string]interface{}, error) {
  2620  	minimum := num - (num % epoch)
  2621  	if minimum >= epoch {
  2622  		minimum -= epoch
  2623  	}
  2624  	totalIdx, _ := dbm.ReadRecentGovernanceIdx(0)
  2625  	for i := len(totalIdx) - 1; i >= 0; i-- {
  2626  		if totalIdx[i] <= minimum {
  2627  			result, err := dbm.ReadGovernance(totalIdx[i])
  2628  			return totalIdx[i], result, err
  2629  		}
  2630  	}
  2631  	return 0, nil, errors.New("no governance data found")
  2632  }
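// Worked example (illustrative, not part of the original code) for the epoch
// arithmetic in ReadGovernanceAtNumber above, with epoch = 100 and num = 250:
//
//	minimum = 250 - (250 % 100) = 200  // start of the current epoch
//	minimum >= epoch, so minimum = 100 // step back one full epoch
//
// The most recent stored governance index <= 100 is then looked up, i.e.
// governance written at an epoch boundary only takes effect one epoch later.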
  2633  
  2634  func (dbm *databaseManager) WriteGovernanceState(b []byte) error {
  2635  	db := dbm.getDatabase(MiscDB)
  2636  	return db.Put(governanceStateKey, b)
  2637  }
  2638  
  2639  func (dbm *databaseManager) ReadGovernanceState() ([]byte, error) {
  2640  	db := dbm.getDatabase(MiscDB)
  2641  	return db.Get(governanceStateKey)
  2642  }
  2643  
  2644  func (dbm *databaseManager) WriteChainDataFetcherCheckpoint(checkpoint uint64) error {
  2645  	db := dbm.getDatabase(MiscDB)
  2646  	return db.Put(chaindatafetcherCheckpointKey, common.Int64ToByteBigEndian(checkpoint))
  2647  }
  2648  
  2649  func (dbm *databaseManager) ReadChainDataFetcherCheckpoint() (uint64, error) {
  2650  	db := dbm.getDatabase(MiscDB)
  2651  	data, err := db.Get(chaindatafetcherCheckpointKey)
  2652  	if err != nil {
  2653  		// if the key is not in the database, 0 is returned as the checkpoint
  2654  		if err == leveldb.ErrNotFound || err == badger.ErrKeyNotFound ||
  2655  			strings.Contains(err.Error(), "not found") { // memoryDB
  2656  			return 0, nil
  2657  		}
  2658  		return 0, err
  2659  	}
  2660  	// The error may be nil even though the data is missing or has an unexpected length.
  2661  	if len(data) != 8 {
  2662  		logger.Warn("The checkpoint data has an unexpected length; returning 0", "len(data)", len(data))
  2663  		return 0, nil
  2664  	}
  2665  	return binary.BigEndian.Uint64(data), nil
  2666  }
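// exampleCheckpointRoundTrip is a hypothetical, illustrative sketch (not part
// of the original code) of the checkpoint accessors above: a missing key reads
// back as checkpoint 0 rather than as an error, so a fresh database simply
// resumes from the beginning.
func exampleCheckpointRoundTrip(dbm *databaseManager, checkpoint uint64) (uint64, error) {
	if err := dbm.WriteChainDataFetcherCheckpoint(checkpoint); err != nil {
		return 0, err
	}
	return dbm.ReadChainDataFetcherCheckpoint()
}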
  2667  
  2668  func (dbm *databaseManager) NewSnapshotDBBatch() SnapshotDBBatch {
  2669  	return &snapshotDBBatch{dbm.NewBatch(SnapshotDB)}
  2670  }
  2671  
  2672  type SnapshotDBBatch interface {
  2673  	Batch
  2674  
  2675  	WriteSnapshotRoot(root common.Hash)
  2676  	DeleteSnapshotRoot()
  2677  
  2678  	WriteAccountSnapshot(hash common.Hash, entry []byte)
  2679  	DeleteAccountSnapshot(hash common.Hash)
  2680  
  2681  	WriteStorageSnapshot(accountHash, storageHash common.Hash, entry []byte)
  2682  	DeleteStorageSnapshot(accountHash, storageHash common.Hash)
  2683  
  2684  	WriteSnapshotJournal(journal []byte)
  2685  	DeleteSnapshotJournal()
  2686  
  2687  	WriteSnapshotGenerator(generator []byte)
  2688  	DeleteSnapshotGenerator()
  2689  
  2690  	WriteSnapshotDisabled()
  2691  	DeleteSnapshotDisabled()
  2692  
  2693  	WriteSnapshotRecoveryNumber(number uint64)
  2694  	DeleteSnapshotRecoveryNumber()
  2695  }
  2696  
  2697  type snapshotDBBatch struct {
  2698  	Batch
  2699  }
  2700  
  2701  func (batch *snapshotDBBatch) WriteSnapshotRoot(root common.Hash) {
  2702  	writeSnapshotRoot(batch, root)
  2703  }
  2704  
  2705  func (batch *snapshotDBBatch) DeleteSnapshotRoot() {
  2706  	deleteSnapshotRoot(batch)
  2707  }
  2708  
  2709  func (batch *snapshotDBBatch) WriteAccountSnapshot(hash common.Hash, entry []byte) {
  2710  	writeAccountSnapshot(batch, hash, entry)
  2711  }
  2712  
  2713  func (batch *snapshotDBBatch) DeleteAccountSnapshot(hash common.Hash) {
  2714  	deleteAccountSnapshot(batch, hash)
  2715  }
  2716  
  2717  func (batch *snapshotDBBatch) WriteStorageSnapshot(accountHash, storageHash common.Hash, entry []byte) {
  2718  	writeStorageSnapshot(batch, accountHash, storageHash, entry)
  2719  }
  2720  
  2721  func (batch *snapshotDBBatch) DeleteStorageSnapshot(accountHash, storageHash common.Hash) {
  2722  	deleteStorageSnapshot(batch, accountHash, storageHash)
  2723  }
  2724  
  2725  func (batch *snapshotDBBatch) WriteSnapshotJournal(journal []byte) {
  2726  	writeSnapshotJournal(batch, journal)
  2727  }
  2728  
  2729  func (batch *snapshotDBBatch) DeleteSnapshotJournal() {
  2730  	deleteSnapshotJournal(batch)
  2731  }
  2732  
  2733  func (batch *snapshotDBBatch) WriteSnapshotGenerator(generator []byte) {
  2734  	writeSnapshotGenerator(batch, generator)
  2735  }
  2736  
  2737  func (batch *snapshotDBBatch) DeleteSnapshotGenerator() {
  2738  	deleteSnapshotGenerator(batch)
  2739  }
  2740  
  2741  func (batch *snapshotDBBatch) WriteSnapshotDisabled() {
  2742  	writeSnapshotDisabled(batch)
  2743  }
  2744  
  2745  func (batch *snapshotDBBatch) DeleteSnapshotDisabled() {
  2746  	deleteSnapshotDisabled(batch)
  2747  }
  2748  
  2749  func (batch *snapshotDBBatch) WriteSnapshotRecoveryNumber(number uint64) {
  2750  	writeSnapshotRecoveryNumber(batch, number)
  2751  }
  2752  
  2753  func (batch *snapshotDBBatch) DeleteSnapshotRecoveryNumber() {
  2754  	deleteSnapshotRecoveryNumber(batch)
  2755  }
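// exampleSnapshotBatchUsage is a hypothetical, illustrative sketch (not part of
// the original code) of how the SnapshotDBBatch wrapper above is meant to be
// used: snapshot entries are staged through the typed helpers and persisted
// together with a single Write.
func exampleSnapshotBatchUsage(dbm *databaseManager, root common.Hash, accounts map[common.Hash][]byte) error {
	batch := dbm.NewSnapshotDBBatch()
	defer batch.Reset()

	batch.WriteSnapshotRoot(root)
	for hash, entry := range accounts {
		batch.WriteAccountSnapshot(hash, entry)
	}
	return batch.Write()
}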
  2756  
  2757  func writeSnapshotRoot(db KeyValueWriter, root common.Hash) {
  2758  	if err := db.Put(snapshotRootKey, root[:]); err != nil {
  2759  		logger.Crit("Failed to store snapshot root", "err", err)
  2760  	}
  2761  }
  2762  
  2763  func deleteSnapshotRoot(db KeyValueWriter) {
  2764  	if err := db.Delete(snapshotRootKey); err != nil {
  2765  		logger.Crit("Failed to remove snapshot root", "err", err)
  2766  	}
  2767  }
  2768  
  2769  func writeAccountSnapshot(db KeyValueWriter, hash common.Hash, entry []byte) {
  2770  	if err := db.Put(AccountSnapshotKey(hash), entry); err != nil {
  2771  		logger.Crit("Failed to store account snapshot", "err", err)
  2772  	}
  2773  }
  2774  
  2775  func deleteAccountSnapshot(db KeyValueWriter, hash common.Hash) {
  2776  	if err := db.Delete(AccountSnapshotKey(hash)); err != nil {
  2777  		logger.Crit("Failed to delete account snapshot", "err", err)
  2778  	}
  2779  }
  2780  
  2781  func writeStorageSnapshot(db KeyValueWriter, accountHash, storageHash common.Hash, entry []byte) {
  2782  	if err := db.Put(StorageSnapshotKey(accountHash, storageHash), entry); err != nil {
  2783  		logger.Crit("Failed to store storage snapshot", "err", err)
  2784  	}
  2785  }
  2786  
  2787  func deleteStorageSnapshot(db KeyValueWriter, accountHash, storageHash common.Hash) {
  2788  	if err := db.Delete(StorageSnapshotKey(accountHash, storageHash)); err != nil {
  2789  		logger.Crit("Failed to delete storage snapshot", "err", err)
  2790  	}
  2791  }
  2792  
  2793  func writeSnapshotJournal(db KeyValueWriter, journal []byte) {
  2794  	if err := db.Put(snapshotJournalKey, journal); err != nil {
  2795  		logger.Crit("Failed to store snapshot journal", "err", err)
  2796  	}
  2797  }
  2798  
  2799  func deleteSnapshotJournal(db KeyValueWriter) {
  2800  	if err := db.Delete(snapshotJournalKey); err != nil {
  2801  		logger.Crit("Failed to remove snapshot journal", "err", err)
  2802  	}
  2803  }
  2804  
  2805  func writeSnapshotGenerator(db KeyValueWriter, generator []byte) {
  2806  	if err := db.Put(SnapshotGeneratorKey, generator); err != nil {
  2807  		logger.Crit("Failed to store snapshot generator", "err", err)
  2808  	}
  2809  }
  2810  
  2811  func deleteSnapshotGenerator(db KeyValueWriter) {
  2812  	if err := db.Delete(SnapshotGeneratorKey); err != nil {
  2813  		logger.Crit("Failed to remove snapshot generator", "err", err)
  2814  	}
  2815  }
  2816  
  2817  func writeSnapshotDisabled(db KeyValueWriter) {
  2818  	if err := db.Put(snapshotDisabledKey, []byte("42")); err != nil {
  2819  		logger.Crit("Failed to store snapshot disabled flag", "err", err)
  2820  	}
  2821  }
  2822  
  2823  func deleteSnapshotDisabled(db KeyValueWriter) {
  2824  	if err := db.Delete(snapshotDisabledKey); err != nil {
  2825  		logger.Crit("Failed to remove snapshot disabled flag", "err", err)
  2826  	}
  2827  }
  2828  
  2829  func writeSnapshotRecoveryNumber(db KeyValueWriter, number uint64) {
  2830  	var buf [8]byte
  2831  	binary.BigEndian.PutUint64(buf[:], number)
  2832  	if err := db.Put(snapshotRecoveryKey, buf[:]); err != nil {
  2833  		logger.Crit("Failed to store snapshot recovery number", "err", err)
  2834  	}
  2835  }
  2836  
  2837  func deleteSnapshotRecoveryNumber(db KeyValueWriter) {
  2838  	if err := db.Delete(snapshotRecoveryKey); err != nil {
  2839  		logger.Crit("Failed to remove snapshot recovery number", "err", err)
  2840  	}
  2841  }