github.com/neatio-net/neatio@v1.7.3-0.20231114194659-f4d7a2226baa/chain/trie/database.go

package trie

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/allegro/bigcache"
	"github.com/neatio-net/neatio/chain/log"
	"github.com/neatio-net/neatio/neatdb"
	"github.com/neatio-net/neatio/utilities/common"
	"github.com/neatio-net/neatio/utilities/metrics"
	"github.com/neatio-net/neatio/utilities/rlp"
)

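// Meters and timers tracking the clean-cache hit rate and the time and data
// volume spent flushing, garbage collecting and committing trie nodes.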
var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

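// secureKeyPrefix is the database key prefix under which trie node preimages
// are stored.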
var secureKeyPrefix = []byte("secure-key-")

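// secureKeyLength is the length of the above prefix plus a 32 byte hash.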
const secureKeyLength = 11 + 32

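// Database is an intermediate write layer between the trie data structures and
// the disk database. It accumulates trie writes in memory and only
// periodically flushes a couple of tries to disk, garbage collecting the
// remainder.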
type Database struct {
	diskdb neatdb.KeyValueStore // Persistent storage for matured trie nodes

	cleans  *bigcache.BigCache          // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references of dirty in-memory nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
	seckeybuf [secureKeyLength]byte  // Ephemeral buffer for calculating preimage keys

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize   common.StorageSize // Storage size of the dirty node cache
	preimagesSize common.StorageSize // Storage size of the preimages cache

	lock sync.RWMutex
}

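// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time storing
// them in the same cache fields).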
type rawNode []byte

func (n rawNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

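// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.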
type rawFullNode [17]node

func (n rawFullNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawFullNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

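// EncodeRLP encodes a rawFullNode into the consensus RLP format, substituting
// the empty value node for any nil children.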
func (n rawFullNode) EncodeRLP(w io.Writer) error {
	var nodes [17]node

	for i, child := range n {
		if child != nil {
			nodes[i] = child
		} else {
			nodes[i] = nilValueNode
		}
	}
	return rlp.Encode(w, nodes)
}

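// rawShortNode represents only the useful data content of a short node, with
// the caches and flags stripped out to minimize its data storage. This type
// honors the same RLP encoding as the original parent.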
type rawShortNode struct {
	Key []byte
	Val node
}

func (n rawShortNode) canUnload(uint16, uint16) bool {
	panic("this should never end up in a live trie")
}
func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }

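// cachedNode is all the information known about a single cached node in the
// memory database write layer.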
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw RLP data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

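// rlp returns the raw RLP encoded blob of the cached node, either directly
// from the cache, or by regenerating it from the collapsed node.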
func (n *cachedNode) rlp() []byte {
	if node, ok := n.node.(rawNode); ok {
		return node
	}
	blob, err := rlp.EncodeToBytes(n.node)
	if err != nil {
		panic(err)
	}
	return blob
}

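// obj returns the decoded and expanded trie node, either directly from the
// cache, or by regenerating it from the RLP encoded blob.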
func (n *cachedNode) obj(hash common.Hash) node {
	if node, ok := n.node.(rawNode); ok {
		return mustDecodeNode(hash[:], node)
	}
	return expandNode(hash[:], n.node)
}

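// childs returns all the tracked children of this node, both the implicit ones
// from inside the node as well as the explicit ones from outside the node.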
func (n *cachedNode) childs() []common.Hash {
	children := make([]common.Hash, 0, 16)
	for child := range n.children {
		children = append(children, child)
	}
	if _, ok := n.node.(rawNode); !ok {
		gatherChildren(n.node, &children)
	}
	return children
}

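// gatherChildren traverses the node hierarchy of a collapsed storage node and
// retrieves all the hashNode children.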
func gatherChildren(n node, children *[]common.Hash) {
	switch n := n.(type) {
	case *rawShortNode:
		gatherChildren(n.Val, children)

	case rawFullNode:
		for i := 0; i < 16; i++ {
			gatherChildren(n[i], children)
		}
	case hashNode:
		*children = append(*children, common.BytesToHash(n))

	case valueNode, nil:

	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

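// simplifyNode traverses the hierarchy of an expanded memory node and discards
// all the internal caches, returning a node that only contains the raw data.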
func simplifyNode(n node) node {
	switch n := n.(type) {
	case *shortNode:
		return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)}

	case *fullNode:
		node := rawFullNode(n.Children)
		for i := 0; i < len(node); i++ {
			if node[i] != nil {
				node[i] = simplifyNode(node[i])
			}
		}
		return node

	case valueNode, hashNode, rawNode:
		return n

	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

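// expandNode traverses the node hierarchy of a collapsed storage node and
// converts all fields and keys into expanded memory form.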
func expandNode(hash hashNode, n node) node {
	switch n := n.(type) {
	case *rawShortNode:
		return &shortNode{
			Key: compactToHex(n.Key),
			Val: expandNode(nil, n.Val),
			flags: nodeFlag{
				hash: hash,
			},
		}

	case rawFullNode:
		node := &fullNode{
			flags: nodeFlag{
				hash: hash,
			},
		}
		for i := 0; i < len(node.Children); i++ {
			if n[i] != nil {
				node.Children[i] = expandNode(nil, n[i])
			}
		}
		return node

	case valueNode, hashNode:
		return n

	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

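// trienodeHasher implements the Hasher interface required by bigcache. Since
// trie node keys are always 32 byte hashes, taking the first eight bytes as a
// big-endian integer already yields a well-distributed 64 bit hash.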
type trienodeHasher struct{}

func (t trienodeHasher) Sum64(key string) uint64 {
	return binary.BigEndian.Uint64([]byte(key))
}

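// NewDatabase creates a new trie database to store ephemeral trie content
// before it is written out to disk or garbage collected. No read cache is
// created, so all data retrievals will hit the underlying disk database.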
func NewDatabase(diskdb neatdb.KeyValueStore) *Database {
	return NewDatabaseWithCache(diskdb, 0)
}

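// NewDatabaseWithCache creates a new trie database to store ephemeral trie
// content before it is written out to disk or garbage collected. It also acts
// as a read cache for nodes loaded from disk, keeping up to cache megabytes of
// clean node RLPs in memory.
//
// A minimal usage sketch (diskdb is hypothetical, standing for any open
// neatdb.KeyValueStore):
//
//	triedb := trie.NewDatabaseWithCache(diskdb, 256) // 256 MB clean cache
//	// ... build tries on top of triedb, then persist a finalized root
//	// with triedb.Commit(root, true), or bound memory with triedb.Cap.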
func NewDatabaseWithCache(diskdb neatdb.KeyValueStore, cache int) *Database {
	var cleans *bigcache.BigCache
	if cache > 0 {
		cleans, _ = bigcache.NewBigCache(bigcache.Config{
			Shards:             1024,
			LifeWindow:         time.Hour,
			MaxEntriesInWindow: cache * 1024,
			MaxEntrySize:       512,
			HardMaxCacheSize:   cache,
			Hasher:             trienodeHasher{},
		})
	}
	return &Database{
		diskdb:    diskdb,
		cleans:    cleans,
		dirties:   map[common.Hash]*cachedNode{{}: {}},
		preimages: make(map[common.Hash][]byte),
	}
}

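// DiskDB retrieves the persistent storage backing the trie database.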
func (db *Database) DiskDB() neatdb.Reader {
	return db.diskdb
}

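// InsertBlob writes a new reference tracked blob to the memory database if it
// is yet unknown. This method should only be used for non-trie nodes that
// require reference counting, since trie nodes are garbage collected directly
// through their embedded children.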
func (db *Database) InsertBlob(hash common.Hash, blob []byte) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.insert(hash, blob, rawNode(blob))
}

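// insert adds a collapsed trie node to the memory database, bumping the
// reference counts of its children and appending it to the flush-list. The
// caller must hold the database lock.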
func (db *Database) insert(hash common.Hash, blob []byte, node node) {
	if _, ok := db.dirties[hash]; ok {
		return
	}

	entry := &cachedNode{
		node:      simplifyNode(node),
		size:      uint16(len(blob)),
		flushPrev: db.newest,
	}
	for _, child := range entry.childs() {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	}
	db.dirties[hash] = entry

	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}

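// insertPreimage writes a new trie node preimage to the memory database if it
// is yet unknown. Note that this method assumes the database lock is held.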
func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
	if _, ok := db.preimages[hash]; ok {
		return
	}
	db.preimages[hash] = common.CopyBytes(preimage)
	db.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
}

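// node retrieves an expanded trie node, checking the clean cache first, then
// the dirty cache, and finally the persistent database, returning nil if the
// node is unknown.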
func (db *Database) node(hash common.Hash) node {
	if db.cleans != nil {
		if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return mustDecodeNode(hash[:], enc)
		}
	}

	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		return dirty.obj(hash)
	}

	enc, err := db.diskdb.Get(hash[:])
	if err != nil || enc == nil {
		return nil
	}
	if db.cleans != nil {
		db.cleans.Set(string(hash[:]), enc)
		memcacheCleanMissMeter.Mark(1)
		memcacheCleanWriteMeter.Mark(int64(len(enc)))
	}
	return mustDecodeNode(hash[:], enc)
}

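// Node retrieves an encoded trie node by hash, checking the clean cache first,
// then the dirty cache, and finally the persistent database.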
func (db *Database) Node(hash common.Hash) ([]byte, error) {
	if hash == (common.Hash{}) {
		return nil, errors.New("not found")
	}

	if db.cleans != nil {
		if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return enc, nil
		}
	}

	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		return dirty.rlp(), nil
	}

	enc, err := db.diskdb.Get(hash[:])
	if err == nil && enc != nil {
		if db.cleans != nil {
			db.cleans.Set(string(hash[:]), enc)
			memcacheCleanMissMeter.Mark(1)
			memcacheCleanWriteMeter.Mark(int64(len(enc)))
		}
	}
	return enc, err
}

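// preimage retrieves a cached trie node preimage from memory. If it cannot be
// found cached, the method queries the persistent database for the content.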
func (db *Database) preimage(hash common.Hash) ([]byte, error) {
	db.lock.RLock()
	preimage := db.preimages[hash]
	db.lock.RUnlock()

	if preimage != nil {
		return preimage, nil
	}
	return db.diskdb.Get(db.secureKey(hash[:]))
}

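// secureKey returns the database key for the preimage of key. The buffer is
// freshly allocated, so the caller may retain it.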
func (db *Database) secureKey(key []byte) []byte {
	// Allocate a fresh buffer instead of appending to the shared prefix
	// slice, so the result can never alias secureKeyPrefix's backing array.
	buf := make([]byte, 0, secureKeyLength)
	buf = append(buf, secureKeyPrefix...)
	return append(buf, key...)
}

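// Nodes retrieves the hashes of all the nodes cached within the memory
// database. This method is extremely expensive and should only be used to
// validate internal states in test code.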
func (db *Database) Nodes() []common.Hash {
	db.lock.RLock()
	defer db.lock.RUnlock()

	var hashes = make([]common.Hash, 0, len(db.dirties))
	for hash := range db.dirties {
		if hash != (common.Hash{}) {
			hashes = append(hashes, hash)
		}
	}
	return hashes
}

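// Reference adds a new reference from a parent node to a child node.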
func (db *Database) Reference(child common.Hash, parent common.Hash) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.reference(child, parent)
}

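// reference is the private locked version of Reference.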
func (db *Database) reference(child common.Hash, parent common.Hash) {
	node, ok := db.dirties[child]
	if !ok {
		return
	}

	if db.dirties[parent].children == nil {
		db.dirties[parent].children = make(map[common.Hash]uint16)
	} else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) {
		return
	}
	node.parents++
	db.dirties[parent].children[child]++
}

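// Dereference removes an existing reference from a root node, garbage
// collecting any subtrie whose reference count drops to zero.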
func (db *Database) Dereference(root common.Hash) {
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root, common.Hash{})

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}

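// dereference is the private locked version of Dereference.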
func (db *Database) dereference(child common.Hash, parent common.Hash) {
	node := db.dirties[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
		}
	}

	node, ok := db.dirties[child]
	if !ok {
		return
	}

	if node.parents > 0 {
		node.parents--
	}
	if node.parents == 0 {
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.dirties[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.dirties[node.flushPrev].flushNext = common.Hash{}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}

		for _, hash := range node.childs() {
			db.dereference(hash, child)
		}
		delete(db.dirties, child)
		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
	}
}

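// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold. If the accumulated preimages
// exceed 4MB, they are flushed to disk as well.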
func (db *Database) Cap(limit common.StorageSize) error {
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*2*common.HashLength)

	flushPreimages := db.preimagesSize > 4*1024*1024
	if flushPreimages {
		for hash, preimage := range db.preimages {
			if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
				log.Error("Failed to commit preimage from trie database", "err", err)
				return err
			}
			if batch.ValueSize() > neatdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return err
				}
				batch.Reset()
			}
		}
	}

	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		node := db.dirties[oldest]
		if err := batch.Put(oldest[:], node.rlp()); err != nil {
			return err
		}

		if batch.ValueSize() >= neatdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}

		size -= common.StorageSize(3*common.HashLength + int(node.size))
		oldest = node.flushNext
	}

	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}

	db.lock.Lock()
	defer db.lock.Unlock()

	if flushPreimages {
		db.preimages = make(map[common.Hash][]byte)
		db.preimagesSize = 0
	}
	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}

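// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a
// side effect, all accumulated preimages are also written.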
func (db *Database) Commit(node common.Hash, report bool) error {
	start := time.Now()
	batch := db.diskdb.NewBatch()

	for hash, preimage := range db.preimages {
		if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
			log.Error("Failed to commit preimage from trie database", "err", err)
			return err
		}

		if batch.ValueSize() > neatdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}

	if err := batch.Write(); err != nil {
		return err
	}
	batch.Reset()

	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}

	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}

	db.lock.Lock()
	defer db.lock.Unlock()

	batch.Replay(uncacher)
	batch.Reset()

	db.preimages = make(map[common.Hash][]byte)
	db.preimagesSize = 0

	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}

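// commit recursively persists the children of the given node, then the node
// itself, flushing full batches to disk as they fill up.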
func (db *Database) commit(hash common.Hash, batch neatdb.Batch, uncacher *cleaner) error {
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	for _, child := range node.childs() {
		if err := db.commit(child, batch, uncacher); err != nil {
			return err
		}
	}
	if err := batch.Put(hash[:], node.rlp()); err != nil {
		return err
	}

	if batch.ValueSize() >= neatdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		db.lock.Lock()
		batch.Replay(uncacher)
		batch.Reset()
		db.lock.Unlock()
	}
	return nil
}

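// cleaner is a database batch replayer that, after a successful commit,
// removes the persisted nodes from the dirty cache and moves them into the
// clean cache.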
type cleaner struct {
	db *Database
}

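// Put reacts to database writes and implements dirty data uncaching: the node
// is unlinked from the flush-list, dropped from the dirty cache and injected
// into the clean cache.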
func (c *cleaner) Put(key []byte, rlp []byte) error {
	hash := common.BytesToHash(key)

	node, ok := c.db.dirties[hash]
	if !ok {
		return nil
	}

	switch hash {
	case c.db.oldest:
		c.db.oldest = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = common.Hash{}
	case c.db.newest:
		c.db.newest = node.flushPrev
		c.db.dirties[node.flushPrev].flushNext = common.Hash{}
	default:
		c.db.dirties[node.flushPrev].flushNext = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
	}

	delete(c.db.dirties, hash)
	c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))

	if c.db.cleans != nil {
		c.db.cleans.Set(string(hash[:]), rlp)
	}
	return nil
}

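// Delete is not expected during a commit replay, since commits only ever write
// data to the batch.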
func (c *cleaner) Delete(key []byte) error {
	panic("Not implemented")
}

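// Size returns the current dirty node and preimage storage sizes of the memory
// cache in front of the persistent database layer, counting two extra hashes
// per node for the flush-list overhead.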
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	var flushlistSize = common.StorageSize((len(db.dirties) - 1) * 2 * common.HashLength)
	return db.dirtiesSize + flushlistSize, db.preimagesSize
}

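// verifyIntegrity is a debug method that iterates over the entire trie stored
// in memory and checks whether every node is reachable from the meta root,
// panicking on any leaked (unreachable) node. It is extremely CPU and memory
// intensive; only use it when debugging.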
func (db *Database) verifyIntegrity() {
	reachable := map[common.Hash]struct{}{{}: {}}

	for child := range db.dirties[common.Hash{}].children {
		db.accumulate(child, reachable)
	}

	var unreachable []string
	for hash, node := range db.dirties {
		if _, ok := reachable[hash]; !ok {
			unreachable = append(unreachable, fmt.Sprintf("%x: {Node: %v, Parents: %d, Prev: %x, Next: %x}",
				hash, node.node, node.parents, node.flushPrev, node.flushNext))
		}
	}
	if len(unreachable) != 0 {
		panic(fmt.Sprintf("trie cache memory leak: %v", unreachable))
	}
}

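// accumulate iterates over the trie defined by hash and adds all the cached
// children found in memory to the reachable set.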
func (db *Database) accumulate(hash common.Hash, reachable map[common.Hash]struct{}) {
	node, ok := db.dirties[hash]
	if !ok {
		return
	}
	reachable[hash] = struct{}{}

	for _, child := range node.childs() {
		db.accumulate(child, reachable)
	}
}

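// proposedInEpochPrefix is the database key prefix under which per-epoch
// proposer markers are stored.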
var proposedInEpochPrefix = []byte("proposed-in-epoch-")

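// encodeUint64 encodes a uint64 as an 8 byte big-endian slice.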
func encodeUint64(number uint64) []byte {
	enc := make([]byte, 8)
	binary.BigEndian.PutUint64(enc, number)
	return enc
}

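// decodeUint64 decodes an 8 byte big-endian slice into a uint64.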
func decodeUint64(raw []byte) uint64 {
	return binary.BigEndian.Uint64(raw)
}

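// MarkProposedInEpoch records in the backing database that the given address
// proposed a block in the given epoch.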
func (db *Database) MarkProposedInEpoch(address common.Address, epoch uint64) error {
	return db.diskdb.Put(append(
		append(proposedInEpochPrefix, address.Bytes()...), encodeUint64(epoch)...),
		encodeUint64(1))
}

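// CheckProposedInEpoch reports whether the given address has been marked as
// having proposed a block in the given epoch.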
func (db *Database) CheckProposedInEpoch(address common.Address, epoch uint64) bool {
	_, err := db.diskdb.Get(append(append(proposedInEpochPrefix, address.Bytes()...), encodeUint64(epoch)...))
	return err == nil
}