github.com/theQRL/go-zond@v0.1.1/trie/triedb/hashdb/database.go (about)

     1  // Copyright 2018 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package hashdb
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"reflect"
    23  	"sync"
    24  	"time"
    25  
    26  	"github.com/VictoriaMetrics/fastcache"
    27  	"github.com/theQRL/go-zond/common"
    28  	"github.com/theQRL/go-zond/core/rawdb"
    29  	"github.com/theQRL/go-zond/core/types"
    30  	"github.com/theQRL/go-zond/log"
    31  	"github.com/theQRL/go-zond/metrics"
    32  	"github.com/theQRL/go-zond/rlp"
    33  	"github.com/theQRL/go-zond/trie/trienode"
    34  	"github.com/theQRL/go-zond/trie/triestate"
    35  	"github.com/theQRL/go-zond/zonddb"
    36  )
    37  
// Performance metrics for the hash database: hit/miss/read/write meters for
// the clean and dirty caches, plus resetting timers and volume meters for the
// flush (Cap), garbage-collection (Dereference) and commit operations.
var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("hashdb/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("hashdb/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("hashdb/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/write", nil)

	memcacheDirtyHitMeter   = metrics.NewRegisteredMeter("hashdb/memcache/dirty/hit", nil)
	memcacheDirtyMissMeter  = metrics.NewRegisteredMeter("hashdb/memcache/dirty/miss", nil)
	memcacheDirtyReadMeter  = metrics.NewRegisteredMeter("hashdb/memcache/dirty/read", nil)
	memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("hashdb/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/nodes", nil)
	memcacheFlushBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/bytes", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("hashdb/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/nodes", nil)
	memcacheGCBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/bytes", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("hashdb/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/nodes", nil)
	memcacheCommitBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/bytes", nil)
)
    61  
// ChildResolver defines the required method to decode the provided
// trie node and iterate the children on top.
type ChildResolver interface {
	// ForEach decodes the given encoded node blob and invokes onChild
	// for every child node hash referenced inside it.
	ForEach(node []byte, onChild func(common.Hash))
}
    67  
// Config contains the settings for database.
type Config struct {
	// CleanCacheSize is the maximum memory allowance (in bytes) for caching
	// clean nodes; zero disables the clean cache entirely (see New).
	CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes
}
    72  
// Defaults is the default setting for database if it's not specified.
// Notably, clean cache is disabled explicitly.
var Defaults = &Config{
	// Explicitly set clean cache size to 0 to avoid creating fastcache,
	// otherwise database must be closed when it's no longer needed to
	// prevent memory leak.
	CleanCacheSize: 0,
}
    81  
// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
//
// Note, the trie Database is **not** thread safe in its mutations, but it **is**
// thread safe in providing individual, independent node access. The rationale
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
	diskdb   zonddb.Database // Persistent storage for matured trie nodes
	resolver ChildResolver   // The handler to resolve children of nodes

	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize  common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize common.StorageSize // Storage size of the external children tracking

	lock sync.RWMutex // Guards the dirty node set, flush-list and size counters
}
   112  
// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
//
// Nodes are linked into a doubly-linked flush-list via flushPrev/flushNext;
// the zero hash acts as the list terminator on both ends.
type cachedNode struct {
	node      []byte                   // Encoded node blob
	parents   uint32                   // Number of live nodes referencing this one
	external  map[common.Hash]struct{} // The set of external children
	flushPrev common.Hash              // Previous node in the flush-list
	flushNext common.Hash              // Next node in the flush-list
}
   122  
// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included, measured once via reflection. It's an approximate size,
// but should be a lot better than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())
   127  
// forChildren invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
func (n *cachedNode) forChildren(resolver ChildResolver, onChild func(hash common.Hash)) {
	// External children first (e.g. storage trie roots referenced by leaves),
	// then the hashes embedded in the encoded node blob itself.
	for child := range n.external {
		onChild(child)
	}
	resolver.ForEach(n.node, onChild)
}
   137  
   138  // New initializes the hash-based node database.
   139  func New(diskdb zonddb.Database, config *Config, resolver ChildResolver) *Database {
   140  	if config == nil {
   141  		config = Defaults
   142  	}
   143  	var cleans *fastcache.Cache
   144  	if config.CleanCacheSize > 0 {
   145  		cleans = fastcache.New(config.CleanCacheSize)
   146  	}
   147  	return &Database{
   148  		diskdb:   diskdb,
   149  		resolver: resolver,
   150  		cleans:   cleans,
   151  		dirties:  make(map[common.Hash]*cachedNode),
   152  	}
   153  }
   154  
   155  // insert inserts a simplified trie node into the memory database.
   156  // All nodes inserted by this function will be reference tracked
   157  // and in theory should only used for **trie nodes** insertion.
// insert inserts a simplified trie node into the memory database.
// All nodes inserted by this function will be reference tracked
// and in theory should only used for **trie nodes** insertion.
//
// Callers are expected to hold db.lock (see Update). The new node is appended
// to the tail of the flush-list and the reference counters of its already
// dirty children are bumped.
func (db *Database) insert(hash common.Hash, node []byte) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	memcacheDirtyWriteMeter.Mark(int64(len(node)))

	// Create the cached entry for this node, linked behind the current
	// flush-list tail (the zero hash if the list is empty)
	entry := &cachedNode{
		node:      node,
		flushPrev: db.newest,
	}
	// Bump the parent count of every child still present in the dirty set;
	// children already persisted to disk are not tracked here
	entry.forChildren(db.resolver, func(child common.Hash) {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	})
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + len(node))
}
   185  
   186  // Node retrieves an encoded cached trie node from memory. If it cannot be found
   187  // cached, the method queries the persistent database for the content.
   188  func (db *Database) Node(hash common.Hash) ([]byte, error) {
   189  	// It doesn't make sense to retrieve the metaroot
   190  	if hash == (common.Hash{}) {
   191  		return nil, errors.New("not found")
   192  	}
   193  	// Retrieve the node from the clean cache if available
   194  	if db.cleans != nil {
   195  		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
   196  			memcacheCleanHitMeter.Mark(1)
   197  			memcacheCleanReadMeter.Mark(int64(len(enc)))
   198  			return enc, nil
   199  		}
   200  	}
   201  	// Retrieve the node from the dirty cache if available
   202  	db.lock.RLock()
   203  	dirty := db.dirties[hash]
   204  	db.lock.RUnlock()
   205  
   206  	if dirty != nil {
   207  		memcacheDirtyHitMeter.Mark(1)
   208  		memcacheDirtyReadMeter.Mark(int64(len(dirty.node)))
   209  		return dirty.node, nil
   210  	}
   211  	memcacheDirtyMissMeter.Mark(1)
   212  
   213  	// Content unavailable in memory, attempt to retrieve from disk
   214  	enc := rawdb.ReadLegacyTrieNode(db.diskdb, hash)
   215  	if len(enc) != 0 {
   216  		if db.cleans != nil {
   217  			db.cleans.Set(hash[:], enc)
   218  			memcacheCleanMissMeter.Mark(1)
   219  			memcacheCleanWriteMeter.Mark(int64(len(enc)))
   220  		}
   221  		return enc, nil
   222  	}
   223  	return nil, errors.New("not found")
   224  }
   225  
   226  // Nodes retrieves the hashes of all the nodes cached within the memory database.
   227  // This method is extremely expensive and should only be used to validate internal
   228  // states in test code.
   229  func (db *Database) Nodes() []common.Hash {
   230  	db.lock.RLock()
   231  	defer db.lock.RUnlock()
   232  
   233  	var hashes = make([]common.Hash, 0, len(db.dirties))
   234  	for hash := range db.dirties {
   235  		hashes = append(hashes, hash)
   236  	}
   237  	return hashes
   238  }
   239  
// Reference adds a new reference from a parent node to a child node.
// This function is used to add reference between internal trie node
// and external node(e.g. storage trie root), all internal trie nodes
// are referenced together by database itself.
//
// It is the exported, write-locked wrapper around reference.
func (db *Database) Reference(child common.Hash, parent common.Hash) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.reference(child, parent)
}
   250  
// reference is the private locked version of Reference.
//
// NOTE(review): a non-zero parent is assumed to already exist in the dirty
// set; a missing parent would cause a nil-pointer dereference on the
// db.dirties[parent].external accesses below — confirm callers guarantee this.
func (db *Database) reference(child common.Hash, parent common.Hash) {
	// If the node does not exist, it's a node pulled from disk, skip
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// The reference is for state root, increase the reference counter.
	if parent == (common.Hash{}) {
		node.parents += 1
		return
	}
	// The reference is for external storage trie, don't duplicate if
	// the reference is already existent.
	if db.dirties[parent].external == nil {
		db.dirties[parent].external = make(map[common.Hash]struct{})
	}
	if _, ok := db.dirties[parent].external[child]; ok {
		return
	}
	node.parents++
	db.dirties[parent].external[child] = struct{}{}
	db.childrenSize += common.HashLength
}
   275  
   276  // Dereference removes an existing reference from a root node.
   277  func (db *Database) Dereference(root common.Hash) {
   278  	// Sanity check to ensure that the meta-root is not removed
   279  	if root == (common.Hash{}) {
   280  		log.Error("Attempted to dereference the trie cache meta root")
   281  		return
   282  	}
   283  	db.lock.Lock()
   284  	defer db.lock.Unlock()
   285  
   286  	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
   287  	db.dereference(root)
   288  
   289  	db.gcnodes += uint64(nodes - len(db.dirties))
   290  	db.gcsize += storage - db.dirtiesSize
   291  	db.gctime += time.Since(start)
   292  
   293  	memcacheGCTimeTimer.Update(time.Since(start))
   294  	memcacheGCBytesMeter.Mark(int64(storage - db.dirtiesSize))
   295  	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))
   296  
   297  	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
   298  		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
   299  }
   300  
// dereference is the private locked version of Dereference. It decrements
// the node's reference count and, when it hits zero, unlinks the node from
// the flush-list, recursively dereferences its children and deletes it.
func (db *Database) dereference(hash common.Hash) {
	// If the node does not exist, it's a previously committed node.
	node, ok := db.dirties[hash]
	if !ok {
		return
	}
	// If there are no more references to the node, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list, patching the neighbours'
		// links depending on whether it is the head, tail or an inner node
		switch hash {
		case db.oldest:
			db.oldest = node.flushNext
			if node.flushNext != (common.Hash{}) {
				db.dirties[node.flushNext].flushPrev = common.Hash{}
			}
		case db.newest:
			db.newest = node.flushPrev
			if node.flushPrev != (common.Hash{}) {
				db.dirties[node.flushPrev].flushNext = common.Hash{}
			}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		node.forChildren(db.resolver, func(child common.Hash) {
			db.dereference(child)
		})
		delete(db.dirties, hash)
		db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
		if node.external != nil {
			db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
		}
	}
}
   344  
// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted.
	size := db.dirtiesSize + common.StorageSize(len(db.dirties)*cachedNodeSize)
	size += db.childrenSize

	// Keep committing nodes from the flush-list until we're below allowance.
	// Note the dirty cache itself is not mutated here; nodes are only removed
	// from it after the disk writes have succeeded (see below).
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		rawdb.WriteLegacyTrieNode(batch, oldest, node.node)

		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= zonddb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize)
		if node.external != nil {
			size -= common.StorageSize(len(node.external) * common.HashLength)
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data. Walk the flush-list from
	// the head up to (exclusive) the first node that was not persisted.
	db.lock.Lock()
	defer db.lock.Unlock()

	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
		if node.external != nil {
			db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
		}
	}
	// The new list head (if any) no longer has a predecessor
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushBytesMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}
   423  
// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	// The cleaner removes each persisted node from the dirty cache once the
	// batch write that contains it has been replayed (two-phase commit).
	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	db.lock.Lock()
	defer db.lock.Unlock()
	if err := batch.Replay(uncacher); err != nil {
		return err
	}
	batch.Reset()

	// Reset the storage counters and bumped metrics
	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitBytesMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}
   477  
// commit is the private locked version of Commit. It writes the subtree
// rooted at hash into the batch bottom-up (children before their parent),
// periodically flushing the batch and replaying it through the uncacher.
func (db *Database) commit(hash common.Hash, batch zonddb.Batch, uncacher *cleaner) error {
	// If the node does not exist, it's a previously committed node
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	var err error

	// Dereference all children and delete the node. The callback captures
	// err so the recursion stops after the first failure.
	node.forChildren(db.resolver, func(child common.Hash) {
		if err == nil {
			err = db.commit(child, batch, uncacher)
		}
	})
	if err != nil {
		return err
	}
	// If we've reached an optimal batch size, commit and start over
	rawdb.WriteLegacyTrieNode(batch, hash, node.node)
	if batch.ValueSize() >= zonddb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		// Uncache the just-persisted nodes; the lock is only held for the
		// replay so concurrent readers are not starved during disk writes
		db.lock.Lock()
		err := batch.Replay(uncacher)
		batch.Reset()
		db.lock.Unlock()
		if err != nil {
			return err
		}
	}
	return nil
}
   512  
// cleaner is a database batch replayer that takes a batch of write operations
// and cleans up the trie database from anything written to disk.
type cleaner struct {
	db *Database // The database whose dirty cache is being pruned
}
   518  
// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
// the two-phase commit is to ensure data availability while moving from memory
// to disk.
//
// The caller (batch replay in Commit/commit) holds db.lock while invoking this.
func (c *cleaner) Put(key []byte, rlp []byte) error {
	hash := common.BytesToHash(key)

	// If the node does not exist, we're done on this path
	node, ok := c.db.dirties[hash]
	if !ok {
		return nil
	}
	// Node still exists, remove it from the flush-list, patching the
	// neighbours' links depending on its position (head, tail or inner)
	switch hash {
	case c.db.oldest:
		c.db.oldest = node.flushNext
		if node.flushNext != (common.Hash{}) {
			c.db.dirties[node.flushNext].flushPrev = common.Hash{}
		}
	case c.db.newest:
		c.db.newest = node.flushPrev
		if node.flushPrev != (common.Hash{}) {
			c.db.dirties[node.flushPrev].flushNext = common.Hash{}
		}
	default:
		c.db.dirties[node.flushPrev].flushNext = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	// Remove the node from the dirty cache
	delete(c.db.dirties, hash)
	c.db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
	if node.external != nil {
		c.db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
	}
	// Move the flushed node into the clean cache to prevent insta-reloads
	if c.db.cleans != nil {
		c.db.cleans.Set(hash[:], rlp)
		memcacheCleanWriteMeter.Mark(int64(len(rlp)))
	}
	return nil
}
   561  
// Delete reacts to database deletes during a batch replay. Commit batches
// only ever contain writes, so reaching this is a programming error.
func (c *cleaner) Delete(key []byte) error {
	panic("not implemented")
}
   565  
// Initialized returns an indicator if state data is already initialized
// in hash-based scheme by checking the presence of genesis state.
func (db *Database) Initialized(genesisRoot common.Hash) bool {
	return rawdb.HasLegacyTrieNode(db.diskdb, genesisRoot)
}
   571  
// Update inserts the dirty nodes in provided nodeset into database and link the
// account trie with multiple storage tries if necessary.
//
// The block number and state set are unused in the hash scheme; they exist to
// satisfy the shared triedb backend interface.
func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
	// Ensure the parent state is present and signal a warning if not.
	if parent != types.EmptyRootHash {
		if blob, _ := db.Node(parent); len(blob) == 0 {
			log.Error("parent state is not present")
		}
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	// Insert dirty nodes into the database. In the same tree, it must be
	// ensured that children are inserted first, then parent so that children
	// can be linked with their parent correctly.
	//
	// Note, the storage tries must be flushed before the account trie to
	// retain the invariant that children go into the dirty cache first.
	//
	// order lists storage-trie owners first, then (if present) the account
	// trie, keyed by the zero hash.
	var order []common.Hash
	for owner := range nodes.Sets {
		if owner == (common.Hash{}) {
			continue
		}
		order = append(order, owner)
	}
	if _, ok := nodes.Sets[common.Hash{}]; ok {
		order = append(order, common.Hash{})
	}
	for _, owner := range order {
		subset := nodes.Sets[owner]
		subset.ForEachWithOrder(func(path string, n *trienode.Node) {
			if n.IsDeleted() {
				return // ignore deletion
			}
			db.insert(n.Hash, n.Blob)
		})
	}
	// Link up the account trie and storage trie if the node points
	// to an account trie leaf.
	if set, present := nodes.Sets[common.Hash{}]; present {
		for _, n := range set.Leaves {
			var account types.StateAccount
			if err := rlp.DecodeBytes(n.Blob, &account); err != nil {
				return err
			}
			if account.Root != types.EmptyRootHash {
				db.reference(account.Root, n.Parent)
			}
		}
	}
	return nil
}
   624  
   625  // Size returns the current storage size of the memory cache in front of the
   626  // persistent database layer.
   627  //
   628  // The first return will always be 0, representing the memory stored in unbounded
   629  // diff layers above the dirty cache. This is only available in pathdb.
   630  func (db *Database) Size() (common.StorageSize, common.StorageSize) {
   631  	db.lock.RLock()
   632  	defer db.lock.RUnlock()
   633  
   634  	// db.dirtiesSize only contains the useful data in the cache, but when reporting
   635  	// the total memory consumption, the maintenance metadata is also needed to be
   636  	// counted.
   637  	var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize)
   638  	return 0, db.dirtiesSize + db.childrenSize + metadataSize
   639  }
   640  
   641  // Close closes the trie database and releases all held resources.
   642  func (db *Database) Close() error {
   643  	if db.cleans != nil {
   644  		db.cleans.Reset()
   645  		db.cleans = nil
   646  	}
   647  	return nil
   648  }
   649  
// Scheme returns the node scheme used in the database, which is always the
// hash scheme for this backend.
func (db *Database) Scheme() string {
	return rawdb.HashScheme
}
   654  
   655  // Reader retrieves a node reader belonging to the given state root.
   656  // An error will be returned if the requested state is not available.
   657  func (db *Database) Reader(root common.Hash) (*reader, error) {
   658  	if _, err := db.Node(root); err != nil {
   659  		return nil, fmt.Errorf("state %#x is not available, %v", root, err)
   660  	}
   661  	return &reader{db: db}, nil
   662  }
   663  
// reader is a state reader of Database which implements the Reader interface.
type reader struct {
	db *Database // The backing hash database nodes are fetched from
}
   668  
   669  // Node retrieves the trie node with the given node hash.
   670  // No error will be returned if the node is not found.
   671  func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
   672  	blob, _ := reader.db.Node(hash)
   673  	return blob, nil
   674  }