github.1485827954.workers.dev/ethereum/go-ethereum@v1.14.3/triedb/hashdb/database.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package hashdb

import (
	"errors"
	"fmt"
	"reflect"
	"sync"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie/trienode"
	"github.com/ethereum/go-ethereum/trie/triestate"
)

var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("hashdb/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("hashdb/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("hashdb/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/write", nil)

	memcacheDirtyHitMeter   = metrics.NewRegisteredMeter("hashdb/memcache/dirty/hit", nil)
	memcacheDirtyMissMeter  = metrics.NewRegisteredMeter("hashdb/memcache/dirty/miss", nil)
	memcacheDirtyReadMeter  = metrics.NewRegisteredMeter("hashdb/memcache/dirty/read", nil)
	memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("hashdb/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/nodes", nil)
	memcacheFlushBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/bytes", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("hashdb/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/nodes", nil)
	memcacheGCBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/bytes", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("hashdb/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/nodes", nil)
	memcacheCommitBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/bytes", nil)
)

// ChildResolver defines the required method to decode the provided
// trie node and iterate over its children.
type ChildResolver interface {
	ForEach(node []byte, onChild func(common.Hash))
}
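// (A resolver implementation typically RLP-decodes the node blob and invokes
// onChild for every hash reference embedded in it; in practice go-ethereum
// wires in such a resolver from its trie package when constructing the
// database.)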

// Config contains the settings for the database.
type Config struct {
	CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes
}

// Defaults is the default setting for the database if none is specified.
// Notably, the clean cache is explicitly disabled.
var Defaults = &Config{
	// Explicitly set clean cache size to 0 to avoid creating fastcache,
	// otherwise the database must be closed when it's no longer needed to
	// prevent a memory leak.
	CleanCacheSize: 0,
}

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple of tries to disk, garbage collecting the remainder.
type Database struct {
	diskdb   ethdb.Database // Persistent storage for matured trie nodes
	resolver ChildResolver  // The handler to resolve children of nodes

	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize  common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize common.StorageSize // Storage size of the external children tracking

	lock sync.RWMutex
}

// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
	node      []byte                   // Encoded node blob, immutable
	parents   uint32                   // Number of live nodes referencing this one
	external  map[common.Hash]struct{} // The set of external children
	flushPrev common.Hash              // Previous node in the flush-list
	flushNext common.Hash              // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())
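// (This fixed per-entry overhead is charged once per dirty node on top of
// db.dirtiesSize and db.childrenSize when estimating total memory usage, see
// Cap and Size below.)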

// forChildren invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
func (n *cachedNode) forChildren(resolver ChildResolver, onChild func(hash common.Hash)) {
	for child := range n.external {
		onChild(child)
	}
	resolver.ForEach(n.node, onChild)
}

// New initializes the hash-based node database.
func New(diskdb ethdb.Database, config *Config, resolver ChildResolver) *Database {
	if config == nil {
		config = Defaults
	}
	var cleans *fastcache.Cache
	if config.CleanCacheSize > 0 {
		cleans = fastcache.New(config.CleanCacheSize)
	}
	return &Database{
		diskdb:   diskdb,
		resolver: resolver,
		cleans:   cleans,
		dirties:  make(map[common.Hash]*cachedNode),
	}
}
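// A minimal usage sketch (not part of the original file): callers normally go
// through the triedb wrapper rather than this package directly, but the rough
// per-block flow against this API looks like the following. The diskdb and
// resolver values are assumed to exist already.
//
//	db := New(diskdb, Defaults, resolver)
//	db.Update(root, parent, block, nodes, states) // inject freshly committed nodes
//	db.Reference(root, common.Hash{})             // pin the new state root
//	db.Cap(256 * 1024 * 1024)                     // flush oldest nodes below the cap
//	db.Commit(root, true)                         // or persist the whole trie at root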

// insert inserts a trie node into the memory database. All nodes inserted by
// this function will be reference tracked. This function assumes the lock is
// already held.
func (db *Database) insert(hash common.Hash, node []byte) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	memcacheDirtyWriteMeter.Mark(int64(len(node)))

	// Create the cached entry for this node
	entry := &cachedNode{
		node:      node,
		flushPrev: db.newest,
	}
	entry.forChildren(db.resolver, func(child common.Hash) {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	})
	db.dirties[hash] = entry

	// Update the flush-list endpoints
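	// (the entry was created with flushPrev already pointing at the current
	// tail, so only the forward link and the tail pointer need adjusting)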
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + len(node))
}

// node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content.
func (db *Database) node(hash common.Hash) ([]byte, error) {
	// It doesn't make sense to retrieve the metaroot
	if hash == (common.Hash{}) {
		return nil, errors.New("not found")
	}
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return enc, nil
		}
	}
	// Retrieve the node from the dirty cache if available.
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	// Return the cached node if it's found in the dirty set.
	// The dirty.node field is immutable and safe to read
	// even without the lock guard.
	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(len(dirty.node)))
		return dirty.node, nil
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc := rawdb.ReadLegacyTrieNode(db.diskdb, hash)
	if len(enc) != 0 {
		if db.cleans != nil {
			db.cleans.Set(hash[:], enc)
			memcacheCleanMissMeter.Mark(1)
			memcacheCleanWriteMeter.Mark(int64(len(enc)))
		}
		return enc, nil
	}
	return nil, errors.New("not found")
}

// Reference adds a new reference from a parent node to a child node.
// This function is used to add a reference between an internal trie node
// and an external node (e.g. a storage trie root); all internal trie nodes
// are referenced together by the database itself.
func (db *Database) Reference(child common.Hash, parent common.Hash) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.reference(child, parent)
}

// reference is the private locked version of Reference.
func (db *Database) reference(child common.Hash, parent common.Hash) {
	// If the node does not exist, it's a node pulled from disk, skip
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// The reference is for the state root, increase the reference counter.
	if parent == (common.Hash{}) {
		node.parents += 1
		return
	}
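	// Note that root references are counted once per call, whereas the
	// external child references below are deduplicated via the external set.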
	// The reference is for an external storage trie, don't duplicate if
	// the reference already exists.
	if db.dirties[parent].external == nil {
		db.dirties[parent].external = make(map[common.Hash]struct{})
	}
	if _, ok := db.dirties[parent].external[child]; ok {
		return
	}
	node.parents++
	db.dirties[parent].external[child] = struct{}{}
	db.childrenSize += common.HashLength
}

// Dereference removes an existing reference from a root node.
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root)

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCBytesMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}

// dereference is the private locked version of Dereference.
func (db *Database) dereference(hash common.Hash) {
	// If the node does not exist, it's a previously committed node.
	node, ok := db.dirties[hash]
	if !ok {
		return
	}
	// If there are no more references to the node, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list
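		// (three cases: the node is the list head, the tail, or somewhere in
		// the middle, in which case both neighbours must be relinked)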
		switch hash {
		case db.oldest:
			db.oldest = node.flushNext
			if node.flushNext != (common.Hash{}) {
				db.dirties[node.flushNext].flushPrev = common.Hash{}
			}
		case db.newest:
			db.newest = node.flushPrev
			if node.flushPrev != (common.Hash{}) {
				db.dirties[node.flushPrev].flushNext = common.Hash{}
			}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		node.forChildren(db.resolver, func(child common.Hash) {
			db.dereference(child)
		})
		delete(db.dirties, hash)
		db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
		if node.external != nil {
			db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
func (db *Database) Cap(limit common.StorageSize) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	batch := db.diskdb.NewBatch()
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata also needs to be
	// counted.
	size := db.dirtiesSize + common.StorageSize(len(db.dirties)*cachedNodeSize)
	size += db.childrenSize

	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		rawdb.WriteLegacyTrieNode(batch, oldest, node.node)

		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize)
		if node.external != nil {
			size -= common.StorageSize(len(node.external) * common.HashLength)
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data
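	// (the loop above advanced the local 'oldest' marker to the first node
	// that was not persisted, so everything between the old list head and
	// that marker can now be dropped from the dirty cache)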
	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
		if node.external != nil {
			db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushBytesMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
func (db *Database) Commit(node common.Hash, report bool) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
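	// (the uncacher replays every write in the batch and migrates the
	// corresponding entry from the dirty cache into the clean cache, see
	// cleaner.Put below)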
	if err := db.commit(node, batch, uncacher); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	if err := batch.Replay(uncacher); err != nil {
		return err
	}
	batch.Reset()

	// Update the commit metrics
	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitBytesMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}

// commit is the private locked version of Commit.
func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
	// If the node does not exist, it's a previously committed node
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	var err error

	// Commit the children first, then the node itself: a parent must only
	// ever be persisted after all of its children.
	node.forChildren(db.resolver, func(child common.Hash) {
		if err == nil {
			err = db.commit(child, batch, uncacher)
		}
	})
	if err != nil {
		return err
	}
	// If we've reached an optimal batch size, commit and start over
	rawdb.WriteLegacyTrieNode(batch, hash, node.node)
	if batch.ValueSize() >= ethdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		err := batch.Replay(uncacher)
		if err != nil {
			return err
		}
		batch.Reset()
	}
	return nil
}

// cleaner is a database batch replayer that takes a batch of write operations
// and cleans up the trie database from anything written to disk.
type cleaner struct {
	db *Database
}

// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
// the two-phase commit is to ensure data availability while moving from memory
// to disk.
func (c *cleaner) Put(key []byte, rlp []byte) error {
	hash := common.BytesToHash(key)

	// If the node does not exist, we're done on this path
	node, ok := c.db.dirties[hash]
	if !ok {
		return nil
	}
	// Node still exists, remove it from the flush-list
	switch hash {
	case c.db.oldest:
		c.db.oldest = node.flushNext
		if node.flushNext != (common.Hash{}) {
			c.db.dirties[node.flushNext].flushPrev = common.Hash{}
		}
	case c.db.newest:
		c.db.newest = node.flushPrev
		if node.flushPrev != (common.Hash{}) {
			c.db.dirties[node.flushPrev].flushNext = common.Hash{}
		}
	default:
		c.db.dirties[node.flushPrev].flushNext = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	// Remove the node from the dirty cache
	delete(c.db.dirties, hash)
	c.db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
	if node.external != nil {
		c.db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
	}
	// Move the flushed node into the clean cache to prevent insta-reloads
	if c.db.cleans != nil {
		c.db.cleans.Set(hash[:], rlp)
		memcacheCleanWriteMeter.Mark(int64(len(rlp)))
	}
	return nil
}

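// Delete reacts to database deletes. Trie commits only ever issue writes, so a
// replayed deletion indicates a programming error and is surfaced loudly.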
func (c *cleaner) Delete(key []byte) error {
	panic("not implemented")
}

// Initialized returns an indicator of whether the state data is already
// initialized in the hash-based scheme by checking the presence of the
// genesis state.
func (db *Database) Initialized(genesisRoot common.Hash) bool {
	return rawdb.HasLegacyTrieNode(db.diskdb, genesisRoot)
}

// Update inserts the dirty nodes of the provided nodeset into the database and
// links the account trie with multiple storage tries if necessary.
func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *triestate.Set) error {
	// Ensure the parent state is present and report an error if not.
	if parent != types.EmptyRootHash {
		if blob, _ := db.node(parent); len(blob) == 0 {
			log.Error("parent state is not present")
		}
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	// Insert dirty nodes into the database. In the same tree, it must be
	// ensured that children are inserted first, then parent so that children
	// can be linked with their parent correctly.
	//
	// Note, the storage tries must be flushed before the account trie to
	// retain the invariant that children go into the dirty cache first.
	var order []common.Hash
	for owner := range nodes.Sets {
		if owner == (common.Hash{}) {
			continue
		}
		order = append(order, owner)
	}
	if _, ok := nodes.Sets[common.Hash{}]; ok {
		order = append(order, common.Hash{})
	}
	for _, owner := range order {
		subset := nodes.Sets[owner]
		subset.ForEachWithOrder(func(path string, n *trienode.Node) {
			if n.IsDeleted() {
				return // ignore deletion
			}
			db.insert(n.Hash, n.Blob)
		})
	}
	// Link up the account trie and storage trie if the node points
	// to an account trie leaf.
	if set, present := nodes.Sets[common.Hash{}]; present {
		for _, n := range set.Leaves {
			var account types.StateAccount
			if err := rlp.DecodeBytes(n.Blob, &account); err != nil {
				return err
			}
			if account.Root != types.EmptyRootHash {
				db.reference(account.Root, n.Parent)
			}
		}
	}
	return nil
}

// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
//
// The first return will always be 0, representing the memory stored in unbounded
// diff layers above the dirty cache. This is only available in pathdb.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata also needs to be
	// counted.
	var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize)
	return 0, db.dirtiesSize + db.childrenSize + metadataSize
}

// Close closes the trie database and releases all held resources.
func (db *Database) Close() error {
	if db.cleans != nil {
		db.cleans.Reset()
	}
	return nil
}

// Reader retrieves a node reader belonging to the given state root.
// An error will be returned if the requested state is not available.
func (db *Database) Reader(root common.Hash) (*reader, error) {
	if _, err := db.node(root); err != nil {
		return nil, fmt.Errorf("state %#x is not available, %v", root, err)
	}
	return &reader{db: db}, nil
}

// reader is a state reader of Database which implements the Reader interface.
type reader struct {
	db *Database
}

// Node retrieves the trie node with the given node hash. No error will be
// returned if the node is not found.
func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
	blob, _ := reader.db.node(hash)
	return blob, nil
}