github.com/ethereum/go-ethereum@v1.16.1/triedb/hashdb/database.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package hashdb

import (
	"errors"
	"fmt"
	"reflect"
	"sync"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/trienode"
	"github.com/ethereum/go-ethereum/triedb/database"
)

var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("hashdb/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("hashdb/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("hashdb/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/clean/write", nil)

	memcacheDirtyHitMeter   = metrics.NewRegisteredMeter("hashdb/memcache/dirty/hit", nil)
	memcacheDirtyMissMeter  = metrics.NewRegisteredMeter("hashdb/memcache/dirty/miss", nil)
	memcacheDirtyReadMeter  = metrics.NewRegisteredMeter("hashdb/memcache/dirty/read", nil)
	memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("hashdb/memcache/dirty/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("hashdb/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/nodes", nil)
	memcacheFlushBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/flush/bytes", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("hashdb/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/nodes", nil)
	memcacheGCBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/gc/bytes", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("hashdb/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/nodes", nil)
	memcacheCommitBytesMeter = metrics.NewRegisteredMeter("hashdb/memcache/commit/bytes", nil)
)

// Config contains the settings for the database.
type Config struct {
	CleanCacheSize int // Maximum memory allowance (in bytes) for caching clean nodes
}

// Defaults is the default setting for the database if none is specified.
// Notably, the clean cache is explicitly disabled.
var Defaults = &Config{
	// Explicitly set the clean cache size to 0 to avoid creating a fastcache;
	// otherwise the database must be closed when it's no longer needed to
	// prevent a memory leak.
	CleanCacheSize: 0,
}

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in memory and only
// periodically flush a couple of tries to disk, garbage collecting the remainder.
type Database struct {
	diskdb  ethdb.Database              // Persistent storage for matured trie nodes
	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and reference relationships of dirty trie nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize  common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize common.StorageSize // Storage size of the external children tracking

	lock sync.RWMutex
}

// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
	node      []byte                   // Encoded node blob, immutable
	parents   uint32                   // Number of live nodes referencing this one
	external  map[common.Hash]struct{} // The set of external children
	flushPrev common.Hash              // Previous node in the flush-list
	flushNext common.Hash              // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())

// forChildren invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
func (n *cachedNode) forChildren(onChild func(hash common.Hash)) {
	for child := range n.external {
		onChild(child)
	}
	trie.ForGatherChildren(n.node, onChild)
}

// New initializes the hash-based node database.
func New(diskdb ethdb.Database, config *Config) *Database {
	if config == nil {
		config = Defaults
	}
	var cleans *fastcache.Cache
	if config.CleanCacheSize > 0 {
		cleans = fastcache.New(config.CleanCacheSize)
	}
	return &Database{
		diskdb:  diskdb,
		cleans:  cleans,
		dirties: make(map[common.Hash]*cachedNode),
	}
}
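
// Usage sketch for New: constructing a database over an in-memory backend with
// a clean cache enabled. Any ethdb.Database implementation works as the
// backing store; the 16MB cache size is an arbitrary illustrative value.
//
//	diskdb := rawdb.NewMemoryDatabase()
//	db := New(diskdb, &Config{CleanCacheSize: 16 * 1024 * 1024})
//	defer db.Close() // releases the fastcache memory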

// insert inserts a trie node into the memory database. All nodes inserted by
// this function will be reference tracked. This function assumes the lock is
// already held.
func (db *Database) insert(hash common.Hash, node []byte) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	memcacheDirtyWriteMeter.Mark(int64(len(node)))

	// Create the cached entry for this node
	entry := &cachedNode{
		node:      node,
		flushPrev: db.newest,
	}
	entry.forChildren(func(child common.Hash) {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	})
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + len(node))
}
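
// Flush-list sketch: insert threads a FIFO doubly-linked list through the
// dirty nodes so that Cap can persist the oldest entries first. Assuming
// three fresh nodes inserted in order a, b, c (illustrative hashes):
//
//	db.insert(a, blobA) // oldest = a, newest = a
//	db.insert(b, blobB) // a.flushNext = b, newest = b
//	db.insert(c, blobC) // b.flushNext = c, newest = c
//
// Cap then walks from db.oldest (a) towards db.newest (c).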

// node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content.
func (db *Database) node(hash common.Hash) ([]byte, error) {
	// It doesn't make sense to retrieve the metaroot
	if hash == (common.Hash{}) {
		return nil, errors.New("not found")
	}
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return enc, nil
		}
	}
	// Retrieve the node from the dirty cache if available.
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	// Return the cached node if it's found in the dirty set.
	// The dirty.node field is immutable and thus safe to read
	// even without holding the lock.
	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(len(dirty.node)))
		return dirty.node, nil
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc := rawdb.ReadLegacyTrieNode(db.diskdb, hash)
	if len(enc) != 0 {
		if db.cleans != nil {
			db.cleans.Set(hash[:], enc)
			memcacheCleanMissMeter.Mark(1)
			memcacheCleanWriteMeter.Mark(int64(len(enc)))
		}
		return enc, nil
	}
	return nil, errors.New("not found")
}
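
// Lookup-order sketch: node consults the three tiers in cost order and
// promotes disk hits into the clean cache. Assuming hash refers to a node
// persisted earlier:
//
//	blob, err := db.node(hash) // 1. clean cache  2. dirty cache  3. disk
//	if err != nil {
//		// neither cached nor on disk
//	}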

// Reference adds a new reference from a parent node to a child node. This
// function is used to add a reference between an internal trie node and an
// external node (e.g. a storage trie root); all internal trie nodes are
// referenced together by the database itself.
func (db *Database) Reference(child common.Hash, parent common.Hash) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.reference(child, parent)
}

// reference is the private locked version of Reference.
func (db *Database) reference(child common.Hash, parent common.Hash) {
	// If the node does not exist, it's a node pulled from disk, skip
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// The reference is for the state root, increase the reference counter.
	if parent == (common.Hash{}) {
		node.parents += 1
		return
	}
	// The reference is for an external storage trie, don't duplicate if the
	// reference already exists.
	if db.dirties[parent].external == nil {
		db.dirties[parent].external = make(map[common.Hash]struct{})
	}
	if _, ok := db.dirties[parent].external[child]; ok {
		return
	}
	node.parents++
	db.dirties[parent].external[child] = struct{}{}
	db.childrenSize += common.HashLength
}
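
// Reference sketch: the two reference flavours in practice (stateRoot,
// storageRoot and accountLeaf are illustrative hashes). Passing the empty
// hash as parent pins a state root against garbage collection; passing an
// account leaf hash links a storage trie root into the account trie, which
// Update below does automatically for account leaves.
//
//	db.Reference(stateRoot, common.Hash{}) // pin a whole state trie
//	db.Reference(storageRoot, accountLeaf) // external child reference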

// Dereference removes an existing reference from a root node.
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root)

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCBytesMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}
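
// Garbage-collection sketch: a typical caller keeps recent roots referenced
// and drops older ones once they fall out of its retention window, at which
// point the whole subtrie (minus nodes shared with live tries) is evicted
// from the dirty cache (newRoot and staleRoot are illustrative hashes).
//
//	db.Reference(newRoot, common.Hash{}) // retain the fresh state
//	db.Dereference(staleRoot)            // release an obsolete one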

// dereference is the private locked version of Dereference.
func (db *Database) dereference(hash common.Hash) {
	// If the node does not exist, it's a previously committed node.
	node, ok := db.dirties[hash]
	if !ok {
		return
	}
	// If there are no more references to the node, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list
		switch hash {
		case db.oldest:
			db.oldest = node.flushNext
			if node.flushNext != (common.Hash{}) {
				db.dirties[node.flushNext].flushPrev = common.Hash{}
			}
		case db.newest:
			db.newest = node.flushPrev
			if node.flushPrev != (common.Hash{}) {
				db.dirties[node.flushPrev].flushNext = common.Hash{}
			}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		node.forChildren(func(child common.Hash) {
			db.dereference(child)
		})
		delete(db.dirties, hash)
		db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
		if node.external != nil {
			db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
func (db *Database) Cap(limit common.StorageSize) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	batch := db.diskdb.NewBatch()
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata also needs to be
	// counted.
	size := db.dirtiesSize + common.StorageSize(len(db.dirties)*cachedNodeSize)
	size += db.childrenSize

	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		rawdb.WriteLegacyTrieNode(batch, oldest, node.node)

		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize)
		if node.external != nil {
			size -= common.StorageSize(len(node.external) * common.HashLength)
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data
	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
		if node.external != nil {
			db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushBytesMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}
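
// Cap sketch: callers typically bound the dirty cache against a memory quota,
// leaving headroom for the final uncommitted batch. The 256MB quota here is
// an arbitrary illustrative value.
//
//	limit := common.StorageSize(256 * 1024 * 1024)
//	if _, dirties := db.Size(); dirties > limit {
//		db.Cap(limit - ethdb.IdealBatchSize)
//	}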

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
func (db *Database) Commit(node common.Hash, report bool) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	if err := batch.Replay(uncacher); err != nil {
		return err
	}
	batch.Reset()

	// Reset the storage counters and bump the metrics
	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitBytesMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}
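
// Commit sketch: persisting a finalized state root, e.g. at shutdown or at a
// checkpoint; report controls the log level of the summary line (stateRoot is
// an illustrative hash).
//
//	if err := db.Commit(stateRoot, true); err != nil {
//		log.Error("Failed to persist trie", "err", err)
//	}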

// commit is the private locked version of Commit.
func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
	// If the node does not exist, it's a previously committed node
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	var err error

	// Dereference all children and delete the node
	node.forChildren(func(child common.Hash) {
		if err == nil {
			err = db.commit(child, batch, uncacher)
		}
	})
	if err != nil {
		return err
	}
	// If we've reached an optimal batch size, commit and start over
	rawdb.WriteLegacyTrieNode(batch, hash, node.node)
	if batch.ValueSize() >= ethdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		err := batch.Replay(uncacher)
		if err != nil {
			return err
		}
		batch.Reset()
	}
	return nil
}

// cleaner is a database batch replayer that takes a batch of write operations
// and cleans up the trie database from anything written to disk.
type cleaner struct {
	db *Database
}

// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
// the two-phase commit is to ensure data availability while moving from memory
// to disk.
func (c *cleaner) Put(key []byte, rlp []byte) error {
	hash := common.BytesToHash(key)

	// If the node does not exist, we're done on this path
	node, ok := c.db.dirties[hash]
	if !ok {
		return nil
	}
	// Node still exists, remove it from the flush-list
	switch hash {
	case c.db.oldest:
		c.db.oldest = node.flushNext
		if node.flushNext != (common.Hash{}) {
			c.db.dirties[node.flushNext].flushPrev = common.Hash{}
		}
	case c.db.newest:
		c.db.newest = node.flushPrev
		if node.flushPrev != (common.Hash{}) {
			c.db.dirties[node.flushPrev].flushNext = common.Hash{}
		}
	default:
		c.db.dirties[node.flushPrev].flushNext = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	// Remove the node from the dirty cache
	delete(c.db.dirties, hash)
	c.db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
	if node.external != nil {
		c.db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
	}
	// Move the flushed node into the clean cache to prevent insta-reloads
	if c.db.cleans != nil {
		c.db.cleans.Set(hash[:], rlp)
		memcacheCleanWriteMeter.Mark(int64(len(rlp)))
	}
	return nil
}

// Delete is not supported: commit only ever writes trie nodes, so a replayed
// batch must not contain deletions.
func (c *cleaner) Delete(key []byte) error {
	panic("not implemented")
}
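
// Two-phase-commit sketch: cleaner satisfies the ethdb.KeyValueWriter
// interface, so replaying a written batch through it feeds every persisted
// key back into Put, which uncaches the corresponding dirty node only after
// the data is safely on disk (mirroring what Commit does above):
//
//	if err := batch.Write(); err == nil {
//		batch.Replay(&cleaner{db}) // uncache exactly what was written
//	}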

// Update inserts the dirty nodes of the provided nodeset into the database and
// links the account trie with multiple storage tries if necessary.
func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet) error {
	// Ensure the parent state is present and log an error if not.
	if parent != types.EmptyRootHash {
		if blob, _ := db.node(parent); len(blob) == 0 {
			log.Error("parent state is not present")
		}
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	// Insert dirty nodes into the database. Within the same tree, it must be
	// ensured that children are inserted first and their parent afterwards,
	// so that children can be linked with their parent correctly.
	//
	// Note, the storage tries must be flushed before the account trie to
	// retain the invariant that children go into the dirty cache first.
	var order []common.Hash
	for owner := range nodes.Sets {
		if owner == (common.Hash{}) {
			continue
		}
		order = append(order, owner)
	}
	if _, ok := nodes.Sets[common.Hash{}]; ok {
		order = append(order, common.Hash{})
	}
	for _, owner := range order {
		subset := nodes.Sets[owner]
		subset.ForEachWithOrder(func(path string, n *trienode.Node) {
			if n.IsDeleted() {
				return // ignore deletion
			}
			db.insert(n.Hash, n.Blob)
		})
	}
	// Link up the account trie and storage trie if the node points
	// to an account trie leaf.
	if set, present := nodes.Sets[common.Hash{}]; present {
		for _, n := range set.Leaves {
			var account types.StateAccount
			if err := rlp.DecodeBytes(n.Blob, &account); err != nil {
				return err
			}
			if account.Root != types.EmptyRootHash {
				db.reference(account.Root, n.Parent)
			}
		}
	}
	return nil
}
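
// Update sketch: callers merge per-trie node sets (storage sets keyed by the
// account hash, the account set keyed by the empty hash) and hand the result
// over in one call. storageSet and accountSet are assumed to be
// *trienode.NodeSet values produced by trie commits.
//
//	merged := trienode.NewMergedNodeSet()
//	merged.Merge(storageSet) // owner = account hash
//	merged.Merge(accountSet) // owner = common.Hash{}
//	err := db.Update(newRoot, parentRoot, blockNumber, merged)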

// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
//
// The first return will always be 0, representing the memory stored in unbounded
// diff layers above the dirty cache. This is only available in pathdb.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata also needs to be
	// counted.
	var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize)
	return 0, db.dirtiesSize + db.childrenSize + metadataSize
}

// Close closes the trie database and releases all held resources.
func (db *Database) Close() error {
	if db.cleans != nil {
		db.cleans.Reset()
	}
	return nil
}

// NodeReader returns a reader for accessing trie nodes within the specified state.
// An error will be returned if the specified state is not available.
func (db *Database) NodeReader(root common.Hash) (database.NodeReader, error) {
	if _, err := db.node(root); err != nil {
		return nil, fmt.Errorf("state %#x is not available, %v", root, err)
	}
	return &reader{db: db}, nil
}

// reader is a state reader of Database which implements the Reader interface.
type reader struct {
	db *Database
}

// Node retrieves the trie node with the given node hash. No error will be
// returned if the node is not found.
func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
	blob, _ := reader.db.node(hash)
	return blob, nil
}
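
// Reader sketch: hashdb resolves nodes purely by hash, so the owner and path
// arguments are accepted only to satisfy the interface and are ignored; a
// missing node yields a nil blob rather than an error (stateRoot and nodeHash
// are illustrative hashes).
//
//	r, err := db.NodeReader(stateRoot)
//	if err == nil {
//		blob, _ := r.Node(common.Hash{}, nil, nodeHash)
//		_ = blob // nil if the node is unknown
//	}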

// StateReader returns a reader that allows access to the state data associated
// with the specified state.
func (db *Database) StateReader(root common.Hash) (database.StateReader, error) {
	return nil, errors.New("not implemented")
}