github.com/calmw/ethereum@v0.1.1/trie/triedb/hashdb/database.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package hashdb

import (
	"errors"
	"reflect"
	"sync"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/calmw/ethereum/common"
	"github.com/calmw/ethereum/core/rawdb"
	"github.com/calmw/ethereum/core/types"
	"github.com/calmw/ethereum/ethdb"
	"github.com/calmw/ethereum/log"
	"github.com/calmw/ethereum/metrics"
	"github.com/calmw/ethereum/rlp"
	"github.com/calmw/ethereum/trie/trienode"
)

var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	memcacheDirtyHitMeter   = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
	memcacheDirtyMissMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
	memcacheDirtyReadMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
	memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// ChildResolver defines the required method to decode the provided
// trie node and iterate over its children.
type ChildResolver interface {
	ForEach(node []byte, onChild func(common.Hash))
}
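
// noopResolver below is an illustrative sketch added for documentation and is
// not part of the upstream package: it reports no children, which is only
// correct for nodes without hash references. Real callers supply a resolver
// that decodes the node blob and invokes onChild for every embedded child hash.
type noopResolver struct{}

// ForEach satisfies ChildResolver but deliberately resolves nothing.
func (noopResolver) ForEach(node []byte, onChild func(common.Hash)) {}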

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple of tries to disk, garbage collecting the remainder.
//
// Note, the trie Database is **not** thread safe in its mutations, but it **is**
// thread safe in providing individual, independent node access. The rationale
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
	diskdb   ethdb.Database // Persistent storage for matured trie nodes
	resolver ChildResolver  // The handler to resolve children of nodes

	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize  common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize common.StorageSize // Storage size of the external children tracking

	lock sync.RWMutex
}
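
// concurrencyExample is an illustrative sketch (not part of the upstream
// package) of the split documented above: lookups through Node are safe to run
// concurrently, while mutators such as Dereference, Cap, Commit and Update
// must be serialized by the caller.
func concurrencyExample(db *Database, root, hash common.Hash) {
	go func() {
		_, _ = db.Node(hash) // read access: safe even during the GC below
	}()
	db.Dereference(root) // mutation: never run two mutators concurrently
}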

// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
	node      []byte                   // Encoded node blob
	parents   uint32                   // Number of live nodes referencing this one
	external  map[common.Hash]struct{} // The set of external children
	flushPrev common.Hash              // Previous node in the flush-list
	flushNext common.Hash              // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())

// forChildren invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
func (n *cachedNode) forChildren(resolver ChildResolver, onChild func(hash common.Hash)) {
	for child := range n.external {
		onChild(child)
	}
	resolver.ForEach(n.node, onChild)
}

// New initializes the hash-based node database.
func New(diskdb ethdb.Database, cleans *fastcache.Cache, resolver ChildResolver) *Database {
	return &Database{
		diskdb:   diskdb,
		resolver: resolver,
		cleans:   cleans,
		dirties:  make(map[common.Hash]*cachedNode),
	}
}
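
// newExample is an illustrative sketch (not part of the upstream package)
// showing one way to wire the database together. rawdb.NewMemoryDatabase and
// fastcache.New are existing constructors; the 16 MiB clean-cache size and the
// noopResolver are assumptions made purely for this example.
func newExample() *Database {
	diskdb := rawdb.NewMemoryDatabase()       // throwaway in-memory key-value store
	cleans := fastcache.New(16 * 1024 * 1024) // clean-node cache, sized arbitrarily
	return New(diskdb, cleans, noopResolver{})
}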

// insert inserts a simplified trie node into the memory database.
// All nodes inserted by this function will be reference tracked
// and in theory should only be used for **trie node** insertion.
func (db *Database) insert(hash common.Hash, node []byte) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	memcacheDirtyWriteMeter.Mark(int64(len(node)))

	// Create the cached entry for this node
	entry := &cachedNode{
		node:      node,
		flushPrev: db.newest,
	}
	entry.forChildren(db.resolver, func(child common.Hash) {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	})
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + len(node))
}

// Node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content.
func (db *Database) Node(hash common.Hash) ([]byte, error) {
	// It doesn't make sense to retrieve the metaroot
	if hash == (common.Hash{}) {
		return nil, errors.New("not found")
	}
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return enc, nil
		}
	}
	// Retrieve the node from the dirty cache if available
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(len(dirty.node)))
		return dirty.node, nil
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc := rawdb.ReadLegacyTrieNode(db.diskdb, hash)
	if len(enc) != 0 {
		if db.cleans != nil {
			db.cleans.Set(hash[:], enc)
			memcacheCleanMissMeter.Mark(1)
			memcacheCleanWriteMeter.Mark(int64(len(enc)))
		}
		return enc, nil
	}
	return nil, errors.New("not found")
}
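
// nodeLookupExample is an illustrative sketch (not part of the upstream
// package) of the tiered read path above: clean cache first, then the dirty
// set, then the persistent database.
func nodeLookupExample(db *Database, hash common.Hash) []byte {
	blob, err := db.Node(hash)
	if err != nil {
		return nil // not found in any of the three tiers
	}
	return blob
}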

// Nodes retrieves the hashes of all the nodes cached within the memory database.
// This method is extremely expensive and should only be used to validate internal
// states in test code.
func (db *Database) Nodes() []common.Hash {
	db.lock.RLock()
	defer db.lock.RUnlock()

	var hashes = make([]common.Hash, 0, len(db.dirties))
	for hash := range db.dirties {
		hashes = append(hashes, hash)
	}
	return hashes
}

// Reference adds a new reference from a parent node to a child node. This
// function is used to add a reference between an internal trie node and an
// external node (e.g. a storage trie root); all internal trie nodes are
// referenced together by the database itself.
func (db *Database) Reference(child common.Hash, parent common.Hash) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.reference(child, parent)
}

// reference is the private locked version of Reference.
func (db *Database) reference(child common.Hash, parent common.Hash) {
	// If the node does not exist, it's a node pulled from disk, skip
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If the reference is for a state root, just increase the reference counter.
	if parent == (common.Hash{}) {
		node.parents += 1
		return
	}
	// The reference is for an external storage trie; don't duplicate it if
	// the reference already exists.
	if db.dirties[parent].external == nil {
		db.dirties[parent].external = make(map[common.Hash]struct{})
	}
	if _, ok := db.dirties[parent].external[child]; ok {
		return
	}
	node.parents++
	db.dirties[parent].external[child] = struct{}{}
	db.childrenSize += common.HashLength
}

// Dereference removes an existing reference from a root node.
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root)

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}
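
// lifecycleExample is an illustrative sketch (not part of the upstream
// package) of the usual reference-counting flow around state transitions:
// pin the freshly created trie, then garbage collect the superseded one.
func lifecycleExample(db *Database, oldRoot, newRoot common.Hash) {
	db.Reference(newRoot, common.Hash{}) // keep the new trie alive in memory
	db.Dereference(oldRoot)              // cascade-release the old trie
}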

// dereference is the private locked version of Dereference.
func (db *Database) dereference(hash common.Hash) {
	// If the node does not exist, it's a previously committed node.
	node, ok := db.dirties[hash]
	if !ok {
		return
	}
	// If there are no more references to the node, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list
		switch hash {
		case db.oldest:
			db.oldest = node.flushNext
			if node.flushNext != (common.Hash{}) {
				db.dirties[node.flushNext].flushPrev = common.Hash{}
			}
		case db.newest:
			db.newest = node.flushPrev
			if node.flushPrev != (common.Hash{}) {
				db.dirties[node.flushPrev].flushNext = common.Hash{}
			}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		node.forChildren(db.resolver, func(child common.Hash) {
			db.dereference(child)
		})
		delete(db.dirties, hash)
		db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
		if node.external != nil {
			db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata also needs to be
	// counted.
	size := db.dirtiesSize + common.StorageSize(len(db.dirties)*cachedNodeSize)
	size += db.childrenSize

	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		rawdb.WriteLegacyTrieNode(batch, oldest, node.node)

		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + len(node.node) + cachedNodeSize)
		if node.external != nil {
			size -= common.StorageSize(len(node.external) * common.HashLength)
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data
	db.lock.Lock()
	defer db.lock.Unlock()

	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
		if node.external != nil {
			db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}
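
// capExample is an illustrative sketch (not part of the upstream package) of
// the memory-pressure pattern typically used by callers: once the cache grows
// past a limit, flush the oldest nodes until usage drops below it. The 256 MiB
// limit is an arbitrary assumption for this example.
func capExample(db *Database) error {
	const limit = common.StorageSize(256 * 1024 * 1024)
	if db.Size() > limit {
		// Leave one ideal batch of headroom so the flush settles below the cap.
		return db.Cap(limit - ethdb.IdealBatchSize)
	}
	return nil
}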

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	db.lock.Lock()
	defer db.lock.Unlock()
	if err := batch.Replay(uncacher); err != nil {
		return err
	}
	batch.Reset()

	// Update the commit metrics
	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection and flush statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}
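
// commitExample is an illustrative sketch (not part of the upstream package)
// of the archival flow: pin the trie while it is live, then Commit persists it
// and tears the references down, so no explicit Dereference is needed after.
func commitExample(db *Database, root common.Hash) error {
	db.Reference(root, common.Hash{})
	return db.Commit(root, true)
}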

// commit is the private locked version of Commit.
func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
	// If the node does not exist, it's a previously committed node
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	var err error

	// Commit all children first, then the node itself
	node.forChildren(db.resolver, func(child common.Hash) {
		if err == nil {
			err = db.commit(child, batch, uncacher)
		}
	})
	if err != nil {
		return err
	}
	// If we've reached an optimal batch size, commit and start over
	rawdb.WriteLegacyTrieNode(batch, hash, node.node)
	if batch.ValueSize() >= ethdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		db.lock.Lock()
		err := batch.Replay(uncacher)
		batch.Reset()
		db.lock.Unlock()
		if err != nil {
			return err
		}
	}
	return nil
}

// cleaner is a database batch replayer that takes a batch of write operations
// and cleans up the trie database from anything written to disk.
type cleaner struct {
	db *Database
}

// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
// the two-phase commit is to ensure data availability while moving from memory
// to disk.
func (c *cleaner) Put(key []byte, rlp []byte) error {
	hash := common.BytesToHash(key)

	// If the node does not exist, we're done on this path
	node, ok := c.db.dirties[hash]
	if !ok {
		return nil
	}
	// Node still exists, remove it from the flush-list
	switch hash {
	case c.db.oldest:
		c.db.oldest = node.flushNext
		if node.flushNext != (common.Hash{}) {
			c.db.dirties[node.flushNext].flushPrev = common.Hash{}
		}
	case c.db.newest:
		c.db.newest = node.flushPrev
		if node.flushPrev != (common.Hash{}) {
			c.db.dirties[node.flushPrev].flushNext = common.Hash{}
		}
	default:
		c.db.dirties[node.flushPrev].flushNext = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	// Remove the node from the dirty cache
	delete(c.db.dirties, hash)
	c.db.dirtiesSize -= common.StorageSize(common.HashLength + len(node.node))
	if node.external != nil {
		c.db.childrenSize -= common.StorageSize(len(node.external) * common.HashLength)
	}
	// Move the flushed node into the clean cache to prevent insta-reloads
	if c.db.cleans != nil {
		c.db.cleans.Set(hash[:], rlp)
		memcacheCleanWriteMeter.Mark(int64(len(rlp)))
	}
	return nil
}

func (c *cleaner) Delete(key []byte) error {
	panic("not implemented")
}

// Initialized reports whether the state data is already initialized in the
// hash-based scheme, by checking the presence of the genesis state.
func (db *Database) Initialized(genesisRoot common.Hash) bool {
	return rawdb.HasLegacyTrieNode(db.diskdb, genesisRoot)
}

// Update inserts the dirty nodes of the provided nodeset into the database and
// links the account trie with multiple storage tries if necessary.
func (db *Database) Update(root common.Hash, parent common.Hash, nodes *trienode.MergedNodeSet) error {
	// Ensure the parent state is present and signal an error if not.
	if parent != types.EmptyRootHash {
		if blob, _ := db.Node(parent); len(blob) == 0 {
			log.Error("parent state is not present")
		}
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	// Insert dirty nodes into the database. Within the same tree, it must be
	// ensured that children are inserted first, then the parent, so that the
	// children can be linked with their parent correctly.
	//
	// Note, the storage tries must be flushed before the account trie to
	// retain the invariant that children go into the dirty cache first.
	var order []common.Hash
	for owner := range nodes.Sets {
		if owner == (common.Hash{}) {
			continue
		}
		order = append(order, owner)
	}
	if _, ok := nodes.Sets[common.Hash{}]; ok {
		order = append(order, common.Hash{})
	}
	for _, owner := range order {
		subset := nodes.Sets[owner]
		subset.ForEachWithOrder(func(path string, n *trienode.Node) {
			if n.IsDeleted() {
				return // ignore deletion
			}
			db.insert(n.Hash, n.Blob)
		})
	}
	// Link up the account trie and storage trie if the node points
	// to an account trie leaf.
	if set, present := nodes.Sets[common.Hash{}]; present {
		for _, n := range set.Leaves {
			var account types.StateAccount
			if err := rlp.DecodeBytes(n.Blob, &account); err != nil {
				return err
			}
			if account.Root != types.EmptyRootHash {
				db.reference(account.Root, n.Parent)
			}
		}
	}
	return nil
}
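
// updateExample is an illustrative sketch (not part of the upstream package)
// of the expected call pattern: merge the node sets produced by the storage
// trie commits and the account trie commit, then hand them to Update in one
// call. Node set construction itself happens in the trie package and is
// elided here.
func updateExample(db *Database, root, parent common.Hash, sets []*trienode.NodeSet) error {
	merged := trienode.NewMergedNodeSet()
	for _, set := range sets {
		if err := merged.Merge(set); err != nil {
			return err
		}
	}
	return db.Update(root, parent, merged)
}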

// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
func (db *Database) Size() common.StorageSize {
	db.lock.RLock()
	defer db.lock.RUnlock()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata also needs to be
	// counted.
	var metadataSize = common.StorageSize(len(db.dirties) * cachedNodeSize)
	return db.dirtiesSize + db.childrenSize + metadataSize
}

// Close closes the trie database and releases all held resources.
func (db *Database) Close() error { return nil }

// Scheme returns the node scheme used in the database.
func (db *Database) Scheme() string {
	return rawdb.HashScheme
}

// Reader retrieves a node reader belonging to the given state root.
func (db *Database) Reader(root common.Hash) *reader {
	return &reader{db: db}
}

// reader is a state reader of Database which implements the Reader interface.
type reader struct {
	db *Database
}

// Node retrieves the trie node with the given node hash.
// No error will be returned if the node is not found.
func (reader *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
	blob, _ := reader.db.Node(hash)
	return blob, nil
}
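
// readerExample is an illustrative sketch (not part of the upstream package)
// of Reader usage: in the hash scheme nodes are keyed purely by hash, so the
// owner and path arguments are accepted for interface compatibility but are
// ignored.
func readerExample(db *Database, root, hash common.Hash) []byte {
	r := db.Reader(root)
	blob, _ := r.Node(common.Hash{}, nil, hash)
	return blob
}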