github.com/ethereum-optimism/optimism/l2geth@v0.0.0-20230612200230-50b04ade19e3/trie/database.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"
	"io"
	"reflect"
	"sync"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum-optimism/optimism/l2geth/common"
	"github.com/ethereum-optimism/optimism/l2geth/ethdb"
	"github.com/ethereum-optimism/optimism/l2geth/log"
	"github.com/ethereum-optimism/optimism/l2geth/metrics"
	"github.com/ethereum-optimism/optimism/l2geth/rlp"
)

var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	memcacheDirtyHitMeter   = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
	memcacheDirtyMissMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
	memcacheDirtyReadMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
	memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// secureKeyPrefix is the database key prefix used to store trie node preimages.
var secureKeyPrefix = []byte("secure-key-")

// secureKeyLength is the length of the above prefix + 32-byte hash.
const secureKeyLength = 11 + 32

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple of tries to disk, garbage collecting the remainder.
//
// Note, the trie Database is **not** thread safe in its mutations, but it **is**
// thread safe in providing individual, independent node access. The rationale
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
	diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes

	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
	seckeybuf [secureKeyLength]byte  // Ephemeral buffer for calculating preimage keys

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize   common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize  common.StorageSize // Storage size of the external children tracking
	preimagesSize common.StorageSize // Storage size of the preimages cache

	lock sync.RWMutex
}

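// For illustration only (not part of the original source): a rough sketch of
// how a caller outside this package might drive this write layer. The method
// names are from this file; diskdb, someTrie and the size figures are
// hypothetical stand-ins.
//
//	triedb := trie.NewDatabaseWithCache(diskdb, 256) // 256 MB clean cache
//	root, _ := someTrie.Commit(nil)                  // accumulate dirty nodes
//	triedb.Reference(root, common.Hash{})            // pin the root in memory
//	_ = triedb.Cap(128 * 1024 * 1024)                // flush oldest nodes below ~128 MB
//	_ = triedb.Commit(root, true)                    // persist the whole trie...
//	triedb.Dereference(root)                         // ...or garbage collect it instead
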
// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time storing
// them in the same cache fields).
type rawNode []byte

func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

func (n rawFullNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }

func (n rawFullNode) EncodeRLP(w io.Writer) error {
	var nodes [17]node

	// Swap nil children for the canonical nil value node, so the encoder
	// always sees a complete 17-item list.
	for i, child := range n {
		if child != nil {
			nodes[i] = child
		} else {
			nodes[i] = nilValueNode
		}
	}
	return rlp.Encode(w, nodes)
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached node in the
// memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())

// cachedNodeChildrenSize is the raw size of an initialized but empty external
// reference map.
const cachedNodeChildrenSize = 48

// rlp returns the raw rlp encoded blob of the cached node, either directly from
// the cache, or by regenerating it from the collapsed node.
func (n *cachedNode) rlp() []byte {
	if node, ok := n.node.(rawNode); ok {
		return node
	}
	blob, err := rlp.EncodeToBytes(n.node)
	if err != nil {
		panic(err)
	}
	return blob
}

// obj returns the decoded and expanded trie node, either directly from the cache,
// or by regenerating it from the rlp encoded blob.
func (n *cachedNode) obj(hash common.Hash) node {
	if node, ok := n.node.(rawNode); ok {
		return mustDecodeNode(hash[:], node)
	}
	return expandNode(hash[:], n.node)
}

// forChilds invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
func (n *cachedNode) forChilds(onChild func(hash common.Hash)) {
	for child := range n.children {
		onChild(child)
	}
	if _, ok := n.node.(rawNode); !ok {
		forGatherChildren(n.node, onChild)
	}
}

// forGatherChildren traverses the node hierarchy of a collapsed storage node and
// invokes the callback for all the hashnode children.
func forGatherChildren(n node, onChild func(hash common.Hash)) {
	switch n := n.(type) {
	case *rawShortNode:
		forGatherChildren(n.Val, onChild)
	case rawFullNode:
		for i := 0; i < 16; i++ {
			forGatherChildren(n[i], onChild)
		}
	case hashNode:
		onChild(common.BytesToHash(n))
	case valueNode, nil:
	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

// simplifyNode traverses the hierarchy of an expanded memory node and discards
// all the internal caches, returning a node that only contains the raw data.
func simplifyNode(n node) node {
	switch n := n.(type) {
	case *shortNode:
		// Short nodes discard the flags and cascade
		return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)}

	case *fullNode:
		// Full nodes discard the flags and cascade
		node := rawFullNode(n.Children)
		for i := 0; i < len(node); i++ {
			if node[i] != nil {
				node[i] = simplifyNode(node[i])
			}
		}
		return node

	case valueNode, hashNode, rawNode:
		return n

	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

// expandNode traverses the node hierarchy of a collapsed storage node and converts
// all fields and keys into expanded memory form.
func expandNode(hash hashNode, n node) node {
	switch n := n.(type) {
	case *rawShortNode:
		// Short nodes need key and child expansion
		return &shortNode{
			Key: compactToHex(n.Key),
			Val: expandNode(nil, n.Val),
			flags: nodeFlag{
				hash: hash,
			},
		}

	case rawFullNode:
		// Full nodes need child expansion
		node := &fullNode{
			flags: nodeFlag{
				hash: hash,
			},
		}
		for i := 0; i < len(node.Children); i++ {
			if n[i] != nil {
				node.Children[i] = expandNode(nil, n[i])
			}
		}
		return node

	case valueNode, hashNode:
		return n

	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

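// Illustrative note (not in the original source): simplifyNode and expandNode
// are intended to be inverses of each other, modulo the cached hash. Assuming
// a short node whose key was collapsed with hexToCompact before insertion:
//
//	collapsed := simplifyNode(&shortNode{Key: compactKey, Val: val}) // *rawShortNode, flags dropped
//	restored := expandNode(hash, collapsed)                          // *shortNode, key back in hex form
//
// The round trip discards dirty flags and cached children, which is exactly
// the part the memory database must not persist.
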
// NewDatabase creates a new trie database to store ephemeral trie content before
// it's written out to disk or garbage collected. No read cache is created, so all
// data retrievals will hit the underlying disk database.
func NewDatabase(diskdb ethdb.KeyValueStore) *Database {
	return NewDatabaseWithCache(diskdb, 0)
}

// NewDatabaseWithCache creates a new trie database to store ephemeral trie content
// before it's written out to disk or garbage collected. It also acts as a read cache
// for nodes loaded from disk.
func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int) *Database {
	var cleans *fastcache.Cache
	if cache > 0 {
		cleans = fastcache.New(cache * 1024 * 1024)
	}
	return &Database{
		diskdb: diskdb,
		cleans: cleans,
		dirties: map[common.Hash]*cachedNode{{}: {
			children: make(map[common.Hash]uint16),
		}},
		preimages: make(map[common.Hash][]byte),
	}
}

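// For illustration (not part of the original source): the cache parameter is
// denominated in megabytes, so wiring up a 512 MB clean cache would look
// roughly like:
//
//	triedb := trie.NewDatabaseWithCache(diskdb, 512)
//
// Passing 0 (or calling NewDatabase) skips the fastcache allocation entirely,
// so every clean-node read falls through to diskdb.
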
// DiskDB retrieves the persistent storage backing the trie database.
func (db *Database) DiskDB() ethdb.KeyValueReader {
	return db.diskdb
}

// InsertBlob writes a new reference tracked blob to the memory database if it's
// not already known. This method should only be used for non-trie nodes that
// require reference counting, since trie nodes are garbage collected directly
// through their embedded children.
func (db *Database) InsertBlob(hash common.Hash, blob []byte) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.insert(hash, blob, rawNode(blob))
}

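// Illustrative note (not in the original source): in upstream go-ethereum of
// this vintage, contract code is the canonical InsertBlob user. It is stored
// as a raw blob and then kept alive by referencing it from the committed
// account leaf, roughly:
//
//	triedb.InsertBlob(codeHash, code)  // store the code blob
//	triedb.Reference(codeHash, parent) // parent: hash of the enclosing trie node
//
// codeHash, code and parent are stand-ins, not identifiers from this file.
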
// insert inserts a collapsed trie node into the memory database. This method is
// a more generic version of InsertBlob, supporting both raw blob insertions as
// well as expanded trie node insertions. The blob must always be specified to
// allow proper size tracking.
func (db *Database) insert(hash common.Hash, blob []byte, node node) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	memcacheDirtyWriteMeter.Mark(int64(len(blob)))

	// Create the cached entry for this node
	entry := &cachedNode{
		node:      simplifyNode(node),
		size:      uint16(len(blob)),
		flushPrev: db.newest,
	}
	entry.forChilds(func(child common.Hash) {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	})
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}

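// Illustrative note (not in the original source): the dirty nodes form a
// doubly linked "flush-list" threaded through flushPrev/flushNext in insertion
// order, so after inserting hashes A, B and C the list is:
//
//	oldest -> A <-> B <-> C <- newest
//
// Cap walks this list from oldest towards newest, approximating a
// write-out-the-longest-resident-first policy.
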
// insertPreimage writes a new trie node pre-image to the memory database if it's
// not already known. The method will make a copy of the slice.
//
// Note, this method assumes that the database's lock is held!
func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
	if _, ok := db.preimages[hash]; ok {
		return
	}
	db.preimages[hash] = common.CopyBytes(preimage)
	db.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
}

// node retrieves a cached trie node from memory. If it cannot be found cached,
// the method queries the persistent database for the content; nil is returned
// if the node is not found anywhere.
func (db *Database) node(hash common.Hash) node {
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return mustDecodeNode(hash[:], enc)
		}
	}
	// Retrieve the node from the dirty cache if available
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(dirty.size))
		return dirty.obj(hash)
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc, err := db.diskdb.Get(hash[:])
	if err != nil || enc == nil {
		return nil
	}
	if db.cleans != nil {
		db.cleans.Set(hash[:], enc)
		memcacheCleanMissMeter.Mark(1)
		memcacheCleanWriteMeter.Mark(int64(len(enc)))
	}
	return mustDecodeNode(hash[:], enc)
}

// Node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content.
func (db *Database) Node(hash common.Hash) ([]byte, error) {
	// It doesn't make sense to retrieve the metaroot
	if hash == (common.Hash{}) {
		return nil, errors.New("not found")
	}
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return enc, nil
		}
	}
	// Retrieve the node from the dirty cache if available
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(dirty.size))
		return dirty.rlp(), nil
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc, err := db.diskdb.Get(hash[:])
	if err == nil && enc != nil {
		if db.cleans != nil {
			db.cleans.Set(hash[:], enc)
			memcacheCleanMissMeter.Mark(1)
			memcacheCleanWriteMeter.Mark(int64(len(enc)))
		}
	}
	return enc, err
}

// preimage retrieves a cached trie node pre-image from memory. If it cannot be
// found cached, the method queries the persistent database for the content.
func (db *Database) preimage(hash common.Hash) ([]byte, error) {
	// Retrieve the node from cache if available
	db.lock.RLock()
	preimage := db.preimages[hash]
	db.lock.RUnlock()

	if preimage != nil {
		return preimage, nil
	}
	// Content unavailable in memory, attempt to retrieve from disk
	return db.diskdb.Get(db.secureKey(hash[:]))
}

// secureKey returns the database key for the preimage of key, as an ephemeral
// buffer. The caller must not hold onto the return value because it will become
// invalid on the next call.
func (db *Database) secureKey(key []byte) []byte {
	buf := append(db.seckeybuf[:0], secureKeyPrefix...)
	buf = append(buf, key...)
	return buf
}

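// Illustrative note (not in the original source): the returned key is simply
// the 11-byte prefix concatenated with the 32-byte hash:
//
//	"secure-key-" + hash  // 11 + 32 = 43 bytes = secureKeyLength
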
// Nodes retrieves the hashes of all the nodes cached within the memory database.
// This method is extremely expensive and should only be used to validate internal
// states in test code.
func (db *Database) Nodes() []common.Hash {
	db.lock.RLock()
	defer db.lock.RUnlock()

	var hashes = make([]common.Hash, 0, len(db.dirties))
	for hash := range db.dirties {
		if hash != (common.Hash{}) { // Special case for "root" references/nodes
			hashes = append(hashes, hash)
		}
	}
	return hashes
}

// Reference adds a new reference from a parent node to a child node.
func (db *Database) Reference(child common.Hash, parent common.Hash) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.reference(child, parent)
}

// reference is the private locked version of Reference.
func (db *Database) reference(child common.Hash, parent common.Hash) {
	// If the node does not exist, it's a node pulled from disk, skip
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If the reference already exists, only duplicate for roots
	if db.dirties[parent].children == nil {
		db.dirties[parent].children = make(map[common.Hash]uint16)
		db.childrenSize += cachedNodeChildrenSize
	} else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) {
		return
	}
	node.parents++
	db.dirties[parent].children[child]++
	if db.dirties[parent].children[child] == 1 {
		db.childrenSize += common.HashLength + 2 // uint16 counter
	}
}

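// Illustrative note (not in the original source): references are counted on
// both endpoints, so for a child C under a regular parent P the bookkeeping is
// roughly:
//
//	db.Reference(C, P)             // C.parents++, P.children[C]++
//	db.Reference(C, P)             // no-op: the edge already exists
//	db.Reference(C, common.Hash{}) // always counted: roots may be pinned repeatedly
//
// Only the meta-root (the empty hash) is allowed to accumulate duplicate
// references to the same child, since external callers may pin the same trie
// root more than once.
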
// Dereference removes an existing reference from a root node.
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root, common.Hash{})

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}

// dereference is the private locked version of Dereference.
func (db *Database) dereference(child common.Hash, parent common.Hash) {
	// Dereference the parent-child
	node := db.dirties[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
			db.childrenSize -= (common.HashLength + 2) // uint16 counter
		}
	}
	// If the child does not exist, it's a previously committed node.
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If there are no more references to the child, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.dirties[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.dirties[node.flushPrev].flushNext = common.Hash{}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		node.forChilds(func(hash common.Hash) {
			db.dereference(hash, child)
		})
		delete(db.dirties, child)
		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= cachedNodeChildrenSize
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata also needs to be
	// counted.
	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
	size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))

	// If the preimage cache got large enough, push to disk. If it's still small,
	// leave it for later to deduplicate writes.
	flushPreimages := db.preimagesSize > 4*1024*1024
	if flushPreimages {
		for hash, preimage := range db.preimages {
			if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
				log.Error("Failed to commit preimage from trie database", "err", err)
				return err
			}
			if batch.ValueSize() > ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return err
				}
				batch.Reset()
			}
		}
	}
	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		if err := batch.Put(oldest[:], node.rlp()); err != nil {
			return err
		}
		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
		if node.children != nil {
			size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
		oldest = node.flushNext
	}
	// Flush out any leftover data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data
	db.lock.Lock()
	defer db.lock.Unlock()

	if flushPreimages {
		db.preimages = make(map[common.Hash][]byte)
		db.preimagesSize = 0
	}
	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}

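// Illustrative note (not in the original source): upstream go-ethereum invokes
// Cap from the block import path to bound memory usage, along the lines of:
//
//	if nodes, imgs := triedb.Size(); nodes > limit || imgs > 4*1024*1024 {
//		triedb.Cap(limit - ethdb.IdealBatchSize)
//	}
//
// limit here is a caller-chosen allowance, not something defined in this file;
// the one-batch slack leaves headroom for writes still sitting in an
// unflushed batch.
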
// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move all of the accumulated preimages into a write batch
	for hash, preimage := range db.preimages {
		if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
			log.Error("Failed to commit preimage from trie database", "err", err)
			return err
		}
		// If the batch is too large, flush to disk
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}
	// Since we're going to replay trie node writes into the clean cache, flush out
	// any batched pre-images before continuing.
	if err := batch.Write(); err != nil {
		return err
	}
	batch.Reset()

	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	db.lock.Lock()
	defer db.lock.Unlock()

	batch.Replay(uncacher)
	batch.Reset()

	// Reset the storage counters and bump the metrics
	db.preimages = make(map[common.Hash][]byte)
	db.preimagesSize = 0

	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}

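// Illustrative note (not in the original source): a typical forced flush, e.g.
// at shutdown or after every block on an archive-style node, would be:
//
//	if err := triedb.Commit(root, true); err != nil {
//		log.Crit("Failed to commit trie", "err", err)
//	}
//
// Since db.commit recurses into children before writing the parent, a node is
// never persisted ahead of the subtrie below it, so an interrupted commit
// cannot leave a dangling parent on disk.
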
// commit is the private locked version of Commit.
func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
	// If the node does not exist, it's a previously committed node
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	var err error
	node.forChilds(func(child common.Hash) {
		if err == nil {
			err = db.commit(child, batch, uncacher)
		}
	})
	if err != nil {
		return err
	}
	if err := batch.Put(hash[:], node.rlp()); err != nil {
		return err
	}
	// If we've reached an optimal batch size, commit and start over
	if batch.ValueSize() >= ethdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		db.lock.Lock()
		batch.Replay(uncacher)
		batch.Reset()
		db.lock.Unlock()
	}
	return nil
}

// cleaner is a database batch replayer that takes a batch of write operations
// and cleans up the trie database from anything written to disk.
type cleaner struct {
	db *Database
}

// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
// the two-phase commit is to ensure data availability while moving from memory
// to disk.
func (c *cleaner) Put(key []byte, rlp []byte) error {
	hash := common.BytesToHash(key)

	// If the node does not exist, we're done on this path
	node, ok := c.db.dirties[hash]
	if !ok {
		return nil
	}
	// Node still exists, remove it from the flush-list
	switch hash {
	case c.db.oldest:
		c.db.oldest = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = common.Hash{}
	case c.db.newest:
		c.db.newest = node.flushPrev
		c.db.dirties[node.flushPrev].flushNext = common.Hash{}
	default:
		c.db.dirties[node.flushPrev].flushNext = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	// Remove the node from the dirty cache
	delete(c.db.dirties, hash)
	c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
	if node.children != nil {
		c.db.dirtiesSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
	}
	// Move the flushed node into the clean cache to prevent insta-reloads
	if c.db.cleans != nil {
		c.db.cleans.Set(hash[:], rlp)
		memcacheCleanWriteMeter.Mark(int64(len(rlp)))
	}
	return nil
}

func (c *cleaner) Delete(key []byte) error {
	panic("not implemented")
}

// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata also needs to be
	// counted.
	var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize)
	var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2))
	return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, db.preimagesSize
}
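
// Illustrative note (not in the original source): callers typically compare
// the first return value against their memory allowance, e.g.:
//
//	dirties, preimages := triedb.Size()
//	if dirties > memoryLimit || preimages > preimageLimit {
//		triedb.Cap(memoryLimit)
//	}
//
// memoryLimit and preimageLimit are hypothetical; both return values are
// common.StorageSize, so they compare directly against plain byte counts.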