github.com/jimmyx0x/go-ethereum@v1.10.28/trie/database.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"
	"io"
	"reflect"
	"runtime"
	"sync"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
)

var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	memcacheDirtyHitMeter   = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
	memcacheDirtyMissMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
	memcacheDirtyReadMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
	memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple of tries to disk, garbage collecting the remainder.
//
// Note, the trie Database is **not** thread safe in its mutations, but it **is**
// thread safe in providing individual, independent node access. The rationale
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
	diskdb ethdb.Database // Persistent storage for matured trie nodes

	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize  common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize common.StorageSize // Storage size of the external children tracking
	preimages    *preimageStore     // The store for caching preimages

	lock sync.RWMutex
}

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time storing
// them in the same cache fields).
type rawNode []byte

func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }

func (n rawNode) EncodeRLP(w io.Writer) error {
	_, err := w.Write(n)
	return err
}

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

func (n rawFullNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }

func (n rawFullNode) EncodeRLP(w io.Writer) error {
	eb := rlp.NewEncoderBuffer(w)
	n.encode(eb)
	return eb.Flush()
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())

// cachedNodeChildrenSize is the raw size of an initialized but empty external
// reference map.
const cachedNodeChildrenSize = 48

// rlp returns the raw rlp encoded blob of the cached trie node, either directly
// from the cache, or by regenerating it from the collapsed node.
func (n *cachedNode) rlp() []byte {
	if node, ok := n.node.(rawNode); ok {
		return node
	}
	return nodeToBytes(n.node)
}

// obj returns the decoded and expanded trie node, either directly from the cache,
// or by regenerating it from the rlp encoded blob.
func (n *cachedNode) obj(hash common.Hash) node {
	if node, ok := n.node.(rawNode); ok {
		// Raw-blob format nodes are loaded either from the clean cache or
		// the database; each is its own copy, so it's safe to use the
		// unsafe decoder on them.
		return mustDecodeNodeUnsafe(hash[:], node)
	}
	return expandNode(hash[:], n.node)
}

// forChilds invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
func (n *cachedNode) forChilds(onChild func(hash common.Hash)) {
	for child := range n.children {
		onChild(child)
	}
	if _, ok := n.node.(rawNode); !ok {
		forGatherChildren(n.node, onChild)
	}
}

// forGatherChildren traverses the node hierarchy of a collapsed storage node and
// invokes the callback for all the hashnode children.
func forGatherChildren(n node, onChild func(hash common.Hash)) {
	switch n := n.(type) {
	case *rawShortNode:
		forGatherChildren(n.Val, onChild)
	case rawFullNode:
		for i := 0; i < 16; i++ {
			forGatherChildren(n[i], onChild)
		}
	case hashNode:
		onChild(common.BytesToHash(n))
	case valueNode, nil, rawNode:
	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

// simplifyNode traverses the hierarchy of an expanded memory node and discards
// all the internal caches, returning a node that only contains the raw data.
func simplifyNode(n node) node {
	switch n := n.(type) {
	case *shortNode:
		// Short nodes discard the flags and cascade
		return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)}

	case *fullNode:
		// Full nodes discard the flags and cascade
		node := rawFullNode(n.Children)
		for i := 0; i < len(node); i++ {
			if node[i] != nil {
				node[i] = simplifyNode(node[i])
			}
		}
		return node

	case valueNode, hashNode, rawNode:
		return n

	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

// expandNode traverses the node hierarchy of a collapsed storage node and converts
// all fields and keys into expanded memory form.
func expandNode(hash hashNode, n node) node {
	switch n := n.(type) {
	case *rawShortNode:
		// Short nodes need key and child expansion
		return &shortNode{
			Key: compactToHex(n.Key),
			Val: expandNode(nil, n.Val),
			flags: nodeFlag{
				hash: hash,
			},
		}

	case rawFullNode:
		// Full nodes need child expansion
		node := &fullNode{
			flags: nodeFlag{
				hash: hash,
			},
		}
		for i := 0; i < len(node.Children); i++ {
			if n[i] != nil {
				node.Children[i] = expandNode(nil, n[i])
			}
		}
		return node

	case valueNode, hashNode:
		return n

	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}
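
// Illustrative note on the key conversion above: compactToHex maps keys from
// the on-disk compact (hex-prefix) encoding back to the in-memory hex-nibble
// form used during traversal. As a rough worked example, assuming the standard
// hex-prefix encoding used by this package, a leaf key with nibbles [1, 2, 3]
// is stored compactly as the two bytes 0x31 0x23 (flag nibble 3 = odd length
// plus terminator), and expands back to the nibbles [1, 2, 3] followed by the
// terminator nibble (16).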

// Config defines all necessary options for database.
type Config struct {
	Cache     int    // Memory allowance (MB) to use for caching trie nodes in memory
	Journal   string // Journal of clean cache to survive node restarts
	Preimages bool   // Flag whether the preimage of trie key is recorded
}

// NewDatabase creates a new trie database to store ephemeral trie content before
// it's written out to disk or garbage collected. No read cache is created, so all
// data retrievals will hit the underlying disk database.
func NewDatabase(diskdb ethdb.Database) *Database {
	return NewDatabaseWithConfig(diskdb, nil)
}

// NewDatabaseWithConfig creates a new trie database to store ephemeral trie content
// before it's written out to disk or garbage collected. It also acts as a read cache
// for nodes loaded from disk.
func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database {
	var cleans *fastcache.Cache
	if config != nil && config.Cache > 0 {
		if config.Journal == "" {
			cleans = fastcache.New(config.Cache * 1024 * 1024)
		} else {
			cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024)
		}
	}
	var preimage *preimageStore
	if config != nil && config.Preimages {
		preimage = newPreimageStore(diskdb)
	}
	db := &Database{
		diskdb: diskdb,
		cleans: cleans,
		dirties: map[common.Hash]*cachedNode{{}: {
			children: make(map[common.Hash]uint16),
		}},
		preimages: preimage,
	}
	return db
}
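
// A minimal construction sketch (illustrative, not a prescribed usage; it
// assumes the rawdb.NewMemoryDatabase helper from go-ethereum's core/rawdb):
//
//	diskdb := rawdb.NewMemoryDatabase()
//	triedb := NewDatabaseWithConfig(diskdb, &Config{
//		Cache:     256,  // 256 MB clean read cache
//		Preimages: true, // also record trie key preimages
//	})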

// insert inserts a simplified trie node into the memory database.
// All nodes inserted by this function will be reference tracked
// and in theory should only be used for **trie node** insertion.
func (db *Database) insert(hash common.Hash, size int, node node) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	memcacheDirtyWriteMeter.Mark(int64(size))

	// Create the cached entry for this node
	entry := &cachedNode{
		node:      node,
		size:      uint16(size),
		flushPrev: db.newest,
	}
	entry.forChilds(func(child common.Hash) {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	})
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}
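
// The flush-list maintained by insert is a doubly linked list threaded through
// the dirty cache by node hash rather than by pointer: each cachedNode records
// the hashes of its flushPrev/flushNext neighbours, with db.oldest and
// db.newest acting as head and tail. Insertion always appends at the tail, so
// Cap can evict from db.oldest in rough insertion order, approximating
// oldest-first flushing.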

// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
func (db *Database) node(hash common.Hash) node {
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))

			// The value returned from the cache is its own copy, so it's
			// safe to use mustDecodeNodeUnsafe for decoding.
			return mustDecodeNodeUnsafe(hash[:], enc)
		}
	}
	// Retrieve the node from the dirty cache if available
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(dirty.size))
		return dirty.obj(hash)
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc, err := db.diskdb.Get(hash[:])
	if err != nil || enc == nil {
		return nil
	}
	if db.cleans != nil {
		db.cleans.Set(hash[:], enc)
		memcacheCleanMissMeter.Mark(1)
		memcacheCleanWriteMeter.Mark(int64(len(enc)))
	}
	// The value returned from the database is its own copy, so it's safe
	// to use mustDecodeNodeUnsafe for decoding.
	return mustDecodeNodeUnsafe(hash[:], enc)
}

// Node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content.
func (db *Database) Node(hash common.Hash) ([]byte, error) {
	// It doesn't make sense to retrieve the metaroot
	if hash == (common.Hash{}) {
		return nil, errors.New("not found")
	}
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return enc, nil
		}
	}
	// Retrieve the node from the dirty cache if available
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(dirty.size))
		return dirty.rlp(), nil
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc := rawdb.ReadTrieNode(db.diskdb, hash)
	if len(enc) != 0 {
		if db.cleans != nil {
			db.cleans.Set(hash[:], enc)
			memcacheCleanMissMeter.Mark(1)
			memcacheCleanWriteMeter.Mark(int64(len(enc)))
		}
		return enc, nil
	}
	return nil, errors.New("not found")
}
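
// A minimal lookup sketch (illustrative): Node resolves in the order clean
// cache -> dirty cache -> disk, so a caller only sees an error when the hash
// is unknown at every layer:
//
//	blob, err := db.Node(root)
//	if err != nil {
//		// the node is neither cached nor persisted
//	}
//	_ = blob // raw RLP encoding of the trie node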

// Nodes retrieves the hashes of all the nodes cached within the memory database.
// This method is extremely expensive and should only be used to validate internal
// states in test code.
func (db *Database) Nodes() []common.Hash {
	db.lock.RLock()
	defer db.lock.RUnlock()

	var hashes = make([]common.Hash, 0, len(db.dirties))
	for hash := range db.dirties {
		if hash != (common.Hash{}) { // Special case for "root" references/nodes
			hashes = append(hashes, hash)
		}
	}
	return hashes
}

// Reference adds a new reference from a parent node to a child node.
// This function is used to add a reference between an internal trie node
// and an external node (e.g. a storage trie root); all internal trie nodes
// are referenced together by the database itself.
func (db *Database) Reference(child common.Hash, parent common.Hash) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.reference(child, parent)
}

// reference is the private locked version of Reference.
func (db *Database) reference(child common.Hash, parent common.Hash) {
	// If the node does not exist, it's a node pulled from disk, skip
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If the reference already exists, only duplicate for roots
	if db.dirties[parent].children == nil {
		db.dirties[parent].children = make(map[common.Hash]uint16)
		db.childrenSize += cachedNodeChildrenSize
	} else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) {
		return
	}
	node.parents++
	db.dirties[parent].children[child]++
	if db.dirties[parent].children[child] == 1 {
		db.childrenSize += common.HashLength + 2 // uint16 counter
	}
}
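
// A minimal usage sketch (illustrative): state processing code typically
// anchors each new state root to the metaroot (the empty hash) so the trie
// survives in memory until it is explicitly persisted or garbage collected:
//
//	db.Reference(stateRoot, common.Hash{}) // keep the trie alive
//	// ... later, either db.Commit(stateRoot, false, nil)
//	// or db.Dereference(stateRoot)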

// Dereference removes an existing reference from a root node.
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root, common.Hash{})

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}
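
// Dereferencing a root cascades: when a node's reference count drops to zero
// it is unlinked from the flush-list, its own children are dereferenced
// recursively, and the node is dropped from the dirty cache. Nodes already
// persisted to disk are unaffected; only in-memory state is freed.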

// dereference is the private locked version of Dereference.
func (db *Database) dereference(child common.Hash, parent common.Hash) {
	// Dereference the parent-child
	node := db.dirties[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
			db.childrenSize -= (common.HashLength + 2) // uint16 counter
		}
	}
	// If the child does not exist, it's a previously committed node.
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If there are no more references to the child, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.dirties[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.dirties[node.flushPrev].flushNext = common.Hash{}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		node.forChilds(func(hash common.Hash) {
			db.dereference(hash, child)
		})
		delete(db.dirties, child)
		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= cachedNodeChildrenSize
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata also needs to be
	// counted.
	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
	size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))

	// If the preimage cache got large enough, push to disk. If it's still small
	// leave for later to deduplicate writes.
	if db.preimages != nil {
		if err := db.preimages.commit(false); err != nil {
			return err
		}
	}
	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		rawdb.WriteTrieNode(batch, oldest, node.rlp())

		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
		if node.children != nil {
			size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data
	db.lock.Lock()
	defer db.lock.Unlock()

	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}
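
// A minimal usage sketch (illustrative; the thresholds are made up for the
// example): Cap is invoked from the same goroutine that mutates the trie,
// shrinking the dirty set to a target once memory grows past a ceiling:
//
//	if dirty, _ := db.Size(); dirty > 512*1024*1024 {
//		if err := db.Cap(256 * 1024 * 1024); err != nil {
//			log.Error("Failed to cap trie database", "err", err)
//		}
//	}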

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move all of the accumulated preimages into a write batch
	if db.preimages != nil {
		if err := db.preimages.commit(true); err != nil {
			return err
		}
	}
	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher, callback); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	db.lock.Lock()
	defer db.lock.Unlock()
	if err := batch.Replay(uncacher); err != nil {
		return err
	}
	batch.Reset()

	// Reset the storage counters and bump the metrics
	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}
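
// A minimal usage sketch (illustrative): persisting a state root after block
// processing, with the callback left nil since the per-node hook is optional:
//
//	if err := db.Commit(stateRoot, true, nil); err != nil {
//		log.Error("Failed to persist state trie", "root", stateRoot, "err", err)
//	}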

// commit is the private locked version of Commit.
func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner, callback func(common.Hash)) error {
	// If the node does not exist, it's a previously committed node
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	var err error
	node.forChilds(func(child common.Hash) {
		if err == nil {
			err = db.commit(child, batch, uncacher, callback)
		}
	})
	if err != nil {
		return err
	}
	// If we've reached an optimal batch size, commit and start over
	rawdb.WriteTrieNode(batch, hash, node.rlp())
	if callback != nil {
		callback(hash)
	}
	if batch.ValueSize() >= ethdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		db.lock.Lock()
		err := batch.Replay(uncacher)
		batch.Reset()
		db.lock.Unlock()
		if err != nil {
			return err
		}
	}
	return nil
}

// cleaner is a database batch replayer that takes a batch of write operations
// and cleans up the trie database from anything written to disk.
type cleaner struct {
	db *Database
}

// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
// the two-phase commit is to ensure data availability while moving from memory
// to disk.
func (c *cleaner) Put(key []byte, rlp []byte) error {
	hash := common.BytesToHash(key)

	// If the node does not exist, we're done on this path
	node, ok := c.db.dirties[hash]
	if !ok {
		return nil
	}
	// Node still exists, remove it from the flush-list
	switch hash {
	case c.db.oldest:
		c.db.oldest = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = common.Hash{}
	case c.db.newest:
		c.db.newest = node.flushPrev
		c.db.dirties[node.flushPrev].flushNext = common.Hash{}
	default:
		c.db.dirties[node.flushPrev].flushNext = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	// Remove the node from the dirty cache
	delete(c.db.dirties, hash)
	c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
	if node.children != nil {
		c.db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
	}
	// Move the flushed node into the clean cache to prevent insta-reloads
	if c.db.cleans != nil {
		c.db.cleans.Set(hash[:], rlp)
		memcacheCleanWriteMeter.Mark(int64(len(rlp)))
	}
	return nil
}

func (c *cleaner) Delete(key []byte) error {
	panic("not implemented")
}

// Update inserts the dirty nodes of the provided nodeset into the database and
// links the account trie with multiple storage tries if necessary.
func (db *Database) Update(nodes *MergedNodeSet) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	// Insert dirty nodes into the database. Within the same tree, it must be
	// ensured that children are inserted first and the parent afterwards, so
	// that children can be linked with their parent correctly.
	//
	// Note, the storage tries must be flushed before the account trie to
	// retain the invariant that children go into the dirty cache first.
	var order []common.Hash
	for owner := range nodes.sets {
		if owner == (common.Hash{}) {
			continue
		}
		order = append(order, owner)
	}
	if _, ok := nodes.sets[common.Hash{}]; ok {
		order = append(order, common.Hash{})
	}
	for _, owner := range order {
		subset := nodes.sets[owner]
		for _, path := range subset.updates.order {
			n, ok := subset.updates.nodes[path]
			if !ok {
				return fmt.Errorf("missing node %x %v", owner, path)
			}
			db.insert(n.hash, int(n.size), n.node)
		}
	}
	// Link up the account trie and storage trie if the node points
	// to an account trie leaf.
	if set, present := nodes.sets[common.Hash{}]; present {
		for _, n := range set.leaves {
			var account types.StateAccount
			if err := rlp.DecodeBytes(n.blob, &account); err != nil {
				return err
			}
			if account.Root != emptyRoot {
				db.reference(account.Root, n.parent)
			}
		}
	}
	return nil
}
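
// A minimal usage sketch (illustrative, assuming the NewMergedNodeSet and
// Merge helpers from this package's nodeset code): the per-trie NodeSets
// produced by committing are folded into one MergedNodeSet and handed to
// Update in a single call:
//
//	merged := NewMergedNodeSet()
//	if err := merged.Merge(storageNodes); err != nil { // per-storage-trie set
//		return err
//	}
//	if err := merged.Merge(accountNodes); err != nil { // owner == common.Hash{}
//		return err
//	}
//	if err := db.Update(merged); err != nil {
//		return err
//	}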

// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata also needs to be
	// counted.
	var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize)
	var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2))
	var preimageSize common.StorageSize
	if db.preimages != nil {
		preimageSize = db.preimages.size()
	}
	return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize
}

// GetReader retrieves a node reader belonging to the given state root.
func (db *Database) GetReader(root common.Hash) Reader {
	return newHashReader(db)
}

// hashReader is a reader of the hash database which implements the Reader interface.
type hashReader struct {
	db *Database
}

// newHashReader initializes the hash reader.
func newHashReader(db *Database) *hashReader {
	return &hashReader{db: db}
}

// Node retrieves the trie node with the given node hash.
// No error will be returned if the node is not found.
func (reader *hashReader) Node(_ common.Hash, _ []byte, hash common.Hash) (node, error) {
	return reader.db.node(hash), nil
}

// NodeBlob retrieves the RLP-encoded trie node blob with the given node hash.
// No error will be returned if the node is not found.
func (reader *hashReader) NodeBlob(_ common.Hash, _ []byte, hash common.Hash) ([]byte, error) {
	blob, _ := reader.db.Node(hash)
	return blob, nil
}

// saveCache saves the clean state cache to the given directory path,
// using the specified number of CPU cores.
func (db *Database) saveCache(dir string, threads int) error {
	if db.cleans == nil {
		return nil
	}
	log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads)

	start := time.Now()
	err := db.cleans.SaveToFileConcurrent(dir, threads)
	if err != nil {
		log.Error("Failed to persist clean trie cache", "error", err)
		return err
	}
	log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}

// SaveCache atomically saves fast cache data to the given dir using all
// available CPU cores.
func (db *Database) SaveCache(dir string) error {
	return db.saveCache(dir, runtime.GOMAXPROCS(0))
}

// SaveCachePeriodically atomically saves fast cache data to the given dir with
// the specified interval. All dump operations will only use a single CPU core.
func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			db.saveCache(dir, 1)
		case <-stopCh:
			return
		}
	}
}
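
// A minimal usage sketch (illustrative; the directory and interval are made
// up): the periodic saver is meant to run on its own goroutine for the node's
// lifetime, with the stop channel closed during shutdown:
//
//	stopCh := make(chan struct{})
//	go db.SaveCachePeriodically("/var/geth/triecache", 10*time.Minute, stopCh)
//	// ... on shutdown:
//	close(stopCh)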

// CommitPreimages flushes the dangling preimages to disk. It is meant to be
// called when closing the blockchain object, so that preimages are persisted
// to the database.
func (db *Database) CommitPreimages() error {
	db.lock.Lock()
	defer db.lock.Unlock()

	if db.preimages == nil {
		return nil
	}
	return db.preimages.commit(true)
}

// Scheme returns the node scheme used in the database.
func (db *Database) Scheme() NodeScheme {
	return &hashScheme{}
}