github.com/codysnider/go-ethereum@v1.10.18-0.20220420071915-14f4ae99222a/trie/committer.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package trie
    18  
    19  import (
    20  	"errors"
    21  	"fmt"
    22  	"sync"
    23  
    24  	"github.com/ethereum/go-ethereum/common"
    25  	"github.com/ethereum/go-ethereum/crypto"
    26  	"golang.org/x/crypto/sha3"
    27  )
    28  
// leafChanSize is the capacity of the leafCh buffer. It's a pretty arbitrary
// number, chosen to allow some parallelism between hashing and leaf handling
// but not incur too much memory overhead.
const leafChanSize = 200
    32  
// leaf represents a trie leaf value queued for commit via leafCh.
type leaf struct {
	size int         // size of the rlp data (estimate)
	hash common.Hash // hash of rlp data
	node node        // the node to commit
}
    39  
// committer is a type used for the trie Commit operation. A committer has some
// internal preallocated temp space, and also a callback that is invoked when
// leaves are committed. The leafs are passed through the `leafCh`,  to allow
// some level of parallelism.
// By 'some level' of parallelism, it's still the case that all leaves will be
// processed sequentially - onleaf will never be called in parallel or out of order.
type committer struct {
	sha crypto.KeccakState // reusable keccak state, allocated once per pooled committer

	onleaf LeafCallback // callback fired for each committed leaf; may be nil
	leafCh chan *leaf   // channel delivering leaves to commitLoop; nil when unused
}
    52  
// committers live in a global sync.Pool; the hasher state is allocated once
// per pooled instance, while onleaf/leafCh are cleared before each return
// (see returnCommitterToPool).
var committerPool = sync.Pool{
	New: func() interface{} {
		return &committer{
			sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
		}
	},
}
    61  
    62  // newCommitter creates a new committer or picks one from the pool.
    63  func newCommitter() *committer {
    64  	return committerPool.Get().(*committer)
    65  }
    66  
    67  func returnCommitterToPool(h *committer) {
    68  	h.onleaf = nil
    69  	h.leafCh = nil
    70  	committerPool.Put(h)
    71  }
    72  
    73  // Commit collapses a node down into a hash node and inserts it into the database
    74  func (c *committer) Commit(n node, db *Database) (hashNode, int, error) {
    75  	if db == nil {
    76  		return nil, 0, errors.New("no db provided")
    77  	}
    78  	h, committed, err := c.commit(n, db)
    79  	if err != nil {
    80  		return nil, 0, err
    81  	}
    82  	return h.(hashNode), committed, nil
    83  }
    84  
// commit collapses a node down into a hash node and inserts it into the database.
// It returns the collapsed node (a hashNode, or the node itself when it is small
// enough to be embedded in its parent), the number of nodes committed under it
// (including itself when it was hashed), and any error from the recursion.
func (c *committer) commit(n node, db *Database) (node, int, error) {
	// if this path is clean, use available cached data
	hash, dirty := n.cache()
	if hash != nil && !dirty {
		return hash, 0, nil
	}
	// Commit children, then parent, and remove the dirty flag.
	switch cn := n.(type) {
	case *shortNode:
		// Commit child
		collapsed := cn.copy()

		// If the child is fullNode, recursively commit,
		// otherwise it can only be hashNode or valueNode.
		var childCommitted int
		if _, ok := cn.Val.(*fullNode); ok {
			childV, committed, err := c.commit(cn.Val, db)
			if err != nil {
				return nil, 0, err
			}
			collapsed.Val, childCommitted = childV, committed
		}
		// The key needs to be copied, since we're delivering it to database
		collapsed.Key = hexToCompact(cn.Key)
		hashedNode := c.store(collapsed, db)
		if hn, ok := hashedNode.(hashNode); ok {
			// This node was hashed and stored: count it as committed.
			return hn, childCommitted + 1, nil
		}
		// Too small to be hashed; it stays embedded in the parent and is
		// not counted as a committed node itself.
		return collapsed, childCommitted, nil
	case *fullNode:
		hashedKids, childCommitted, err := c.commitChildren(cn, db)
		if err != nil {
			return nil, 0, err
		}
		collapsed := cn.copy()
		collapsed.Children = hashedKids

		hashedNode := c.store(collapsed, db)
		if hn, ok := hashedNode.(hashNode); ok {
			return hn, childCommitted + 1, nil
		}
		// Embedded in parent; see the shortNode case above.
		return collapsed, childCommitted, nil
	case hashNode:
		// Already hashed (and therefore already stored); nothing to do.
		return cn, 0, nil
	default:
		// nil, valuenode shouldn't be committed
		panic(fmt.Sprintf("%T: invalid node: %v", n, n))
	}
}
   135  
   136  // commitChildren commits the children of the given fullnode
   137  func (c *committer) commitChildren(n *fullNode, db *Database) ([17]node, int, error) {
   138  	var (
   139  		committed int
   140  		children  [17]node
   141  	)
   142  	for i := 0; i < 16; i++ {
   143  		child := n.Children[i]
   144  		if child == nil {
   145  			continue
   146  		}
   147  		// If it's the hashed child, save the hash value directly.
   148  		// Note: it's impossible that the child in range [0, 15]
   149  		// is a valueNode.
   150  		if hn, ok := child.(hashNode); ok {
   151  			children[i] = hn
   152  			continue
   153  		}
   154  		// Commit the child recursively and store the "hashed" value.
   155  		// Note the returned node can be some embedded nodes, so it's
   156  		// possible the type is not hashNode.
   157  		hashed, childCommitted, err := c.commit(child, db)
   158  		if err != nil {
   159  			return children, 0, err
   160  		}
   161  		children[i] = hashed
   162  		committed += childCommitted
   163  	}
   164  	// For the 17th child, it's possible the type is valuenode.
   165  	if n.Children[16] != nil {
   166  		children[16] = n.Children[16]
   167  	}
   168  	return children, committed, nil
   169  }
   170  
   171  // store hashes the node n and if we have a storage layer specified, it writes
   172  // the key/value pair to it and tracks any node->child references as well as any
   173  // node->external trie references.
   174  func (c *committer) store(n node, db *Database) node {
   175  	// Larger nodes are replaced by their hash and stored in the database.
   176  	var (
   177  		hash, _ = n.cache()
   178  		size    int
   179  	)
   180  	if hash == nil {
   181  		// This was not generated - must be a small node stored in the parent.
   182  		// In theory, we should apply the leafCall here if it's not nil(embedded
   183  		// node usually contains value). But small value(less than 32bytes) is
   184  		// not our target.
   185  		return n
   186  	} else {
   187  		// We have the hash already, estimate the RLP encoding-size of the node.
   188  		// The size is used for mem tracking, does not need to be exact
   189  		size = estimateSize(n)
   190  	}
   191  	// If we're using channel-based leaf-reporting, send to channel.
   192  	// The leaf channel will be active only when there an active leaf-callback
   193  	if c.leafCh != nil {
   194  		c.leafCh <- &leaf{
   195  			size: size,
   196  			hash: common.BytesToHash(hash),
   197  			node: n,
   198  		}
   199  	} else if db != nil {
   200  		// No leaf-callback used, but there's still a database. Do serial
   201  		// insertion
   202  		db.lock.Lock()
   203  		db.insert(common.BytesToHash(hash), size, n)
   204  		db.lock.Unlock()
   205  	}
   206  	return hash
   207  }
   208  
   209  // commitLoop does the actual insert + leaf callback for nodes.
   210  func (c *committer) commitLoop(db *Database) {
   211  	for item := range c.leafCh {
   212  		var (
   213  			hash = item.hash
   214  			size = item.size
   215  			n    = item.node
   216  		)
   217  		// We are pooling the trie nodes into an intermediate memory cache
   218  		db.lock.Lock()
   219  		db.insert(hash, size, n)
   220  		db.lock.Unlock()
   221  
   222  		if c.onleaf != nil {
   223  			switch n := n.(type) {
   224  			case *shortNode:
   225  				if child, ok := n.Val.(valueNode); ok {
   226  					c.onleaf(nil, nil, child, hash)
   227  				}
   228  			case *fullNode:
   229  				// For children in range [0, 15], it's impossible
   230  				// to contain valueNode. Only check the 17th child.
   231  				if n.Children[16] != nil {
   232  					c.onleaf(nil, nil, n.Children[16].(valueNode), hash)
   233  				}
   234  			}
   235  		}
   236  	}
   237  }
   238  
   239  func (c *committer) makeHashNode(data []byte) hashNode {
   240  	n := make(hashNode, c.sha.Size())
   241  	c.sha.Reset()
   242  	c.sha.Write(data)
   243  	c.sha.Read(n)
   244  	return n
   245  }
   246  
   247  // estimateSize estimates the size of an rlp-encoded node, without actually
   248  // rlp-encoding it (zero allocs). This method has been experimentally tried, and with a trie
   249  // with 1000 leafs, the only errors above 1% are on small shortnodes, where this
   250  // method overestimates by 2 or 3 bytes (e.g. 37 instead of 35)
   251  func estimateSize(n node) int {
   252  	switch n := n.(type) {
   253  	case *shortNode:
   254  		// A short node contains a compacted key, and a value.
   255  		return 3 + len(n.Key) + estimateSize(n.Val)
   256  	case *fullNode:
   257  		// A full node contains up to 16 hashes (some nils), and a key
   258  		s := 3
   259  		for i := 0; i < 16; i++ {
   260  			if child := n.Children[i]; child != nil {
   261  				s += estimateSize(child)
   262  			} else {
   263  				s++
   264  			}
   265  		}
   266  		return s
   267  	case valueNode:
   268  		return 1 + len(n)
   269  	case hashNode:
   270  		return 1 + len(n)
   271  	default:
   272  		panic(fmt.Sprintf("node type %T", n))
   273  	}
   274  }