github.com/phillinzzz/newBsc@v1.1.6/trie/committer.go (about) 1 // Copyright 2019 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package trie 18 19 import ( 20 "errors" 21 "fmt" 22 "sync" 23 24 "github.com/phillinzzz/newBsc/common" 25 "github.com/phillinzzz/newBsc/crypto" 26 "golang.org/x/crypto/sha3" 27 ) 28 29 // leafChanSize is the size of the leafCh. It's a pretty arbitrary number, to allow 30 // some parallelism but not incur too much memory overhead. 31 const leafChanSize = 200 32 33 // leaf represents a trie leaf value 34 type leaf struct { 35 size int // size of the rlp data (estimate) 36 hash common.Hash // hash of rlp data 37 node node // the node to commit 38 } 39 40 // committer is a type used for the trie Commit operation. A committer has some 41 // internal preallocated temp space, and also a callback that is invoked when 42 // leaves are committed. The leafs are passed through the `leafCh`, to allow 43 // some level of parallelism. 44 // By 'some level' of parallelism, it's still the case that all leaves will be 45 // processed sequentially - onleaf will never be called in parallel or out of order. 
type committer struct {
	sha crypto.KeccakState // cached keccak-256 state, reused across commits via the pool

	onleaf LeafCallback // callback invoked for every committed leaf; may be nil
	leafCh chan *leaf   // channel feeding committed leaves to commitLoop; nil means serial db insertion
}

// committers live in a global sync.Pool
var committerPool = sync.Pool{
	New: func() interface{} {
		return &committer{
			sha: sha3.NewLegacyKeccak256().(crypto.KeccakState),
		}
	},
}

// newCommitter creates a new committer or picks one from the pool.
func newCommitter() *committer {
	return committerPool.Get().(*committer)
}

// returnCommitterToPool clears the committer's per-run state (callback and
// channel) and puts it back into the global pool for reuse.
func returnCommitterToPool(h *committer) {
	h.onleaf = nil
	h.leafCh = nil
	committerPool.Put(h)
}

// Commit collapses a node down into a hash node and inserts it into the
// database. It is the exported entry point; the db must be non-nil.
func (c *committer) Commit(n node, db *Database) (hashNode, error) {
	if db == nil {
		return nil, errors.New("no db provided")
	}
	h, err := c.commit(n, db)
	if err != nil {
		return nil, err
	}
	return h.(hashNode), nil
}

// commit collapses a node down into a hash node and inserts it into the database
func (c *committer) commit(n node, db *Database) (node, error) {
	// If this path is clean, use available cached data.
	hash, dirty := n.cache()
	if hash != nil && !dirty {
		return hash, nil
	}
	// Commit children, then parent, and remove the dirty flag.
	switch cn := n.(type) {
	case *shortNode:
		// Commit child. The dirty flag is cleared before copying, so the
		// collapsed copy made below is marked clean as well.
		cn.flags.dirty = false
		collapsed := cn.copy()

		// If the child is fullnode, recursively commit.
		// Otherwise it can only be hashNode or valueNode.
		if _, ok := cn.Val.(*fullNode); ok {
			childV, err := c.commit(cn.Val, db)
			if err != nil {
				return nil, err
			}
			collapsed.Val = childV
		}
		// The key needs to be copied, since we're delivering it to database
		collapsed.Key = hexToCompact(cn.Key)
		hashedNode := c.store(collapsed, db)
		if hn, ok := hashedNode.(hashNode); ok {
			return hn, nil
		}
		// Not replaced by a hash: the node is small enough to be embedded
		// in its parent, so return the collapsed node itself.
		return collapsed, nil
	case *fullNode:
		cn.flags.dirty = false
		hashedKids, err := c.commitChildren(cn, db)
		if err != nil {
			return nil, err
		}
		collapsed := cn.copy()
		collapsed.Children = hashedKids

		hashedNode := c.store(collapsed, db)
		if hn, ok := hashedNode.(hashNode); ok {
			return hn, nil
		}
		// Small (embedded) node: return it as-is instead of a hash.
		return collapsed, nil
	case hashNode:
		// Already hashed; nothing to commit.
		return cn, nil
	default:
		// nil and valuenode shouldn't be committed
		panic(fmt.Sprintf("%T: invalid node: %v", n, n))
	}
}

// commitChildren commits the children of the given fullnode
func (c *committer) commitChildren(n *fullNode, db *Database) ([17]node, error) {
	var children [17]node
	for i := 0; i < 16; i++ {
		child := n.Children[i]
		if child == nil {
			continue
		}
		// If it's the hashed child, save the hash value directly.
		// Note: it's impossible that the child in range [0, 15]
		// is a valuenode.
		if hn, ok := child.(hashNode); ok {
			children[i] = hn
			continue
		}
		// Commit the child recursively and store the "hashed" value.
		// Note the returned node can be some embedded nodes, so it's
		// possible the type is not hashnode.
		hashed, err := c.commit(child, db)
		if err != nil {
			return children, err
		}
		children[i] = hashed
	}
	// For the 17th child, it's possible the type is valuenode.
	if n.Children[16] != nil {
		children[16] = n.Children[16]
	}
	return children, nil
}

// store hashes the node n and if we have a storage layer specified, it writes
// the key/value pair to it and tracks any node->child references as well as any
// node->external trie references.
func (c *committer) store(n node, db *Database) node {
	// Larger nodes are replaced by their hash and stored in the database.
	var (
		hash, _ = n.cache()
		size    int
	)
	if hash == nil {
		// This was not generated - must be a small node stored in the parent.
		// In theory we should apply the leafCall here if it's not nil(embedded
		// node usually contains value). But small value(less than 32bytes) is
		// not our target.
		return n
	} else {
		// We have the hash already, estimate the RLP encoding-size of the node.
		// The size is used for mem tracking, does not need to be exact
		size = estimateSize(n)
	}
	// If we're using channel-based leaf-reporting, send to channel.
	// The leaf channel will be active only when there is an active leaf-callback.
	if c.leafCh != nil {
		c.leafCh <- &leaf{
			size: size,
			hash: common.BytesToHash(hash),
			node: n,
		}
	} else if db != nil {
		// No leaf-callback used, but there's still a database. Do serial
		// insertion
		db.lock.Lock()
		db.insert(common.BytesToHash(hash), size, n)
		db.lock.Unlock()
	}
	return hash
}

// commitLoop does the actual insert + leaf callback for nodes.
207 func (c *committer) commitLoop(db *Database) { 208 for item := range c.leafCh { 209 var ( 210 hash = item.hash 211 size = item.size 212 n = item.node 213 ) 214 // We are pooling the trie nodes into an intermediate memory cache 215 db.lock.Lock() 216 db.insert(hash, size, n) 217 db.lock.Unlock() 218 219 if c.onleaf != nil { 220 switch n := n.(type) { 221 case *shortNode: 222 if child, ok := n.Val.(valueNode); ok { 223 c.onleaf(nil, nil, child, hash) 224 } 225 case *fullNode: 226 // For children in range [0, 15], it's impossible 227 // to contain valuenode. Only check the 17th child. 228 if n.Children[16] != nil { 229 c.onleaf(nil, nil, n.Children[16].(valueNode), hash) 230 } 231 } 232 } 233 } 234 } 235 236 func (c *committer) makeHashNode(data []byte) hashNode { 237 n := make(hashNode, c.sha.Size()) 238 c.sha.Reset() 239 c.sha.Write(data) 240 c.sha.Read(n) 241 return n 242 } 243 244 // estimateSize estimates the size of an rlp-encoded node, without actually 245 // rlp-encoding it (zero allocs). This method has been experimentally tried, and with a trie 246 // with 1000 leafs, the only errors above 1% are on small shortnodes, where this 247 // method overestimates by 2 or 3 bytes (e.g. 37 instead of 35) 248 func estimateSize(n node) int { 249 switch n := n.(type) { 250 case *shortNode: 251 // A short node contains a compacted key, and a value. 252 return 3 + len(n.Key) + estimateSize(n.Val) 253 case *fullNode: 254 // A full node contains up to 16 hashes (some nils), and a key 255 s := 3 256 for i := 0; i < 16; i++ { 257 if child := n.Children[i]; child != nil { 258 s += estimateSize(child) 259 } else { 260 s++ 261 } 262 } 263 return s 264 case valueNode: 265 return 1 + len(n) 266 case hashNode: 267 return 1 + len(n) 268 default: 269 panic(fmt.Sprintf("node type %T", n)) 270 } 271 }