gitee.com/liu-zhao234568/cntest@v1.0.0/trie/sync.go

// Copyright 2015 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"

	"gitee.com/liu-zhao234568/cntest/common"
	"gitee.com/liu-zhao234568/cntest/common/prque"
	"gitee.com/liu-zhao234568/cntest/core/rawdb"
	"gitee.com/liu-zhao234568/cntest/ethdb"
)

// ErrNotRequested is returned by the trie sync when it's requested to process a
// node it did not request.
var ErrNotRequested = errors.New("not requested")

// ErrAlreadyProcessed is returned by the trie sync when it's requested to process a
// node it already processed previously.
var ErrAlreadyProcessed = errors.New("already processed")

// maxFetchesPerDepth is the maximum number of pending trie nodes per depth. The
// role of this value is to limit the number of trie nodes that get expanded in
// memory if the node was configured with a significant number of peers.
const maxFetchesPerDepth = 16384

// request represents a scheduled or already in-flight state retrieval request.
type request struct {
	path []byte      // Merkle path leading to this node for prioritization
	hash common.Hash // Hash of the node data content to retrieve
	data []byte      // Data content of the node, cached until all subtrees complete
	code bool        // Whether this is a code entry

	parents []*request // Parent state nodes referencing this entry (notify all upon completion)
	deps    int        // Number of dependencies before allowed to commit this node

	callback LeafCallback // Callback to invoke if a leaf node is reached on this branch
}
// SyncPath is a path tuple identifying a particular trie node either in a single
// trie (account) or a layered trie (account -> storage).
//
// Content wise the tuple either has 1 element if it addresses a node in a single
// trie or 2 elements if it addresses a node in a stacked trie.
//
// To support addressing arbitrary trie nodes, the path needs to support odd
// nibble lengths. To avoid transferring expanded hex form over the network, the
// last part of the tuple (which needs to index into the middle of a trie) is
// compact encoded. In the case of a 2-tuple, the first item is always 32 bytes,
// so that is simply binary encoded.
//
// Examples:
//   - Path 0x9  -> {0x19}
//   - Path 0x99 -> {0x0099}
//   - Path 0x01234567890123456789012345678901012345678901234567890123456789019  -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x19}
//   - Path 0x012345678901234567890123456789010123456789012345678901234567890199 -> {0x0123456789012345678901234567890101234567890123456789012345678901, 0x0099}
type SyncPath [][]byte

// newSyncPath converts an expanded trie path from nibble form into a compact
// version that can be sent over the network.
func newSyncPath(path []byte) SyncPath {
	// If the hash is from the account trie, append a single item; if it is
	// from a storage trie, append a tuple. Note, the length 64 is clashing
	// between account leaf and storage root. It's fine though because having
	// a trie node at depth 64 means a hash collision was found and we're
	// long dead.
	if len(path) < 64 {
		return SyncPath{hexToCompact(path)}
	}
	return SyncPath{hexToKeybytes(path[:64]), hexToCompact(path[64:])}
}
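// newSyncPathExample is an illustrative sketch, not part of the upstream file,
// exercising the conversions documented above. It assumes the hex nibble
// helpers hexToCompact and hexToKeybytes from encoding.go in this package,
// which newSyncPath itself relies on.
func newSyncPathExample() {
	// A short (account trie) path stays a 1-tuple, compact encoded: a single
	// odd nibble 0x9 becomes the byte 0x19, matching the first example above.
	fmt.Printf("%x\n", newSyncPath([]byte{0x9})) // prints: [19]

	// A path of 64 or more nibbles is split into the 32-byte account hash
	// (binary encoded) plus the compact-encoded remainder of the storage path.
	long := append(make([]byte, 64), 0x9) // 64 zero nibbles, then one storage nibble
	fmt.Printf("%x\n", newSyncPath(long)) // prints 32 zero bytes in hex, then 19
}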
// SyncResult is a response with requested data along with its hash.
type SyncResult struct {
	Hash common.Hash // Hash of the originally unknown trie node
	Data []byte      // Data content of the retrieved node
}

// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
	nodes map[common.Hash][]byte // In-memory membatch of recently completed nodes
	codes map[common.Hash][]byte // In-memory membatch of recently completed codes
}

// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
func newSyncMemBatch() *syncMemBatch {
	return &syncMemBatch{
		nodes: make(map[common.Hash][]byte),
		codes: make(map[common.Hash][]byte),
	}
}

// hasNode reports whether the trie node with the given hash is already cached.
func (batch *syncMemBatch) hasNode(hash common.Hash) bool {
	_, ok := batch.nodes[hash]
	return ok
}

// hasCode reports whether the contract code with the given hash is already cached.
func (batch *syncMemBatch) hasCode(hash common.Hash) bool {
	_, ok := batch.codes[hash]
	return ok
}

// Sync is the main state trie synchronisation scheduler, which provides yet
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
	database ethdb.KeyValueReader     // Persistent database to check for existing entries
	membatch *syncMemBatch            // Memory buffer to avoid frequent database writes
	nodeReqs map[common.Hash]*request // Pending requests pertaining to a trie node hash
	codeReqs map[common.Hash]*request // Pending requests pertaining to a code hash
	queue    *prque.Prque             // Priority queue with the pending requests
	fetches  map[int]int              // Number of active fetches per trie node depth
	bloom    *SyncBloom               // Bloom filter for fast state existence checks
}

// NewSync creates a new trie data download scheduler.
func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom) *Sync {
	ts := &Sync{
		database: database,
		membatch: newSyncMemBatch(),
		nodeReqs: make(map[common.Hash]*request),
		codeReqs: make(map[common.Hash]*request),
		queue:    prque.New(nil),
		fetches:  make(map[int]int),
		bloom:    bloom,
	}
	ts.AddSubTrie(root, nil, common.Hash{}, callback)
	return ts
}

// AddSubTrie registers a new trie to the sync code, rooted at the designated parent.
func (s *Sync) AddSubTrie(root common.Hash, path []byte, parent common.Hash, callback LeafCallback) {
	// Short circuit if the trie is empty or already known
	if root == emptyRoot {
		return
	}
	if s.membatch.hasNode(root) {
		return
	}
	if s.bloom == nil || s.bloom.Contains(root[:]) {
		// Bloom filter says this might be a duplicate, double check.
		// If database says yes, then at least the trie node is present
		// and we hold the assumption that it's NOT legacy contract code.
		blob := rawdb.ReadTrieNode(s.database, root)
		if len(blob) > 0 {
			return
		}
		// False positive, bump fault meter
		bloomFaultMeter.Mark(1)
	}
	// Assemble the new sub-trie sync request
	req := &request{
		path:     path,
		hash:     root,
		callback: callback,
	}
	// If this sub-trie has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.nodeReqs[parent]
		if ancestor == nil {
			panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// AddCodeEntry schedules the direct retrieval of a contract code that should not
// be interpreted as a trie node, but rather accepted and stored into the database
// as is.
func (s *Sync) AddCodeEntry(hash common.Hash, path []byte, parent common.Hash) {
	// Short circuit if the entry is empty or already known
	if hash == emptyState {
		return
	}
	if s.membatch.hasCode(hash) {
		return
	}
	if s.bloom == nil || s.bloom.Contains(hash[:]) {
		// Bloom filter says this might be a duplicate, double check.
		// If database says yes, the blob is present for sure.
		// Note we only check existence with the new code scheme; fast
		// sync is expected to run on a fresh new node. Even if the code
		// exists in the legacy format, fetch and store it with the new
		// scheme anyway.
		if blob := rawdb.ReadCodeWithPrefix(s.database, hash); len(blob) > 0 {
			return
		}
		// False positive, bump fault meter
		bloomFaultMeter.Mark(1)
	}
	// Assemble the new code entry sync request
	req := &request{
		path: path,
		hash: hash,
		code: true,
	}
	// If this code entry has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.nodeReqs[parent] // the parent of a code request can ONLY be a node request
		if ancestor == nil {
			panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}
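// accountLeafCallbackSketch is a minimal, illustrative sketch, not part of the
// upstream file, of how callers typically wire AddSubTrie and AddCodeEntry
// together: the leaf callback handed to NewSync decodes each account leaf and
// schedules that account's storage trie and contract code for retrieval. The
// decodeAccount parameter is a hypothetical helper standing in for the RLP
// account decoding done by the state sync layer, and the closure assumes the
// four-argument LeafCallback form invoked from children below.
func accountLeafCallbackSketch(s *Sync, decodeAccount func(leaf []byte) (storageRoot common.Hash, codeHash common.Hash, err error)) LeafCallback {
	return func(paths [][]byte, hexpath []byte, leaf []byte, parent common.Hash) error {
		root, codeHash, err := decodeAccount(leaf) // hypothetical account decoding
		if err != nil {
			return err
		}
		// Schedule the account's storage trie, parented to the account node,
		// and its contract code as a raw entry. A real callback would pass a
		// storage leaf callback (or itself) instead of nil here.
		s.AddSubTrie(root, hexpath, parent, nil)
		s.AddCodeEntry(codeHash, hexpath, parent)
		return nil
	}
}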
// Missing retrieves the known missing nodes from the trie for retrieval. To aid
// both eth/6x style fast sync and snap/1x style state sync, the paths of trie
// nodes are returned too, as well as a separate hash list for codes.
func (s *Sync) Missing(max int) (nodes []common.Hash, paths []SyncPath, codes []common.Hash) {
	var (
		nodeHashes []common.Hash
		nodePaths  []SyncPath
		codeHashes []common.Hash
	)
	for !s.queue.Empty() && (max == 0 || len(nodeHashes)+len(codeHashes) < max) {
		// Retrieve the next item in line
		item, prio := s.queue.Peek()

		// If we have too many already-pending tasks for this depth, throttle
		depth := int(prio >> 56)
		if s.fetches[depth] > maxFetchesPerDepth {
			break
		}
		// Item is allowed to be scheduled, add it to the task list
		s.queue.Pop()
		s.fetches[depth]++

		hash := item.(common.Hash)
		if req, ok := s.nodeReqs[hash]; ok {
			nodeHashes = append(nodeHashes, hash)
			nodePaths = append(nodePaths, newSyncPath(req.path))
		} else {
			codeHashes = append(codeHashes, hash)
		}
	}
	return nodeHashes, nodePaths, codeHashes
}

// Process injects the received data for a requested item. Note it can happen
// that a single response commits two pending requests (e.g. there are two
// requests, one for code and one for a node, but the hash is the same). In
// this case the second response for the same hash will be treated as a
// "non-requested" or "already-processed" item, but there is no downside.
func (s *Sync) Process(result SyncResult) error {
	// If the item was not requested either for code or node, bail out
	if s.nodeReqs[result.Hash] == nil && s.codeReqs[result.Hash] == nil {
		return ErrNotRequested
	}
	// If there is a pending code request for this data, commit directly
	var filled bool
	if req := s.codeReqs[result.Hash]; req != nil && req.data == nil {
		filled = true
		req.data = result.Data
		s.commit(req)
	}
	// If there is a pending node request for this data, fill it
	if req := s.nodeReqs[result.Hash]; req != nil && req.data == nil {
		filled = true
		// Decode the node data content and update the request
		node, err := decodeNode(result.Hash[:], result.Data)
		if err != nil {
			return err
		}
		req.data = result.Data

		// Create and schedule a request for all the child nodes
		requests, err := s.children(req, node)
		if err != nil {
			return err
		}
		if len(requests) == 0 && req.deps == 0 {
			s.commit(req)
		} else {
			req.deps += len(requests)
			for _, child := range requests {
				s.schedule(child)
			}
		}
	}
	if !filled {
		return ErrAlreadyProcessed
	}
	return nil
}

// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning any error that occurred.
func (s *Sync) Commit(dbw ethdb.Batch) error {
	// Dump the membatch into the write batch dbw
	for key, value := range s.membatch.nodes {
		rawdb.WriteTrieNode(dbw, key, value)
		if s.bloom != nil {
			s.bloom.Add(key[:])
		}
	}
	for key, value := range s.membatch.codes {
		rawdb.WriteCode(dbw, key, value)
		if s.bloom != nil {
			s.bloom.Add(key[:])
		}
	}
	// Drop the membatch data and return
	s.membatch = newSyncMemBatch()
	return nil
}

// Pending returns the number of state entries currently pending for download.
func (s *Sync) Pending() int {
	return len(s.nodeReqs) + len(s.codeReqs)
}
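// runSyncSketch is a minimal sketch, not the canonical downloader, of how a
// caller is expected to drive the scheduler: ask for missing hashes via
// Missing, retrieve the data over the network, feed it back through Process
// and flush the membatch with Commit until nothing is pending. The fetch
// parameter is a hypothetical network retrieval helper, not part of this
// package.
func runSyncSketch(s *Sync, db ethdb.Database, fetch func(common.Hash) ([]byte, error)) error {
	for s.Pending() > 0 {
		// Ask the scheduler for the next batch of node and code hashes
		nodes, _, codes := s.Missing(128)
		for _, hash := range append(nodes, codes...) {
			data, err := fetch(hash) // hypothetical network round trip
			if err != nil {
				return err
			}
			if err := s.Process(SyncResult{Hash: hash, Data: data}); err != nil {
				return err
			}
		}
		// Flush whatever completed into the persistent database
		batch := db.NewBatch()
		if err := s.Commit(batch); err != nil {
			return err
		}
		if err := batch.Write(); err != nil {
			return err
		}
	}
	return nil
}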
// schedule inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
func (s *Sync) schedule(req *request) {
	var reqset = s.nodeReqs
	if req.code {
		reqset = s.codeReqs
	}
	// If we're already requesting this node, add a new reference and stop
	if old, ok := reqset[req.hash]; ok {
		old.parents = append(old.parents, req.parents...)
		return
	}
	reqset[req.hash] = req

	// Schedule the request for future retrieval. This queue is shared by
	// both node requests and code requests. It can happen that a trie node
	// and a code blob have the same hash. In that case two elements with
	// the same hash and the same or different depths will be pushed, but
	// that's OK: in the worst case the second response is treated as a
	// duplicate.
	prio := int64(len(req.path)) << 56 // depth >= 128 will never happen, storage leaves will be included in their parents
	for i := 0; i < 14 && i < len(req.path); i++ {
		prio |= int64(15-req.path[i]) << (52 - i*4) // 15-nibble => lexicographic order
	}
	s.queue.Push(req.hash, prio)
}
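// prioritySketch is an illustration only, not used by the scheduler, of the
// priority layout pushed in schedule above and decoded in Missing: the top
// byte carries the path depth, and up to 14 leading nibbles are packed below
// it in inverted form so that shallower, lexicographically smaller paths pop
// first from the priority queue.
func prioritySketch() {
	path := []byte{0x1, 0x2, 0x3} // a path three nibbles deep
	prio := int64(len(path)) << 56
	for i := 0; i < 14 && i < len(path); i++ {
		prio |= int64(15-path[i]) << (52 - i*4)
	}
	fmt.Println(int(prio >> 56)) // Missing recovers the depth like this: prints 3
}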
// children retrieves all the missing children of a state trie entry for future
// retrieval scheduling.
func (s *Sync) children(req *request, object node) ([]*request, error) {
	// Gather all the children of the node, irrespective of whether they are
	// known or not
	type child struct {
		path []byte
		node node
	}
	var children []child

	switch node := (object).(type) {
	case *shortNode:
		key := node.Key
		if hasTerm(key) {
			key = key[:len(key)-1]
		}
		children = []child{{
			node: node.Val,
			path: append(append([]byte(nil), req.path...), key...),
		}}
	case *fullNode:
		for i := 0; i < 17; i++ {
			if node.Children[i] != nil {
				children = append(children, child{
					node: node.Children[i],
					path: append(append([]byte(nil), req.path...), byte(i)),
				})
			}
		}
	default:
		panic(fmt.Sprintf("unknown node: %+v", node))
	}
	// Iterate over the children, and request all unknown ones
	requests := make([]*request, 0, len(children))
	for _, child := range children {
		// Notify any external watcher of a new key/value node
		if req.callback != nil {
			if node, ok := (child.node).(valueNode); ok {
				var paths [][]byte
				if len(child.path) == 2*common.HashLength {
					paths = append(paths, hexToKeybytes(child.path))
				} else if len(child.path) == 4*common.HashLength {
					paths = append(paths, hexToKeybytes(child.path[:2*common.HashLength]))
					paths = append(paths, hexToKeybytes(child.path[2*common.HashLength:]))
				}
				if err := req.callback(paths, child.path, node, req.hash); err != nil {
					return nil, err
				}
			}
		}
		// If the child references another node, resolve or schedule
		if node, ok := (child.node).(hashNode); ok {
			// Try to resolve the node from the local database
			hash := common.BytesToHash(node)
			if s.membatch.hasNode(hash) {
				continue
			}
			if s.bloom == nil || s.bloom.Contains(node) {
				// Bloom filter says this might be a duplicate, double check.
				// If database says yes, then at least the trie node is present
				// and we hold the assumption that it's NOT legacy contract code.
				if blob := rawdb.ReadTrieNode(s.database, hash); len(blob) > 0 {
					continue
				}
				// False positive, bump fault meter
				bloomFaultMeter.Mark(1)
			}
			// Locally unknown node, schedule for retrieval
			requests = append(requests, &request{
				path:     child.path,
				hash:     hash,
				parents:  []*request{req},
				callback: req.callback,
			})
		}
	}
	return requests, nil
}

// commit finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
func (s *Sync) commit(req *request) (err error) {
	// Write the node content to the membatch
	if req.code {
		s.membatch.codes[req.hash] = req.data
		delete(s.codeReqs, req.hash)
		s.fetches[len(req.path)]--
	} else {
		s.membatch.nodes[req.hash] = req.data
		delete(s.nodeReqs, req.hash)
		s.fetches[len(req.path)]--
	}
	// Check all parents for completion
	for _, parent := range req.parents {
		parent.deps--
		if parent.deps == 0 {
			if err := s.commit(parent); err != nil {
				return err
			}
		}
	}
	return nil
}