github.com/aigarnetwork/aigar@v0.0.0-20191115204914-d59a6eb70f8e/trie/sync.go

// Copyright 2018 The go-ethereum Authors
// Copyright 2019 The go-aigar Authors
// This file is part of the go-aigar library.
//
// The go-aigar library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-aigar library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-aigar library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"

	"github.com/AigarNetwork/aigar/common"
	"github.com/AigarNetwork/aigar/common/prque"
	"github.com/AigarNetwork/aigar/ethdb"
)

// ErrNotRequested is returned by the trie sync when it's requested to process a
// node it did not request.
var ErrNotRequested = errors.New("not requested")

// ErrAlreadyProcessed is returned by the trie sync when it's requested to process a
// node it already processed previously.
var ErrAlreadyProcessed = errors.New("already processed")

// request represents a scheduled or already in-flight state retrieval request.
type request struct {
	hash common.Hash // Hash of the node data content to retrieve
	data []byte      // Data content of the node, cached until all subtrees complete
	raw  bool        // Whether this is a raw entry (code) or a trie node

	parents []*request // Parent state nodes referencing this entry (notify all upon completion)
	depth   int        // Depth level within the trie the node is located to prioritise DFS
	deps    int        // Number of dependencies before allowed to commit this node

	callback LeafCallback // Callback to invoke if a leaf node is reached on this branch
}

// SyncResult is a response with requested data along with its request hash.
type SyncResult struct {
	Hash common.Hash // Hash of the originally unknown trie node
	Data []byte      // Data content of the retrieved node
}

// syncMemBatch is an in-memory buffer of successfully downloaded but not yet
// persisted data items.
type syncMemBatch struct {
	batch map[common.Hash][]byte // In-memory membatch of recently completed items
}

// newSyncMemBatch allocates a new memory-buffer for not-yet persisted trie nodes.
func newSyncMemBatch() *syncMemBatch {
	return &syncMemBatch{
		batch: make(map[common.Hash][]byte),
	}
}
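// packResultsSketch is an illustrative sketch, not part of the original file:
// it shows how a caller would typically package fetched node blobs into
// SyncResult entries before handing them to Process. Each blob is keyed by the
// hash it was requested under, which for trie nodes is the keccak256 hash of
// the blob itself.
func packResultsSketch(blobs map[common.Hash][]byte) []SyncResult {
	results := make([]SyncResult, 0, len(blobs))
	for hash, data := range blobs {
		results = append(results, SyncResult{Hash: hash, Data: data})
	}
	return results
}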
// Sync is the main state trie synchronisation scheduler, which provides yet
// unknown trie hashes to retrieve, accepts node data associated with said hashes
// and reconstructs the trie step by step until all is done.
type Sync struct {
	database ethdb.KeyValueReader     // Persistent database to check for existing entries
	membatch *syncMemBatch            // Memory buffer to avoid frequent database writes
	requests map[common.Hash]*request // Pending requests pertaining to a key hash
	queue    *prque.Prque             // Priority queue with the pending requests
	bloom    *SyncBloom               // Bloom filter for fast node existence checks
}

// NewSync creates a new trie data download scheduler.
func NewSync(root common.Hash, database ethdb.KeyValueReader, callback LeafCallback, bloom *SyncBloom) *Sync {
	ts := &Sync{
		database: database,
		membatch: newSyncMemBatch(),
		requests: make(map[common.Hash]*request),
		queue:    prque.New(nil),
		bloom:    bloom,
	}
	ts.AddSubTrie(root, 0, common.Hash{}, callback)
	return ts
}

// AddSubTrie registers a new trie to the sync scheduler, rooted at root and
// linked to the designated parent request.
func (s *Sync) AddSubTrie(root common.Hash, depth int, parent common.Hash, callback LeafCallback) {
	// Short circuit if the trie is empty or already known
	if root == emptyRoot {
		return
	}
	if _, ok := s.membatch.batch[root]; ok {
		return
	}
	if s.bloom.Contains(root[:]) {
		// Bloom filter says this might be a duplicate, double check
		blob, _ := s.database.Get(root[:])
		if local, err := decodeNode(root[:], blob); local != nil && err == nil {
			return
		}
		// False positive, bump fault meter
		bloomFaultMeter.Mark(1)
	}
	// Assemble the new sub-trie sync request
	req := &request{
		hash:     root,
		depth:    depth,
		callback: callback,
	}
	// If this sub-trie has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.requests[parent]
		if ancestor == nil {
			panic(fmt.Sprintf("sub-trie ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}

// AddRawEntry schedules the direct retrieval of a state entry that should not be
// interpreted as a trie node, but rather accepted and stored into the database
// as is. This method's goal is to support misc state metadata retrievals (e.g.
// contract code).
func (s *Sync) AddRawEntry(hash common.Hash, depth int, parent common.Hash) {
	// Short circuit if the entry is empty or already known
	if hash == emptyState {
		return
	}
	if _, ok := s.membatch.batch[hash]; ok {
		return
	}
	if s.bloom.Contains(hash[:]) {
		// Bloom filter says this might be a duplicate, double check
		if ok, _ := s.database.Has(hash[:]); ok {
			return
		}
		// False positive, bump fault meter
		bloomFaultMeter.Mark(1)
	}
	// Assemble the new raw-entry sync request
	req := &request{
		hash:  hash,
		raw:   true,
		depth: depth,
	}
	// If this raw entry has a designated parent, link them together
	if parent != (common.Hash{}) {
		ancestor := s.requests[parent]
		if ancestor == nil {
			panic(fmt.Sprintf("raw-entry ancestor not found: %x", parent))
		}
		ancestor.deps++
		req.parents = append(req.parents, ancestor)
	}
	s.schedule(req)
}
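// accountSeedSketch is an illustrative sketch, not part of the original file:
// it shows how a caller could wire a LeafCallback so that every account leaf
// reached during sync schedules further work, much like the state downloader
// does with AddSubTrie for storage tries and AddRawEntry for contract code.
// The decoding of the leaf into a storage root and code hash is elided here.
func accountSeedSketch(root common.Hash, db ethdb.KeyValueReader, bloom *SyncBloom) *Sync {
	var sched *Sync
	callback := func(leaf []byte, parent common.Hash) error {
		// A real callback would decode the account in leaf and then call:
		//   sched.AddSubTrie(storageRoot, 64, parent, nil)
		//   sched.AddRawEntry(codeHash, 64, parent)
		return nil
	}
	sched = NewSync(root, db, callback, bloom)
	return sched
}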
// Missing retrieves the known missing nodes from the trie for retrieval. Up to
// max hashes are returned; max == 0 means no limit.
func (s *Sync) Missing(max int) []common.Hash {
	var requests []common.Hash
	for !s.queue.Empty() && (max == 0 || len(requests) < max) {
		requests = append(requests, s.queue.PopItem().(common.Hash))
	}
	return requests
}

// Process injects a batch of retrieved trie node data, returning whether
// anything was committed to the memory batch, along with the index of the
// entry whose processing failed, if any.
func (s *Sync) Process(results []SyncResult) (bool, int, error) {
	committed := false

	for i, item := range results {
		// If the item was not requested, bail out
		request := s.requests[item.Hash]
		if request == nil {
			return committed, i, ErrNotRequested
		}
		if request.data != nil {
			return committed, i, ErrAlreadyProcessed
		}
		// If the item is a raw entry request, commit directly
		if request.raw {
			request.data = item.Data
			s.commit(request)
			committed = true
			continue
		}
		// Decode the node data content and update the request
		node, err := decodeNode(item.Hash[:], item.Data)
		if err != nil {
			return committed, i, err
		}
		request.data = item.Data

		// Create and schedule a request for all the children nodes
		requests, err := s.children(request, node)
		if err != nil {
			return committed, i, err
		}
		if len(requests) == 0 && request.deps == 0 {
			s.commit(request)
			committed = true
			continue
		}
		request.deps += len(requests)
		for _, child := range requests {
			s.schedule(child)
		}
	}
	return committed, 0, nil
}

// Commit flushes the data stored in the internal membatch out to persistent
// storage, returning any error that occurred.
func (s *Sync) Commit(dbw ethdb.Batch) error {
	// Dump the membatch into the database write batch dbw
	for key, value := range s.membatch.batch {
		if err := dbw.Put(key[:], value); err != nil {
			return err
		}
		s.bloom.Add(key[:])
	}
	// Drop the membatch data and return
	s.membatch = newSyncMemBatch()
	return nil
}

// Pending returns the number of state entries currently pending for download.
func (s *Sync) Pending() int {
	return len(s.requests)
}

// schedule inserts a new state retrieval request into the fetch queue. If there
// is already a pending request for this node, the new request will be discarded
// and only a parent reference added to the old one.
func (s *Sync) schedule(req *request) {
	// If we're already requesting this node, add a new reference and stop
	if old, ok := s.requests[req.hash]; ok {
		old.parents = append(old.parents, req.parents...)
		return
	}
	// Schedule the request for future retrieval
	s.queue.Push(req.hash, int64(req.depth))
	s.requests[req.hash] = req
}
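// syncLoopSketch is an illustrative sketch, not part of the original file: it
// shows the intended drive loop around the scheduler. The fetch parameter is a
// hypothetical stand-in for the caller's network layer and is assumed to return
// one SyncResult per requested hash; the batch size of 256 is arbitrary.
func syncLoopSketch(sched *Sync, db ethdb.Database, fetch func([]common.Hash) ([]SyncResult, error)) error {
	for sched.Pending() > 0 {
		// Ask the scheduler which hashes are still missing and fetch them
		results, err := fetch(sched.Missing(256))
		if err != nil {
			return err
		}
		// Feed the retrieved blobs back; Process expands trie nodes into
		// further child requests and commits completed sub-tries in memory
		if _, index, err := sched.Process(results); err != nil {
			return fmt.Errorf("failed to process result #%d: %v", index, err)
		}
		// Flush whatever reached the membatch out to persistent storage
		batch := db.NewBatch()
		if err := sched.Commit(batch); err != nil {
			return err
		}
		if err := batch.Write(); err != nil {
			return err
		}
	}
	return nil
}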
// children retrieves all the missing children of a state trie entry for future
// retrieval scheduling.
func (s *Sync) children(req *request, object node) ([]*request, error) {
	// Gather all the children of the node, irrespective of whether they are known or not
	type child struct {
		node  node
		depth int
	}
	var children []child

	switch node := (object).(type) {
	case *shortNode:
		children = []child{{
			node:  node.Val,
			depth: req.depth + len(node.Key),
		}}
	case *fullNode:
		for i := 0; i < 17; i++ {
			if node.Children[i] != nil {
				children = append(children, child{
					node:  node.Children[i],
					depth: req.depth + 1,
				})
			}
		}
	default:
		panic(fmt.Sprintf("unknown node: %+v", node))
	}
	// Iterate over the children, and request all unknown ones
	requests := make([]*request, 0, len(children))
	for _, child := range children {
		// Notify any external watcher of a new key/value node
		if req.callback != nil {
			if node, ok := (child.node).(valueNode); ok {
				if err := req.callback(node, req.hash); err != nil {
					return nil, err
				}
			}
		}
		// If the child references another node, resolve or schedule
		if node, ok := (child.node).(hashNode); ok {
			// Try to resolve the node from the local database
			hash := common.BytesToHash(node)
			if _, ok := s.membatch.batch[hash]; ok {
				continue
			}
			if s.bloom.Contains(node) {
				// Bloom filter says this might be a duplicate, double check
				if ok, _ := s.database.Has(node); ok {
					continue
				}
				// False positive, bump fault meter
				bloomFaultMeter.Mark(1)
			}
			// Locally unknown node, schedule for retrieval
			requests = append(requests, &request{
				hash:     hash,
				parents:  []*request{req},
				depth:    child.depth,
				callback: req.callback,
			})
		}
	}
	return requests, nil
}

// commit finalizes a retrieval request and stores it into the membatch. If any
// of the referencing parent requests complete due to this commit, they are also
// committed themselves.
func (s *Sync) commit(req *request) (err error) {
	// Write the node content to the membatch
	s.membatch.batch[req.hash] = req.data

	delete(s.requests, req.hash)

	// Check all parents for completion
	for _, parent := range req.parents {
		parent.deps--
		if parent.deps == 0 {
			if err := s.commit(parent); err != nil {
				return err
			}
		}
	}
	return nil
}
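// commitCascadeSketch is an illustrative sketch, not part of the original file:
// it demonstrates the dependency bookkeeping that commit relies on. A parent
// request only reaches the membatch once every child it spawned has committed
// and decremented its deps counter. The hashes and payloads below are made up.
func commitCascadeSketch(s *Sync) error {
	parent := &request{hash: common.HexToHash("0x01"), data: []byte("parent blob")}
	child := &request{hash: common.HexToHash("0x02"), data: []byte("child blob"), parents: []*request{parent}}
	parent.deps = 1

	// Committing the child drops parent.deps to zero, which recursively
	// commits the parent as well, leaving both blobs in s.membatch.
	return s.commit(child)
}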