// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
)

var (
	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// secureKeyPrefix is the database key prefix used to store trie node preimages.
var secureKeyPrefix = []byte("secure-key-")

// secureKeyLength is the length of the above prefix + 32byte hash.
const secureKeyLength = 11 + 32

// DatabaseReader wraps the Get and Has method of a backing store for the trie.
type DatabaseReader interface {
	// Get retrieves the value associated with key from the database.
	Get(key []byte) (value []byte, err error)

	// Has retrieves whether a key is present in the database.
	Has(key []byte) (bool, error)
}

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
type Database struct {
	diskdb ethdb.Database // Persistent storage for matured trie nodes

	nodes  map[common.Hash]*cachedNode // Data and references relationships of a node
	oldest common.Hash                 // Oldest tracked node, flush-list head
	newest common.Hash                 // Newest tracked node, flush-list tail

	preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
	seckeybuf [secureKeyLength]byte  // Ephemeral buffer for calculating preimage keys

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	nodesSize     common.StorageSize // Storage size of the nodes cache (exc. flushlist)
	preimagesSize common.StorageSize // Storage size of the preimages cache

	lock sync.RWMutex // Guards all of the fields above
}

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time store them
// in the same cache fields).
type rawNode []byte

// The node interface stubs below panic on purpose: raw nodes only ever live
// inside the write-layer cache, never inside a live trie.
func (n rawNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

func (n rawFullNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawFullNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

// EncodeRLP writes the RLP encoding of the full node to w, substituting the
// canonical nil value node for absent children so the encoding is identical
// to that of the expanded fullNode it was collapsed from.
func (n rawFullNode) EncodeRLP(w io.Writer) error {
	var nodes [17]node

	for i, child := range n {
		if child != nil {
			nodes[i] = child
		} else {
			nodes[i] = nilValueNode
		}
	}
	return rlp.Encode(w, nodes)
}
// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

// As with rawNode, these stubs panic because collapsed short nodes must never
// appear inside a live trie.
func (n rawShortNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawShortNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached node in the
// memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint16                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// rlp returns the raw rlp encoded blob of the cached node, either directly from
// the cache, or by regenerating it from the collapsed node.
func (n *cachedNode) rlp() []byte {
	if node, ok := n.node.(rawNode); ok {
		return node
	}
	blob, err := rlp.EncodeToBytes(n.node)
	if err != nil {
		// Encoding a collapsed node must not fail; treat failure as a bug.
		panic(err)
	}
	return blob
}

// obj returns the decoded and expanded trie node, either directly from the cache,
// or by regenerating it from the rlp encoded blob.
func (n *cachedNode) obj(hash common.Hash, cachegen uint16) node {
	if node, ok := n.node.(rawNode); ok {
		return mustDecodeNode(hash[:], node, cachegen)
	}
	return expandNode(hash[:], n.node, cachegen)
}

// childs returns all the tracked children of this node, both the implicit ones
// from inside the node as well as the explicit ones from outside the node.
func (n *cachedNode) childs() []common.Hash {
	children := make([]common.Hash, 0, 16)
	for child := range n.children {
		children = append(children, child)
	}
	// Raw blobs are opaque; only structured nodes can be traversed for
	// embedded hash children.
	if _, ok := n.node.(rawNode); !ok {
		gatherChildren(n.node, &children)
	}
	return children
}

// gatherChildren traverses the node hierarchy of a collapsed storage node and
// retrieves all the hashnode children.
func gatherChildren(n node, children *[]common.Hash) {
	switch n := n.(type) {
	case *rawShortNode:
		gatherChildren(n.Val, children)

	case rawFullNode:
		for i := 0; i < 16; i++ {
			gatherChildren(n[i], children)
		}
	case hashNode:
		*children = append(*children, common.BytesToHash(n))

	case valueNode, nil:
		// Values and absent children carry no hash references.

	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

// simplifyNode traverses the hierarchy of an expanded memory node and discards
// all the internal caches, returning a node that only contains the raw data.
func simplifyNode(n node) node {
	switch n := n.(type) {
	case *shortNode:
		// Short nodes discard the flags and cascade
		return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)}

	case *fullNode:
		// Full nodes discard the flags and cascade
		node := rawFullNode(n.Children)
		for i := 0; i < len(node); i++ {
			if node[i] != nil {
				node[i] = simplifyNode(node[i])
			}
		}
		return node

	case valueNode, hashNode, rawNode:
		// Already in their simplest form.
		return n

	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}
// expandNode traverses the node hierarchy of a collapsed storage node and converts
// all fields and keys into expanded memory form.
func expandNode(hash hashNode, n node, cachegen uint16) node {
	switch n := n.(type) {
	case *rawShortNode:
		// Short nodes need key and child expansion
		return &shortNode{
			Key: compactToHex(n.Key),
			Val: expandNode(nil, n.Val, cachegen),
			flags: nodeFlag{
				hash: hash,
				gen:  cachegen,
			},
		}

	case rawFullNode:
		// Full nodes need child expansion
		node := &fullNode{
			flags: nodeFlag{
				hash: hash,
				gen:  cachegen,
			},
		}
		for i := 0; i < len(node.Children); i++ {
			if n[i] != nil {
				node.Children[i] = expandNode(nil, n[i], cachegen)
			}
		}
		return node

	case valueNode, hashNode:
		// Already in expanded form.
		return n

	default:
		panic(fmt.Sprintf("unknown node type: %T", n))
	}
}

// NewDatabase creates a new trie database to store ephemeral trie content before
// its written out to disk or garbage collected.
func NewDatabase(diskdb ethdb.Database) *Database {
	return &Database{
		diskdb: diskdb,
		// Seed the cache with the zero-hash meta root entry that all
		// externally referenced tries are anchored to.
		nodes:     map[common.Hash]*cachedNode{{}: {}},
		preimages: make(map[common.Hash][]byte),
	}
}

// DiskDB retrieves the persistent storage backing the trie database.
func (db *Database) DiskDB() DatabaseReader {
	return db.diskdb
}

// InsertBlob writes a new reference tracked blob to the memory database if it's
// yet unknown. This method should only be used for non-trie nodes that require
// reference counting, since trie nodes are garbage collected directly through
// their embedded children.
func (db *Database) InsertBlob(hash common.Hash, blob []byte) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.insert(hash, blob, rawNode(blob))
}
// insert inserts a collapsed trie node into the memory database. This method is
// a more generic version of InsertBlob, supporting both raw blob insertions as
// well ex trie node insertions. The blob must always be specified to allow proper
// size tracking.
//
// Note, this method assumes that the database's write lock is held (see
// InsertBlob)!
func (db *Database) insert(hash common.Hash, blob []byte, node node) {
	// If the node's already cached, skip
	if _, ok := db.nodes[hash]; ok {
		return
	}
	// Create the cached entry for this node
	entry := &cachedNode{
		node:      simplifyNode(node),
		size:      uint16(len(blob)),
		flushPrev: db.newest,
	}
	// Bump the reference count of every already-cached child.
	for _, child := range entry.childs() {
		if c := db.nodes[child]; c != nil {
			c.parents++
		}
	}
	db.nodes[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.nodes[db.newest].flushNext, db.newest = hash, hash
	}
	db.nodesSize += common.StorageSize(common.HashLength + entry.size)
}

// insertPreimage writes a new trie node pre-image to the memory database if it's
// yet unknown. The method will make a copy of the slice.
//
// Note, this method assumes that the database's lock is held!
func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
	if _, ok := db.preimages[hash]; ok {
		return
	}
	db.preimages[hash] = common.CopyBytes(preimage)
	db.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
}

// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
func (db *Database) node(hash common.Hash, cachegen uint16) node {
	// Retrieve the node from cache if available
	db.lock.RLock()
	node := db.nodes[hash]
	db.lock.RUnlock()

	if node != nil {
		return node.obj(hash, cachegen)
	}
	// Content unavailable in memory, attempt to retrieve from disk
	enc, err := db.diskdb.Get(hash[:])
	if err != nil || enc == nil {
		return nil
	}
	return mustDecodeNode(hash[:], enc, cachegen)
}
// Node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content.
func (db *Database) Node(hash common.Hash) ([]byte, error) {
	// Retrieve the node from cache if available
	db.lock.RLock()
	node := db.nodes[hash]
	db.lock.RUnlock()

	if node != nil {
		return node.rlp(), nil
	}
	// Content unavailable in memory, attempt to retrieve from disk
	return db.diskdb.Get(hash[:])
}

// preimage retrieves a cached trie node pre-image from memory. If it cannot be
// found cached, the method queries the persistent database for the content.
func (db *Database) preimage(hash common.Hash) ([]byte, error) {
	// Retrieve the node from cache if available
	db.lock.RLock()
	preimage := db.preimages[hash]
	db.lock.RUnlock()

	if preimage != nil {
		return preimage, nil
	}
	// Content unavailable in memory, attempt to retrieve from disk
	return db.diskdb.Get(db.secureKey(hash[:]))
}

// secureKey returns the database key for the preimage of key, as an ephemeral
// buffer. The caller must not hold onto the return value because it will become
// invalid on the next call.
//
// NOTE(review): seckeybuf is shared per-Database without its own lock; callers
// appear to rely on external serialization — verify before adding new call sites.
func (db *Database) secureKey(key []byte) []byte {
	buf := append(db.seckeybuf[:0], secureKeyPrefix...)
	buf = append(buf, key...)
	return buf
}

// Nodes retrieves the hashes of all the nodes cached within the memory database.
// This method is extremely expensive and should only be used to validate internal
// states in test code.
func (db *Database) Nodes() []common.Hash {
	db.lock.RLock()
	defer db.lock.RUnlock()

	var hashes = make([]common.Hash, 0, len(db.nodes))
	for hash := range db.nodes {
		if hash != (common.Hash{}) { // Special case for "root" references/nodes
			hashes = append(hashes, hash)
		}
	}
	return hashes
}
408 func (db *Database) Reference(child common.Hash, parent common.Hash) { 409 db.lock.RLock() 410 defer db.lock.RUnlock() 411 412 db.reference(child, parent) 413 } 414 415 // reference is the private locked version of Reference. 416 func (db *Database) reference(child common.Hash, parent common.Hash) { 417 // If the node does not exist, it's a node pulled from disk, skip 418 node, ok := db.nodes[child] 419 if !ok { 420 return 421 } 422 // If the reference already exists, only duplicate for roots 423 if db.nodes[parent].children == nil { 424 db.nodes[parent].children = make(map[common.Hash]uint16) 425 } else if _, ok = db.nodes[parent].children[child]; ok && parent != (common.Hash{}) { 426 return 427 } 428 node.parents++ 429 db.nodes[parent].children[child]++ 430 } 431 432 // Dereference removes an existing reference from a root node. 433 func (db *Database) Dereference(root common.Hash) { 434 // Sanity check to ensure that the meta-root is not removed 435 if root == (common.Hash{}) { 436 log.Error("Attempted to dereference the trie cache meta root") 437 return 438 } 439 db.lock.Lock() 440 defer db.lock.Unlock() 441 442 nodes, storage, start := len(db.nodes), db.nodesSize, time.Now() 443 db.dereference(root, common.Hash{}) 444 445 db.gcnodes += uint64(nodes - len(db.nodes)) 446 db.gcsize += storage - db.nodesSize 447 db.gctime += time.Since(start) 448 449 memcacheGCTimeTimer.Update(time.Since(start)) 450 memcacheGCSizeMeter.Mark(int64(storage - db.nodesSize)) 451 memcacheGCNodesMeter.Mark(int64(nodes - len(db.nodes))) 452 453 log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.nodes), "size", storage-db.nodesSize, "time", time.Since(start), 454 "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.nodes), "livesize", db.nodesSize) 455 } 456 457 // dereference is the private locked version of Dereference. 
// dereference is the private locked version of Dereference. It removes one
// parent->child reference and, if the child's reference count drops to zero,
// unlinks it from the flush-list and cascades to its own children.
func (db *Database) dereference(child common.Hash, parent common.Hash) {
	// Dereference the parent-child
	node := db.nodes[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
		}
	}
	// If the child does not exist, it's a previously committed node.
	node, ok := db.nodes[child]
	if !ok {
		return
	}
	// If there are no more references to the child, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.nodes[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.nodes[node.flushPrev].flushNext = common.Hash{}
		default:
			db.nodes[node.flushPrev].flushNext = node.flushNext
			db.nodes[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		for _, hash := range node.childs() {
			db.dereference(hash, child)
		}
		delete(db.nodes, child)
		db.nodesSize -= common.StorageSize(common.HashLength + int(node.size))
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	db.lock.RLock()

	nodes, storage, start := len(db.nodes), db.nodesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.nodesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. For every useful node, we track 2 extra hashes as the flushlist.
	// (len-1 excludes the meta root, which carries no flush-list links.)
	size := db.nodesSize + common.StorageSize((len(db.nodes)-1)*2*common.HashLength)

	// If the preimage cache got large enough, push to disk. If it's still small
	// leave for later to deduplicate writes.
	flushPreimages := db.preimagesSize > 4*1024*1024
	if flushPreimages {
		for hash, preimage := range db.preimages {
			if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
				log.Error("Failed to commit preimage from trie database", "err", err)
				db.lock.RUnlock()
				return err
			}
			if batch.ValueSize() > ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					db.lock.RUnlock()
					return err
				}
				batch.Reset()
			}
		}
	}
	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.nodes[oldest]
		if err := batch.Put(oldest[:], node.rlp()); err != nil {
			db.lock.RUnlock()
			return err
		}
		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				db.lock.RUnlock()
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including both the useful cached data (hash -> blob), as
		// well as the flushlist metadata (2*hash). When flushing items from the cache,
		// we need to reduce both.
		size -= common.StorageSize(3*common.HashLength + int(node.size))
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		db.lock.RUnlock()
		return err
	}
	db.lock.RUnlock()

	// Write successful, clear out the flushed data
	db.lock.Lock()
	defer db.lock.Unlock()

	if flushPreimages {
		db.preimages = make(map[common.Hash][]byte)
		db.preimagesSize = 0
	}
	// Unlink everything that was persisted, up to (but excluding) the first
	// node that was not written out.
	for db.oldest != oldest {
		node := db.nodes[db.oldest]
		delete(db.nodes, db.oldest)
		db.oldest = node.flushNext

		db.nodesSize -= common.StorageSize(common.HashLength + int(node.size))
	}
	if db.oldest != (common.Hash{}) {
		db.nodes[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.nodes))
	db.flushsize += storage - db.nodesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.nodesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.nodes)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.nodes), "size", storage-db.nodesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.nodes), "livesize", db.nodesSize)

	return nil
}
608 func (db *Database) Commit(node common.Hash, report bool) error { 609 // Create a database batch to flush persistent data out. It is important that 610 // outside code doesn't see an inconsistent state (referenced data removed from 611 // memory cache during commit but not yet in persistent storage). This is ensured 612 // by only uncaching existing data when the database write finalizes. 613 db.lock.RLock() 614 615 start := time.Now() 616 batch := db.diskdb.NewBatch() 617 618 // Move all of the accumulated preimages into a write batch 619 for hash, preimage := range db.preimages { 620 if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil { 621 log.Error("Failed to commit preimage from trie database", "err", err) 622 db.lock.RUnlock() 623 return err 624 } 625 if batch.ValueSize() > ethdb.IdealBatchSize { 626 if err := batch.Write(); err != nil { 627 return err 628 } 629 batch.Reset() 630 } 631 } 632 // Move the trie itself into the batch, flushing if enough data is accumulated 633 nodes, storage := len(db.nodes), db.nodesSize 634 if err := db.commit(node, batch); err != nil { 635 log.Error("Failed to commit trie from trie database", "err", err) 636 db.lock.RUnlock() 637 return err 638 } 639 // Write batch ready, unlock for readers during persistence 640 if err := batch.Write(); err != nil { 641 log.Error("Failed to write trie to disk", "err", err) 642 db.lock.RUnlock() 643 return err 644 } 645 db.lock.RUnlock() 646 647 // Write successful, clear out the flushed data 648 db.lock.Lock() 649 defer db.lock.Unlock() 650 651 db.preimages = make(map[common.Hash][]byte) 652 db.preimagesSize = 0 653 654 db.uncache(node) 655 656 memcacheCommitTimeTimer.Update(time.Since(start)) 657 memcacheCommitSizeMeter.Mark(int64(storage - db.nodesSize)) 658 memcacheCommitNodesMeter.Mark(int64(nodes - len(db.nodes))) 659 660 logger := log.Info 661 if !report { 662 logger = log.Debug 663 } 664 logger("Persisted trie from memory database", "nodes", 
nodes-len(db.nodes)+int(db.flushnodes), "size", storage-db.nodesSize+db.flushsize, "time", time.Since(start)+db.flushtime, 665 "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.nodes), "livesize", db.nodesSize) 666 667 // Reset the garbage collection statistics 668 db.gcnodes, db.gcsize, db.gctime = 0, 0, 0 669 db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0 670 671 return nil 672 } 673 674 // commit is the private locked version of Commit. 675 func (db *Database) commit(hash common.Hash, batch ethdb.Batch) error { 676 // If the node does not exist, it's a previously committed node 677 node, ok := db.nodes[hash] 678 if !ok { 679 return nil 680 } 681 for _, child := range node.childs() { 682 if err := db.commit(child, batch); err != nil { 683 return err 684 } 685 } 686 if err := batch.Put(hash[:], node.rlp()); err != nil { 687 return err 688 } 689 // If we've reached an optimal batch size, commit and start over 690 if batch.ValueSize() >= ethdb.IdealBatchSize { 691 if err := batch.Write(); err != nil { 692 return err 693 } 694 batch.Reset() 695 } 696 return nil 697 } 698 699 // uncache is the post-processing step of a commit operation where the already 700 // persisted trie is removed from the cache. The reason behind the two-phase 701 // commit is to ensure consistent data availability while moving from memory 702 // to disk. 
// uncache is the post-processing step of a commit operation where the already
// persisted trie is removed from the cache. The reason behind the two-phase
// commit is to ensure consistent data availability while moving from memory
// to disk.
func (db *Database) uncache(hash common.Hash) {
	// If the node does not exist, we're done on this path
	node, ok := db.nodes[hash]
	if !ok {
		return
	}
	// Node still exists, remove it from the flush-list
	switch hash {
	case db.oldest:
		db.oldest = node.flushNext
		db.nodes[node.flushNext].flushPrev = common.Hash{}
	case db.newest:
		db.newest = node.flushPrev
		db.nodes[node.flushPrev].flushNext = common.Hash{}
	default:
		db.nodes[node.flushPrev].flushNext = node.flushNext
		db.nodes[node.flushNext].flushPrev = node.flushPrev
	}
	// Uncache the node's subtries and remove the node itself too
	for _, child := range node.childs() {
		db.uncache(child)
	}
	delete(db.nodes, hash)
	db.nodesSize -= common.StorageSize(common.HashLength + int(node.size))
}

// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	// db.nodesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. For every useful node, we track 2 extra hashes as the flushlist.
	// (len-1 excludes the meta root, which is not on the flush-list.)
	var flushlistSize = common.StorageSize((len(db.nodes) - 1) * 2 * common.HashLength)
	return db.nodesSize + flushlistSize, db.preimagesSize
}
748 func (db *Database) verifyIntegrity() { 749 // Iterate over all the cached nodes and accumulate them into a set 750 reachable := map[common.Hash]struct{}{{}: {}} 751 752 for child := range db.nodes[common.Hash{}].children { 753 db.accumulate(child, reachable) 754 } 755 // Find any unreachable but cached nodes 756 unreachable := []string{} 757 for hash, node := range db.nodes { 758 if _, ok := reachable[hash]; !ok { 759 unreachable = append(unreachable, fmt.Sprintf("%x: {Node: %v, Parents: %d, Prev: %x, Next: %x}", 760 hash, node.node, node.parents, node.flushPrev, node.flushNext)) 761 } 762 } 763 if len(unreachable) != 0 { 764 panic(fmt.Sprintf("trie cache memory leak: %v", unreachable)) 765 } 766 } 767 768 // accumulate iterates over the trie defined by hash and accumulates all the 769 // cached children found in memory. 770 func (db *Database) accumulate(hash common.Hash, reachable map[common.Hash]struct{}) { 771 // Mark the node reachable if present in the memory cache 772 node, ok := db.nodes[hash] 773 if !ok { 774 return 775 } 776 reachable[hash] = struct{}{} 777 778 // Iterate over all the children and accumulate them too 779 for _, child := range node.childs() { 780 db.accumulate(child, reachable) 781 } 782 }