// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/allegro/bigcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
)

// Meters and timers tracking the effectiveness of the clean (read) cache and
// the cost of flush, garbage-collection and commit operations on the dirty
// node cache.
var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// secureKeyPrefix is the database key prefix used to store trie node preimages.
var secureKeyPrefix = []byte("secure-key-")

// secureKeyLength is the length of the above prefix + 32byte hash.
const secureKeyLength = 11 + 32

// DatabaseReader wraps the Get and Has method of a backing store for the trie.
type DatabaseReader interface {
	// Get retrieves the value associated with key from the database.
	Get(key []byte) (value []byte, err error)

	// Has retrieves whether a key is present in the database.
	Has(key []byte) (bool, error)
}

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
type Database struct {
	diskdb ethdb.Database // Persistent storage for matured trie nodes

	cleans  *bigcache.BigCache          // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
	seckeybuf [secureKeyLength]byte  // Ephemeral buffer for calculating preimage keys

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize   common.StorageSize // Storage size of the dirty node cache (exc. flushlist)
	preimagesSize common.StorageSize // Storage size of the preimages cache

	lock sync.RWMutex // Guards the dirty/preimage caches and flush-list links
}

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time store them
// in the same cache fields).
type rawNode []byte

// The node interface stubs below all panic on purpose: a rawNode only ever
// lives inside the database cache, never inside a live trie.
func (n rawNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

// The node interface stubs below all panic on purpose: a rawFullNode only
// ever lives inside the database cache, never inside a live trie.
func (n rawFullNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawFullNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

// EncodeRLP encodes the node as the canonical 17-item list, substituting the
// shared nil value node for absent children so the encoding stays identical to
// that of a regular fullNode.
func (n rawFullNode) EncodeRLP(w io.Writer) error {
	var nodes [17]node

	for i, child := range n {
		if child != nil {
			nodes[i] = child
		} else {
			nodes[i] = nilValueNode
		}
	}
	return rlp.Encode(w, nodes)
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

// The node interface stubs below all panic on purpose: a rawShortNode only
// ever lives inside the database cache, never inside a live trie.
func (n rawShortNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawShortNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached node in the
// memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// rlp returns the raw rlp encoded blob of the cached node, either directly from
// the cache, or by regenerating it from the collapsed node.
154 func (n *cachedNode) rlp() []byte { 155 if node, ok := n.node.(rawNode); ok { 156 return node 157 } 158 blob, err := rlp.EncodeToBytes(n.node) 159 if err != nil { 160 panic(err) 161 } 162 return blob 163 } 164 165 // obj returns the decoded and expanded trie node, either directly from the cache, 166 // or by regenerating it from the rlp encoded blob. 167 func (n *cachedNode) obj(hash common.Hash, cachegen uint16) node { 168 if node, ok := n.node.(rawNode); ok { 169 return mustDecodeNode(hash[:], node, cachegen) 170 } 171 return expandNode(hash[:], n.node, cachegen) 172 } 173 174 // childs returns all the tracked children of this node, both the implicit ones 175 // from inside the node as well as the explicit ones from outside the node. 176 func (n *cachedNode) childs() []common.Hash { 177 children := make([]common.Hash, 0, 16) 178 for child := range n.children { 179 children = append(children, child) 180 } 181 if _, ok := n.node.(rawNode); !ok { 182 gatherChildren(n.node, &children) 183 } 184 return children 185 } 186 187 // gatherChildren traverses the node hierarchy of a collapsed storage node and 188 // retrieves all the hashnode children. 189 func gatherChildren(n node, children *[]common.Hash) { 190 switch n := n.(type) { 191 case *rawShortNode: 192 gatherChildren(n.Val, children) 193 194 case rawFullNode: 195 for i := 0; i < 16; i++ { 196 gatherChildren(n[i], children) 197 } 198 case hashNode: 199 *children = append(*children, common.BytesToHash(n)) 200 201 case valueNode, nil: 202 203 default: 204 panic(fmt.Sprintf("unknown node type: %T", n)) 205 } 206 } 207 208 // simplifyNode traverses the hierarchy of an expanded memory node and discards 209 // all the internal caches, returning a node that only contains the raw data. 
210 func simplifyNode(n node) node { 211 switch n := n.(type) { 212 case *shortNode: 213 // Short nodes discard the flags and cascade 214 return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} 215 216 case *fullNode: 217 // Full nodes discard the flags and cascade 218 node := rawFullNode(n.Children) 219 for i := 0; i < len(node); i++ { 220 if node[i] != nil { 221 node[i] = simplifyNode(node[i]) 222 } 223 } 224 return node 225 226 case valueNode, hashNode, rawNode: 227 return n 228 229 default: 230 panic(fmt.Sprintf("unknown node type: %T", n)) 231 } 232 } 233 234 // expandNode traverses the node hierarchy of a collapsed storage node and converts 235 // all fields and keys into expanded memory form. 236 func expandNode(hash hashNode, n node, cachegen uint16) node { 237 switch n := n.(type) { 238 case *rawShortNode: 239 // Short nodes need key and child expansion 240 return &shortNode{ 241 Key: compactToHex(n.Key), 242 Val: expandNode(nil, n.Val, cachegen), 243 flags: nodeFlag{ 244 hash: hash, 245 gen: cachegen, 246 }, 247 } 248 249 case rawFullNode: 250 // Full nodes need child expansion 251 node := &fullNode{ 252 flags: nodeFlag{ 253 hash: hash, 254 gen: cachegen, 255 }, 256 } 257 for i := 0; i < len(node.Children); i++ { 258 if n[i] != nil { 259 node.Children[i] = expandNode(nil, n[i], cachegen) 260 } 261 } 262 return node 263 264 case valueNode, hashNode: 265 return n 266 267 default: 268 panic(fmt.Sprintf("unknown node type: %T", n)) 269 } 270 } 271 272 // NewDatabase creates a new trie database to store ephemeral trie content before 273 // its written out to disk or garbage collected. No read cache is created, so all 274 // data retrievals will hit the underlying disk database. 275 func NewDatabase(diskdb ethdb.Database) *Database { 276 return NewDatabaseWithCache(diskdb, 0) 277 } 278 279 // NewDatabaseWithCache creates a new trie database to store ephemeral trie content 280 // before its written out to disk or garbage collected. 
It also acts as a read cache 281 // for nodes loaded from disk. 282 func NewDatabaseWithCache(diskdb ethdb.Database, cache int) *Database { 283 var cleans *bigcache.BigCache 284 if cache > 0 { 285 cleans, _ = bigcache.NewBigCache(bigcache.Config{ 286 Shards: 1024, 287 LifeWindow: time.Hour, 288 MaxEntriesInWindow: cache * 1024, 289 MaxEntrySize: 512, 290 HardMaxCacheSize: cache, 291 }) 292 } 293 return &Database{ 294 diskdb: diskdb, 295 cleans: cleans, 296 dirties: map[common.Hash]*cachedNode{{}: {}}, 297 preimages: make(map[common.Hash][]byte), 298 } 299 } 300 301 // DiskDB retrieves the persistent storage backing the trie database. 302 func (db *Database) DiskDB() DatabaseReader { 303 return db.diskdb 304 } 305 306 // InsertBlob writes a new reference tracked blob to the memory database if it's 307 // yet unknown. This method should only be used for non-trie nodes that require 308 // reference counting, since trie nodes are garbage collected directly through 309 // their embedded children. 310 func (db *Database) InsertBlob(hash common.Hash, blob []byte) { 311 db.lock.Lock() 312 defer db.lock.Unlock() 313 314 db.insert(hash, blob, rawNode(blob)) 315 } 316 317 // insert inserts a collapsed trie node into the memory database. This method is 318 // a more generic version of InsertBlob, supporting both raw blob insertions as 319 // well ex trie node insertions. The blob must always be specified to allow proper 320 // size tracking. 
func (db *Database) insert(hash common.Hash, blob []byte, node node) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	// Create the cached entry for this node, stripping any expansion caches so
	// only the minimal data is retained. The entry is pre-linked to the current
	// flush-list tail via flushPrev.
	entry := &cachedNode{
		node:      simplifyNode(node),
		size:      uint16(len(blob)),
		flushPrev: db.newest,
	}
	// Bump the reference count of every child already present in the dirty set.
	for _, child := range entry.childs() {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	}
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		// First tracked node: it is both head and tail of the flush-list.
		db.oldest, db.newest = hash, hash
	} else {
		// Append to the tail of the flush-list.
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}

// insertPreimage writes a new trie node pre-image to the memory database if it's
// yet unknown. The method will make a copy of the slice.
//
// Note, this method assumes that the database's lock is held!
func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
	// Skip duplicates to keep the size accounting accurate.
	if _, ok := db.preimages[hash]; ok {
		return
	}
	db.preimages[hash] = common.CopyBytes(preimage)
	db.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
}

// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
362 func (db *Database) node(hash common.Hash, cachegen uint16) node { 363 // Retrieve the node from the clean cache if available 364 if db.cleans != nil { 365 if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil { 366 memcacheCleanHitMeter.Mark(1) 367 memcacheCleanReadMeter.Mark(int64(len(enc))) 368 return mustDecodeNode(hash[:], enc, cachegen) 369 } 370 } 371 // Retrieve the node from the dirty cache if available 372 db.lock.RLock() 373 dirty := db.dirties[hash] 374 db.lock.RUnlock() 375 376 if dirty != nil { 377 return dirty.obj(hash, cachegen) 378 } 379 // Content unavailable in memory, attempt to retrieve from disk 380 enc, err := db.diskdb.Get(hash[:]) 381 if err != nil || enc == nil { 382 return nil 383 } 384 if db.cleans != nil { 385 db.cleans.Set(string(hash[:]), enc) 386 memcacheCleanMissMeter.Mark(1) 387 memcacheCleanWriteMeter.Mark(int64(len(enc))) 388 } 389 return mustDecodeNode(hash[:], enc, cachegen) 390 } 391 392 // Node retrieves an encoded cached trie node from memory. If it cannot be found 393 // cached, the method queries the persistent database for the content. 
func (db *Database) Node(hash common.Hash) ([]byte, error) {
	// It doesn't make sense to retrieve the metaroot
	if hash == (common.Hash{}) {
		return nil, errors.New("not found")
	}
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return enc, nil
		}
	}
	// Retrieve the node from the dirty cache if available
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		return dirty.rlp(), nil
	}
	// Content unavailable in memory, attempt to retrieve from disk
	enc, err := db.diskdb.Get(hash[:])
	if err == nil && enc != nil {
		if db.cleans != nil {
			// Warm the clean cache so the next lookup avoids the disk read.
			db.cleans.Set(string(hash[:]), enc)
			memcacheCleanMissMeter.Mark(1)
			memcacheCleanWriteMeter.Mark(int64(len(enc)))
		}
	}
	return enc, err
}

// preimage retrieves a cached trie node pre-image from memory. If it cannot be
// found cached, the method queries the persistent database for the content.
func (db *Database) preimage(hash common.Hash) ([]byte, error) {
	// Retrieve the node from cache if available
	db.lock.RLock()
	preimage := db.preimages[hash]
	db.lock.RUnlock()

	if preimage != nil {
		return preimage, nil
	}
	// Content unavailable in memory, attempt to retrieve from disk
	return db.diskdb.Get(db.secureKey(hash[:]))
}

// secureKey returns the database key for the preimage of key, as an ephemeral
// buffer. The caller must not hold onto the return value because it will become
// invalid on the next call.
//
// NOTE(review): the shared seckeybuf is reused on every call, so this helper is
// not safe for concurrent use without external synchronization.
func (db *Database) secureKey(key []byte) []byte {
	buf := append(db.seckeybuf[:0], secureKeyPrefix...)
	buf = append(buf, key...)
	return buf
}

// Nodes retrieves the hashes of all the nodes cached within the memory database.
452 // This method is extremely expensive and should only be used to validate internal 453 // states in test code. 454 func (db *Database) Nodes() []common.Hash { 455 db.lock.RLock() 456 defer db.lock.RUnlock() 457 458 var hashes = make([]common.Hash, 0, len(db.dirties)) 459 for hash := range db.dirties { 460 if hash != (common.Hash{}) { // Special case for "root" references/nodes 461 hashes = append(hashes, hash) 462 } 463 } 464 return hashes 465 } 466 467 // Reference adds a new reference from a parent node to a child node. 468 func (db *Database) Reference(child common.Hash, parent common.Hash) { 469 db.lock.RLock() 470 defer db.lock.RUnlock() 471 472 db.reference(child, parent) 473 } 474 475 // reference is the private locked version of Reference. 476 func (db *Database) reference(child common.Hash, parent common.Hash) { 477 // If the node does not exist, it's a node pulled from disk, skip 478 node, ok := db.dirties[child] 479 if !ok { 480 return 481 } 482 // If the reference already exists, only duplicate for roots 483 if db.dirties[parent].children == nil { 484 db.dirties[parent].children = make(map[common.Hash]uint16) 485 } else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) { 486 return 487 } 488 node.parents++ 489 db.dirties[parent].children[child]++ 490 } 491 492 // Dereference removes an existing reference from a root node. 
493 func (db *Database) Dereference(root common.Hash) { 494 // Sanity check to ensure that the meta-root is not removed 495 if root == (common.Hash{}) { 496 log.Error("Attempted to dereference the trie cache meta root") 497 return 498 } 499 db.lock.Lock() 500 defer db.lock.Unlock() 501 502 nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now() 503 db.dereference(root, common.Hash{}) 504 505 db.gcnodes += uint64(nodes - len(db.dirties)) 506 db.gcsize += storage - db.dirtiesSize 507 db.gctime += time.Since(start) 508 509 memcacheGCTimeTimer.Update(time.Since(start)) 510 memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize)) 511 memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties))) 512 513 log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), 514 "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) 515 } 516 517 // dereference is the private locked version of Dereference. 518 func (db *Database) dereference(child common.Hash, parent common.Hash) { 519 // Dereference the parent-child 520 node := db.dirties[parent] 521 522 if node.children != nil && node.children[child] > 0 { 523 node.children[child]-- 524 if node.children[child] == 0 { 525 delete(node.children, child) 526 } 527 } 528 // If the child does not exist, it's a previously committed node. 529 node, ok := db.dirties[child] 530 if !ok { 531 return 532 } 533 // If there are no more references to the child, delete it and cascade 534 if node.parents > 0 { 535 // This is a special cornercase where a node loaded from disk (i.e. not in the 536 // memcache any more) gets reinjected as a new node (short node split into full, 537 // then reverted into short), causing a cached node to have no parents. That is 538 // no problem in itself, but don't make maxint parents out of it. 
539 node.parents-- 540 } 541 if node.parents == 0 { 542 // Remove the node from the flush-list 543 switch child { 544 case db.oldest: 545 db.oldest = node.flushNext 546 db.dirties[node.flushNext].flushPrev = common.Hash{} 547 case db.newest: 548 db.newest = node.flushPrev 549 db.dirties[node.flushPrev].flushNext = common.Hash{} 550 default: 551 db.dirties[node.flushPrev].flushNext = node.flushNext 552 db.dirties[node.flushNext].flushPrev = node.flushPrev 553 } 554 // Dereference all children and delete the node 555 for _, hash := range node.childs() { 556 db.dereference(hash, child) 557 } 558 delete(db.dirties, child) 559 db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) 560 } 561 } 562 563 // Cap iteratively flushes old but still referenced trie nodes until the total 564 // memory usage goes below the given threshold. 565 func (db *Database) Cap(limit common.StorageSize) error { 566 // Create a database batch to flush persistent data out. It is important that 567 // outside code doesn't see an inconsistent state (referenced data removed from 568 // memory cache during commit but not yet in persistent storage). This is ensured 569 // by only uncaching existing data when the database write finalizes. 570 db.lock.RLock() 571 572 nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now() 573 batch := db.diskdb.NewBatch() 574 575 // db.dirtiesSize only contains the useful data in the cache, but when reporting 576 // the total memory consumption, the maintenance metadata is also needed to be 577 // counted. For every useful node, we track 2 extra hashes as the flushlist. 578 size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*2*common.HashLength) 579 580 // If the preimage cache got large enough, push to disk. If it's still small 581 // leave for later to deduplicate writes. 
582 flushPreimages := db.preimagesSize > 4*1024*1024 583 if flushPreimages { 584 for hash, preimage := range db.preimages { 585 if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil { 586 log.Error("Failed to commit preimage from trie database", "err", err) 587 db.lock.RUnlock() 588 return err 589 } 590 if batch.ValueSize() > ethdb.IdealBatchSize { 591 if err := batch.Write(); err != nil { 592 db.lock.RUnlock() 593 return err 594 } 595 batch.Reset() 596 } 597 } 598 } 599 // Keep committing nodes from the flush-list until we're below allowance 600 oldest := db.oldest 601 for size > limit && oldest != (common.Hash{}) { 602 // Fetch the oldest referenced node and push into the batch 603 node := db.dirties[oldest] 604 if err := batch.Put(oldest[:], node.rlp()); err != nil { 605 db.lock.RUnlock() 606 return err 607 } 608 // If we exceeded the ideal batch size, commit and reset 609 if batch.ValueSize() >= ethdb.IdealBatchSize { 610 if err := batch.Write(); err != nil { 611 log.Error("Failed to write flush list to disk", "err", err) 612 db.lock.RUnlock() 613 return err 614 } 615 batch.Reset() 616 } 617 // Iterate to the next flush item, or abort if the size cap was achieved. Size 618 // is the total size, including both the useful cached data (hash -> blob), as 619 // well as the flushlist metadata (2*hash). When flushing items from the cache, 620 // we need to reduce both. 
621 size -= common.StorageSize(3*common.HashLength + int(node.size)) 622 oldest = node.flushNext 623 } 624 // Flush out any remainder data from the last batch 625 if err := batch.Write(); err != nil { 626 log.Error("Failed to write flush list to disk", "err", err) 627 db.lock.RUnlock() 628 return err 629 } 630 db.lock.RUnlock() 631 632 // Write successful, clear out the flushed data 633 db.lock.Lock() 634 defer db.lock.Unlock() 635 636 if flushPreimages { 637 db.preimages = make(map[common.Hash][]byte) 638 db.preimagesSize = 0 639 } 640 for db.oldest != oldest { 641 node := db.dirties[db.oldest] 642 delete(db.dirties, db.oldest) 643 db.oldest = node.flushNext 644 645 db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) 646 } 647 if db.oldest != (common.Hash{}) { 648 db.dirties[db.oldest].flushPrev = common.Hash{} 649 } 650 db.flushnodes += uint64(nodes - len(db.dirties)) 651 db.flushsize += storage - db.dirtiesSize 652 db.flushtime += time.Since(start) 653 654 memcacheFlushTimeTimer.Update(time.Since(start)) 655 memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize)) 656 memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties))) 657 658 log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), 659 "flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) 660 661 return nil 662 } 663 664 // Commit iterates over all the children of a particular node, writes them out 665 // to disk, forcefully tearing down all references in both directions. 666 // 667 // As a side effect, all pre-images accumulated up to this point are also written. 668 func (db *Database) Commit(node common.Hash, report bool) error { 669 // Create a database batch to flush persistent data out. 
It is important that 670 // outside code doesn't see an inconsistent state (referenced data removed from 671 // memory cache during commit but not yet in persistent storage). This is ensured 672 // by only uncaching existing data when the database write finalizes. 673 db.lock.RLock() 674 675 start := time.Now() 676 batch := db.diskdb.NewBatch() 677 678 // Move all of the accumulated preimages into a write batch 679 for hash, preimage := range db.preimages { 680 if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil { 681 log.Error("Failed to commit preimage from trie database", "err", err) 682 db.lock.RUnlock() 683 return err 684 } 685 if batch.ValueSize() > ethdb.IdealBatchSize { 686 if err := batch.Write(); err != nil { 687 return err 688 } 689 batch.Reset() 690 } 691 } 692 // Move the trie itself into the batch, flushing if enough data is accumulated 693 nodes, storage := len(db.dirties), db.dirtiesSize 694 if err := db.commit(node, batch); err != nil { 695 log.Error("Failed to commit trie from trie database", "err", err) 696 db.lock.RUnlock() 697 return err 698 } 699 // Write batch ready, unlock for readers during persistence 700 if err := batch.Write(); err != nil { 701 log.Error("Failed to write trie to disk", "err", err) 702 db.lock.RUnlock() 703 return err 704 } 705 db.lock.RUnlock() 706 707 // Write successful, clear out the flushed data 708 db.lock.Lock() 709 defer db.lock.Unlock() 710 711 db.preimages = make(map[common.Hash][]byte) 712 db.preimagesSize = 0 713 714 db.uncache(node) 715 716 memcacheCommitTimeTimer.Update(time.Since(start)) 717 memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize)) 718 memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties))) 719 720 logger := log.Info 721 if !report { 722 logger = log.Debug 723 } 724 logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime, 725 "gcnodes", 
db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) 726 727 // Reset the garbage collection statistics 728 db.gcnodes, db.gcsize, db.gctime = 0, 0, 0 729 db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0 730 731 return nil 732 } 733 734 // commit is the private locked version of Commit. 735 func (db *Database) commit(hash common.Hash, batch ethdb.Batch) error { 736 // If the node does not exist, it's a previously committed node 737 node, ok := db.dirties[hash] 738 if !ok { 739 return nil 740 } 741 for _, child := range node.childs() { 742 if err := db.commit(child, batch); err != nil { 743 return err 744 } 745 } 746 if err := batch.Put(hash[:], node.rlp()); err != nil { 747 return err 748 } 749 // If we've reached an optimal batch size, commit and start over 750 if batch.ValueSize() >= ethdb.IdealBatchSize { 751 if err := batch.Write(); err != nil { 752 return err 753 } 754 batch.Reset() 755 } 756 return nil 757 } 758 759 // uncache is the post-processing step of a commit operation where the already 760 // persisted trie is removed from the cache. The reason behind the two-phase 761 // commit is to ensure consistent data availability while moving from memory 762 // to disk. 
func (db *Database) uncache(hash common.Hash) {
	// If the node does not exist, we're done on this path
	node, ok := db.dirties[hash]
	if !ok {
		return
	}
	// Node still exists, remove it from the flush-list
	switch hash {
	case db.oldest:
		// Unlinking the head: the successor becomes the new head.
		db.oldest = node.flushNext
		db.dirties[node.flushNext].flushPrev = common.Hash{}
	case db.newest:
		// Unlinking the tail: the predecessor becomes the new tail.
		db.newest = node.flushPrev
		db.dirties[node.flushPrev].flushNext = common.Hash{}
	default:
		// Unlinking from the middle: stitch the neighbours together.
		db.dirties[node.flushPrev].flushNext = node.flushNext
		db.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	// Uncache the node's subtries and remove the node itself too
	for _, child := range node.childs() {
		db.uncache(child)
	}
	delete(db.dirties, hash)
	db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
}

// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
//
// The first return value is the dirty node cache size (including the
// flush-list overhead), the second is the preimage cache size.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. For every useful node, we track 2 extra hashes as the flushlist.
	var flushlistSize = common.StorageSize((len(db.dirties) - 1) * 2 * common.HashLength)
	return db.dirtiesSize + flushlistSize, db.preimagesSize
}

// verifyIntegrity is a debug method to iterate over the entire trie stored in
// memory and check whether every node is reachable from the meta root. The goal
// is to find any errors that might cause memory leaks and or trie nodes to go
// missing.
//
// This method is extremely CPU and memory intensive, only use when must.
808 func (db *Database) verifyIntegrity() { 809 // Iterate over all the cached nodes and accumulate them into a set 810 reachable := map[common.Hash]struct{}{{}: {}} 811 812 for child := range db.dirties[common.Hash{}].children { 813 db.accumulate(child, reachable) 814 } 815 // Find any unreachable but cached nodes 816 unreachable := []string{} 817 for hash, node := range db.dirties { 818 if _, ok := reachable[hash]; !ok { 819 unreachable = append(unreachable, fmt.Sprintf("%x: {Node: %v, Parents: %d, Prev: %x, Next: %x}", 820 hash, node.node, node.parents, node.flushPrev, node.flushNext)) 821 } 822 } 823 if len(unreachable) != 0 { 824 panic(fmt.Sprintf("trie cache memory leak: %v", unreachable)) 825 } 826 } 827 828 // accumulate iterates over the trie defined by hash and accumulates all the 829 // cached children found in memory. 830 func (db *Database) accumulate(hash common.Hash, reachable map[common.Hash]struct{}) { 831 // Mark the node reachable if present in the memory cache 832 node, ok := db.dirties[hash] 833 if !ok { 834 return 835 } 836 reachable[hash] = struct{}{} 837 838 // Iterate over all the children and accumulate them too 839 for _, child := range node.childs() { 840 db.accumulate(child, reachable) 841 } 842 }