github.com/xfers/quorum@v21.1.0+incompatible/trie/database.go (about) 1 // Copyright 2018 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package trie 18 19 import ( 20 "encoding/binary" 21 "errors" 22 "fmt" 23 "io" 24 "reflect" 25 "sync" 26 "time" 27 28 "github.com/allegro/bigcache" 29 "github.com/ethereum/go-ethereum/common" 30 "github.com/ethereum/go-ethereum/ethdb" 31 "github.com/ethereum/go-ethereum/log" 32 "github.com/ethereum/go-ethereum/metrics" 33 "github.com/ethereum/go-ethereum/rlp" 34 ) 35 36 var ( 37 memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil) 38 memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil) 39 memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil) 40 memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil) 41 42 memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil) 43 memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil) 44 memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil) 45 46 memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil) 47 memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil) 48 memcacheGCSizeMeter = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil) 49 50 memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil) 51 memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil) 52 memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil) 53 ) 54 55 // secureKeyPrefix is the database key prefix used to store trie node preimages. 56 var secureKeyPrefix = []byte("secure-key-") 57 58 // secureKeyLength is the length of the above prefix + 32byte hash. 59 const secureKeyLength = 11 + 32 60 61 // Database is an intermediate write layer between the trie data structures and 62 // the disk database. The aim is to accumulate trie writes in-memory and only 63 // periodically flush a couple tries to disk, garbage collecting the remainder. 64 // 65 // Note, the trie Database is **not** thread safe in its mutations, but it **is** 66 // thread safe in providing individual, independent node access. The rationale 67 // behind this split design is to provide read access to RPC handlers and sync 68 // servers even while the trie is executing expensive garbage collection. 69 type Database struct { 70 diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes 71 72 cleans *bigcache.BigCache // GC friendly memory cache of clean node RLPs 73 dirties map[common.Hash]*cachedNode // Data and references relationships of dirty nodes 74 oldest common.Hash // Oldest tracked node, flush-list head 75 newest common.Hash // Newest tracked node, flush-list tail 76 77 preimages map[common.Hash][]byte // Preimages of nodes from the secure trie 78 seckeybuf [secureKeyLength]byte // Ephemeral buffer for calculating preimage keys 79 80 gctime time.Duration // Time spent on garbage collection since last commit 81 gcnodes uint64 // Nodes garbage collected since last commit 82 gcsize common.StorageSize // Data storage garbage collected since last commit 83 84 flushtime time.Duration // Time spent on data flushing since last commit 85 flushnodes uint64 // Nodes flushed since last commit 86 flushsize common.StorageSize // Data storage flushed since last commit 87 88 dirtiesSize common.StorageSize // Storage size of the dirty node cache (exc. metadata) 89 childrenSize common.StorageSize // Storage size of the external children tracking 90 preimagesSize common.StorageSize // Storage size of the preimages cache 91 92 lock sync.RWMutex 93 } 94 95 // rawNode is a simple binary blob used to differentiate between collapsed trie 96 // nodes and already encoded RLP binary blobs (while at the same time store them 97 // in the same cache fields). 98 type rawNode []byte 99 100 func (n rawNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") } 101 func (n rawNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } 102 func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") } 103 104 // rawFullNode represents only the useful data content of a full node, with the 105 // caches and flags stripped out to minimize its data storage. This type honors 106 // the same RLP encoding as the original parent. 107 type rawFullNode [17]node 108 109 func (n rawFullNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") } 110 func (n rawFullNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } 111 func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") } 112 113 func (n rawFullNode) EncodeRLP(w io.Writer) error { 114 var nodes [17]node 115 116 for i, child := range n { 117 if child != nil { 118 nodes[i] = child 119 } else { 120 nodes[i] = nilValueNode 121 } 122 } 123 return rlp.Encode(w, nodes) 124 } 125 126 // rawShortNode represents only the useful data content of a short node, with the 127 // caches and flags stripped out to minimize its data storage. This type honors 128 // the same RLP encoding as the original parent. 129 type rawShortNode struct { 130 Key []byte 131 Val node 132 } 133 134 func (n rawShortNode) canUnload(uint16, uint16) bool { 135 panic("this should never end up in a live trie") 136 } 137 func (n rawShortNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } 138 func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") } 139 140 // cachedNode is all the information we know about a single cached node in the 141 // memory database write layer. 142 type cachedNode struct { 143 node node // Cached collapsed trie node, or raw rlp data 144 size uint16 // Byte size of the useful cached data 145 146 parents uint32 // Number of live nodes referencing this one 147 children map[common.Hash]uint16 // External children referenced by this node 148 149 flushPrev common.Hash // Previous node in the flush-list 150 flushNext common.Hash // Next node in the flush-list 151 } 152 153 // cachedNodeSize is the raw size of a cachedNode data structure without any 154 // node data included. It's an approximate size, but should be a lot better 155 // than not counting them. 156 var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size()) 157 158 // cachedNodeChildrenSize is the raw size of an initialized but empty external 159 // reference map. 160 const cachedNodeChildrenSize = 48 161 162 // rlp returns the raw rlp encoded blob of the cached node, either directly from 163 // the cache, or by regenerating it from the collapsed node. 164 func (n *cachedNode) rlp() []byte { 165 if node, ok := n.node.(rawNode); ok { 166 return node 167 } 168 blob, err := rlp.EncodeToBytes(n.node) 169 if err != nil { 170 panic(err) 171 } 172 return blob 173 } 174 175 // obj returns the decoded and expanded trie node, either directly from the cache, 176 // or by regenerating it from the rlp encoded blob. 177 func (n *cachedNode) obj(hash common.Hash) node { 178 if node, ok := n.node.(rawNode); ok { 179 return mustDecodeNode(hash[:], node) 180 } 181 return expandNode(hash[:], n.node) 182 } 183 184 // childs returns all the tracked children of this node, both the implicit ones 185 // from inside the node as well as the explicit ones from outside the node. 186 func (n *cachedNode) childs() []common.Hash { 187 children := make([]common.Hash, 0, 16) 188 for child := range n.children { 189 children = append(children, child) 190 } 191 if _, ok := n.node.(rawNode); !ok { 192 gatherChildren(n.node, &children) 193 } 194 return children 195 } 196 197 // gatherChildren traverses the node hierarchy of a collapsed storage node and 198 // retrieves all the hashnode children. 199 func gatherChildren(n node, children *[]common.Hash) { 200 switch n := n.(type) { 201 case *rawShortNode: 202 gatherChildren(n.Val, children) 203 204 case rawFullNode: 205 for i := 0; i < 16; i++ { 206 gatherChildren(n[i], children) 207 } 208 case hashNode: 209 *children = append(*children, common.BytesToHash(n)) 210 211 case valueNode, nil: 212 213 default: 214 panic(fmt.Sprintf("unknown node type: %T", n)) 215 } 216 } 217 218 // simplifyNode traverses the hierarchy of an expanded memory node and discards 219 // all the internal caches, returning a node that only contains the raw data. 220 func simplifyNode(n node) node { 221 switch n := n.(type) { 222 case *shortNode: 223 // Short nodes discard the flags and cascade 224 return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} 225 226 case *fullNode: 227 // Full nodes discard the flags and cascade 228 node := rawFullNode(n.Children) 229 for i := 0; i < len(node); i++ { 230 if node[i] != nil { 231 node[i] = simplifyNode(node[i]) 232 } 233 } 234 return node 235 236 case valueNode, hashNode, rawNode: 237 return n 238 239 default: 240 panic(fmt.Sprintf("unknown node type: %T", n)) 241 } 242 } 243 244 // expandNode traverses the node hierarchy of a collapsed storage node and converts 245 // all fields and keys into expanded memory form. 246 func expandNode(hash hashNode, n node) node { 247 switch n := n.(type) { 248 case *rawShortNode: 249 // Short nodes need key and child expansion 250 return &shortNode{ 251 Key: compactToHex(n.Key), 252 Val: expandNode(nil, n.Val), 253 flags: nodeFlag{ 254 hash: hash, 255 }, 256 } 257 258 case rawFullNode: 259 // Full nodes need child expansion 260 node := &fullNode{ 261 flags: nodeFlag{ 262 hash: hash, 263 }, 264 } 265 for i := 0; i < len(node.Children); i++ { 266 if n[i] != nil { 267 node.Children[i] = expandNode(nil, n[i]) 268 } 269 } 270 return node 271 272 case valueNode, hashNode: 273 return n 274 275 default: 276 panic(fmt.Sprintf("unknown node type: %T", n)) 277 } 278 } 279 280 // trienodeHasher is a struct to be used with BigCache, which uses a Hasher to 281 // determine which shard to place an entry into. It's not a cryptographic hash, 282 // just to provide a bit of anti-collision (default is FNV64a). 283 // 284 // Since trie keys are already hashes, we can just use the key directly to 285 // map shard id. 286 type trienodeHasher struct{} 287 288 // Sum64 implements the bigcache.Hasher interface. 289 func (t trienodeHasher) Sum64(key string) uint64 { 290 return binary.BigEndian.Uint64([]byte(key)) 291 } 292 293 // NewDatabase creates a new trie database to store ephemeral trie content before 294 // its written out to disk or garbage collected. No read cache is created, so all 295 // data retrievals will hit the underlying disk database. 296 func NewDatabase(diskdb ethdb.KeyValueStore) *Database { 297 return NewDatabaseWithCache(diskdb, 0) 298 } 299 300 // NewDatabaseWithCache creates a new trie database to store ephemeral trie content 301 // before its written out to disk or garbage collected. It also acts as a read cache 302 // for nodes loaded from disk. 303 func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int) *Database { 304 var cleans *bigcache.BigCache 305 if cache > 0 { 306 cleans, _ = bigcache.NewBigCache(bigcache.Config{ 307 Shards: 1024, 308 LifeWindow: time.Hour, 309 MaxEntriesInWindow: cache * 1024, 310 MaxEntrySize: 512, 311 HardMaxCacheSize: cache, 312 Hasher: trienodeHasher{}, 313 }) 314 } 315 return &Database{ 316 diskdb: diskdb, 317 cleans: cleans, 318 dirties: map[common.Hash]*cachedNode{{}: { 319 children: make(map[common.Hash]uint16), 320 }}, 321 preimages: make(map[common.Hash][]byte), 322 } 323 } 324 325 // DiskDB retrieves the persistent storage backing the trie database. 326 func (db *Database) DiskDB() ethdb.KeyValueReader { 327 return db.diskdb 328 } 329 330 // InsertBlob writes a new reference tracked blob to the memory database if it's 331 // yet unknown. This method should only be used for non-trie nodes that require 332 // reference counting, since trie nodes are garbage collected directly through 333 // their embedded children. 334 func (db *Database) InsertBlob(hash common.Hash, blob []byte) { 335 db.lock.Lock() 336 defer db.lock.Unlock() 337 338 db.insert(hash, blob, rawNode(blob)) 339 } 340 341 // insert inserts a collapsed trie node into the memory database. This method is 342 // a more generic version of InsertBlob, supporting both raw blob insertions as 343 // well ex trie node insertions. The blob must always be specified to allow proper 344 // size tracking. 345 func (db *Database) insert(hash common.Hash, blob []byte, node node) { 346 // If the node's already cached, skip 347 if _, ok := db.dirties[hash]; ok { 348 return 349 } 350 // Create the cached entry for this node 351 entry := &cachedNode{ 352 node: simplifyNode(node), 353 size: uint16(len(blob)), 354 flushPrev: db.newest, 355 } 356 for _, child := range entry.childs() { 357 if c := db.dirties[child]; c != nil { 358 c.parents++ 359 } 360 } 361 db.dirties[hash] = entry 362 363 // Update the flush-list endpoints 364 if db.oldest == (common.Hash{}) { 365 db.oldest, db.newest = hash, hash 366 } else { 367 db.dirties[db.newest].flushNext, db.newest = hash, hash 368 } 369 db.dirtiesSize += common.StorageSize(common.HashLength + entry.size) 370 } 371 372 // insertPreimage writes a new trie node pre-image to the memory database if it's 373 // yet unknown. The method will make a copy of the slice. 374 // 375 // Note, this method assumes that the database's lock is held! 376 func (db *Database) insertPreimage(hash common.Hash, preimage []byte) { 377 if _, ok := db.preimages[hash]; ok { 378 return 379 } 380 db.preimages[hash] = common.CopyBytes(preimage) 381 db.preimagesSize += common.StorageSize(common.HashLength + len(preimage)) 382 } 383 384 // node retrieves a cached trie node from memory, or returns nil if none can be 385 // found in the memory cache. 386 func (db *Database) node(hash common.Hash) node { 387 // Retrieve the node from the clean cache if available 388 if db.cleans != nil { 389 if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil { 390 memcacheCleanHitMeter.Mark(1) 391 memcacheCleanReadMeter.Mark(int64(len(enc))) 392 return mustDecodeNode(hash[:], enc) 393 } 394 } 395 // Retrieve the node from the dirty cache if available 396 db.lock.RLock() 397 dirty := db.dirties[hash] 398 db.lock.RUnlock() 399 400 if dirty != nil { 401 return dirty.obj(hash) 402 } 403 // Content unavailable in memory, attempt to retrieve from disk 404 enc, err := db.diskdb.Get(hash[:]) 405 if err != nil || enc == nil { 406 return nil 407 } 408 if db.cleans != nil { 409 db.cleans.Set(string(hash[:]), enc) 410 memcacheCleanMissMeter.Mark(1) 411 memcacheCleanWriteMeter.Mark(int64(len(enc))) 412 } 413 return mustDecodeNode(hash[:], enc) 414 } 415 416 // Node retrieves an encoded cached trie node from memory. If it cannot be found 417 // cached, the method queries the persistent database for the content. 418 func (db *Database) Node(hash common.Hash) ([]byte, error) { 419 // It doens't make sense to retrieve the metaroot 420 if hash == (common.Hash{}) { 421 return nil, errors.New("not found") 422 } 423 // Retrieve the node from the clean cache if available 424 if db.cleans != nil { 425 if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil { 426 memcacheCleanHitMeter.Mark(1) 427 memcacheCleanReadMeter.Mark(int64(len(enc))) 428 return enc, nil 429 } 430 } 431 // Retrieve the node from the dirty cache if available 432 db.lock.RLock() 433 dirty := db.dirties[hash] 434 db.lock.RUnlock() 435 436 if dirty != nil { 437 return dirty.rlp(), nil 438 } 439 // Content unavailable in memory, attempt to retrieve from disk 440 enc, err := db.diskdb.Get(hash[:]) 441 if err == nil && enc != nil { 442 if db.cleans != nil { 443 db.cleans.Set(string(hash[:]), enc) 444 memcacheCleanMissMeter.Mark(1) 445 memcacheCleanWriteMeter.Mark(int64(len(enc))) 446 } 447 } 448 return enc, err 449 } 450 451 // preimage retrieves a cached trie node pre-image from memory. If it cannot be 452 // found cached, the method queries the persistent database for the content. 453 func (db *Database) preimage(hash common.Hash) ([]byte, error) { 454 // Retrieve the node from cache if available 455 db.lock.RLock() 456 preimage := db.preimages[hash] 457 db.lock.RUnlock() 458 459 if preimage != nil { 460 return preimage, nil 461 } 462 // Content unavailable in memory, attempt to retrieve from disk 463 return db.diskdb.Get(db.secureKey(hash[:])) 464 } 465 466 // secureKey returns the database key for the preimage of key, as an ephemeral 467 // buffer. The caller must not hold onto the return value because it will become 468 // invalid on the next call. 469 func (db *Database) secureKey(key []byte) []byte { 470 buf := append(db.seckeybuf[:0], secureKeyPrefix...) 471 buf = append(buf, key...) 472 return buf 473 } 474 475 // Nodes retrieves the hashes of all the nodes cached within the memory database. 476 // This method is extremely expensive and should only be used to validate internal 477 // states in test code. 478 func (db *Database) Nodes() []common.Hash { 479 db.lock.RLock() 480 defer db.lock.RUnlock() 481 482 var hashes = make([]common.Hash, 0, len(db.dirties)) 483 for hash := range db.dirties { 484 if hash != (common.Hash{}) { // Special case for "root" references/nodes 485 hashes = append(hashes, hash) 486 } 487 } 488 return hashes 489 } 490 491 // Reference adds a new reference from a parent node to a child node. 492 func (db *Database) Reference(child common.Hash, parent common.Hash) { 493 db.lock.Lock() 494 defer db.lock.Unlock() 495 496 db.reference(child, parent) 497 } 498 499 // reference is the private locked version of Reference. 500 func (db *Database) reference(child common.Hash, parent common.Hash) { 501 // If the node does not exist, it's a node pulled from disk, skip 502 node, ok := db.dirties[child] 503 if !ok { 504 return 505 } 506 // If the reference already exists, only duplicate for roots 507 if db.dirties[parent].children == nil { 508 db.dirties[parent].children = make(map[common.Hash]uint16) 509 db.childrenSize += cachedNodeChildrenSize 510 } else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) { 511 return 512 } 513 node.parents++ 514 db.dirties[parent].children[child]++ 515 if db.dirties[parent].children[child] == 1 { 516 db.childrenSize += common.HashLength + 2 // uint16 counter 517 } 518 } 519 520 // Dereference removes an existing reference from a root node. 521 func (db *Database) Dereference(root common.Hash) { 522 // Sanity check to ensure that the meta-root is not removed 523 if root == (common.Hash{}) { 524 log.Error("Attempted to dereference the trie cache meta root") 525 return 526 } 527 db.lock.Lock() 528 defer db.lock.Unlock() 529 530 nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now() 531 db.dereference(root, common.Hash{}) 532 533 db.gcnodes += uint64(nodes - len(db.dirties)) 534 db.gcsize += storage - db.dirtiesSize 535 db.gctime += time.Since(start) 536 537 memcacheGCTimeTimer.Update(time.Since(start)) 538 memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize)) 539 memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties))) 540 541 log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), 542 "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) 543 } 544 545 // dereference is the private locked version of Dereference. 546 func (db *Database) dereference(child common.Hash, parent common.Hash) { 547 // Dereference the parent-child 548 node := db.dirties[parent] 549 550 if node.children != nil && node.children[child] > 0 { 551 node.children[child]-- 552 if node.children[child] == 0 { 553 delete(node.children, child) 554 db.childrenSize -= (common.HashLength + 2) // uint16 counter 555 } 556 } 557 // If the child does not exist, it's a previously committed node. 558 node, ok := db.dirties[child] 559 if !ok { 560 return 561 } 562 // If there are no more references to the child, delete it and cascade 563 if node.parents > 0 { 564 // This is a special cornercase where a node loaded from disk (i.e. not in the 565 // memcache any more) gets reinjected as a new node (short node split into full, 566 // then reverted into short), causing a cached node to have no parents. That is 567 // no problem in itself, but don't make maxint parents out of it. 568 node.parents-- 569 } 570 if node.parents == 0 { 571 // Remove the node from the flush-list 572 switch child { 573 case db.oldest: 574 db.oldest = node.flushNext 575 db.dirties[node.flushNext].flushPrev = common.Hash{} 576 case db.newest: 577 db.newest = node.flushPrev 578 db.dirties[node.flushPrev].flushNext = common.Hash{} 579 default: 580 db.dirties[node.flushPrev].flushNext = node.flushNext 581 db.dirties[node.flushNext].flushPrev = node.flushPrev 582 } 583 // Dereference all children and delete the node 584 for _, hash := range node.childs() { 585 db.dereference(hash, child) 586 } 587 delete(db.dirties, child) 588 db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) 589 if node.children != nil { 590 db.childrenSize -= cachedNodeChildrenSize 591 } 592 } 593 } 594 595 // Cap iteratively flushes old but still referenced trie nodes until the total 596 // memory usage goes below the given threshold. 597 // 598 // Note, this method is a non-synchronized mutator. It is unsafe to call this 599 // concurrently with other mutators. 600 func (db *Database) Cap(limit common.StorageSize) error { 601 // Create a database batch to flush persistent data out. It is important that 602 // outside code doesn't see an inconsistent state (referenced data removed from 603 // memory cache during commit but not yet in persistent storage). This is ensured 604 // by only uncaching existing data when the database write finalizes. 605 nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now() 606 batch := db.diskdb.NewBatch() 607 608 // db.dirtiesSize only contains the useful data in the cache, but when reporting 609 // the total memory consumption, the maintenance metadata is also needed to be 610 // counted. 611 size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize) 612 size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2)) 613 614 // If the preimage cache got large enough, push to disk. If it's still small 615 // leave for later to deduplicate writes. 616 flushPreimages := db.preimagesSize > 4*1024*1024 617 if flushPreimages { 618 for hash, preimage := range db.preimages { 619 if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil { 620 log.Error("Failed to commit preimage from trie database", "err", err) 621 return err 622 } 623 if batch.ValueSize() > ethdb.IdealBatchSize { 624 if err := batch.Write(); err != nil { 625 return err 626 } 627 batch.Reset() 628 } 629 } 630 } 631 // Keep committing nodes from the flush-list until we're below allowance 632 oldest := db.oldest 633 for size > limit && oldest != (common.Hash{}) { 634 // Fetch the oldest referenced node and push into the batch 635 node := db.dirties[oldest] 636 if err := batch.Put(oldest[:], node.rlp()); err != nil { 637 return err 638 } 639 // If we exceeded the ideal batch size, commit and reset 640 if batch.ValueSize() >= ethdb.IdealBatchSize { 641 if err := batch.Write(); err != nil { 642 log.Error("Failed to write flush list to disk", "err", err) 643 return err 644 } 645 batch.Reset() 646 } 647 // Iterate to the next flush item, or abort if the size cap was achieved. Size 648 // is the total size, including the useful cached data (hash -> blob), the 649 // cache item metadata, as well as external children mappings. 650 size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize) 651 if node.children != nil { 652 size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) 653 } 654 oldest = node.flushNext 655 } 656 // Flush out any remainder data from the last batch 657 if err := batch.Write(); err != nil { 658 log.Error("Failed to write flush list to disk", "err", err) 659 return err 660 } 661 // Write successful, clear out the flushed data 662 db.lock.Lock() 663 defer db.lock.Unlock() 664 665 if flushPreimages { 666 db.preimages = make(map[common.Hash][]byte) 667 db.preimagesSize = 0 668 } 669 for db.oldest != oldest { 670 node := db.dirties[db.oldest] 671 delete(db.dirties, db.oldest) 672 db.oldest = node.flushNext 673 674 db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) 675 if node.children != nil { 676 db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) 677 } 678 } 679 if db.oldest != (common.Hash{}) { 680 db.dirties[db.oldest].flushPrev = common.Hash{} 681 } 682 db.flushnodes += uint64(nodes - len(db.dirties)) 683 db.flushsize += storage - db.dirtiesSize 684 db.flushtime += time.Since(start) 685 686 memcacheFlushTimeTimer.Update(time.Since(start)) 687 memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize)) 688 memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties))) 689 690 log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start), 691 "flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) 692 693 return nil 694 } 695 696 // Commit iterates over all the children of a particular node, writes them out 697 // to disk, forcefully tearing down all references in both directions. As a side 698 // effect, all pre-images accumulated up to this point are also written. 699 // 700 // Note, this method is a non-synchronized mutator. It is unsafe to call this 701 // concurrently with other mutators. 702 func (db *Database) Commit(node common.Hash, report bool) error { 703 // Create a database batch to flush persistent data out. It is important that 704 // outside code doesn't see an inconsistent state (referenced data removed from 705 // memory cache during commit but not yet in persistent storage). This is ensured 706 // by only uncaching existing data when the database write finalizes. 707 start := time.Now() 708 batch := db.diskdb.NewBatch() 709 710 // Move all of the accumulated preimages into a write batch 711 for hash, preimage := range db.preimages { 712 if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil { 713 log.Error("Failed to commit preimage from trie database", "err", err) 714 return err 715 } 716 // If the batch is too large, flush to disk 717 if batch.ValueSize() > ethdb.IdealBatchSize { 718 if err := batch.Write(); err != nil { 719 return err 720 } 721 batch.Reset() 722 } 723 } 724 // Since we're going to replay trie node writes into the clean cache, flush out 725 // any batched pre-images before continuing. 726 if err := batch.Write(); err != nil { 727 return err 728 } 729 batch.Reset() 730 731 // Move the trie itself into the batch, flushing if enough data is accumulated 732 nodes, storage := len(db.dirties), db.dirtiesSize 733 734 uncacher := &cleaner{db} 735 if err := db.commit(node, batch, uncacher); err != nil { 736 log.Error("Failed to commit trie from trie database", "err", err) 737 return err 738 } 739 // Trie mostly committed to disk, flush any batch leftovers 740 if err := batch.Write(); err != nil { 741 log.Error("Failed to write trie to disk", "err", err) 742 return err 743 } 744 // Uncache any leftovers in the last batch 745 db.lock.Lock() 746 defer db.lock.Unlock() 747 748 batch.Replay(uncacher) 749 batch.Reset() 750 751 // Reset the storage counters and bumpd metrics 752 db.preimages = make(map[common.Hash][]byte) 753 db.preimagesSize = 0 754 755 memcacheCommitTimeTimer.Update(time.Since(start)) 756 memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize)) 757 memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties))) 758 759 logger := log.Info 760 if !report { 761 logger = log.Debug 762 } 763 logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime, 764 "gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize) 765 766 // Reset the garbage collection statistics 767 db.gcnodes, db.gcsize, db.gctime = 0, 0, 0 768 db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0 769 770 return nil 771 } 772 773 // commit is the private locked version of Commit. 774 func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error { 775 // If the node does not exist, it's a previously committed node 776 node, ok := db.dirties[hash] 777 if !ok { 778 return nil 779 } 780 for _, child := range node.childs() { 781 if err := db.commit(child, batch, uncacher); err != nil { 782 return err 783 } 784 } 785 if err := batch.Put(hash[:], node.rlp()); err != nil { 786 return err 787 } 788 // If we've reached an optimal batch size, commit and start over 789 if batch.ValueSize() >= ethdb.IdealBatchSize { 790 if err := batch.Write(); err != nil { 791 return err 792 } 793 db.lock.Lock() 794 batch.Replay(uncacher) 795 batch.Reset() 796 db.lock.Unlock() 797 } 798 return nil 799 } 800 801 // cleaner is a database batch replayer that takes a batch of write operations 802 // and cleans up the trie database from anything written to disk. 803 type cleaner struct { 804 db *Database 805 } 806 807 // Put reacts to database writes and implements dirty data uncaching. This is the 808 // post-processing step of a commit operation where the already persisted trie is 809 // removed from the dirty cache and moved into the clean cache. The reason behind 810 // the two-phase commit is to ensure ensure data availability while moving from 811 // memory to disk. 812 func (c *cleaner) Put(key []byte, rlp []byte) error { 813 hash := common.BytesToHash(key) 814 815 // If the node does not exist, we're done on this path 816 node, ok := c.db.dirties[hash] 817 if !ok { 818 return nil 819 } 820 // Node still exists, remove it from the flush-list 821 switch hash { 822 case c.db.oldest: 823 c.db.oldest = node.flushNext 824 c.db.dirties[node.flushNext].flushPrev = common.Hash{} 825 case c.db.newest: 826 c.db.newest = node.flushPrev 827 c.db.dirties[node.flushPrev].flushNext = common.Hash{} 828 default: 829 c.db.dirties[node.flushPrev].flushNext = node.flushNext 830 c.db.dirties[node.flushNext].flushPrev = node.flushPrev 831 } 832 // Remove the node from the dirty cache 833 delete(c.db.dirties, hash) 834 c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) 835 if node.children != nil { 836 c.db.dirtiesSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) 837 } 838 // Move the flushed node into the clean cache to prevent insta-reloads 839 if c.db.cleans != nil { 840 c.db.cleans.Set(string(hash[:]), rlp) 841 } 842 return nil 843 } 844 845 func (c *cleaner) Delete(key []byte) error { 846 panic("Not implemented") 847 } 848 849 // Size returns the current storage size of the memory cache in front of the 850 // persistent database layer. 851 func (db *Database) Size() (common.StorageSize, common.StorageSize) { 852 db.lock.RLock() 853 defer db.lock.RUnlock() 854 855 // db.dirtiesSize only contains the useful data in the cache, but when reporting 856 // the total memory consumption, the maintenance metadata is also needed to be 857 // counted. 858 var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize) 859 var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2)) 860 return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, db.preimagesSize 861 } 862 863 // verifyIntegrity is a debug method to iterate over the entire trie stored in 864 // memory and check whether every node is reachable from the meta root. The goal 865 // is to find any errors that might cause memory leaks and or trie nodes to go 866 // missing. 867 // 868 // This method is extremely CPU and memory intensive, only use when must. 869 func (db *Database) verifyIntegrity() { 870 // Iterate over all the cached nodes and accumulate them into a set 871 reachable := map[common.Hash]struct{}{{}: {}} 872 873 for child := range db.dirties[common.Hash{}].children { 874 db.accumulate(child, reachable) 875 } 876 // Find any unreachable but cached nodes 877 var unreachable []string 878 for hash, node := range db.dirties { 879 if _, ok := reachable[hash]; !ok { 880 unreachable = append(unreachable, fmt.Sprintf("%x: {Node: %v, Parents: %d, Prev: %x, Next: %x}", 881 hash, node.node, node.parents, node.flushPrev, node.flushNext)) 882 } 883 } 884 if len(unreachable) != 0 { 885 panic(fmt.Sprintf("trie cache memory leak: %v", unreachable)) 886 } 887 } 888 889 // accumulate iterates over the trie defined by hash and accumulates all the 890 // cached children found in memory. 891 func (db *Database) accumulate(hash common.Hash, reachable map[common.Hash]struct{}) { 892 // Mark the node reachable if present in the memory cache 893 node, ok := db.dirties[hash] 894 if !ok { 895 return 896 } 897 reachable[hash] = struct{}{} 898 899 // Iterate over all the children and accumulate them too 900 for _, child := range node.childs() { 901 db.accumulate(child, reachable) 902 } 903 }