// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"sync"
	"time"

	"github.com/allegro/bigcache"
	"github.com/intfoundation/intchain/common"
	"github.com/intfoundation/intchain/intdb"
	"github.com/intfoundation/intchain/log"
	"github.com/intfoundation/intchain/metrics"
	"github.com/intfoundation/intchain/rlp"
)

// Metrics gauging the clean-cache hit rate and the time/volume spent in the
// flush, garbage-collection and commit phases of the trie database.
var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// secureKeyPrefix is the database key prefix used to store trie node preimages.
var secureKeyPrefix = []byte("secure-key-")

// secureKeyLength is the length of the above prefix + 32byte hash.
const secureKeyLength = 11 + 32

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
//
// Note, the trie Database is **not** thread safe in its mutations, but it **is**
// thread safe in providing individual, independent node access. The rationale
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
	diskdb intdb.KeyValueStore // Persistent storage for matured trie nodes

	cleans  *bigcache.BigCache          // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
	seckeybuf [secureKeyLength]byte  // Ephemeral buffer for calculating preimage keys

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize   common.StorageSize // Storage size of the dirty node cache (exc. flushlist)
	preimagesSize common.StorageSize // Storage size of the preimages cache

	lock sync.RWMutex // Guards reads of dirties/preimages against concurrent mutation
}

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time store them
// in the same cache fields).
type rawNode []byte

// rawNode only ever lives in the write-layer cache, so the live-trie node
// interface methods are deliberately unreachable.
func (n rawNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

// rawFullNode only ever lives in the write-layer cache, so the live-trie node
// interface methods are deliberately unreachable.
func (n rawFullNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawFullNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

// EncodeRLP encodes the node, substituting nilValueNode for nil children so the
// output matches the canonical full node RLP encoding.
func (n rawFullNode) EncodeRLP(w io.Writer) error {
	var nodes [17]node

	for i, child := range n {
		if child != nil {
			nodes[i] = child
		} else {
			nodes[i] = nilValueNode
		}
	}
	return rlp.Encode(w, nodes)
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

// rawShortNode only ever lives in the write-layer cache, so the live-trie node
// interface methods are deliberately unreachable.
func (n rawShortNode) canUnload(uint16, uint16) bool {
	panic("this should never end up in a live trie")
}
func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached node in the
// memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// rlp returns the raw rlp encoded blob of the cached node, either directly from
// the cache, or by regenerating it from the collapsed node.
153 func (n *cachedNode) rlp() []byte { 154 if node, ok := n.node.(rawNode); ok { 155 return node 156 } 157 blob, err := rlp.EncodeToBytes(n.node) 158 if err != nil { 159 panic(err) 160 } 161 return blob 162 } 163 164 // obj returns the decoded and expanded trie node, either directly from the cache, 165 // or by regenerating it from the rlp encoded blob. 166 func (n *cachedNode) obj(hash common.Hash) node { 167 if node, ok := n.node.(rawNode); ok { 168 return mustDecodeNode(hash[:], node) 169 } 170 return expandNode(hash[:], n.node) 171 } 172 173 // childs returns all the tracked children of this node, both the implicit ones 174 // from inside the node as well as the explicit ones from outside the node. 175 func (n *cachedNode) childs() []common.Hash { 176 children := make([]common.Hash, 0, 16) 177 for child := range n.children { 178 children = append(children, child) 179 } 180 if _, ok := n.node.(rawNode); !ok { 181 gatherChildren(n.node, &children) 182 } 183 return children 184 } 185 186 // gatherChildren traverses the node hierarchy of a collapsed storage node and 187 // retrieves all the hashnode children. 188 func gatherChildren(n node, children *[]common.Hash) { 189 switch n := n.(type) { 190 case *rawShortNode: 191 gatherChildren(n.Val, children) 192 193 case rawFullNode: 194 for i := 0; i < 16; i++ { 195 gatherChildren(n[i], children) 196 } 197 case hashNode: 198 *children = append(*children, common.BytesToHash(n)) 199 200 case valueNode, nil: 201 202 default: 203 panic(fmt.Sprintf("unknown node type: %T", n)) 204 } 205 } 206 207 // simplifyNode traverses the hierarchy of an expanded memory node and discards 208 // all the internal caches, returning a node that only contains the raw data. 
209 func simplifyNode(n node) node { 210 switch n := n.(type) { 211 case *shortNode: 212 // Short nodes discard the flags and cascade 213 return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} 214 215 case *fullNode: 216 // Full nodes discard the flags and cascade 217 node := rawFullNode(n.Children) 218 for i := 0; i < len(node); i++ { 219 if node[i] != nil { 220 node[i] = simplifyNode(node[i]) 221 } 222 } 223 return node 224 225 case valueNode, hashNode, rawNode: 226 return n 227 228 default: 229 panic(fmt.Sprintf("unknown node type: %T", n)) 230 } 231 } 232 233 // expandNode traverses the node hierarchy of a collapsed storage node and converts 234 // all fields and keys into expanded memory form. 235 func expandNode(hash hashNode, n node) node { 236 switch n := n.(type) { 237 case *rawShortNode: 238 // Short nodes need key and child expansion 239 return &shortNode{ 240 Key: compactToHex(n.Key), 241 Val: expandNode(nil, n.Val), 242 flags: nodeFlag{ 243 hash: hash, 244 }, 245 } 246 247 case rawFullNode: 248 // Full nodes need child expansion 249 node := &fullNode{ 250 flags: nodeFlag{ 251 hash: hash, 252 }, 253 } 254 for i := 0; i < len(node.Children); i++ { 255 if n[i] != nil { 256 node.Children[i] = expandNode(nil, n[i]) 257 } 258 } 259 return node 260 261 case valueNode, hashNode: 262 return n 263 264 default: 265 panic(fmt.Sprintf("unknown node type: %T", n)) 266 } 267 } 268 269 // trienodeHasher is a struct to be used with BigCache, which uses a Hasher to 270 // determine which shard to place an entry into. It's not a cryptographic hash, 271 // just to provide a bit of anti-collision (default is FNV64a). 272 // 273 // Since trie keys are already hashes, we can just use the key directly to 274 // map shard id. 275 type trienodeHasher struct{} 276 277 // Sum64 implements the bigcache.Hasher interface. 
func (t trienodeHasher) Sum64(key string) uint64 {
	// Trie keys are hashes, so their first 8 bytes are already uniformly
	// distributed and can serve directly as the shard selector.
	return binary.BigEndian.Uint64([]byte(key))
}

// NewDatabase creates a new trie database to store ephemeral trie content before
// its written out to disk or garbage collected. No read cache is created, so all
// data retrievals will hit the underlying disk database.
func NewDatabase(diskdb intdb.KeyValueStore) *Database {
	return NewDatabaseWithCache(diskdb, 0)
}

// NewDatabaseWithCache creates a new trie database to store ephemeral trie content
// before its written out to disk or garbage collected. It also acts as a read cache
// for nodes loaded from disk. A cache value of 0 disables the read cache.
func NewDatabaseWithCache(diskdb intdb.KeyValueStore, cache int) *Database {
	var cleans *bigcache.BigCache
	if cache > 0 {
		// NOTE(review): cache presumably is a size in MB (bigcache's
		// HardMaxCacheSize unit) — confirm against callers. A construction
		// error is deliberately ignored: cleans stays nil and simply
		// disables the read cache.
		cleans, _ = bigcache.NewBigCache(bigcache.Config{
			Shards:             1024,
			LifeWindow:         time.Hour,
			MaxEntriesInWindow: cache * 1024,
			MaxEntrySize:       512,
			HardMaxCacheSize:   cache,
			Hasher:             trienodeHasher{},
		})
	}
	return &Database{
		diskdb: diskdb,
		cleans: cleans,
		// Seed the dirty cache with the metaroot entry (zero hash), which
		// anchors all in-memory trie roots via reference counting.
		dirties:   map[common.Hash]*cachedNode{{}: {}},
		preimages: make(map[common.Hash][]byte),
	}
}

// DiskDB retrieves the persistent storage backing the trie database.
func (db *Database) DiskDB() intdb.Reader {
	return db.diskdb
}

// InsertBlob writes a new reference tracked blob to the memory database if it's
// yet unknown. This method should only be used for non-trie nodes that require
// reference counting, since trie nodes are garbage collected directly through
// their embedded children.
func (db *Database) InsertBlob(hash common.Hash, blob []byte) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.insert(hash, blob, rawNode(blob))
}

// insert inserts a collapsed trie node into the memory database. This method is
// a more generic version of InsertBlob, supporting both raw blob insertions as
// well ex trie node insertions. The blob must always be specified to allow proper
// size tracking.
//
// Note, this method assumes the database's lock is held (see InsertBlob)!
func (db *Database) insert(hash common.Hash, blob []byte, node node) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	// Create the cached entry for this node, linked at the flush-list tail
	entry := &cachedNode{
		node:      simplifyNode(node),
		size:      uint16(len(blob)),
		flushPrev: db.newest,
	}
	// Bump the reference count of every child already in the dirty cache
	for _, child := range entry.childs() {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	}
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}

// insertPreimage writes a new trie node pre-image to the memory database if it's
// yet unknown. The method will make a copy of the slice.
//
// Note, this method assumes that the database's lock is held!
func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
	if _, ok := db.preimages[hash]; ok {
		return
	}
	db.preimages[hash] = common.CopyBytes(preimage)
	db.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
}

// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
373 func (db *Database) node(hash common.Hash) node { 374 // Retrieve the node from the clean cache if available 375 if db.cleans != nil { 376 if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil { 377 memcacheCleanHitMeter.Mark(1) 378 memcacheCleanReadMeter.Mark(int64(len(enc))) 379 return mustDecodeNode(hash[:], enc) 380 } 381 } 382 // Retrieve the node from the dirty cache if available 383 db.lock.RLock() 384 dirty := db.dirties[hash] 385 db.lock.RUnlock() 386 387 if dirty != nil { 388 return dirty.obj(hash) 389 } 390 // Content unavailable in memory, attempt to retrieve from disk 391 enc, err := db.diskdb.Get(hash[:]) 392 if err != nil || enc == nil { 393 return nil 394 } 395 if db.cleans != nil { 396 db.cleans.Set(string(hash[:]), enc) 397 memcacheCleanMissMeter.Mark(1) 398 memcacheCleanWriteMeter.Mark(int64(len(enc))) 399 } 400 return mustDecodeNode(hash[:], enc) 401 } 402 403 // Node retrieves an encoded cached trie node from memory. If it cannot be found 404 // cached, the method queries the persistent database for the content. 
405 func (db *Database) Node(hash common.Hash) ([]byte, error) { 406 // It doens't make sense to retrieve the metaroot 407 if hash == (common.Hash{}) { 408 return nil, errors.New("not found") 409 } 410 // Retrieve the node from the clean cache if available 411 if db.cleans != nil { 412 if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil { 413 memcacheCleanHitMeter.Mark(1) 414 memcacheCleanReadMeter.Mark(int64(len(enc))) 415 return enc, nil 416 } 417 } 418 // Retrieve the node from the dirty cache if available 419 db.lock.RLock() 420 dirty := db.dirties[hash] 421 db.lock.RUnlock() 422 423 if dirty != nil { 424 return dirty.rlp(), nil 425 } 426 // Content unavailable in memory, attempt to retrieve from disk 427 enc, err := db.diskdb.Get(hash[:]) 428 if err == nil && enc != nil { 429 if db.cleans != nil { 430 db.cleans.Set(string(hash[:]), enc) 431 memcacheCleanMissMeter.Mark(1) 432 memcacheCleanWriteMeter.Mark(int64(len(enc))) 433 } 434 } 435 return enc, err 436 } 437 438 // preimage retrieves a cached trie node pre-image from memory. If it cannot be 439 // found cached, the method queries the persistent database for the content. 440 func (db *Database) preimage(hash common.Hash) ([]byte, error) { 441 // Retrieve the node from cache if available 442 db.lock.RLock() 443 preimage := db.preimages[hash] 444 db.lock.RUnlock() 445 446 if preimage != nil { 447 return preimage, nil 448 } 449 // Content unavailable in memory, attempt to retrieve from disk 450 return db.diskdb.Get(db.secureKey(hash[:])) 451 } 452 453 // secureKey returns the database key for the preimage of key, as an ephemeral 454 // buffer. The caller must not hold onto the return value because it will become 455 // invalid on the next call. 456 func (db *Database) secureKey(key []byte) []byte { 457 //buf := append(db.seckeybuf[:0], secureKeyPrefix...) 458 //buf = append(buf, key...) 459 buf := append(secureKeyPrefix[:], key...) 
460 return buf 461 } 462 463 // Nodes retrieves the hashes of all the nodes cached within the memory database. 464 // This method is extremely expensive and should only be used to validate internal 465 // states in test code. 466 func (db *Database) Nodes() []common.Hash { 467 db.lock.RLock() 468 defer db.lock.RUnlock() 469 470 var hashes = make([]common.Hash, 0, len(db.dirties)) 471 for hash := range db.dirties { 472 if hash != (common.Hash{}) { // Special case for "root" references/nodes 473 hashes = append(hashes, hash) 474 } 475 } 476 return hashes 477 } 478 479 // Reference adds a new reference from a parent node to a child node. 480 func (db *Database) Reference(child common.Hash, parent common.Hash) { 481 db.lock.Lock() 482 defer db.lock.Unlock() 483 484 db.reference(child, parent) 485 } 486 487 // reference is the private locked version of Reference. 488 func (db *Database) reference(child common.Hash, parent common.Hash) { 489 // If the node does not exist, it's a node pulled from disk, skip 490 node, ok := db.dirties[child] 491 if !ok { 492 return 493 } 494 // If the reference already exists, only duplicate for roots 495 if db.dirties[parent].children == nil { 496 db.dirties[parent].children = make(map[common.Hash]uint16) 497 } else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) { 498 return 499 } 500 node.parents++ 501 db.dirties[parent].children[child]++ 502 } 503 504 // Dereference removes an existing reference from a root node. 
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	// Snapshot the counters so the delta can be attributed to this GC run
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root, common.Hash{})

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}

// dereference is the private locked version of Dereference. It drops one
// reference from parent to child and, if the child's reference count hits
// zero, unlinks the child from the flush-list and cascades to its children.
func (db *Database) dereference(child common.Hash, parent common.Hash) {
	// Dereference the parent-child
	node := db.dirties[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
		}
	}
	// If the child does not exist, it's a previously committed node.
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If there are no more references to the child, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.dirties[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.dirties[node.flushPrev].flushNext = common.Hash{}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		for _, hash := range node.childs() {
			db.dereference(hash, child)
		}
		delete(db.dirties, child)
		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. For every useful node, we track 2 extra hashes as the flushlist.
	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*2*common.HashLength)

	// If the preimage cache got large enough, push to disk. If it's still small
	// leave for later to deduplicate writes.
	flushPreimages := db.preimagesSize > 4*1024*1024
	if flushPreimages {
		for hash, preimage := range db.preimages {
			if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
				log.Error("Failed to commit preimage from trie database", "err", err)
				return err
			}
			if batch.ValueSize() > intdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return err
				}
				batch.Reset()
			}
		}
	}
	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		if err := batch.Put(oldest[:], node.rlp()); err != nil {
			return err
		}
		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= intdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including both the useful cached data (hash -> blob), as
		// well as the flushlist metadata (2*hash). When flushing items from the cache,
		// we need to reduce both.
		size -= common.StorageSize(3*common.HashLength + int(node.size))
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data
	db.lock.Lock()
	defer db.lock.Unlock()

	if flushPreimages {
		db.preimages = make(map[common.Hash][]byte)
		db.preimagesSize = 0
	}
	// Walk the flush-list head forward, dropping everything that was persisted
	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move all of the accumulated preimages into a write batch
	for hash, preimage := range db.preimages {
		if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
			log.Error("Failed to commit preimage from trie database", "err", err)
			return err
		}
		// If the batch is too large, flush to disk
		if batch.ValueSize() > intdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}
	// Since we're going to replay trie node writes into the clean cache, flush out
	// any batched pre-images before continuing.
	if err := batch.Write(); err != nil {
		return err
	}
	batch.Reset()

	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	db.lock.Lock()
	defer db.lock.Unlock()

	batch.Replay(uncacher)
	batch.Reset()

	// Reset the storage counters and bumped metrics
	db.preimages = make(map[common.Hash][]byte)
	db.preimagesSize = 0

	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}

// commit is the private locked version of Commit. It recursively writes the
// children of hash before the node itself, replaying each full batch into the
// uncacher so persisted entries migrate from the dirty to the clean cache.
func (db *Database) commit(hash common.Hash, batch intdb.Batch, uncacher *cleaner) error {
	// If the node does not exist, it's a previously committed node
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	for _, child := range node.childs() {
		if err := db.commit(child, batch, uncacher); err != nil {
			return err
		}
	}
	if err := batch.Put(hash[:], node.rlp()); err != nil {
		return err
	}
	// If we've reached an optimal batch size, commit and start over
	if batch.ValueSize() >= intdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		db.lock.Lock()
		batch.Replay(uncacher)
		batch.Reset()
		db.lock.Unlock()
	}
	return nil
}

// cleaner is a database batch replayer that takes a batch of write operations
// and cleans up the trie database from anything written to disk.
type cleaner struct {
	db *Database
}

// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
// the two-phase commit is to ensure data availability while moving from
// memory to disk.
func (c *cleaner) Put(key []byte, rlp []byte) error {
	hash := common.BytesToHash(key)

	// If the node does not exist, we're done on this path
	node, ok := c.db.dirties[hash]
	if !ok {
		return nil
	}
	// Node still exists, remove it from the flush-list
	switch hash {
	case c.db.oldest:
		c.db.oldest = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = common.Hash{}
	case c.db.newest:
		c.db.newest = node.flushPrev
		c.db.dirties[node.flushPrev].flushNext = common.Hash{}
	default:
		c.db.dirties[node.flushPrev].flushNext = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	// Remove the node from the dirty cache
	delete(c.db.dirties, hash)
	c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))

	// Move the flushed node into the clean cache to prevent insta-reloads
	if c.db.cleans != nil {
		c.db.cleans.Set(string(hash[:]), rlp)
	}
	return nil
}

// Delete is part of the batch replay interface but is never issued by the
// commit path, which only batches Put operations.
func (c *cleaner) Delete(key []byte) error {
	panic("Not implemented")
}

// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. For every useful node, we track 2 extra hashes as the flushlist.
	var flushlistSize = common.StorageSize((len(db.dirties) - 1) * 2 * common.HashLength)
	return db.dirtiesSize + flushlistSize, db.preimagesSize
}

// verifyIntegrity is a debug method to iterate over the entire trie stored in
// memory and check whether every node is reachable from the meta root. The goal
// is to find any errors that might cause memory leaks and or trie nodes to go
// missing.
//
// This method is extremely CPU and memory intensive, only use when must.
func (db *Database) verifyIntegrity() {
	// Iterate over all the cached nodes and accumulate them into a set
	reachable := map[common.Hash]struct{}{{}: {}}

	for child := range db.dirties[common.Hash{}].children {
		db.accumulate(child, reachable)
	}
	// Find any unreachable but cached nodes
	var unreachable []string
	for hash, node := range db.dirties {
		if _, ok := reachable[hash]; !ok {
			unreachable = append(unreachable, fmt.Sprintf("%x: {Node: %v, Parents: %d, Prev: %x, Next: %x}",
				hash, node.node, node.parents, node.flushPrev, node.flushNext))
		}
	}
	if len(unreachable) != 0 {
		panic(fmt.Sprintf("trie cache memory leak: %v", unreachable))
	}
}

// accumulate iterates over the trie defined by hash and accumulates all the
// cached children found in memory.
862 func (db *Database) accumulate(hash common.Hash, reachable map[common.Hash]struct{}) { 863 // Mark the node reachable if present in the memory cache 864 node, ok := db.dirties[hash] 865 if !ok { 866 return 867 } 868 reachable[hash] = struct{}{} 869 870 // Iterate over all the children and accumulate them too 871 for _, child := range node.childs() { 872 db.accumulate(child, reachable) 873 } 874 } 875 876 var proposedInEpochPrefix = []byte("proposed-in-epoch-") 877 878 func encodeUint64(number uint64) []byte { 879 enc := make([]byte, 8) 880 binary.BigEndian.PutUint64(enc, number) 881 return enc 882 } 883 884 func decodeUint64(raw []byte) uint64 { 885 return binary.BigEndian.Uint64(raw) 886 } 887 888 func (db *Database) MarkProposedInEpoch(address common.Address, epoch uint64) error { 889 return db.diskdb.Put(append( 890 append(proposedInEpochPrefix, address.Bytes()...), encodeUint64(epoch)...), 891 encodeUint64(1)) 892 } 893 894 func (db *Database) CheckProposedInEpoch(address common.Address, epoch uint64) bool { 895 _, err := db.diskdb.Get(append(append(proposedInEpochPrefix, address.Bytes()...), encodeUint64(epoch)...)) 896 if err != nil { 897 return false 898 } 899 return true 900 }