// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"
	"io"
	"reflect"
	"runtime"
	"sync"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/unicornultrafoundation/go-u2u/common"
	"github.com/unicornultrafoundation/go-u2u/core/rawdb"
	"github.com/unicornultrafoundation/go-u2u/ethdb"
	"github.com/unicornultrafoundation/go-u2u/log"
	"github.com/unicornultrafoundation/go-u2u/metrics"
	"github.com/unicornultrafoundation/go-u2u/rlp"
)

// Metrics tracking the hit rates of the two in-memory caches (the clean cache
// of persisted nodes and the dirty cache of pending ones) as well as the cost
// of flush, garbage-collection and commit operations.
var (
	// Clean cache: immutable, already-persisted node RLPs.
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	// Dirty cache: reference-counted, not-yet-persisted nodes.
	memcacheDirtyHitMeter   = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
	memcacheDirtyMissMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
	memcacheDirtyReadMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
	memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)

	// Cap (flush-list) operation costs.
	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	// Dereference (garbage collection) operation costs.
	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	// Commit operation costs.
	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
//
// Note, the trie Database is **not** thread safe in its mutations, but it **is**
// thread safe in providing individual, independent node access. The rationale
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
	diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes

	greedyGC bool                        // run gc greedy or not (greedy also deletes committed nodes from disk on Dereference)
	cleans   *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties  map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
	oldest   common.Hash                 // Oldest tracked node, flush-list head
	newest   common.Hash                 // Newest tracked node, flush-list tail

	preimages map[common.Hash][]byte // Preimages of nodes from the secure trie

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize   common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize  common.StorageSize // Storage size of the external children tracking
	preimagesSize common.StorageSize // Storage size of the preimages cache

	lock sync.RWMutex // Guards reads of dirties/preimages against mutation; mutations themselves are not concurrency safe
}

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time store them
// in the same cache fields).
type rawNode []byte

func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// EncodeRLP writes the stored blob verbatim: it is already valid RLP.
func (n rawNode) EncodeRLP(w io.Writer) error {
	_, err := w.Write(n)
	return err
}

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

func (n rawFullNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// EncodeRLP encodes the 17 children, substituting the canonical nil value node
// for empty slots so the encoding matches a regular fullNode.
func (n rawFullNode) EncodeRLP(w io.Writer) error {
	var nodes [17]node

	for i, child := range n {
		if child != nil {
			nodes[i] = child
		} else {
			nodes[i] = nilValueNode
		}
	}
	return rlp.Encode(w, nodes)
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list

	// commited (sic, misspelling of "committed" — renaming would touch every
	// user of the field) is set by the greedy uncacher when a commit persists
	// this node: the entry is retained in the dirty cache so a later
	// Dereference can also delete the node from disk, and Cap skips
	// re-writing it.
	commited bool
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())

// cachedNodeChildrenSize is the raw size of an initialized but empty external
// reference map.
162 const cachedNodeChildrenSize = 48 163 164 // rlp returns the raw rlp encoded blob of the cached trie node, either directly 165 // from the cache, or by regenerating it from the collapsed node. 166 func (n *cachedNode) rlp() []byte { 167 if node, ok := n.node.(rawNode); ok { 168 return node 169 } 170 blob, err := rlp.EncodeToBytes(n.node) 171 if err != nil { 172 panic(err) 173 } 174 return blob 175 } 176 177 // obj returns the decoded and expanded trie node, either directly from the cache, 178 // or by regenerating it from the rlp encoded blob. 179 func (n *cachedNode) obj(hash common.Hash) node { 180 if node, ok := n.node.(rawNode); ok { 181 return mustDecodeNode(hash[:], node) 182 } 183 return expandNode(hash[:], n.node) 184 } 185 186 // forChilds invokes the callback for all the tracked children of this node, 187 // both the implicit ones from inside the node as well as the explicit ones 188 // from outside the node. 189 func (n *cachedNode) forChilds(onChild func(hash common.Hash)) { 190 for child := range n.children { 191 onChild(child) 192 } 193 if _, ok := n.node.(rawNode); !ok { 194 forGatherChildren(n.node, onChild) 195 } 196 } 197 198 // forGatherChildren traverses the node hierarchy of a collapsed storage node and 199 // invokes the callback for all the hashnode children. 200 func forGatherChildren(n node, onChild func(hash common.Hash)) { 201 switch n := n.(type) { 202 case *rawShortNode: 203 forGatherChildren(n.Val, onChild) 204 case rawFullNode: 205 for i := 0; i < 16; i++ { 206 forGatherChildren(n[i], onChild) 207 } 208 case hashNode: 209 onChild(common.BytesToHash(n)) 210 case valueNode, nil, rawNode: 211 default: 212 panic(fmt.Sprintf("unknown node type: %T", n)) 213 } 214 } 215 216 // simplifyNode traverses the hierarchy of an expanded memory node and discards 217 // all the internal caches, returning a node that only contains the raw data. 
218 func simplifyNode(n node) node { 219 switch n := n.(type) { 220 case *shortNode: 221 // Short nodes discard the flags and cascade 222 return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} 223 224 case *fullNode: 225 // Full nodes discard the flags and cascade 226 node := rawFullNode(n.Children) 227 for i := 0; i < len(node); i++ { 228 if node[i] != nil { 229 node[i] = simplifyNode(node[i]) 230 } 231 } 232 return node 233 234 case valueNode, hashNode, rawNode: 235 return n 236 237 default: 238 panic(fmt.Sprintf("unknown node type: %T", n)) 239 } 240 } 241 242 // expandNode traverses the node hierarchy of a collapsed storage node and converts 243 // all fields and keys into expanded memory form. 244 func expandNode(hash hashNode, n node) node { 245 switch n := n.(type) { 246 case *rawShortNode: 247 // Short nodes need key and child expansion 248 return &shortNode{ 249 Key: compactToHex(n.Key), 250 Val: expandNode(nil, n.Val), 251 flags: nodeFlag{ 252 hash: hash, 253 }, 254 } 255 256 case rawFullNode: 257 // Full nodes need child expansion 258 node := &fullNode{ 259 flags: nodeFlag{ 260 hash: hash, 261 }, 262 } 263 for i := 0; i < len(node.Children); i++ { 264 if n[i] != nil { 265 node.Children[i] = expandNode(nil, n[i]) 266 } 267 } 268 return node 269 270 case valueNode, hashNode: 271 return n 272 273 default: 274 panic(fmt.Sprintf("unknown node type: %T", n)) 275 } 276 } 277 278 // Config defines all necessary options for database. 279 type Config struct { 280 Cache int // Memory allowance (MB) to use for caching trie nodes in memory 281 Journal string // Journal of clean cache to survive node restarts 282 Preimages bool // Flag whether the preimage of trie key is recorded 283 GreedyGC bool // "light" or "greedy" GC 284 } 285 286 // NewDatabase creates a new trie database to store ephemeral trie content before 287 // its written out to disk or garbage collected. No read cache is created, so all 288 // data retrievals will hit the underlying disk database. 
func NewDatabase(diskdb ethdb.KeyValueStore) *Database {
	return NewDatabaseWithConfig(diskdb, nil)
}

// NewDatabaseWithConfig creates a new trie database to store ephemeral trie content
// before its written out to disk or garbage collected. It also acts as a read cache
// for nodes loaded from disk.
func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database {
	// Set up the clean cache only when a non-zero allowance is configured,
	// optionally restoring its previous content from the journal file.
	var cleans *fastcache.Cache
	if config != nil && config.Cache > 0 {
		if config.Journal == "" {
			cleans = fastcache.New(config.Cache * 1024 * 1024)
		} else {
			cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024)
		}
	}
	// The dirty cache is seeded with the metaroot entry (the zero hash), which
	// acts as the sentinel parent for all externally referenced roots.
	db := &Database{
		diskdb: diskdb,
		cleans: cleans,
		dirties: map[common.Hash]*cachedNode{{}: {
			children: make(map[common.Hash]uint16),
		}},
	}
	if config != nil {
		db.greedyGC = config.GreedyGC
	}
	if config == nil || config.Preimages { // TODO(karalabe): Flip to default off in the future
		db.preimages = make(map[common.Hash][]byte)
	}
	return db
}

// DiskDB retrieves the persistent storage backing the trie database.
func (db *Database) DiskDB() ethdb.KeyValueStore {
	return db.diskdb
}

// insert inserts a collapsed trie node into the memory database.
// The blob size must be specified to allow proper size tracking.
// All nodes inserted by this function will be reference tracked
// and in theory should only used for **trie nodes** insertion.
func (db *Database) insert(hash common.Hash, size int, node node) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	memcacheDirtyWriteMeter.Mark(int64(size))

	// Create the cached entry for this node, linked onto the flush-list tail.
	entry := &cachedNode{
		node:      simplifyNode(node),
		size:      uint16(size),
		flushPrev: db.newest,
	}
	// Bump the reference count of every already-cached child of this node.
	entry.forChilds(func(child common.Hash) {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	})
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}

// insertPreimage writes a new trie node pre-image to the memory database if it's
// yet unknown. The method will NOT make a copy of the slice,
// only use if the preimage will NOT be changed later on.
//
// Note, this method assumes that the database's lock is held!
func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
	// Short circuit if preimage collection is disabled
	if db.preimages == nil {
		return
	}
	// Track the preimage if a yet unknown one
	if _, ok := db.preimages[hash]; ok {
		return
	}
	db.preimages[hash] = preimage
	db.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
}

// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
379 func (db *Database) node(hash common.Hash) node { 380 // Retrieve the node from the clean cache if available 381 if db.cleans != nil { 382 if enc := db.cleans.Get(nil, hash[:]); enc != nil { 383 memcacheCleanHitMeter.Mark(1) 384 memcacheCleanReadMeter.Mark(int64(len(enc))) 385 return mustDecodeNode(hash[:], enc) 386 } 387 } 388 // Retrieve the node from the dirty cache if available 389 db.lock.RLock() 390 dirty := db.dirties[hash] 391 db.lock.RUnlock() 392 393 if dirty != nil { 394 memcacheDirtyHitMeter.Mark(1) 395 memcacheDirtyReadMeter.Mark(int64(dirty.size)) 396 return dirty.obj(hash) 397 } 398 memcacheDirtyMissMeter.Mark(1) 399 400 // Content unavailable in memory, attempt to retrieve from disk 401 enc, err := db.diskdb.Get(hash[:]) 402 if err != nil || enc == nil { 403 return nil 404 } 405 if db.cleans != nil { 406 db.cleans.Set(hash[:], enc) 407 memcacheCleanMissMeter.Mark(1) 408 memcacheCleanWriteMeter.Mark(int64(len(enc))) 409 } 410 return mustDecodeNode(hash[:], enc) 411 } 412 413 // Node retrieves an encoded cached trie node from memory. If it cannot be found 414 // cached, the method queries the persistent database for the content. 
415 func (db *Database) Node(hash common.Hash) ([]byte, error) { 416 // It doesn't make sense to retrieve the metaroot 417 if hash == (common.Hash{}) { 418 return nil, errors.New("not found") 419 } 420 // Retrieve the node from the clean cache if available 421 if db.cleans != nil { 422 if enc := db.cleans.Get(nil, hash[:]); enc != nil { 423 memcacheCleanHitMeter.Mark(1) 424 memcacheCleanReadMeter.Mark(int64(len(enc))) 425 return enc, nil 426 } 427 } 428 // Retrieve the node from the dirty cache if available 429 db.lock.RLock() 430 dirty := db.dirties[hash] 431 db.lock.RUnlock() 432 433 if dirty != nil { 434 memcacheDirtyHitMeter.Mark(1) 435 memcacheDirtyReadMeter.Mark(int64(dirty.size)) 436 return dirty.rlp(), nil 437 } 438 memcacheDirtyMissMeter.Mark(1) 439 440 // Content unavailable in memory, attempt to retrieve from disk 441 enc := rawdb.ReadTrieNode(db.diskdb, hash) 442 if len(enc) != 0 { 443 if db.cleans != nil { 444 db.cleans.Set(hash[:], enc) 445 memcacheCleanMissMeter.Mark(1) 446 memcacheCleanWriteMeter.Mark(int64(len(enc))) 447 } 448 return enc, nil 449 } 450 return nil, errors.New("not found") 451 } 452 453 // preimage retrieves a cached trie node pre-image from memory. If it cannot be 454 // found cached, the method queries the persistent database for the content. 455 func (db *Database) preimage(hash common.Hash) []byte { 456 // Short circuit if preimage collection is disabled 457 if db.preimages == nil { 458 return nil 459 } 460 // Retrieve the node from cache if available 461 db.lock.RLock() 462 preimage := db.preimages[hash] 463 db.lock.RUnlock() 464 465 if preimage != nil { 466 return preimage 467 } 468 return rawdb.ReadPreimage(db.diskdb, hash) 469 } 470 471 // Nodes retrieves the hashes of all the nodes cached within the memory database. 472 // This method is extremely expensive and should only be used to validate internal 473 // states in test code. 
474 func (db *Database) Nodes() []common.Hash { 475 db.lock.RLock() 476 defer db.lock.RUnlock() 477 478 var hashes = make([]common.Hash, 0, len(db.dirties)) 479 for hash := range db.dirties { 480 if hash != (common.Hash{}) { // Special case for "root" references/nodes 481 hashes = append(hashes, hash) 482 } 483 } 484 return hashes 485 } 486 487 // Reference adds a new reference from a parent node to a child node. 488 // This function is used to add reference between internal trie node 489 // and external node(e.g. storage trie root), all internal trie nodes 490 // are referenced together by database itself. 491 func (db *Database) Reference(child common.Hash, parent common.Hash) { 492 db.lock.Lock() 493 defer db.lock.Unlock() 494 495 db.reference(child, parent) 496 } 497 498 // reference is the private locked version of Reference. 499 func (db *Database) reference(child common.Hash, parent common.Hash) { 500 // If the node does not exist, it's a node pulled from disk, skip 501 node, ok := db.dirties[child] 502 if !ok { 503 return 504 } 505 // If the reference already exists, only duplicate for roots 506 if db.dirties[parent].children == nil { 507 db.dirties[parent].children = make(map[common.Hash]uint16) 508 db.childrenSize += cachedNodeChildrenSize 509 } else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) { 510 return 511 } 512 node.parents++ 513 db.dirties[parent].children[child]++ 514 if db.dirties[parent].children[child] == 1 { 515 db.childrenSize += common.HashLength + 2 // uint16 counter 516 } 517 } 518 519 // Dereference removes an existing reference from a root node. 
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()
	db.dereference(batch, root, common.Hash{})

	// Flush out all accumulated data from the batch to disk (the batch only
	// contains disk deletions of previously committed nodes in greedy GC mode)
	if err := batch.Write(); err != nil {
		log.Warn("Failed to write flush list to disk", "err", err)
	}
	batch.Reset()

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}

// dereference is the private locked version of Dereference.
func (db *Database) dereference(batch ethdb.Batch, child common.Hash, parent common.Hash) {
	// Dereference the parent-child
	node := db.dirties[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
			db.childrenSize -= (common.HashLength + 2) // uint16 counter
		}
	}
	// If the child does not exist, it's a previously committed node.
	// Note: `node` is rebound here from the parent entry to the child entry.
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If there are no more references to the child, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.dirties[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.dirties[node.flushPrev].flushNext = common.Hash{}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}

		// Flush the accumulated deletions periodically to bound batch memory.
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Warn("Error on batch flushing out on disk", "err", err)
			}
			batch.Reset()
		}

		// Dereference all children and delete the node
		node.forChilds(func(hash common.Hash) {
			db.dereference(batch, hash, child)
		})
		// Greedy GC: a node that a commit persisted (marked commited) is also
		// deleted from disk once its last in-memory reference is gone.
		if db.dirties[child].commited {
			rawdb.DeleteTrieNode(batch, child)
		}
		delete(db.dirties, child)
		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= cachedNodeChildrenSize
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted.
	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
	size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))

	// If the preimage cache got large enough, push to disk. If it's still small
	// leave for later to deduplicate writes.
	flushPreimages := db.preimagesSize > 4*1024*1024
	if flushPreimages {
		if db.preimages == nil {
			log.Error("Attempted to write preimages whilst disabled")
		} else {
			rawdb.WritePreimages(batch, db.preimages)
			if batch.ValueSize() > ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return err
				}
				batch.Reset()
			}
		}
	}
	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch; nodes the
		// greedy uncacher already persisted (commited) are skipped.
		node := db.dirties[oldest]
		if !node.commited {
			rawdb.WriteTrieNode(batch, oldest, node.rlp())
		}

		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
		if node.children != nil {
			size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data
	db.lock.Lock()
	defer db.lock.Unlock()

	if flushPreimages {
		if db.preimages == nil {
			log.Error("Attempted to reset preimage cache whilst disabled")
		} else {
			db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
		}
	}
	// Uncache everything that was flushed, walking the flush-list from the old
	// head up to (but excluding) the first node that was kept in memory.
	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move all of the accumulated preimages into a write batch
	if db.preimages != nil {
		rawdb.WritePreimages(batch, db.preimages)
		// Since we're going to replay trie node writes into the clean cache, flush out
		// any batched pre-images before continuing.
		if err := batch.Write(); err != nil {
			return err
		}
		batch.Reset()
	}
	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	// Pick the uncacher matching the configured GC mode: the greedy one keeps
	// committed nodes tracked so they can later be removed from disk again.
	var uncacher ethdb.KeyValueWriter
	if db.greedyGC {
		uncacher = &greedy{db}
	} else {
		uncacher = &cleaner{db}
	}
	if err := db.commit(node, batch, uncacher, callback); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	db.lock.Lock()
	defer db.lock.Unlock()

	batch.Replay(uncacher)

	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	batch.Reset()

	// Reset the storage counters and bumped metrics
	if db.preimages != nil {
		db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
	}
	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}

// commit is the private locked version of Commit.
789 func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher ethdb.KeyValueWriter, callback func(common.Hash)) error { 790 // If the node does not exist or marked as committed, then it's a previously committed node 791 node, ok := db.dirties[hash] 792 if !ok || node.commited { 793 return nil 794 } 795 var err error 796 node.forChilds(func(child common.Hash) { 797 if err == nil { 798 err = db.commit(child, batch, uncacher, callback) 799 } 800 }) 801 if err != nil { 802 return err 803 } 804 // If we've reached an optimal batch size, commit and start over 805 rawdb.WriteTrieNode(batch, hash, node.rlp()) 806 if callback != nil { 807 callback(hash) 808 } 809 if batch.ValueSize() >= ethdb.IdealBatchSize { 810 db.lock.Lock() 811 batch.Replay(uncacher) 812 db.lock.Unlock() 813 if err := batch.Write(); err != nil { 814 return err 815 } 816 batch.Reset() 817 } 818 return nil 819 } 820 821 // cleaner is a database batch replayer that takes a batch of write operations 822 // and cleans up the trie database from anything written to disk. 
823 type cleaner struct { 824 db *Database 825 } 826 827 // evictDirty update the flush-list and remove node from dirty cache 828 func evictDirty(db *Database, hash common.Hash, node *cachedNode) { 829 // Node still exists, remove it from the flush-list 830 switch hash { 831 case db.oldest: 832 db.oldest = node.flushNext 833 db.dirties[node.flushNext].flushPrev = common.Hash{} 834 case db.newest: 835 db.newest = node.flushPrev 836 db.dirties[node.flushPrev].flushNext = common.Hash{} 837 default: 838 db.dirties[node.flushPrev].flushNext = node.flushNext 839 db.dirties[node.flushNext].flushPrev = node.flushPrev 840 } 841 // Remove the node from the dirty cache 842 delete(db.dirties, hash) 843 db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) 844 if node.children != nil { 845 db.dirtiesSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) 846 } 847 } 848 849 // Put reacts to database writes and implements dirty data uncaching. This is the 850 // post-processing step of a commit operation where the already persisted trie is 851 // removed from the dirty cache and moved into the clean cache. The reason behind 852 // the two-phase commit is to ensure data availability while moving from 853 // memory to disk. 
854 func (c *cleaner) Put(key []byte, rlp []byte) error { 855 hash := common.BytesToHash(key) 856 857 // If the node does not exist, we're done on this path 858 node, ok := c.db.dirties[hash] 859 if !ok { 860 return nil 861 } 862 evictDirty(c.db, hash, node) 863 // Move the flushed node into the clean cache to prevent insta-reloads 864 if c.db.cleans != nil { 865 c.db.cleans.Set(hash[:], rlp) 866 memcacheCleanWriteMeter.Mark(int64(len(rlp))) 867 } 868 return nil 869 } 870 871 func (c *cleaner) Delete(key []byte) error { 872 panic("not implemented") 873 } 874 875 type greedy struct { 876 db *Database 877 } 878 879 func (g *greedy) Put(key []byte, rlp []byte) error { 880 hash := common.BytesToHash(key) 881 882 // If the node does not exist, we're done on this path 883 node, ok := g.db.dirties[hash] 884 if !ok { 885 return nil 886 } 887 // Mark node as commited if node does not existing on db 888 if exist, _ := g.db.diskdb.Has(hash[:]); !exist { 889 g.db.dirties[hash].commited = true 890 } else { 891 evictDirty(g.db, hash, node) 892 } 893 // Move the flushed node into the clean cache to prevent insta-reloads 894 if g.db.cleans != nil { 895 g.db.cleans.Set(hash[:], rlp) 896 memcacheCleanWriteMeter.Mark(int64(len(rlp))) 897 } 898 return nil 899 } 900 901 func (g *greedy) Delete(key []byte) error { 902 panic("not implemented") 903 } 904 905 // Size returns the current storage size of the memory cache in front of the 906 // persistent database layer. 907 func (db *Database) Size() (common.StorageSize, common.StorageSize) { 908 db.lock.RLock() 909 defer db.lock.RUnlock() 910 911 // db.dirtiesSize only contains the useful data in the cache, but when reporting 912 // the total memory consumption, the maintenance metadata is also needed to be 913 // counted. 
914 var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize) 915 var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2)) 916 return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, db.preimagesSize 917 } 918 919 // saveCache saves clean state cache to given directory path 920 // using specified CPU cores. 921 func (db *Database) saveCache(dir string, threads int) error { 922 if db.cleans == nil { 923 return nil 924 } 925 log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads) 926 927 start := time.Now() 928 err := db.cleans.SaveToFileConcurrent(dir, threads) 929 if err != nil { 930 log.Error("Failed to persist clean trie cache", "error", err) 931 return err 932 } 933 log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start))) 934 return nil 935 } 936 937 // SaveCache atomically saves fast cache data to the given dir using all 938 // available CPU cores. 939 func (db *Database) SaveCache(dir string) error { 940 return db.saveCache(dir, runtime.GOMAXPROCS(0)) 941 } 942 943 // SaveCachePeriodically atomically saves fast cache data to the given dir with 944 // the specified interval. All dump operation will only use a single CPU core. 945 func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) { 946 ticker := time.NewTicker(interval) 947 defer ticker.Stop() 948 949 for { 950 select { 951 case <-ticker.C: 952 db.saveCache(dir, 1) 953 case <-stopCh: 954 return 955 } 956 } 957 }