// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"
	"io"
	"reflect"
	"runtime"
	"sync"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/dominant-strategies/go-quai/common"
	"github.com/dominant-strategies/go-quai/core/rawdb"
	"github.com/dominant-strategies/go-quai/ethdb"
	"github.com/dominant-strategies/go-quai/log"
	"github.com/dominant-strategies/go-quai/metrics"
	"github.com/dominant-strategies/go-quai/rlp"
)

// Meters and timers tracking hit/miss rates and throughput of the clean cache,
// the dirty cache, and the flush / garbage-collect / commit maintenance passes.
var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	memcacheDirtyHitMeter   = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
	memcacheDirtyMissMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
	memcacheDirtyReadMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
	memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
//
// Note, the trie Database is **not** thread safe in its mutations, but it **is**
// thread safe in providing individual, independent node access. The rationale
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
	diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes

	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	preimages map[common.Hash][]byte // Preimages of nodes from the secure trie

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize   common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize  common.StorageSize // Storage size of the external children tracking
	preimagesSize common.StorageSize // Storage size of the preimages cache

	lock sync.RWMutex // Guards reads of dirties/preimages against concurrent mutation
}

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time store them
// in the same cache fields).
type rawNode []byte

// cache and fstring satisfy the node interface but must never be invoked on a
// raw blob: raw nodes only ever live inside the database cache, not in a trie.
func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// EncodeRLP writes the already RLP-encoded blob out verbatim.
func (n rawNode) EncodeRLP(w io.Writer) error {
	_, err := w.Write(n)
	return err
}

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

// cache and fstring satisfy the node interface but must never be invoked on a
// raw node: raw nodes only ever live inside the database cache, not in a trie.
func (n rawFullNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// EncodeRLP encodes the node, substituting the canonical empty value node for
// nil children so the wire format always has all 17 slots populated.
func (n rawFullNode) EncodeRLP(w io.Writer) error {
	var nodes [17]node

	for i, child := range n {
		if child != nil {
			nodes[i] = child
		} else {
			nodes[i] = nilValueNode
		}
	}
	return rlp.Encode(w, nodes)
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

// cache and fstring satisfy the node interface but must never be invoked on a
// raw node: raw nodes only ever live inside the database cache, not in a trie.
func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())

// cachedNodeChildrenSize is the raw size of an initialized but empty external
// reference map.
159 const cachedNodeChildrenSize = 48 160 161 // rlp returns the raw rlp encoded blob of the cached trie node, either directly 162 // from the cache, or by regenerating it from the collapsed node. 163 func (n *cachedNode) rlp() []byte { 164 if node, ok := n.node.(rawNode); ok { 165 return node 166 } 167 blob, err := rlp.EncodeToBytes(n.node) 168 if err != nil { 169 panic(err) 170 } 171 return blob 172 } 173 174 // obj returns the decoded and expanded trie node, either directly from the cache, 175 // or by regenerating it from the rlp encoded blob. 176 func (n *cachedNode) obj(hash common.Hash) node { 177 if node, ok := n.node.(rawNode); ok { 178 return mustDecodeNode(hash[:], node) 179 } 180 return expandNode(hash[:], n.node) 181 } 182 183 // forChilds invokes the callback for all the tracked children of this node, 184 // both the implicit ones from inside the node as well as the explicit ones 185 // from outside the node. 186 func (n *cachedNode) forChilds(onChild func(hash common.Hash)) { 187 for child := range n.children { 188 onChild(child) 189 } 190 if _, ok := n.node.(rawNode); !ok { 191 forGatherChildren(n.node, onChild) 192 } 193 } 194 195 // forGatherChildren traverses the node hierarchy of a collapsed storage node and 196 // invokes the callback for all the hashnode children. 197 func forGatherChildren(n node, onChild func(hash common.Hash)) { 198 switch n := n.(type) { 199 case *rawShortNode: 200 forGatherChildren(n.Val, onChild) 201 case rawFullNode: 202 for i := 0; i < 16; i++ { 203 forGatherChildren(n[i], onChild) 204 } 205 case hashNode: 206 onChild(common.BytesToHash(n)) 207 case valueNode, nil, rawNode: 208 default: 209 panic(fmt.Sprintf("unknown node type: %T", n)) 210 } 211 } 212 213 // simplifyNode traverses the hierarchy of an expanded memory node and discards 214 // all the internal caches, returning a node that only contains the raw data. 
215 func simplifyNode(n node) node { 216 switch n := n.(type) { 217 case *shortNode: 218 // Short nodes discard the flags and cascade 219 return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} 220 221 case *fullNode: 222 // Full nodes discard the flags and cascade 223 node := rawFullNode(n.Children) 224 for i := 0; i < len(node); i++ { 225 if node[i] != nil { 226 node[i] = simplifyNode(node[i]) 227 } 228 } 229 return node 230 231 case valueNode, hashNode, rawNode: 232 return n 233 234 default: 235 panic(fmt.Sprintf("unknown node type: %T", n)) 236 } 237 } 238 239 // expandNode traverses the node hierarchy of a collapsed storage node and converts 240 // all fields and keys into expanded memory form. 241 func expandNode(hash hashNode, n node) node { 242 switch n := n.(type) { 243 case *rawShortNode: 244 // Short nodes need key and child expansion 245 return &shortNode{ 246 Key: compactToHex(n.Key), 247 Val: expandNode(nil, n.Val), 248 flags: nodeFlag{ 249 hash: hash, 250 }, 251 } 252 253 case rawFullNode: 254 // Full nodes need child expansion 255 node := &fullNode{ 256 flags: nodeFlag{ 257 hash: hash, 258 }, 259 } 260 for i := 0; i < len(node.Children); i++ { 261 if n[i] != nil { 262 node.Children[i] = expandNode(nil, n[i]) 263 } 264 } 265 return node 266 267 case valueNode, hashNode: 268 return n 269 270 default: 271 panic(fmt.Sprintf("unknown node type: %T", n)) 272 } 273 } 274 275 // Config defines all necessary options for database. 276 type Config struct { 277 Cache int // Memory allowance (MB) to use for caching trie nodes in memory 278 Journal string // Journal of clean cache to survive node restarts 279 Preimages bool // Flag whether the preimage of trie key is recorded 280 } 281 282 // NewDatabase creates a new trie database to store ephemeral trie content before 283 // its written out to disk or garbage collected. No read cache is created, so all 284 // data retrievals will hit the underlying disk database. 
285 func NewDatabase(diskdb ethdb.KeyValueStore) *Database { 286 return NewDatabaseWithConfig(diskdb, nil) 287 } 288 289 // NewDatabaseWithConfig creates a new trie database to store ephemeral trie content 290 // before its written out to disk or garbage collected. It also acts as a read cache 291 // for nodes loaded from disk. 292 func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database { 293 var cleans *fastcache.Cache 294 if config != nil && config.Cache > 0 { 295 if config.Journal == "" { 296 cleans = fastcache.New(config.Cache * 1024 * 1024) 297 } else { 298 cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024) 299 } 300 } 301 db := &Database{ 302 diskdb: diskdb, 303 cleans: cleans, 304 dirties: map[common.Hash]*cachedNode{{}: { 305 children: make(map[common.Hash]uint16), 306 }}, 307 } 308 if config == nil || config.Preimages { 309 db.preimages = make(map[common.Hash][]byte) 310 } 311 return db 312 } 313 314 // DiskDB retrieves the persistent storage backing the trie database. 315 func (db *Database) DiskDB() ethdb.KeyValueStore { 316 return db.diskdb 317 } 318 319 // insert inserts a collapsed trie node into the memory database. 320 // The blob size must be specified to allow proper size tracking. 321 // All nodes inserted by this function will be reference tracked 322 // and in theory should only used for **trie nodes** insertion. 
func (db *Database) insert(hash common.Hash, size int, node node) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	memcacheDirtyWriteMeter.Mark(int64(size))

	// Create the cached entry for this node, stripping internal caches/flags so
	// only the raw content is retained; link it at the tail of the flush-list
	entry := &cachedNode{
		node:      simplifyNode(node),
		size:      uint16(size),
		flushPrev: db.newest,
	}
	// Bump the parent refcount of every child already in the dirty set
	entry.forChilds(func(child common.Hash) {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	})
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}

// insertPreimage writes a new trie node pre-image to the memory database if it's
// yet unknown. The method will NOT make a copy of the slice,
// only use if the preimage will NOT be changed later on.
//
// Note, this method assumes that the database's lock is held!
func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
	// Short circuit if preimage collection is disabled
	if db.preimages == nil {
		return
	}
	// Track the preimage if a yet unknown one
	if _, ok := db.preimages[hash]; ok {
		return
	}
	db.preimages[hash] = preimage
	db.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
}

// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
372 func (db *Database) node(hash common.Hash) node { 373 // Retrieve the node from the clean cache if available 374 if db.cleans != nil { 375 if enc := db.cleans.Get(nil, hash[:]); enc != nil { 376 memcacheCleanHitMeter.Mark(1) 377 memcacheCleanReadMeter.Mark(int64(len(enc))) 378 return mustDecodeNode(hash[:], enc) 379 } 380 } 381 // Retrieve the node from the dirty cache if available 382 db.lock.RLock() 383 dirty := db.dirties[hash] 384 db.lock.RUnlock() 385 386 if dirty != nil { 387 memcacheDirtyHitMeter.Mark(1) 388 memcacheDirtyReadMeter.Mark(int64(dirty.size)) 389 return dirty.obj(hash) 390 } 391 memcacheDirtyMissMeter.Mark(1) 392 393 // Content unavailable in memory, attempt to retrieve from disk 394 enc, err := db.diskdb.Get(hash[:]) 395 if err != nil || enc == nil { 396 return nil 397 } 398 if db.cleans != nil { 399 db.cleans.Set(hash[:], enc) 400 memcacheCleanMissMeter.Mark(1) 401 memcacheCleanWriteMeter.Mark(int64(len(enc))) 402 } 403 return mustDecodeNode(hash[:], enc) 404 } 405 406 // Node retrieves an encoded cached trie node from memory. If it cannot be found 407 // cached, the method queries the persistent database for the content. 
408 func (db *Database) Node(hash common.Hash) ([]byte, error) { 409 // It doesn't make sense to retrieve the metaroot 410 if hash == (common.Hash{}) { 411 return nil, errors.New("not found") 412 } 413 // Retrieve the node from the clean cache if available 414 if db.cleans != nil { 415 if enc := db.cleans.Get(nil, hash[:]); enc != nil { 416 memcacheCleanHitMeter.Mark(1) 417 memcacheCleanReadMeter.Mark(int64(len(enc))) 418 return enc, nil 419 } 420 } 421 // Retrieve the node from the dirty cache if available 422 db.lock.RLock() 423 dirty := db.dirties[hash] 424 db.lock.RUnlock() 425 426 if dirty != nil { 427 memcacheDirtyHitMeter.Mark(1) 428 memcacheDirtyReadMeter.Mark(int64(dirty.size)) 429 return dirty.rlp(), nil 430 } 431 memcacheDirtyMissMeter.Mark(1) 432 433 // Content unavailable in memory, attempt to retrieve from disk 434 enc := rawdb.ReadTrieNode(db.diskdb, hash) 435 if len(enc) != 0 { 436 if db.cleans != nil { 437 db.cleans.Set(hash[:], enc) 438 memcacheCleanMissMeter.Mark(1) 439 memcacheCleanWriteMeter.Mark(int64(len(enc))) 440 } 441 return enc, nil 442 } 443 return nil, errors.New("not found") 444 } 445 446 // preimage retrieves a cached trie node pre-image from memory. If it cannot be 447 // found cached, the method queries the persistent database for the content. 448 func (db *Database) preimage(hash common.Hash) []byte { 449 // Short circuit if preimage collection is disabled 450 if db.preimages == nil { 451 return nil 452 } 453 // Retrieve the node from cache if available 454 db.lock.RLock() 455 preimage := db.preimages[hash] 456 db.lock.RUnlock() 457 458 if preimage != nil { 459 return preimage 460 } 461 return rawdb.ReadPreimage(db.diskdb, hash) 462 } 463 464 // Nodes retrieves the hashes of all the nodes cached within the memory database. 465 // This method is extremely expensive and should only be used to validate internal 466 // states in test code. 
func (db *Database) Nodes() []common.Hash {
	db.lock.RLock()
	defer db.lock.RUnlock()

	var hashes = make([]common.Hash, 0, len(db.dirties))
	for hash := range db.dirties {
		if hash != (common.Hash{}) { // Special case for "root" references/nodes
			hashes = append(hashes, hash)
		}
	}
	return hashes
}

// Reference adds a new reference from a parent node to a child node.
// This function is used to add reference between internal trie node
// and external node(e.g. storage trie root), all internal trie nodes
// are referenced together by database itself.
func (db *Database) Reference(child common.Hash, parent common.Hash) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.reference(child, parent)
}

// reference is the private locked version of Reference.
func (db *Database) reference(child common.Hash, parent common.Hash) {
	// If the node does not exist, it's a node pulled from disk, skip
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If the reference already exists, only duplicate for roots (i.e. when the
	// parent is the metaroot); any other repeat reference short circuits
	if db.dirties[parent].children == nil {
		db.dirties[parent].children = make(map[common.Hash]uint16)
		db.childrenSize += cachedNodeChildrenSize
	} else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) {
		return
	}
	node.parents++
	db.dirties[parent].children[child]++
	// First reference from this parent also costs a map entry (hash + counter)
	if db.dirties[parent].children[child] == 1 {
		db.childrenSize += common.HashLength + 2 // uint16 counter
	}
}

// Dereference removes an existing reference from a root node.
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	// Snapshot the counters so the amount of collected state can be reported
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root, common.Hash{})

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}

// dereference is the private locked version of Dereference.
func (db *Database) dereference(child common.Hash, parent common.Hash) {
	// Dereference the parent-child
	node := db.dirties[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
			db.childrenSize -= (common.HashLength + 2) // uint16 counter
		}
	}
	// If the child does not exist, it's a previously committed node.
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If there are no more references to the child, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list, relinking its neighbours
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.dirties[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.dirties[node.flushPrev].flushNext = common.Hash{}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		node.forChilds(func(hash common.Hash) {
			db.dereference(hash, child)
		})
		delete(db.dirties, child)
		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= cachedNodeChildrenSize
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted.
	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
	size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))

	// If the preimage cache got large enough, push to disk. If it's still small
	// leave for later to deduplicate writes.
	flushPreimages := db.preimagesSize > 4*1024*1024
	if flushPreimages {
		if db.preimages == nil {
			log.Error("Attempted to write preimages whilst disabled")
		} else {
			rawdb.WritePreimages(batch, db.preimages)
			if batch.ValueSize() > ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return err
				}
				batch.Reset()
			}
		}
	}
	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		rawdb.WriteTrieNode(batch, oldest, node.rlp())

		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
		if node.children != nil {
			size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data
	db.lock.Lock()
	defer db.lock.Unlock()

	if flushPreimages {
		if db.preimages == nil {
			log.Error("Attempted to reset preimage cache whilst disabled")
		} else {
			db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
		}
	}
	// Walk the flush-list from the head up to (excluding) the first unflushed
	// node, dropping each persisted entry from the dirty cache
	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move all of the accumulated preimages into a write batch
	if db.preimages != nil {
		rawdb.WritePreimages(batch, db.preimages)
		// Since we're going to replay trie node writes into the clean cache, flush out
		// any batched pre-images before continuing.
		if err := batch.Write(); err != nil {
			return err
		}
		batch.Reset()
	}
	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher, callback); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	db.lock.Lock()
	defer db.lock.Unlock()

	batch.Replay(uncacher)
	batch.Reset()

	// Reset the storage counters and bumped metrics
	if db.preimages != nil {
		db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
	}
	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}

// commit is the private locked version of Commit.
func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner, callback func(common.Hash)) error {
	// If the node does not exist, it's a previously committed node
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	// Commit children depth-first, bailing at the first error
	var err error
	node.forChilds(func(child common.Hash) {
		if err == nil {
			err = db.commit(child, batch, uncacher, callback)
		}
	})
	if err != nil {
		return err
	}
	// If we've reached an optimal batch size, commit and start over
	rawdb.WriteTrieNode(batch, hash, node.rlp())
	if callback != nil {
		callback(hash)
	}
	if batch.ValueSize() >= ethdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		db.lock.Lock()
		batch.Replay(uncacher)
		batch.Reset()
		db.lock.Unlock()
	}
	return nil
}

// cleaner is a database batch replayer that takes a batch of write operations
// and cleans up the trie database from anything written to disk.
type cleaner struct {
	db *Database
}

// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
The reason behind 797 // the two-phase commit is to ensure ensure data availability while moving from 798 // memory to disk. 799 func (c *cleaner) Put(key []byte, rlp []byte) error { 800 hash := common.BytesToHash(key) 801 802 // If the node does not exist, we're done on this path 803 node, ok := c.db.dirties[hash] 804 if !ok { 805 return nil 806 } 807 // Node still exists, remove it from the flush-list 808 switch hash { 809 case c.db.oldest: 810 c.db.oldest = node.flushNext 811 c.db.dirties[node.flushNext].flushPrev = common.Hash{} 812 case c.db.newest: 813 c.db.newest = node.flushPrev 814 c.db.dirties[node.flushPrev].flushNext = common.Hash{} 815 default: 816 c.db.dirties[node.flushPrev].flushNext = node.flushNext 817 c.db.dirties[node.flushNext].flushPrev = node.flushPrev 818 } 819 // Remove the node from the dirty cache 820 delete(c.db.dirties, hash) 821 c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) 822 if node.children != nil { 823 c.db.dirtiesSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) 824 } 825 // Move the flushed node into the clean cache to prevent insta-reloads 826 if c.db.cleans != nil { 827 c.db.cleans.Set(hash[:], rlp) 828 memcacheCleanWriteMeter.Mark(int64(len(rlp))) 829 } 830 return nil 831 } 832 833 func (c *cleaner) Delete(key []byte) error { 834 panic("not implemented") 835 } 836 837 // Size returns the current storage size of the memory cache in front of the 838 // persistent database layer. 839 func (db *Database) Size() (common.StorageSize, common.StorageSize) { 840 db.lock.RLock() 841 defer db.lock.RUnlock() 842 843 // db.dirtiesSize only contains the useful data in the cache, but when reporting 844 // the total memory consumption, the maintenance metadata is also needed to be 845 // counted. 
846 var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize) 847 var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2)) 848 return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, db.preimagesSize 849 } 850 851 // saveCache saves clean state cache to given directory path 852 // using specified CPU cores. 853 func (db *Database) saveCache(dir string, threads int) error { 854 if db.cleans == nil { 855 return nil 856 } 857 log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads) 858 859 start := time.Now() 860 err := db.cleans.SaveToFileConcurrent(dir, threads) 861 if err != nil { 862 log.Error("Failed to persist clean trie cache", "error", err) 863 return err 864 } 865 log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start))) 866 return nil 867 } 868 869 // SaveCache atomically saves fast cache data to the given dir using all 870 // available CPU cores. 871 func (db *Database) SaveCache(dir string) error { 872 return db.saveCache(dir, runtime.GOMAXPROCS(0)) 873 } 874 875 // SaveCachePeriodically atomically saves fast cache data to the given dir with 876 // the specified interval. All dump operation will only use a single CPU core. 877 func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) { 878 ticker := time.NewTicker(interval) 879 defer ticker.Stop() 880 881 for { 882 select { 883 case <-ticker.C: 884 db.saveCache(dir, 1) 885 case <-stopCh: 886 return 887 } 888 } 889 }