// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"
	"io"
	"reflect"
	"runtime"
	"sync"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
)

// Metrics instrumenting the clean cache, the dirty cache and the three bulk
// operations of the trie database (flush via Cap, garbage collection via
// Dereference and persistence via Commit).
var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	memcacheDirtyHitMeter   = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
	memcacheDirtyMissMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
	memcacheDirtyReadMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
	memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
//
// Note, the trie Database is **not** thread safe in its mutations, but it **is**
// thread safe in providing individual, independent node access. The rationale
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
	diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes

	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize  common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize common.StorageSize // Storage size of the external children tracking
	preimages    *preimageStore     // The store for caching preimages

	// lock guards the dirty node cache and its size counters: readers take the
	// read lock (node/Node/Nodes/Size), mutators take the write lock.
	lock sync.RWMutex
}

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time store them
// in the same cache fields).
type rawNode []byte

// cache and fstring satisfy the node interface; a rawNode is never part of a
// live, expanded trie, so both are unreachable by construction.
func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// EncodeRLP writes the blob out verbatim: it is already RLP encoded.
func (n rawNode) EncodeRLP(w io.Writer) error {
	_, err := w.Write(n)
	return err
}

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

// cache and fstring satisfy the node interface; a rawFullNode is never part of
// a live, expanded trie, so both are unreachable by construction.
func (n rawFullNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// EncodeRLP encodes the node through the internal encoder buffer.
func (n rawFullNode) EncodeRLP(w io.Writer) error {
	eb := rlp.NewEncoderBuffer(w)
	n.encode(eb)
	return eb.Flush()
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

// cache and fstring satisfy the node interface; a rawShortNode is never part of
// a live, expanded trie, so both are unreachable by construction.
func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())

// cachedNodeChildrenSize is the raw size of an initialized but empty external
// reference map.
150 const cachedNodeChildrenSize = 48 151 152 // rlp returns the raw rlp encoded blob of the cached trie node, either directly 153 // from the cache, or by regenerating it from the collapsed node. 154 func (n *cachedNode) rlp() []byte { 155 if node, ok := n.node.(rawNode); ok { 156 return node 157 } 158 return nodeToBytes(n.node) 159 } 160 161 // obj returns the decoded and expanded trie node, either directly from the cache, 162 // or by regenerating it from the rlp encoded blob. 163 func (n *cachedNode) obj(hash common.Hash) node { 164 if node, ok := n.node.(rawNode); ok { 165 return mustDecodeNode(hash[:], node) 166 } 167 return expandNode(hash[:], n.node) 168 } 169 170 // forChilds invokes the callback for all the tracked children of this node, 171 // both the implicit ones from inside the node as well as the explicit ones 172 // from outside the node. 173 func (n *cachedNode) forChilds(onChild func(hash common.Hash)) { 174 for child := range n.children { 175 onChild(child) 176 } 177 if _, ok := n.node.(rawNode); !ok { 178 forGatherChildren(n.node, onChild) 179 } 180 } 181 182 // forGatherChildren traverses the node hierarchy of a collapsed storage node and 183 // invokes the callback for all the hashnode children. 184 func forGatherChildren(n node, onChild func(hash common.Hash)) { 185 switch n := n.(type) { 186 case *rawShortNode: 187 forGatherChildren(n.Val, onChild) 188 case rawFullNode: 189 for i := 0; i < 16; i++ { 190 forGatherChildren(n[i], onChild) 191 } 192 case hashNode: 193 onChild(common.BytesToHash(n)) 194 case valueNode, nil, rawNode: 195 default: 196 panic(fmt.Sprintf("unknown node type: %T", n)) 197 } 198 } 199 200 // simplifyNode traverses the hierarchy of an expanded memory node and discards 201 // all the internal caches, returning a node that only contains the raw data. 
202 func simplifyNode(n node) node { 203 switch n := n.(type) { 204 case *shortNode: 205 // Short nodes discard the flags and cascade 206 return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} 207 208 case *fullNode: 209 // Full nodes discard the flags and cascade 210 node := rawFullNode(n.Children) 211 for i := 0; i < len(node); i++ { 212 if node[i] != nil { 213 node[i] = simplifyNode(node[i]) 214 } 215 } 216 return node 217 218 case valueNode, hashNode, rawNode: 219 return n 220 221 default: 222 panic(fmt.Sprintf("unknown node type: %T", n)) 223 } 224 } 225 226 // expandNode traverses the node hierarchy of a collapsed storage node and converts 227 // all fields and keys into expanded memory form. 228 func expandNode(hash hashNode, n node) node { 229 switch n := n.(type) { 230 case *rawShortNode: 231 // Short nodes need key and child expansion 232 return &shortNode{ 233 Key: compactToHex(n.Key), 234 Val: expandNode(nil, n.Val), 235 flags: nodeFlag{ 236 hash: hash, 237 }, 238 } 239 240 case rawFullNode: 241 // Full nodes need child expansion 242 node := &fullNode{ 243 flags: nodeFlag{ 244 hash: hash, 245 }, 246 } 247 for i := 0; i < len(node.Children); i++ { 248 if n[i] != nil { 249 node.Children[i] = expandNode(nil, n[i]) 250 } 251 } 252 return node 253 254 case valueNode, hashNode: 255 return n 256 257 default: 258 panic(fmt.Sprintf("unknown node type: %T", n)) 259 } 260 } 261 262 // Config defines all necessary options for database. 263 type Config struct { 264 Cache int // Memory allowance (MB) to use for caching trie nodes in memory 265 Journal string // Journal of clean cache to survive node restarts 266 Preimages bool // Flag whether the preimage of trie key is recorded 267 } 268 269 // NewDatabase creates a new trie database to store ephemeral trie content before 270 // its written out to disk or garbage collected. No read cache is created, so all 271 // data retrievals will hit the underlying disk database. 
272 func NewDatabase(diskdb ethdb.KeyValueStore) *Database { 273 return NewDatabaseWithConfig(diskdb, nil) 274 } 275 276 // NewDatabaseWithConfig creates a new trie database to store ephemeral trie content 277 // before its written out to disk or garbage collected. It also acts as a read cache 278 // for nodes loaded from disk. 279 func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database { 280 var cleans *fastcache.Cache 281 if config != nil && config.Cache > 0 { 282 if config.Journal == "" { 283 cleans = fastcache.New(config.Cache * 1024 * 1024) 284 } else { 285 cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024) 286 } 287 } 288 var preimage *preimageStore 289 if config != nil && config.Preimages { 290 preimage = newPreimageStore(diskdb) 291 } 292 db := &Database{ 293 diskdb: diskdb, 294 cleans: cleans, 295 dirties: map[common.Hash]*cachedNode{{}: { 296 children: make(map[common.Hash]uint16), 297 }}, 298 preimages: preimage, 299 } 300 return db 301 } 302 303 // DiskDB retrieves the persistent storage backing the trie database. 304 func (db *Database) DiskDB() ethdb.KeyValueStore { 305 return db.diskdb 306 } 307 308 // insert inserts a collapsed trie node into the memory database. 309 // The blob size must be specified to allow proper size tracking. 310 // All nodes inserted by this function will be reference tracked 311 // and in theory should only used for **trie nodes** insertion. 
func (db *Database) insert(hash common.Hash, size int, node node) {
	db.lock.Lock()
	defer db.lock.Unlock()

	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	memcacheDirtyWriteMeter.Mark(int64(size))

	// Create the cached entry for this node, appending it to the tail of the
	// flush-list (flushPrev points at the current newest node).
	entry := &cachedNode{
		node:      simplifyNode(node),
		size:      uint16(size),
		flushPrev: db.newest,
	}
	// Bump the reference count of every child already present in the dirty
	// set, so they are kept alive as long as this node is.
	entry.forChilds(func(child common.Hash) {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	})
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}

// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
func (db *Database) node(hash common.Hash) node {
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return mustDecodeNode(hash[:], enc)
		}
	}
	// Retrieve the node from the dirty cache if available
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(dirty.size))
		return dirty.obj(hash)
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc, err := db.diskdb.Get(hash[:])
	if err != nil || enc == nil {
		return nil
	}
	// Populate the clean cache so a repeat lookup avoids the disk hit
	if db.cleans != nil {
		db.cleans.Set(hash[:], enc)
		memcacheCleanMissMeter.Mark(1)
		memcacheCleanWriteMeter.Mark(int64(len(enc)))
	}
	return mustDecodeNode(hash[:], enc)
}

// Node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content.
func (db *Database) Node(hash common.Hash) ([]byte, error) {
	// It doesn't make sense to retrieve the metaroot
	if hash == (common.Hash{}) {
		return nil, errors.New("not found")
	}
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return enc, nil
		}
	}
	// Retrieve the node from the dirty cache if available
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(dirty.size))
		return dirty.rlp(), nil
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc := rawdb.ReadTrieNode(db.diskdb, hash)
	if len(enc) != 0 {
		// Populate the clean cache so a repeat lookup avoids the disk hit
		if db.cleans != nil {
			db.cleans.Set(hash[:], enc)
			memcacheCleanMissMeter.Mark(1)
			memcacheCleanWriteMeter.Mark(int64(len(enc)))
		}
		return enc, nil
	}
	return nil, errors.New("not found")
}

// Nodes retrieves the hashes of all the nodes cached within the memory database.
// This method is extremely expensive and should only be used to validate internal
// states in test code.
func (db *Database) Nodes() []common.Hash {
	db.lock.RLock()
	defer db.lock.RUnlock()

	var hashes = make([]common.Hash, 0, len(db.dirties))
	for hash := range db.dirties {
		if hash != (common.Hash{}) { // Special case for "root" references/nodes
			hashes = append(hashes, hash)
		}
	}
	return hashes
}

// Reference adds a new reference from a parent node to a child node.
// This function is used to add reference between internal trie node
// and external node(e.g. storage trie root), all internal trie nodes
// are referenced together by database itself.
func (db *Database) Reference(child common.Hash, parent common.Hash) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.reference(child, parent)
}

// reference is the private locked version of Reference.
func (db *Database) reference(child common.Hash, parent common.Hash) {
	// If the node does not exist, it's a node pulled from disk, skip
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If the reference already exists, only duplicate for roots. Roots are the
	// metaroot's children (parent == zero hash) and may be referenced multiple
	// times; any other repeat parent->child reference is a no-op.
	if db.dirties[parent].children == nil {
		db.dirties[parent].children = make(map[common.Hash]uint16)
		db.childrenSize += cachedNodeChildrenSize
	} else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) {
		return
	}
	node.parents++
	db.dirties[parent].children[child]++
	if db.dirties[parent].children[child] == 1 {
		db.childrenSize += common.HashLength + 2 // uint16 counter
	}
}

// Dereference removes an existing reference from a root node.
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	// Snapshot the counters so the deltas can be accumulated into the GC stats
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root, common.Hash{})

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}

// dereference is the private locked version of Dereference.
func (db *Database) dereference(child common.Hash, parent common.Hash) {
	// Dereference the parent-child: drop the external child counter first
	node := db.dirties[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
			db.childrenSize -= (common.HashLength + 2) // uint16 counter
		}
	}
	// If the child does not exist, it's a previously committed node.
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If there are no more references to the child, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list, patching the doubly-linked
		// neighbours (head/tail need their endpoint pointers moved too)
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.dirties[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.dirties[node.flushPrev].flushNext = common.Hash{}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		node.forChilds(func(hash common.Hash) {
			db.dereference(hash, child)
		})
		delete(db.dirties, child)
		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= cachedNodeChildrenSize
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. The metaroot entry (and its child refs) is excluded from the count.
	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
	size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))

	// If the preimage cache got large enough, push to disk. If it's still small
	// leave for later to deduplicate writes.
	if db.preimages != nil {
		db.preimages.commit(false)
	}
	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		rawdb.WriteTrieNode(batch, oldest, node.rlp())

		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
		if node.children != nil {
			size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data: everything between the old
	// head and `oldest` has been persisted and can be dropped from the cache.
	db.lock.Lock()
	defer db.lock.Unlock()

	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move all of the accumulated preimages into a write batch
	if db.preimages != nil {
		db.preimages.commit(true)
	}
	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher, callback); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	db.lock.Lock()
	defer db.lock.Unlock()

	batch.Replay(uncacher)
	batch.Reset()

	// Reset the storage counters and bumped metrics
	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}

// commit is the private locked version of Commit.
func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner, callback func(common.Hash)) error {
	// If the node does not exist, it's a previously committed node
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	// Commit children first (depth-first), aborting on the first error
	var err error
	node.forChilds(func(child common.Hash) {
		if err == nil {
			err = db.commit(child, batch, uncacher, callback)
		}
	})
	if err != nil {
		return err
	}
	// If we've reached an optimal batch size, commit and start over
	rawdb.WriteTrieNode(batch, hash, node.rlp())
	if callback != nil {
		callback(hash)
	}
	if batch.ValueSize() >= ethdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		db.lock.Lock()
		batch.Replay(uncacher)
		batch.Reset()
		db.lock.Unlock()
	}
	return nil
}

// cleaner is a database batch replayer that takes a batch of write operations
// and cleans up the trie database from anything written to disk.
type cleaner struct {
	db *Database
}

// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
// the two-phase commit is to ensure data availability while moving from memory
// to disk.
func (c *cleaner) Put(key []byte, rlp []byte) error {
	hash := common.BytesToHash(key)

	// If the node does not exist, we're done on this path
	node, ok := c.db.dirties[hash]
	if !ok {
		return nil
	}
	// Node still exists, remove it from the flush-list, patching the
	// doubly-linked neighbours (head/tail move their endpoint pointers)
	switch hash {
	case c.db.oldest:
		c.db.oldest = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = common.Hash{}
	case c.db.newest:
		c.db.newest = node.flushPrev
		c.db.dirties[node.flushPrev].flushNext = common.Hash{}
	default:
		c.db.dirties[node.flushPrev].flushNext = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	// Remove the node from the dirty cache
	delete(c.db.dirties, hash)
	c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
	if node.children != nil {
		c.db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
	}
	// Move the flushed node into the clean cache to prevent insta-reloads
	if c.db.cleans != nil {
		c.db.cleans.Set(hash[:], rlp)
		memcacheCleanWriteMeter.Mark(int64(len(rlp)))
	}
	return nil
}

// Delete panics: deletions are never expected in the batches replayed through
// the cleaner (NOTE(review): commit/cap batches appear to only contain Puts —
// confirm against the batch producers if this ever fires).
func (c *cleaner) Delete(key []byte) error {
	panic("not implemented")
}

// Size returns the current storage size of the memory cache in front of the
// persistent database layer. The first return is the dirty node cache size
// (data plus metadata), the second is the accumulated preimage size.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. The metaroot entry and its child references are excluded.
	var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize)
	var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2))
	var preimageSize common.StorageSize
	if db.preimages != nil {
		preimageSize = db.preimages.size()
	}
	return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize
}

// saveCache saves clean state cache to given directory path
// using specified CPU cores. It is a no-op when no clean cache is configured.
func (db *Database) saveCache(dir string, threads int) error {
	if db.cleans == nil {
		return nil
	}
	log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads)

	start := time.Now()
	err := db.cleans.SaveToFileConcurrent(dir, threads)
	if err != nil {
		log.Error("Failed to persist clean trie cache", "error", err)
		return err
	}
	log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}

// SaveCache atomically saves fast cache data to the given dir using all
// available CPU cores.
func (db *Database) SaveCache(dir string) error {
	return db.saveCache(dir, runtime.GOMAXPROCS(0))
}

// SaveCachePeriodically atomically saves fast cache data to the given dir with
// the specified interval. All dump operation will only use a single CPU core.
func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) {
	ticker := time.NewTicker(interval)
	defer ticker.Stop()

	for {
		select {
		case <-ticker.C:
			// Errors are logged inside saveCache; keep ticking regardless
			db.saveCache(dir, 1)
		case <-stopCh:
			return
		}
	}
}