// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"
	"io"
	"reflect"
	"runtime"
	"sync"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/phillinzzz/newBsc/common"
	"github.com/phillinzzz/newBsc/core/rawdb"
	"github.com/phillinzzz/newBsc/ethdb"
	"github.com/phillinzzz/newBsc/log"
	"github.com/phillinzzz/newBsc/metrics"
	"github.com/phillinzzz/newBsc/rlp"
)

// Meters and timers tracking the hit rates and data volumes of the clean and
// dirty in-memory caches, plus the cost of the flush, GC and commit cycles.
var (
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	memcacheDirtyHitMeter   = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
	memcacheDirtyMissMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
	memcacheDirtyReadMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
	memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)

	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
//
// Note, the trie Database is **not** thread safe in its mutations, but it **is**
// thread safe in providing individual, independent node access. The rationale
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
	diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes

	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	preimages map[common.Hash][]byte // Preimages of nodes from the secure trie

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize   common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize  common.StorageSize // Storage size of the external children tracking
	preimagesSize common.StorageSize // Storage size of the preimages cache

	// Metrics with light lock: rough size snapshots recomputed in Reference
	// and published under sizeLock, so Size() never contends on db.lock.
	sizeLock           sync.RWMutex
	roughPreimagesSize common.StorageSize
	roughDirtiesSize   common.StorageSize

	lock sync.RWMutex
}

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time store them
// in the same cache fields).
102 type rawNode []byte 103 104 func (n rawNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } 105 func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") } 106 107 func (n rawNode) EncodeRLP(w io.Writer) error { 108 _, err := w.Write(n) 109 return err 110 } 111 112 // rawFullNode represents only the useful data content of a full node, with the 113 // caches and flags stripped out to minimize its data storage. This type honors 114 // the same RLP encoding as the original parent. 115 type rawFullNode [17]node 116 117 func (n rawFullNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } 118 func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") } 119 120 func (n rawFullNode) EncodeRLP(w io.Writer) error { 121 var nodes [17]node 122 123 for i, child := range n { 124 if child != nil { 125 nodes[i] = child 126 } else { 127 nodes[i] = nilValueNode 128 } 129 } 130 return rlp.Encode(w, nodes) 131 } 132 133 // rawShortNode represents only the useful data content of a short node, with the 134 // caches and flags stripped out to minimize its data storage. This type honors 135 // the same RLP encoding as the original parent. 136 type rawShortNode struct { 137 Key []byte 138 Val node 139 } 140 141 func (n rawShortNode) cache() (hashNode, bool) { panic("this should never end up in a live trie") } 142 func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") } 143 144 // cachedNode is all the information we know about a single cached trie node 145 // in the memory database write layer. 
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())

// cachedNodeChildrenSize is the raw size of an initialized but empty external
// reference map.
const cachedNodeChildrenSize = 48

// rlp returns the raw rlp encoded blob of the cached trie node, either directly
// from the cache, or by regenerating it from the collapsed node.
func (n *cachedNode) rlp() []byte {
	// Raw nodes are already RLP encoded; hand them back verbatim.
	if node, ok := n.node.(rawNode); ok {
		return node
	}
	blob, err := rlp.EncodeToBytes(n.node)
	if err != nil {
		// Encoding an in-memory trie node cannot legitimately fail, so treat
		// any error as a programmer bug.
		panic(err)
	}
	return blob
}

// obj returns the decoded and expanded trie node, either directly from the cache,
// or by regenerating it from the rlp encoded blob.
func (n *cachedNode) obj(hash common.Hash) node {
	if node, ok := n.node.(rawNode); ok {
		return mustDecodeNode(hash[:], node)
	}
	return expandNode(hash[:], n.node)
}

// forChilds invokes the callback for all the tracked children of this node,
// both the implicit ones from inside the node as well as the explicit ones
// from outside the node.
191 func (n *cachedNode) forChilds(onChild func(hash common.Hash)) { 192 for child := range n.children { 193 onChild(child) 194 } 195 if _, ok := n.node.(rawNode); !ok { 196 forGatherChildren(n.node, onChild) 197 } 198 } 199 200 // forGatherChildren traverses the node hierarchy of a collapsed storage node and 201 // invokes the callback for all the hashnode children. 202 func forGatherChildren(n node, onChild func(hash common.Hash)) { 203 switch n := n.(type) { 204 case *rawShortNode: 205 forGatherChildren(n.Val, onChild) 206 case rawFullNode: 207 for i := 0; i < 16; i++ { 208 forGatherChildren(n[i], onChild) 209 } 210 case hashNode: 211 onChild(common.BytesToHash(n)) 212 case valueNode, nil, rawNode: 213 default: 214 panic(fmt.Sprintf("unknown node type: %T", n)) 215 } 216 } 217 218 // simplifyNode traverses the hierarchy of an expanded memory node and discards 219 // all the internal caches, returning a node that only contains the raw data. 220 func simplifyNode(n node) node { 221 switch n := n.(type) { 222 case *shortNode: 223 // Short nodes discard the flags and cascade 224 return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} 225 226 case *fullNode: 227 // Full nodes discard the flags and cascade 228 node := rawFullNode(n.Children) 229 for i := 0; i < len(node); i++ { 230 if node[i] != nil { 231 node[i] = simplifyNode(node[i]) 232 } 233 } 234 return node 235 236 case valueNode, hashNode, rawNode: 237 return n 238 239 default: 240 panic(fmt.Sprintf("unknown node type: %T", n)) 241 } 242 } 243 244 // expandNode traverses the node hierarchy of a collapsed storage node and converts 245 // all fields and keys into expanded memory form. 
246 func expandNode(hash hashNode, n node) node { 247 switch n := n.(type) { 248 case *rawShortNode: 249 // Short nodes need key and child expansion 250 return &shortNode{ 251 Key: compactToHex(n.Key), 252 Val: expandNode(nil, n.Val), 253 flags: nodeFlag{ 254 hash: hash, 255 }, 256 } 257 258 case rawFullNode: 259 // Full nodes need child expansion 260 node := &fullNode{ 261 flags: nodeFlag{ 262 hash: hash, 263 }, 264 } 265 for i := 0; i < len(node.Children); i++ { 266 if n[i] != nil { 267 node.Children[i] = expandNode(nil, n[i]) 268 } 269 } 270 return node 271 272 case valueNode, hashNode: 273 return n 274 275 default: 276 panic(fmt.Sprintf("unknown node type: %T", n)) 277 } 278 } 279 280 // Config defines all necessary options for database. 281 type Config struct { 282 Cache int // Memory allowance (MB) to use for caching trie nodes in memory 283 Journal string // Journal of clean cache to survive node restarts 284 Preimages bool // Flag whether the preimage of trie key is recorded 285 } 286 287 // NewDatabase creates a new trie database to store ephemeral trie content before 288 // its written out to disk or garbage collected. No read cache is created, so all 289 // data retrievals will hit the underlying disk database. 290 func NewDatabase(diskdb ethdb.KeyValueStore) *Database { 291 return NewDatabaseWithConfig(diskdb, nil) 292 } 293 294 // NewDatabaseWithConfig creates a new trie database to store ephemeral trie content 295 // before its written out to disk or garbage collected. It also acts as a read cache 296 // for nodes loaded from disk. 
297 func NewDatabaseWithConfig(diskdb ethdb.KeyValueStore, config *Config) *Database { 298 var cleans *fastcache.Cache 299 if config != nil && config.Cache > 0 { 300 if config.Journal == "" { 301 cleans = fastcache.New(config.Cache * 1024 * 1024) 302 } else { 303 cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024) 304 } 305 } 306 db := &Database{ 307 diskdb: diskdb, 308 cleans: cleans, 309 dirties: map[common.Hash]*cachedNode{{}: { 310 children: make(map[common.Hash]uint16), 311 }}, 312 } 313 if config == nil || config.Preimages { // TODO(karalabe): Flip to default off in the future 314 db.preimages = make(map[common.Hash][]byte) 315 } 316 return db 317 } 318 319 // DiskDB retrieves the persistent storage backing the trie database. 320 func (db *Database) DiskDB() ethdb.KeyValueStore { 321 return db.diskdb 322 } 323 324 // insert inserts a collapsed trie node into the memory database. 325 // The blob size must be specified to allow proper size tracking. 326 // All nodes inserted by this function will be reference tracked 327 // and in theory should only used for **trie nodes** insertion. 
func (db *Database) insert(hash common.Hash, size int, node node) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	memcacheDirtyWriteMeter.Mark(int64(size))

	// Create the cached entry for this node: store the simplified (raw) form
	// and pre-link it to the current flush-list tail.
	entry := &cachedNode{
		node:      simplifyNode(node),
		size:      uint16(size),
		flushPrev: db.newest,
	}
	// Bump the parent refcount of every child already living in the dirty set.
	entry.forChilds(func(child common.Hash) {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	})
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}

// insertPreimage writes a new trie node pre-image to the memory database if it's
// yet unknown. The method will NOT make a copy of the slice,
// only use if the preimage will NOT be changed later on.
//
// Note, this method assumes that the database's lock is held!
func (db *Database) insertPreimage(hash common.Hash, preimage []byte) {
	// Short circuit if preimage collection is disabled
	if db.preimages == nil {
		return
	}
	// Track the preimage if a yet unknown one
	if _, ok := db.preimages[hash]; ok {
		return
	}
	db.preimages[hash] = preimage
	db.preimagesSize += common.StorageSize(common.HashLength + len(preimage))
}

// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
377 func (db *Database) node(hash common.Hash) node { 378 // Retrieve the node from the clean cache if available 379 if db.cleans != nil { 380 if enc := db.cleans.Get(nil, hash[:]); enc != nil { 381 memcacheCleanHitMeter.Mark(1) 382 memcacheCleanReadMeter.Mark(int64(len(enc))) 383 return mustDecodeNode(hash[:], enc) 384 } 385 } 386 // Retrieve the node from the dirty cache if available 387 db.lock.RLock() 388 dirty := db.dirties[hash] 389 db.lock.RUnlock() 390 391 if dirty != nil { 392 memcacheDirtyHitMeter.Mark(1) 393 memcacheDirtyReadMeter.Mark(int64(dirty.size)) 394 return dirty.obj(hash) 395 } 396 memcacheDirtyMissMeter.Mark(1) 397 398 // Content unavailable in memory, attempt to retrieve from disk 399 enc, err := db.diskdb.Get(hash[:]) 400 if err != nil || enc == nil { 401 return nil 402 } 403 if db.cleans != nil { 404 db.cleans.Set(hash[:], enc) 405 memcacheCleanMissMeter.Mark(1) 406 memcacheCleanWriteMeter.Mark(int64(len(enc))) 407 } 408 return mustDecodeNode(hash[:], enc) 409 } 410 411 // Node retrieves an encoded cached trie node from memory. If it cannot be found 412 // cached, the method queries the persistent database for the content. 
func (db *Database) Node(hash common.Hash) ([]byte, error) {
	// It doesn't make sense to retrieve the metaroot
	if hash == (common.Hash{}) {
		return nil, errors.New("not found")
	}
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))
			return enc, nil
		}
	}
	// Retrieve the node from the dirty cache if available; only the map lookup
	// needs the read lock, the entry itself is not mutated concurrently.
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(dirty.size))
		return dirty.rlp(), nil
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc := rawdb.ReadTrieNode(db.diskdb, hash)
	if len(enc) != 0 {
		if db.cleans != nil {
			// Warm the clean cache so a follow-up read skips the disk hit.
			db.cleans.Set(hash[:], enc)
			memcacheCleanMissMeter.Mark(1)
			memcacheCleanWriteMeter.Mark(int64(len(enc)))
		}
		return enc, nil
	}
	return nil, errors.New("not found")
}

// preimage retrieves a cached trie node pre-image from memory. If it cannot be
// found cached, the method queries the persistent database for the content.
func (db *Database) preimage(hash common.Hash) []byte {
	// Short circuit if preimage collection is disabled
	if db.preimages == nil {
		return nil
	}
	// Retrieve the node from cache if available
	db.lock.RLock()
	preimage := db.preimages[hash]
	db.lock.RUnlock()

	if preimage != nil {
		return preimage
	}
	// Fall back to the persistent database for previously flushed preimages.
	return rawdb.ReadPreimage(db.diskdb, hash)
}

// Nodes retrieves the hashes of all the nodes cached within the memory database.
// This method is extremely expensive and should only be used to validate internal
// states in test code.
func (db *Database) Nodes() []common.Hash {
	db.lock.RLock()
	defer db.lock.RUnlock()

	var hashes = make([]common.Hash, 0, len(db.dirties))
	for hash := range db.dirties {
		if hash != (common.Hash{}) { // Special case for "root" references/nodes
			hashes = append(hashes, hash)
		}
	}
	return hashes
}

// Reference adds a new reference from a parent node to a child node.
// This function is used to add reference between internal trie node
// and external node(e.g. storage trie root), all internal trie nodes
// are referenced together by database itself.
func (db *Database) Reference(child common.Hash, parent common.Hash) {
	db.lock.Lock()
	db.reference(child, parent)
	// While still holding the heavy lock, recompute the rough size estimates:
	// useful data + per-node metadata + external child tracking, minus the
	// metaroot's own children bookkeeping which Size should not report.
	var roughDirtiesSize = common.StorageSize((len(db.dirties)-1)*cachedNodeSize) + db.dirtiesSize + db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))
	var roughPreimagesSize = db.preimagesSize
	db.lock.Unlock()

	// Publish the snapshots under the light sizeLock so Size() stays cheap.
	db.sizeLock.Lock()
	db.roughDirtiesSize = roughDirtiesSize
	db.roughPreimagesSize = roughPreimagesSize
	db.sizeLock.Unlock()
}

// reference is the private locked version of Reference.
func (db *Database) reference(child common.Hash, parent common.Hash) {
	// If the node does not exist, it's a node pulled from disk, skip
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If the reference already exists, only duplicate for roots (parent ==
	// metaroot); internal trie nodes count each distinct edge exactly once.
	if db.dirties[parent].children == nil {
		db.dirties[parent].children = make(map[common.Hash]uint16)
		db.childrenSize += cachedNodeChildrenSize
	} else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) {
		return
	}
	node.parents++
	db.dirties[parent].children[child]++
	if db.dirties[parent].children[child] == 1 {
		db.childrenSize += common.HashLength + 2 // uint16 counter
	}
}

// Dereference removes an existing reference from a root node.
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	// Snapshot counters before the cascade so the deltas can be reported.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root, common.Hash{})

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}

// dereference is the private locked version of Dereference.
func (db *Database) dereference(child common.Hash, parent common.Hash) {
	// Dereference the parent-child edge in the external children tracking.
	node := db.dirties[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
			db.childrenSize -= (common.HashLength + 2) // uint16 counter
		}
	}
	// If the child does not exist, it's a previously committed node.
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If there are no more references to the child, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list, relinking head/tail as needed.
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.dirties[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.dirties[node.flushPrev].flushNext = common.Hash{}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		node.forChilds(func(hash common.Hash) {
			db.dereference(hash, child)
		})
		delete(db.dirties, child)
		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= cachedNodeChildrenSize
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. The metaroot's own children bookkeeping is excluded.
	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
	size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))

	// If the preimage cache got large enough, push to disk. If it's still small
	// leave for later to deduplicate writes.
	flushPreimages := db.preimagesSize > 4*1024*1024
	if flushPreimages {
		if db.preimages == nil {
			log.Error("Attempted to write preimages whilst disabled")
		} else {
			rawdb.WritePreimages(batch, db.preimages)
			if batch.ValueSize() > ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return err
				}
				batch.Reset()
			}
		}
	}
	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		rawdb.WriteTrieNode(batch, oldest, node.rlp())

		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
		if node.children != nil {
			size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data; only now is it safe for
	// readers to stop seeing the persisted nodes in the dirty cache.
	db.lock.Lock()
	defer db.lock.Unlock()

	if flushPreimages {
		if db.preimages == nil {
			log.Error("Attempted to reset preimage cache whilst disabled")
		} else {
			db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
		}
	}
	// Uncache everything that was persisted, walking from the old flush-list
	// head up to (but excluding) the first node that stayed in memory.
	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move all of the accumulated preimages into a write batch
	if db.preimages != nil {
		rawdb.WritePreimages(batch, db.preimages)
		// Since we're going to replay trie node writes into the clean cache, flush out
		// any batched pre-images before continuing.
		if err := batch.Write(); err != nil {
			return err
		}
		batch.Reset()
	}
	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher, callback); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch: replay its writes through the
	// cleaner so the persisted nodes migrate from the dirty to the clean cache.
	db.lock.Lock()
	defer db.lock.Unlock()

	batch.Replay(uncacher)
	batch.Reset()

	// Reset the storage counters and bumped metrics
	if db.preimages != nil {
		db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
	}
	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}

// commit is the private locked version of Commit.
func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner, callback func(common.Hash)) error {
	// If the node does not exist, it's a previously committed node
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	var err error
	// Commit children first (bottom-up) so a node never reaches disk before
	// the subtrie it references.
	node.forChilds(func(child common.Hash) {
		if err == nil {
			err = db.commit(child, batch, uncacher, callback)
		}
	})
	if err != nil {
		return err
	}
	// If we've reached an optimal batch size, commit and start over
	rawdb.WriteTrieNode(batch, hash, node.rlp())
	if callback != nil {
		callback(hash)
	}
	if batch.ValueSize() >= ethdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		db.lock.Lock()
		batch.Replay(uncacher)
		batch.Reset()
		db.lock.Unlock()
	}
	return nil
}

// cleaner is a database batch replayer that takes a batch of write operations
// and cleans up the trie database from anything written to disk.
type cleaner struct {
	db *Database
}

// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
// the two-phase commit is to ensure data availability while moving from
// memory to disk.
The reason behind 808 // the two-phase commit is to ensure ensure data availability while moving from 809 // memory to disk. 810 func (c *cleaner) Put(key []byte, rlp []byte) error { 811 hash := common.BytesToHash(key) 812 813 // If the node does not exist, we're done on this path 814 node, ok := c.db.dirties[hash] 815 if !ok { 816 return nil 817 } 818 // Node still exists, remove it from the flush-list 819 switch hash { 820 case c.db.oldest: 821 c.db.oldest = node.flushNext 822 c.db.dirties[node.flushNext].flushPrev = common.Hash{} 823 case c.db.newest: 824 c.db.newest = node.flushPrev 825 c.db.dirties[node.flushPrev].flushNext = common.Hash{} 826 default: 827 c.db.dirties[node.flushPrev].flushNext = node.flushNext 828 c.db.dirties[node.flushNext].flushPrev = node.flushPrev 829 } 830 // Remove the node from the dirty cache 831 delete(c.db.dirties, hash) 832 c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) 833 if node.children != nil { 834 c.db.dirtiesSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) 835 } 836 // Move the flushed node into the clean cache to prevent insta-reloads 837 if c.db.cleans != nil { 838 c.db.cleans.Set(hash[:], rlp) 839 memcacheCleanWriteMeter.Mark(int64(len(rlp))) 840 } 841 return nil 842 } 843 844 func (c *cleaner) Delete(key []byte) error { 845 panic("not implemented") 846 } 847 848 // Size returns the current storage size of the memory cache in front of the 849 // persistent database layer. 850 func (db *Database) Size() (common.StorageSize, common.StorageSize) { 851 db.sizeLock.RLock() 852 defer db.sizeLock.RUnlock() 853 return db.roughDirtiesSize, db.roughPreimagesSize 854 } 855 856 // saveCache saves clean state cache to given directory path 857 // using specified CPU cores. 
858 func (db *Database) saveCache(dir string, threads int) error { 859 if db.cleans == nil { 860 return nil 861 } 862 log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads) 863 864 start := time.Now() 865 err := db.cleans.SaveToFileConcurrent(dir, threads) 866 if err != nil { 867 log.Error("Failed to persist clean trie cache", "error", err) 868 return err 869 } 870 log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start))) 871 return nil 872 } 873 874 // SaveCache atomically saves fast cache data to the given dir using all 875 // available CPU cores. 876 func (db *Database) SaveCache(dir string) error { 877 return db.saveCache(dir, runtime.GOMAXPROCS(0)) 878 } 879 880 // SaveCachePeriodically atomically saves fast cache data to the given dir with 881 // the specified interval. All dump operation will only use a single CPU core. 882 func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) { 883 ticker := time.NewTicker(interval) 884 defer ticker.Stop() 885 886 for { 887 select { 888 case <-ticker.C: 889 db.saveCache(dir, 1) 890 case <-stopCh: 891 return 892 } 893 } 894 }