// Copyright 2018 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"errors"
	"fmt"
	"io"
	"reflect"
	"runtime"
	"sync"
	"time"

	"github.com/VictoriaMetrics/fastcache"

	"github.com/core-coin/go-core/v2/xcbdb"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/core/rawdb"
	"github.com/core-coin/go-core/v2/log"
	"github.com/core-coin/go-core/v2/metrics"
	"github.com/core-coin/go-core/v2/rlp"
)

// Metrics tracking the effectiveness of the two node caches and the cost of
// the flush (Cap), garbage collection (Dereference) and Commit cycles.
var (
	// Clean cache (read-only, already-persisted nodes) hit/miss/traffic meters.
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	// Dirty cache (reference-counted, not-yet-persisted nodes) hit/miss/traffic meters.
	memcacheDirtyHitMeter   = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil)
	memcacheDirtyMissMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil)
	memcacheDirtyReadMeter  = metrics.NewRegisteredMeter("trie/memcache/dirty/read", nil)
	memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil)

	// Timing and volume of Cap-triggered flushes to disk.
	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	// Timing and volume of Dereference-triggered garbage collection.
	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	// Timing and volume of Commit operations.
	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
//
// Note, the trie Database is **not** thread safe in its mutations, but it **is**
// thread safe in providing individual, independent node access. The rationale
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
	diskdb xcbdb.KeyValueStore // Persistent storage for matured trie nodes

	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	preimages map[common.Hash][]byte // Preimages of nodes from the secure trie

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize   common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize  common.StorageSize // Storage size of the external children tracking
	preimagesSize common.StorageSize // Storage size of the preimages cache

	lock sync.RWMutex // Protects reads of the caches above; mutations are externally synchronized
}

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time store them
// in the same cache fields).
type rawNode []byte

func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// EncodeRLP writes the blob verbatim: the bytes are already valid RLP.
func (n rawNode) EncodeRLP(w io.Writer) error {
	_, err := w.Write(n)
	return err
}

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

func (n rawFullNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }

func (n rawFullNode) EncodeRLP(w io.Writer) error {
	var nodes [17]node

	// Replace nil slots with the canonical nil value so the RLP layout matches
	// the expanded fullNode encoding.
	for i, child := range n {
		if child != nil {
			nodes[i] = child
		} else {
			nodes[i] = nilValueNode
		}
	}
	return rlp.Encode(w, nodes)
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())

// cachedNodeChildrenSize is the raw size of an initialized but empty external
// reference map.
161 const cachedNodeChildrenSize = 48 162 163 // rlp returns the raw rlp encoded blob of the cached trie node, either directly 164 // from the cache, or by regenerating it from the collapsed node. 165 func (n *cachedNode) rlp() []byte { 166 if node, ok := n.node.(rawNode); ok { 167 return node 168 } 169 blob, err := rlp.EncodeToBytes(n.node) 170 if err != nil { 171 panic(err) 172 } 173 return blob 174 } 175 176 // obj returns the decoded and expanded trie node, either directly from the cache, 177 // or by regenerating it from the rlp encoded blob. 178 func (n *cachedNode) obj(hash common.Hash) node { 179 if node, ok := n.node.(rawNode); ok { 180 return mustDecodeNode(hash[:], node) 181 } 182 return expandNode(hash[:], n.node) 183 } 184 185 // forChilds invokes the callback for all the tracked children of this node, 186 // both the implicit ones from inside the node as well as the explicit ones 187 // from outside the node. 188 func (n *cachedNode) forChilds(onChild func(hash common.Hash)) { 189 for child := range n.children { 190 onChild(child) 191 } 192 if _, ok := n.node.(rawNode); !ok { 193 forGatherChildren(n.node, onChild) 194 } 195 } 196 197 // forGatherChildren traverses the node hierarchy of a collapsed storage node and 198 // invokes the callback for all the hashnode children. 199 func forGatherChildren(n node, onChild func(hash common.Hash)) { 200 switch n := n.(type) { 201 case *rawShortNode: 202 forGatherChildren(n.Val, onChild) 203 case rawFullNode: 204 for i := 0; i < 16; i++ { 205 forGatherChildren(n[i], onChild) 206 } 207 case hashNode: 208 onChild(common.BytesToHash(n)) 209 case valueNode, nil, rawNode: 210 default: 211 panic(fmt.Sprintf("unknown node type: %T", n)) 212 } 213 } 214 215 // simplifyNode traverses the hierarchy of an expanded memory node and discards 216 // all the internal caches, returning a node that only contains the raw data. 
217 func simplifyNode(n node) node { 218 switch n := n.(type) { 219 case *shortNode: 220 // Short nodes discard the flags and cascade 221 return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} 222 223 case *fullNode: 224 // Full nodes discard the flags and cascade 225 node := rawFullNode(n.Children) 226 for i := 0; i < len(node); i++ { 227 if node[i] != nil { 228 node[i] = simplifyNode(node[i]) 229 } 230 } 231 return node 232 233 case valueNode, hashNode, rawNode: 234 return n 235 236 default: 237 panic(fmt.Sprintf("unknown node type: %T", n)) 238 } 239 } 240 241 // expandNode traverses the node hierarchy of a collapsed storage node and converts 242 // all fields and keys into expanded memory form. 243 func expandNode(hash hashNode, n node) node { 244 switch n := n.(type) { 245 case *rawShortNode: 246 // Short nodes need key and child expansion 247 return &shortNode{ 248 Key: compactToHex(n.Key), 249 Val: expandNode(nil, n.Val), 250 flags: nodeFlag{ 251 hash: hash, 252 }, 253 } 254 255 case rawFullNode: 256 // Full nodes need child expansion 257 node := &fullNode{ 258 flags: nodeFlag{ 259 hash: hash, 260 }, 261 } 262 for i := 0; i < len(node.Children); i++ { 263 if n[i] != nil { 264 node.Children[i] = expandNode(nil, n[i]) 265 } 266 } 267 return node 268 269 case valueNode, hashNode: 270 return n 271 272 default: 273 panic(fmt.Sprintf("unknown node type: %T", n)) 274 } 275 } 276 277 // Config defines all necessary options for database. 278 type Config struct { 279 Cache int // Memory allowance (MB) to use for caching trie nodes in memory 280 Journal string // Journal of clean cache to survive node restarts 281 Preimages bool // Flag whether the preimage of trie key is recorded 282 } 283 284 // NewDatabase creates a new trie database to store ephemeral trie content before 285 // its written out to disk or garbage collected. No read cache is created, so all 286 // data retrievals will hit the underlying disk database. 
287 func NewDatabase(diskdb xcbdb.KeyValueStore) *Database { 288 return NewDatabaseWithConfig(diskdb, nil) 289 } 290 291 // NewDatabaseWithConfig creates a new trie database to store ephemeral trie content 292 // before its written out to disk or garbage collected. It also acts as a read cache 293 // for nodes loaded from disk. 294 func NewDatabaseWithConfig(diskdb xcbdb.KeyValueStore, config *Config) *Database { 295 var cleans *fastcache.Cache 296 if config != nil && config.Cache > 0 { 297 if config.Journal == "" { 298 cleans = fastcache.New(config.Cache * 1024 * 1024) 299 } else { 300 cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024) 301 } 302 } 303 db := &Database{ 304 diskdb: diskdb, 305 cleans: cleans, 306 dirties: map[common.Hash]*cachedNode{{}: { 307 children: make(map[common.Hash]uint16), 308 }}, 309 } 310 if config == nil || config.Preimages { // TODO(raisty): Flip to default off in the future 311 db.preimages = make(map[common.Hash][]byte) 312 } 313 return db 314 } 315 316 // DiskDB retrieves the persistent storage backing the trie database. 317 func (db *Database) DiskDB() xcbdb.KeyValueStore { 318 return db.diskdb 319 } 320 321 // insert inserts a collapsed trie node into the memory database. 322 // The blob size must be specified to allow proper size tracking. 323 // All nodes inserted by this function will be reference tracked 324 // and in theory should only used for **trie nodes** insertion. 
325 func (db *Database) insert(hash common.Hash, size int, node node) { 326 // If the node's already cached, skip 327 if _, ok := db.dirties[hash]; ok { 328 return 329 } 330 memcacheDirtyWriteMeter.Mark(int64(size)) 331 332 // Create the cached entry for this node 333 entry := &cachedNode{ 334 node: simplifyNode(node), 335 size: uint16(size), 336 flushPrev: db.newest, 337 } 338 entry.forChilds(func(child common.Hash) { 339 if c := db.dirties[child]; c != nil { 340 c.parents++ 341 } 342 }) 343 db.dirties[hash] = entry 344 345 // Update the flush-list endpoints 346 if db.oldest == (common.Hash{}) { 347 db.oldest, db.newest = hash, hash 348 } else { 349 db.dirties[db.newest].flushNext, db.newest = hash, hash 350 } 351 db.dirtiesSize += common.StorageSize(common.HashLength + entry.size) 352 } 353 354 // insertPreimage writes a new trie node pre-image to the memory database if it's 355 // yet unknown. The method will NOT make a copy of the slice, 356 // only use if the preimage will NOT be changed later on. 357 // 358 // Note, this method assumes that the database's lock is held! 359 func (db *Database) insertPreimage(hash common.Hash, preimage []byte) { 360 // Short circuit if preimage collection is disabled 361 if db.preimages == nil { 362 return 363 } 364 // Track the preimage if a yet unknown one 365 if _, ok := db.preimages[hash]; ok { 366 return 367 } 368 db.preimages[hash] = preimage 369 db.preimagesSize += common.StorageSize(common.HashLength + len(preimage)) 370 } 371 372 // node retrieves a cached trie node from memory, or returns nil if none can be 373 // found in the memory cache. 
374 func (db *Database) node(hash common.Hash) node { 375 // Retrieve the node from the clean cache if available 376 if db.cleans != nil { 377 if enc := db.cleans.Get(nil, hash[:]); enc != nil { 378 memcacheCleanHitMeter.Mark(1) 379 memcacheCleanReadMeter.Mark(int64(len(enc))) 380 return mustDecodeNode(hash[:], enc) 381 } 382 } 383 // Retrieve the node from the dirty cache if available 384 db.lock.RLock() 385 dirty := db.dirties[hash] 386 db.lock.RUnlock() 387 388 if dirty != nil { 389 memcacheDirtyHitMeter.Mark(1) 390 memcacheDirtyReadMeter.Mark(int64(dirty.size)) 391 return dirty.obj(hash) 392 } 393 memcacheDirtyMissMeter.Mark(1) 394 395 // Content unavailable in memory, attempt to retrieve from disk 396 enc, err := db.diskdb.Get(hash[:]) 397 if err != nil || enc == nil { 398 return nil 399 } 400 if db.cleans != nil { 401 db.cleans.Set(hash[:], enc) 402 memcacheCleanMissMeter.Mark(1) 403 memcacheCleanWriteMeter.Mark(int64(len(enc))) 404 } 405 return mustDecodeNode(hash[:], enc) 406 } 407 408 // Node retrieves an encoded cached trie node from memory. If it cannot be found 409 // cached, the method queries the persistent database for the content. 
410 func (db *Database) Node(hash common.Hash) ([]byte, error) { 411 // It doesn't make sense to retrieve the metaroot 412 if hash == (common.Hash{}) { 413 return nil, errors.New("not found") 414 } 415 // Retrieve the node from the clean cache if available 416 if db.cleans != nil { 417 if enc := db.cleans.Get(nil, hash[:]); enc != nil { 418 memcacheCleanHitMeter.Mark(1) 419 memcacheCleanReadMeter.Mark(int64(len(enc))) 420 return enc, nil 421 } 422 } 423 // Retrieve the node from the dirty cache if available 424 db.lock.RLock() 425 dirty := db.dirties[hash] 426 db.lock.RUnlock() 427 428 if dirty != nil { 429 memcacheDirtyHitMeter.Mark(1) 430 memcacheDirtyReadMeter.Mark(int64(dirty.size)) 431 return dirty.rlp(), nil 432 } 433 memcacheDirtyMissMeter.Mark(1) 434 435 // Content unavailable in memory, attempt to retrieve from disk 436 enc := rawdb.ReadTrieNode(db.diskdb, hash) 437 if len(enc) != 0 { 438 if db.cleans != nil { 439 db.cleans.Set(hash[:], enc) 440 memcacheCleanMissMeter.Mark(1) 441 memcacheCleanWriteMeter.Mark(int64(len(enc))) 442 } 443 return enc, nil 444 } 445 return nil, errors.New("not found") 446 } 447 448 // preimage retrieves a cached trie node pre-image from memory. If it cannot be 449 // found cached, the method queries the persistent database for the content. 450 func (db *Database) preimage(hash common.Hash) []byte { 451 // Short circuit if preimage collection is disabled 452 if db.preimages == nil { 453 return nil 454 } 455 // Retrieve the node from cache if available 456 db.lock.RLock() 457 preimage := db.preimages[hash] 458 db.lock.RUnlock() 459 460 if preimage != nil { 461 return preimage 462 } 463 return rawdb.ReadPreimage(db.diskdb, hash) 464 } 465 466 // Nodes retrieves the hashes of all the nodes cached within the memory database. 467 // This method is extremely expensive and should only be used to validate internal 468 // states in test code. 
469 func (db *Database) Nodes() []common.Hash { 470 db.lock.RLock() 471 defer db.lock.RUnlock() 472 473 var hashes = make([]common.Hash, 0, len(db.dirties)) 474 for hash := range db.dirties { 475 if hash != (common.Hash{}) { // Special case for "root" references/nodes 476 hashes = append(hashes, hash) 477 } 478 } 479 return hashes 480 } 481 482 // Reference adds a new reference from a parent node to a child node. 483 // This function is used to add reference between internal trie node 484 // and external node(e.g. storage trie root), all internal trie nodes 485 // are referenced together by database itself. 486 func (db *Database) Reference(child common.Hash, parent common.Hash) { 487 db.lock.Lock() 488 defer db.lock.Unlock() 489 490 db.reference(child, parent) 491 } 492 493 // reference is the private locked version of Reference. 494 func (db *Database) reference(child common.Hash, parent common.Hash) { 495 // If the node does not exist, it's a node pulled from disk, skip 496 node, ok := db.dirties[child] 497 if !ok { 498 return 499 } 500 // If the reference already exists, only duplicate for roots 501 if db.dirties[parent].children == nil { 502 db.dirties[parent].children = make(map[common.Hash]uint16) 503 db.childrenSize += cachedNodeChildrenSize 504 } else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) { 505 return 506 } 507 node.parents++ 508 db.dirties[parent].children[child]++ 509 if db.dirties[parent].children[child] == 1 { 510 db.childrenSize += common.HashLength + 2 // uint16 counter 511 } 512 } 513 514 // Dereference removes an existing reference from a root node. 
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root, common.Hash{})

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}

// dereference is the private locked version of Dereference.
func (db *Database) dereference(child common.Hash, parent common.Hash) {
	// Dereference the parent-child
	node := db.dirties[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
			db.childrenSize -= (common.HashLength + 2) // uint16 counter
		}
	}
	// If the child does not exist, it's a previously committed node.
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If there are no more references to the child, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list, relinking its neighbours (the
		// metaroot entry guarantees lookups of the zero hash never nil-panic)
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.dirties[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.dirties[node.flushPrev].flushNext = common.Hash{}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		node.forChilds(func(hash common.Hash) {
			db.dereference(hash, child)
		})
		delete(db.dirties, child)
		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= cachedNodeChildrenSize
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. The metaroot entry is excluded from both counts.
	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
	size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))

	// If the preimage cache got large enough, push to disk. If it's still small
	// leave for later to deduplicate writes.
	flushPreimages := db.preimagesSize > 4*1024*1024
	if flushPreimages {
		if db.preimages == nil {
			log.Error("Attempted to write preimages whilst disabled")
		} else {
			rawdb.WritePreimages(batch, db.preimages)
			if batch.ValueSize() > xcbdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return err
				}
				batch.Reset()
			}
		}
	}
	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		rawdb.WriteTrieNode(batch, oldest, node.rlp())

		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= xcbdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
		if node.children != nil {
			size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data
	db.lock.Lock()
	defer db.lock.Unlock()

	if flushPreimages {
		if db.preimages == nil {
			log.Error("Attempted to reset preimage cache whilst disabled")
		} else {
			db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
		}
	}
	// Uncache everything that was persisted: walk the flush-list from the head
	// up to (excluding) the first node that was NOT written above.
	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool, callback func(common.Hash)) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move all of the accumulated preimages into a write batch
	if db.preimages != nil {
		rawdb.WritePreimages(batch, db.preimages)
		if batch.ValueSize() > xcbdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		// Since we're going to replay trie node writes into the clean cache, flush out
		// any batched pre-images before continuing.
		if err := batch.Write(); err != nil {
			return err
		}
		batch.Reset()
	}
	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher, callback); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	db.lock.Lock()
	defer db.lock.Unlock()

	batch.Replay(uncacher)
	batch.Reset()

	// Reset the storage counters and bumped metrics
	if db.preimages != nil {
		db.preimages, db.preimagesSize = make(map[common.Hash][]byte), 0
	}
	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}

// commit is the private locked version of Commit.
764 func (db *Database) commit(hash common.Hash, batch xcbdb.Batch, uncacher *cleaner, callback func(common.Hash)) error { 765 // If the node does not exist, it's a previously committed node 766 node, ok := db.dirties[hash] 767 if !ok { 768 return nil 769 } 770 var err error 771 node.forChilds(func(child common.Hash) { 772 if err == nil { 773 err = db.commit(child, batch, uncacher, callback) 774 } 775 }) 776 if err != nil { 777 return err 778 } 779 // If we've reached an optimal batch size, commit and start over 780 rawdb.WriteTrieNode(batch, hash, node.rlp()) 781 if callback != nil { 782 callback(hash) 783 } 784 if batch.ValueSize() >= xcbdb.IdealBatchSize { 785 if err := batch.Write(); err != nil { 786 return err 787 } 788 db.lock.Lock() 789 batch.Replay(uncacher) 790 batch.Reset() 791 db.lock.Unlock() 792 } 793 return nil 794 } 795 796 // cleaner is a database batch replayer that takes a batch of write operations 797 // and cleans up the trie database from anything written to disk. 798 type cleaner struct { 799 db *Database 800 } 801 802 // Put reacts to database writes and implements dirty data uncaching. This is the 803 // post-processing step of a commit operation where the already persisted trie is 804 // removed from the dirty cache and moved into the clean cache. The reason behind 805 // the two-phase commit is to ensure ensure data availability while moving from 806 // memory to disk. 
807 func (c *cleaner) Put(key []byte, rlp []byte) error { 808 hash := common.BytesToHash(key) 809 810 // If the node does not exist, we're done on this path 811 node, ok := c.db.dirties[hash] 812 if !ok { 813 return nil 814 } 815 // Node still exists, remove it from the flush-list 816 switch hash { 817 case c.db.oldest: 818 c.db.oldest = node.flushNext 819 c.db.dirties[node.flushNext].flushPrev = common.Hash{} 820 case c.db.newest: 821 c.db.newest = node.flushPrev 822 c.db.dirties[node.flushPrev].flushNext = common.Hash{} 823 default: 824 c.db.dirties[node.flushPrev].flushNext = node.flushNext 825 c.db.dirties[node.flushNext].flushPrev = node.flushPrev 826 } 827 // Remove the node from the dirty cache 828 delete(c.db.dirties, hash) 829 c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size)) 830 if node.children != nil { 831 c.db.dirtiesSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2)) 832 } 833 // Move the flushed node into the clean cache to prevent insta-reloads 834 if c.db.cleans != nil { 835 c.db.cleans.Set(hash[:], rlp) 836 memcacheCleanWriteMeter.Mark(int64(len(rlp))) 837 } 838 return nil 839 } 840 841 func (c *cleaner) Delete(key []byte) error { 842 panic("not implemented") 843 } 844 845 // Size returns the current storage size of the memory cache in front of the 846 // persistent database layer. 847 func (db *Database) Size() (common.StorageSize, common.StorageSize) { 848 db.lock.RLock() 849 defer db.lock.RUnlock() 850 851 // db.dirtiesSize only contains the useful data in the cache, but when reporting 852 // the total memory consumption, the maintenance metadata is also needed to be 853 // counted. 
854 var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize) 855 var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2)) 856 return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, db.preimagesSize 857 } 858 859 // saveCache saves clean state cache to given directory path 860 // using specified CPU cores. 861 func (db *Database) saveCache(dir string, threads int) error { 862 if db.cleans == nil { 863 return nil 864 } 865 log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads) 866 867 start := time.Now() 868 err := db.cleans.SaveToFileConcurrent(dir, threads) 869 if err != nil { 870 log.Error("Failed to persist clean trie cache", "error", err) 871 return err 872 } 873 log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start))) 874 return nil 875 } 876 877 // SaveCache atomically saves fast cache data to the given dir using all 878 // available CPU cores. 879 func (db *Database) SaveCache(dir string) error { 880 return db.saveCache(dir, runtime.GOMAXPROCS(0)) 881 } 882 883 // SaveCachePeriodically atomically saves fast cache data to the given dir with 884 // the specified interval. All dump operation will only use a single CPU core. 885 func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) { 886 ticker := time.NewTicker(interval) 887 defer ticker.Stop() 888 889 for { 890 select { 891 case <-ticker.C: 892 db.saveCache(dir, 1) 893 case <-stopCh: 894 return 895 } 896 } 897 }