github.com/tacshi/go-ethereum@v0.0.0-20230616113857-84a434e20921/trie/database.go (about) 1 // Copyright 2018 The go-ethereum Authors 2 // This file is part of the go-ethereum library. 3 // 4 // The go-ethereum library is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU Lesser General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // The go-ethereum library is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU Lesser General Public License for more details. 13 // 14 // You should have received a copy of the GNU Lesser General Public License 15 // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>. 16 17 package trie 18 19 import ( 20 "errors" 21 "fmt" 22 "io" 23 "reflect" 24 "runtime" 25 "sync" 26 "time" 27 28 "github.com/VictoriaMetrics/fastcache" 29 "github.com/tacshi/go-ethereum/common" 30 "github.com/tacshi/go-ethereum/core/rawdb" 31 "github.com/tacshi/go-ethereum/core/types" 32 "github.com/tacshi/go-ethereum/ethdb" 33 "github.com/tacshi/go-ethereum/log" 34 "github.com/tacshi/go-ethereum/metrics" 35 "github.com/tacshi/go-ethereum/rlp" 36 ) 37 38 var ( 39 memcacheCleanHitMeter = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil) 40 memcacheCleanMissMeter = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil) 41 memcacheCleanReadMeter = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil) 42 memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil) 43 44 memcacheDirtyHitMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/hit", nil) 45 memcacheDirtyMissMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/miss", nil) 46 memcacheDirtyReadMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/read", 
nil) 47 memcacheDirtyWriteMeter = metrics.NewRegisteredMeter("trie/memcache/dirty/write", nil) 48 49 memcacheFlushTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil) 50 memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil) 51 memcacheFlushSizeMeter = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil) 52 53 memcacheGCTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil) 54 memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil) 55 memcacheGCSizeMeter = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil) 56 57 memcacheCommitTimeTimer = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil) 58 memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil) 59 memcacheCommitSizeMeter = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil) 60 ) 61 62 // Database is an intermediate write layer between the trie data structures and 63 // the disk database. The aim is to accumulate trie writes in-memory and only 64 // periodically flush a couple tries to disk, garbage collecting the remainder. 65 // 66 // Note, the trie Database is **not** thread safe in its mutations, but it **is** 67 // thread safe in providing individual, independent node access. The rationale 68 // behind this split design is to provide read access to RPC handlers and sync 69 // servers even while the trie is executing expensive garbage collection. 
type Database struct {
	diskdb ethdb.Database // Persistent storage for matured trie nodes

	cleans  *fastcache.Cache            // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty trie nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize  common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize common.StorageSize // Storage size of the external children tracking
	preimages    *preimageStore     // The store for caching preimages

	// lock guards read access to the dirty cache (node/Node/Nodes/Size) against
	// the mutators (insert via Update, Dereference, Cap, Commit).
	lock sync.RWMutex
}

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time store them
// in the same cache fields).
type rawNode []byte

// cache and fstring implement the node interface only formally: a rawNode is a
// storage-layer representation and must never appear inside a live trie.
func (n rawNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// EncodeRLP writes the blob verbatim: the bytes are already valid RLP.
func (n rawNode) EncodeRLP(w io.Writer) error {
	_, err := w.Write(n)
	return err
}

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

// cache and fstring implement the node interface only formally: a rawFullNode
// is a storage-layer representation and must never appear inside a live trie.
func (n rawFullNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// EncodeRLP encodes the 17 children through the shared node encoder.
func (n rawFullNode) EncodeRLP(w io.Writer) error {
	eb := rlp.NewEncoderBuffer(w)
	n.encode(eb)
	return eb.Flush()
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

// cache and fstring implement the node interface only formally: a rawShortNode
// is a storage-layer representation and must never appear inside a live trie.
func (n rawShortNode) cache() (hashNode, bool)   { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached trie node
// in the memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included. It's an approximate size, but should be a lot better
// than not counting them.
var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size())

// cachedNodeChildrenSize is the raw size of an initialized but empty external
// reference map.
151 const cachedNodeChildrenSize = 48 152 153 // rlp returns the raw rlp encoded blob of the cached trie node, either directly 154 // from the cache, or by regenerating it from the collapsed node. 155 func (n *cachedNode) rlp() []byte { 156 if node, ok := n.node.(rawNode); ok { 157 return node 158 } 159 return nodeToBytes(n.node) 160 } 161 162 // obj returns the decoded and expanded trie node, either directly from the cache, 163 // or by regenerating it from the rlp encoded blob. 164 func (n *cachedNode) obj(hash common.Hash) node { 165 if node, ok := n.node.(rawNode); ok { 166 // The raw-blob format nodes are loaded either from the 167 // clean cache or the database, they are all in their own 168 // copy and safe to use unsafe decoder. 169 return mustDecodeNodeUnsafe(hash[:], node) 170 } 171 return expandNode(hash[:], n.node) 172 } 173 174 // forChilds invokes the callback for all the tracked children of this node, 175 // both the implicit ones from inside the node as well as the explicit ones 176 // from outside the node. 177 func (n *cachedNode) forChilds(onChild func(hash common.Hash)) { 178 for child := range n.children { 179 onChild(child) 180 } 181 if _, ok := n.node.(rawNode); !ok { 182 forGatherChildren(n.node, onChild) 183 } 184 } 185 186 // forGatherChildren traverses the node hierarchy of a collapsed storage node and 187 // invokes the callback for all the hashnode children. 188 func forGatherChildren(n node, onChild func(hash common.Hash)) { 189 switch n := n.(type) { 190 case *rawShortNode: 191 forGatherChildren(n.Val, onChild) 192 case rawFullNode: 193 for i := 0; i < 16; i++ { 194 forGatherChildren(n[i], onChild) 195 } 196 case hashNode: 197 onChild(common.BytesToHash(n)) 198 case valueNode, nil, rawNode: 199 default: 200 panic(fmt.Sprintf("unknown node type: %T", n)) 201 } 202 } 203 204 // simplifyNode traverses the hierarchy of an expanded memory node and discards 205 // all the internal caches, returning a node that only contains the raw data. 
206 func simplifyNode(n node) node { 207 switch n := n.(type) { 208 case *shortNode: 209 // Short nodes discard the flags and cascade 210 return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} 211 212 case *fullNode: 213 // Full nodes discard the flags and cascade 214 node := rawFullNode(n.Children) 215 for i := 0; i < len(node); i++ { 216 if node[i] != nil { 217 node[i] = simplifyNode(node[i]) 218 } 219 } 220 return node 221 222 case valueNode, hashNode, rawNode: 223 return n 224 225 default: 226 panic(fmt.Sprintf("unknown node type: %T", n)) 227 } 228 } 229 230 // expandNode traverses the node hierarchy of a collapsed storage node and converts 231 // all fields and keys into expanded memory form. 232 func expandNode(hash hashNode, n node) node { 233 switch n := n.(type) { 234 case *rawShortNode: 235 // Short nodes need key and child expansion 236 return &shortNode{ 237 Key: compactToHex(n.Key), 238 Val: expandNode(nil, n.Val), 239 flags: nodeFlag{ 240 hash: hash, 241 }, 242 } 243 244 case rawFullNode: 245 // Full nodes need child expansion 246 node := &fullNode{ 247 flags: nodeFlag{ 248 hash: hash, 249 }, 250 } 251 for i := 0; i < len(node.Children); i++ { 252 if n[i] != nil { 253 node.Children[i] = expandNode(nil, n[i]) 254 } 255 } 256 return node 257 258 case valueNode, hashNode: 259 return n 260 261 default: 262 panic(fmt.Sprintf("unknown node type: %T", n)) 263 } 264 } 265 266 // Config defines all necessary options for database. 267 type Config struct { 268 Cache int // Memory allowance (MB) to use for caching trie nodes in memory 269 Journal string // Journal of clean cache to survive node restarts 270 Preimages bool // Flag whether the preimage of trie key is recorded 271 } 272 273 // NewDatabase creates a new trie database to store ephemeral trie content before 274 // its written out to disk or garbage collected. No read cache is created, so all 275 // data retrievals will hit the underlying disk database. 
276 func NewDatabase(diskdb ethdb.Database) *Database { 277 return NewDatabaseWithConfig(diskdb, nil) 278 } 279 280 // NewDatabaseWithConfig creates a new trie database to store ephemeral trie content 281 // before its written out to disk or garbage collected. It also acts as a read cache 282 // for nodes loaded from disk. 283 func NewDatabaseWithConfig(diskdb ethdb.Database, config *Config) *Database { 284 var cleans *fastcache.Cache 285 if config != nil && config.Cache > 0 { 286 if config.Journal == "" { 287 cleans = fastcache.New(config.Cache * 1024 * 1024) 288 } else { 289 cleans = fastcache.LoadFromFileOrNew(config.Journal, config.Cache*1024*1024) 290 } 291 } 292 var preimage *preimageStore 293 if config != nil && config.Preimages { 294 preimage = newPreimageStore(diskdb) 295 } 296 db := &Database{ 297 diskdb: diskdb, 298 cleans: cleans, 299 dirties: map[common.Hash]*cachedNode{{}: { 300 children: make(map[common.Hash]uint16), 301 }}, 302 preimages: preimage, 303 } 304 runtime.SetFinalizer(db, (*Database).finalizer) 305 return db 306 } 307 308 // must call Reset() to reclaim memory used by fastcache 309 func (db *Database) finalizer() { 310 if db.cleans != nil { 311 db.cleans.Reset() 312 } 313 } 314 315 // insert inserts a simplified trie node into the memory database. 316 // All nodes inserted by this function will be reference tracked 317 // and in theory should only used for **trie nodes** insertion. 
// Callers are expected to hold db.lock (Update does); this mutator touches the
// dirty map and the flush-list without taking it itself.
func (db *Database) insert(hash common.Hash, size int, node node) {
	// If the node's already cached, skip
	if _, ok := db.dirties[hash]; ok {
		return
	}
	memcacheDirtyWriteMeter.Mark(int64(size))

	// Create the cached entry for this node, linked as the new flush-list tail
	entry := &cachedNode{
		node:      node,
		size:      uint16(size),
		flushPrev: db.newest,
	}
	// Bump the reference count of every already-dirty child; children must be
	// inserted before their parent for this to link up correctly.
	entry.forChilds(func(child common.Hash) {
		if c := db.dirties[child]; c != nil {
			c.parents++
		}
	})
	db.dirties[hash] = entry

	// Update the flush-list endpoints
	if db.oldest == (common.Hash{}) {
		db.oldest, db.newest = hash, hash
	} else {
		db.dirties[db.newest].flushNext, db.newest = hash, hash
	}
	db.dirtiesSize += common.StorageSize(common.HashLength + entry.size)
}

// node retrieves a cached trie node from memory, or returns nil if none can be
// found in the memory cache.
func (db *Database) node(hash common.Hash) node {
	// Retrieve the node from the clean cache if available
	if db.cleans != nil {
		if enc := db.cleans.Get(nil, hash[:]); enc != nil {
			memcacheCleanHitMeter.Mark(1)
			memcacheCleanReadMeter.Mark(int64(len(enc)))

			// The returned value from cache is in its own copy,
			// safe to use mustDecodeNodeUnsafe for decoding.
			return mustDecodeNodeUnsafe(hash[:], enc)
		}
	}
	// Retrieve the node from the dirty cache if available; only the map access
	// needs the read lock, decoding happens outside it
	db.lock.RLock()
	dirty := db.dirties[hash]
	db.lock.RUnlock()

	if dirty != nil {
		memcacheDirtyHitMeter.Mark(1)
		memcacheDirtyReadMeter.Mark(int64(dirty.size))
		return dirty.obj(hash)
	}
	memcacheDirtyMissMeter.Mark(1)

	// Content unavailable in memory, attempt to retrieve from disk
	enc, err := db.diskdb.Get(hash[:])
	if err != nil || enc == nil {
		return nil
	}
	if db.cleans != nil {
		db.cleans.Set(hash[:], enc)
		memcacheCleanMissMeter.Mark(1)
		memcacheCleanWriteMeter.Mark(int64(len(enc)))
	}
	// The returned value from database is in its own copy,
	// safe to use mustDecodeNodeUnsafe for decoding.
	return mustDecodeNodeUnsafe(hash[:], enc)
}

// Node retrieves an encoded cached trie node from memory. If it cannot be found
// cached, the method queries the persistent database for the content.
390 func (db *Database) Node(hash common.Hash) ([]byte, error) { 391 // It doesn't make sense to retrieve the metaroot 392 if hash == (common.Hash{}) { 393 return nil, errors.New("not found") 394 } 395 // Retrieve the node from the clean cache if available 396 if db.cleans != nil { 397 if enc := db.cleans.Get(nil, hash[:]); enc != nil { 398 memcacheCleanHitMeter.Mark(1) 399 memcacheCleanReadMeter.Mark(int64(len(enc))) 400 return enc, nil 401 } 402 } 403 // Retrieve the node from the dirty cache if available 404 db.lock.RLock() 405 dirty := db.dirties[hash] 406 db.lock.RUnlock() 407 408 if dirty != nil { 409 memcacheDirtyHitMeter.Mark(1) 410 memcacheDirtyReadMeter.Mark(int64(dirty.size)) 411 return dirty.rlp(), nil 412 } 413 memcacheDirtyMissMeter.Mark(1) 414 415 // Content unavailable in memory, attempt to retrieve from disk 416 enc := rawdb.ReadLegacyTrieNode(db.diskdb, hash) 417 if len(enc) != 0 { 418 if db.cleans != nil { 419 db.cleans.Set(hash[:], enc) 420 memcacheCleanMissMeter.Mark(1) 421 memcacheCleanWriteMeter.Mark(int64(len(enc))) 422 } 423 return enc, nil 424 } 425 return nil, errors.New("not found") 426 } 427 428 // Nodes retrieves the hashes of all the nodes cached within the memory database. 429 // This method is extremely expensive and should only be used to validate internal 430 // states in test code. 431 func (db *Database) Nodes() []common.Hash { 432 db.lock.RLock() 433 defer db.lock.RUnlock() 434 435 var hashes = make([]common.Hash, 0, len(db.dirties)) 436 for hash := range db.dirties { 437 if hash != (common.Hash{}) { // Special case for "root" references/nodes 438 hashes = append(hashes, hash) 439 } 440 } 441 return hashes 442 } 443 444 // Reference adds a new reference from a parent node to a child node. 445 // This function is used to add reference between internal trie node 446 // and external node(e.g. storage trie root), all internal trie nodes 447 // are referenced together by database itself. 
func (db *Database) Reference(child common.Hash, parent common.Hash) {
	db.lock.Lock()
	defer db.lock.Unlock()

	db.reference(child, parent)
}

// reference is the private locked version of Reference.
func (db *Database) reference(child common.Hash, parent common.Hash) {
	// If the node does not exist, it's a node pulled from disk, skip
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If the reference already exists, only duplicate for roots: the metaroot
	// parent (zero hash) may accumulate multiple counts for the same child,
	// any other parent counts a given child at most once
	if db.dirties[parent].children == nil {
		db.dirties[parent].children = make(map[common.Hash]uint16)
		db.childrenSize += cachedNodeChildrenSize
	} else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) {
		return
	}
	node.parents++
	db.dirties[parent].children[child]++
	if db.dirties[parent].children[child] == 1 {
		// First reference from this parent: account for the map entry
		db.childrenSize += common.HashLength + 2 // uint16 counter
	}
}

// Dereference removes an existing reference from a root node.
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	// Snapshot the counters so the garbage collected delta can be reported
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root, common.Hash{})

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}

// dereference is the private locked version of Dereference.
func (db *Database) dereference(child common.Hash, parent common.Hash) {
	// Dereference the parent-child relationship, dropping the child from the
	// parent's external reference map once its counter reaches zero
	node := db.dirties[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
			db.childrenSize -= (common.HashLength + 2) // uint16 counter
		}
	}
	// If the child does not exist, it's a previously committed node.
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If there are no more references to the child, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list, relinking its neighbours
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.dirties[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.dirties[node.flushPrev].flushNext = common.Hash{}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		node.forChilds(func(hash common.Hash) {
			db.dereference(hash, child)
		})
		delete(db.dirties, child)
		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= cachedNodeChildrenSize
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. The metaroot entry (and its child refs) is excluded from the total.
	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
	size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))

	// If the preimage cache got large enough, push to disk. If it's still small
	// leave for later to deduplicate writes.
	if db.preimages != nil {
		if err := db.preimages.commit(false); err != nil {
			return err
		}
	}
	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		rawdb.WriteLegacyTrieNode(batch, oldest, node.rlp())

		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
		if node.children != nil {
			size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data; only now is the dirty cache
	// mutated, so readers never observe a node missing from both memory and disk
	db.lock.Lock()
	defer db.lock.Unlock()

	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool) error {
	if node == (common.Hash{}) {
		// There's no data to commit in this node
		return nil
	}

	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move all of the accumulated preimages into a write batch
	if db.preimages != nil {
		if err := db.preimages.commit(true); err != nil {
			return err
		}
	}
	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch: replaying the batch through the
	// cleaner moves each persisted node from the dirty to the clean cache
	db.lock.Lock()
	defer db.lock.Unlock()
	if err := batch.Replay(uncacher); err != nil {
		return err
	}
	batch.Reset()

	// Reset the storage counters and bumped metrics
	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}

// commit is the private locked version of Commit.
func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error {
	// If the node does not exist, it's a previously committed node
	node, ok := db.dirties[hash]
	if !ok {
		return nil
	}
	// Commit children depth-first so parents never hit disk before their children
	var err error
	node.forChilds(func(child common.Hash) {
		if err == nil {
			err = db.commit(child, batch, uncacher)
		}
	})
	if err != nil {
		return err
	}
	// If we've reached an optimal batch size, commit and start over
	rawdb.WriteLegacyTrieNode(batch, hash, node.rlp())
	if batch.ValueSize() >= ethdb.IdealBatchSize {
		if err := batch.Write(); err != nil {
			return err
		}
		db.lock.Lock()
		err := batch.Replay(uncacher)
		batch.Reset()
		db.lock.Unlock()
		if err != nil {
			return err
		}
	}
	return nil
}

// cleaner is a database batch replayer that takes a batch of write operations
// and cleans up the trie database from anything written to disk.
type cleaner struct {
	db *Database
}

// Put reacts to database writes and implements dirty data uncaching. This is the
// post-processing step of a commit operation where the already persisted trie is
// removed from the dirty cache and moved into the clean cache. The reason behind
// the two-phase commit is to ensure data availability while moving from memory
// to disk.
func (c *cleaner) Put(key []byte, rlp []byte) error {
	hash := common.BytesToHash(key)

	// If the node does not exist, we're done on this path
	node, ok := c.db.dirties[hash]
	if !ok {
		return nil
	}
	// Node still exists, remove it from the flush-list, relinking its neighbours
	switch hash {
	case c.db.oldest:
		c.db.oldest = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = common.Hash{}
	case c.db.newest:
		c.db.newest = node.flushPrev
		c.db.dirties[node.flushPrev].flushNext = common.Hash{}
	default:
		c.db.dirties[node.flushPrev].flushNext = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	// Remove the node from the dirty cache
	delete(c.db.dirties, hash)
	c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
	if node.children != nil {
		c.db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
	}
	// Move the flushed node into the clean cache to prevent insta-reloads
	if c.db.cleans != nil {
		c.db.cleans.Set(hash[:], rlp)
		memcacheCleanWriteMeter.Mark(int64(len(rlp)))
	}
	return nil
}

// Delete is unsupported: commit batches only ever contain Put operations.
func (c *cleaner) Delete(key []byte) error {
	panic("not implemented")
}

// Update inserts the dirty nodes in provided nodeset into database and
// link the account trie with multiple storage tries if necessary.
func (db *Database) Update(nodes *MergedNodeSet) error {
	db.lock.Lock()
	defer db.lock.Unlock()

	// Insert dirty nodes into the database. In the same tree, it must be
	// ensured that children are inserted first, then parent so that children
	// can be linked with their parent correctly.
	//
	// Note, the storage tries must be flushed before the account trie to
	// retain the invariant that children go into the dirty cache first.
	var order []common.Hash
	for owner := range nodes.sets {
		if owner == (common.Hash{}) {
			continue
		}
		order = append(order, owner)
	}
	// The zero owner is the account trie; append it last so all storage
	// tries are inserted before it
	if _, ok := nodes.sets[common.Hash{}]; ok {
		order = append(order, common.Hash{})
	}
	for _, owner := range order {
		subset := nodes.sets[owner]
		for _, path := range subset.updates.order {
			n, ok := subset.updates.nodes[path]
			if !ok {
				return fmt.Errorf("missing node %x %v", owner, path)
			}
			db.insert(n.hash, int(n.size), n.node)
		}
	}
	// Link up the account trie and storage trie if the node points
	// to an account trie leaf.
	if set, present := nodes.sets[common.Hash{}]; present {
		for _, n := range set.leaves {
			var account types.StateAccount
			if err := rlp.DecodeBytes(n.blob, &account); err != nil {
				return err
			}
			if account.Root != types.EmptyRootHash {
				db.reference(account.Root, n.parent)
			}
		}
	}
	return nil
}

// Size returns the current storage size of the memory cache in front of the
// persistent database layer. The first return value is the dirty node cache
// size (data plus tracking metadata), the second the preimage cache size.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. The metaroot entry and its child references are excluded.
	var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize)
	var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2))
	var preimageSize common.StorageSize
	if db.preimages != nil {
		preimageSize = db.preimages.size()
	}
	return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, preimageSize
}

// GetReader retrieves a node reader belonging to the given state root.
851 func (db *Database) GetReader(root common.Hash) Reader { 852 return newHashReader(db) 853 } 854 855 // hashReader is reader of hashDatabase which implements the Reader interface. 856 type hashReader struct { 857 db *Database 858 } 859 860 // newHashReader initializes the hash reader. 861 func newHashReader(db *Database) *hashReader { 862 return &hashReader{db: db} 863 } 864 865 // Node retrieves the trie node with the given node hash. 866 // No error will be returned if the node is not found. 867 func (reader *hashReader) Node(_ common.Hash, _ []byte, hash common.Hash) (node, error) { 868 return reader.db.node(hash), nil 869 } 870 871 // NodeBlob retrieves the RLP-encoded trie node blob with the given node hash. 872 // No error will be returned if the node is not found. 873 func (reader *hashReader) NodeBlob(_ common.Hash, _ []byte, hash common.Hash) ([]byte, error) { 874 blob, _ := reader.db.Node(hash) 875 return blob, nil 876 } 877 878 // saveCache saves clean state cache to given directory path 879 // using specified CPU cores. 880 func (db *Database) saveCache(dir string, threads int) error { 881 if db.cleans == nil { 882 return nil 883 } 884 log.Info("Writing clean trie cache to disk", "path", dir, "threads", threads) 885 886 start := time.Now() 887 err := db.cleans.SaveToFileConcurrent(dir, threads) 888 if err != nil { 889 log.Error("Failed to persist clean trie cache", "error", err) 890 return err 891 } 892 log.Info("Persisted the clean trie cache", "path", dir, "elapsed", common.PrettyDuration(time.Since(start))) 893 return nil 894 } 895 896 // SaveCache atomically saves fast cache data to the given dir using all 897 // available CPU cores. 898 func (db *Database) SaveCache(dir string) error { 899 return db.saveCache(dir, runtime.GOMAXPROCS(0)) 900 } 901 902 // SaveCachePeriodically atomically saves fast cache data to the given dir with 903 // the specified interval. All dump operation will only use a single CPU core. 
904 func (db *Database) SaveCachePeriodically(dir string, interval time.Duration, stopCh <-chan struct{}) { 905 ticker := time.NewTicker(interval) 906 defer ticker.Stop() 907 908 for { 909 select { 910 case <-ticker.C: 911 db.saveCache(dir, 1) 912 case <-stopCh: 913 return 914 } 915 } 916 } 917 918 // CommitPreimages flushes the dangling preimages to disk. It is meant to be 919 // called when closing the blockchain object, so that preimages are persisted 920 // to the database. 921 func (db *Database) CommitPreimages() error { 922 db.lock.Lock() 923 defer db.lock.Unlock() 924 925 if db.preimages == nil { 926 return nil 927 } 928 return db.preimages.commit(true) 929 } 930 931 // Scheme returns the node scheme used in the database. 932 func (db *Database) Scheme() string { 933 return rawdb.HashScheme 934 }