// Copyright 2018 The go-ethereum Authors
// Copyright 2019 The go-aigar Authors
// This file is part of the go-aigar library.
//
// The go-aigar library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-aigar library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-aigar library. If not, see <http://www.gnu.org/licenses/>.

package trie

import (
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"reflect"
	"sync"
	"time"

	"github.com/AigarNetwork/aigar/common"
	"github.com/AigarNetwork/aigar/ethdb"
	"github.com/AigarNetwork/aigar/log"
	"github.com/AigarNetwork/aigar/metrics"
	"github.com/AigarNetwork/aigar/rlp"
	"github.com/allegro/bigcache"
)

// Metrics gauging the effectiveness of the clean read cache and the cost of
// the flush (Cap), garbage-collection (Dereference) and Commit operations.
var (
	// Clean cache hit/miss counters and byte volumes read/written.
	memcacheCleanHitMeter   = metrics.NewRegisteredMeter("trie/memcache/clean/hit", nil)
	memcacheCleanMissMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/miss", nil)
	memcacheCleanReadMeter  = metrics.NewRegisteredMeter("trie/memcache/clean/read", nil)
	memcacheCleanWriteMeter = metrics.NewRegisteredMeter("trie/memcache/clean/write", nil)

	// Time spent, node count and byte volume flushed to disk by Cap.
	memcacheFlushTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/flush/time", nil)
	memcacheFlushNodesMeter = metrics.NewRegisteredMeter("trie/memcache/flush/nodes", nil)
	memcacheFlushSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/flush/size", nil)

	// Time spent, node count and byte volume reclaimed by Dereference.
	memcacheGCTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/gc/time", nil)
	memcacheGCNodesMeter = metrics.NewRegisteredMeter("trie/memcache/gc/nodes", nil)
	memcacheGCSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/gc/size", nil)

	// Time spent, node count and byte volume persisted by Commit.
	memcacheCommitTimeTimer  = metrics.NewRegisteredResettingTimer("trie/memcache/commit/time", nil)
	memcacheCommitNodesMeter = metrics.NewRegisteredMeter("trie/memcache/commit/nodes", nil)
	memcacheCommitSizeMeter  = metrics.NewRegisteredMeter("trie/memcache/commit/size", nil)
)

// secureKeyPrefix is the database key prefix used to store trie node preimages.
var secureKeyPrefix = []byte("secure-key-")

// secureKeyLength is the length of the above prefix + 32byte hash.
const secureKeyLength = 11 + 32

// Database is an intermediate write layer between the trie data structures and
// the disk database. The aim is to accumulate trie writes in-memory and only
// periodically flush a couple tries to disk, garbage collecting the remainder.
//
// Note, the trie Database is **not** thread safe in its mutations, but it **is**
// thread safe in providing individual, independent node access. The rationale
// behind this split design is to provide read access to RPC handlers and sync
// servers even while the trie is executing expensive garbage collection.
type Database struct {
	diskdb ethdb.KeyValueStore // Persistent storage for matured trie nodes

	cleans  *bigcache.BigCache          // GC friendly memory cache of clean node RLPs
	dirties map[common.Hash]*cachedNode // Data and references relationships of dirty nodes
	oldest  common.Hash                 // Oldest tracked node, flush-list head
	newest  common.Hash                 // Newest tracked node, flush-list tail

	preimages map[common.Hash][]byte // Preimages of nodes from the secure trie
	seckeybuf [secureKeyLength]byte  // Ephemeral buffer for calculating preimage keys

	gctime  time.Duration      // Time spent on garbage collection since last commit
	gcnodes uint64             // Nodes garbage collected since last commit
	gcsize  common.StorageSize // Data storage garbage collected since last commit

	flushtime  time.Duration      // Time spent on data flushing since last commit
	flushnodes uint64             // Nodes flushed since last commit
	flushsize  common.StorageSize // Data storage flushed since last commit

	dirtiesSize   common.StorageSize // Storage size of the dirty node cache (exc. metadata)
	childrenSize  common.StorageSize // Storage size of the external children tracking
	preimagesSize common.StorageSize // Storage size of the preimages cache

	lock sync.RWMutex // Guards reads of the maps above against concurrent mutation
}

// rawNode is a simple binary blob used to differentiate between collapsed trie
// nodes and already encoded RLP binary blobs (while at the same time store them
// in the same cache fields).
type rawNode []byte

// rawNode only ever lives inside the node cache, never in a live trie, so the
// node interface methods below are unreachable by construction and panic.
func (n rawNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

// rawFullNode represents only the useful data content of a full node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawFullNode [17]node

// Like rawNode, rawFullNode is cache-only; these interface methods never run.
func (n rawFullNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawFullNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawFullNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

// EncodeRLP encodes the node as the canonical 17-item list, substituting an
// explicit nil value node for absent children so the output matches the RLP
// encoding of a regular full node.
func (n rawFullNode) EncodeRLP(w io.Writer) error {
	var nodes [17]node

	for i, child := range n {
		if child != nil {
			nodes[i] = child
		} else {
			nodes[i] = nilValueNode
		}
	}
	return rlp.Encode(w, nodes)
}

// rawShortNode represents only the useful data content of a short node, with the
// caches and flags stripped out to minimize its data storage. This type honors
// the same RLP encoding as the original parent.
type rawShortNode struct {
	Key []byte
	Val node
}

// Cache-only type; the node interface methods are never invoked.
func (n rawShortNode) canUnload(uint16, uint16) bool { panic("this should never end up in a live trie") }
func (n rawShortNode) cache() (hashNode, bool)       { panic("this should never end up in a live trie") }
func (n rawShortNode) fstring(ind string) string     { panic("this should never end up in a live trie") }

// cachedNode is all the information we know about a single cached node in the
// memory database write layer.
type cachedNode struct {
	node node   // Cached collapsed trie node, or raw rlp data
	size uint16 // Byte size of the useful cached data

	parents  uint32                 // Number of live nodes referencing this one
	children map[common.Hash]uint16 // External children referenced by this node

	flushPrev common.Hash // Previous node in the flush-list
	flushNext common.Hash // Next node in the flush-list
}

// cachedNodeSize is the raw size of a cachedNode data structure without any
// node data included.
It's an approximate size, but should be a lot better 154 // than not counting them. 155 var cachedNodeSize = int(reflect.TypeOf(cachedNode{}).Size()) 156 157 // cachedNodeChildrenSize is the raw size of an initialized but empty external 158 // reference map. 159 const cachedNodeChildrenSize = 48 160 161 // rlp returns the raw rlp encoded blob of the cached node, either directly from 162 // the cache, or by regenerating it from the collapsed node. 163 func (n *cachedNode) rlp() []byte { 164 if node, ok := n.node.(rawNode); ok { 165 return node 166 } 167 blob, err := rlp.EncodeToBytes(n.node) 168 if err != nil { 169 panic(err) 170 } 171 return blob 172 } 173 174 // obj returns the decoded and expanded trie node, either directly from the cache, 175 // or by regenerating it from the rlp encoded blob. 176 func (n *cachedNode) obj(hash common.Hash) node { 177 if node, ok := n.node.(rawNode); ok { 178 return mustDecodeNode(hash[:], node) 179 } 180 return expandNode(hash[:], n.node) 181 } 182 183 // childs returns all the tracked children of this node, both the implicit ones 184 // from inside the node as well as the explicit ones from outside the node. 185 func (n *cachedNode) childs() []common.Hash { 186 children := make([]common.Hash, 0, 16) 187 for child := range n.children { 188 children = append(children, child) 189 } 190 if _, ok := n.node.(rawNode); !ok { 191 gatherChildren(n.node, &children) 192 } 193 return children 194 } 195 196 // gatherChildren traverses the node hierarchy of a collapsed storage node and 197 // retrieves all the hashnode children. 
198 func gatherChildren(n node, children *[]common.Hash) { 199 switch n := n.(type) { 200 case *rawShortNode: 201 gatherChildren(n.Val, children) 202 203 case rawFullNode: 204 for i := 0; i < 16; i++ { 205 gatherChildren(n[i], children) 206 } 207 case hashNode: 208 *children = append(*children, common.BytesToHash(n)) 209 210 case valueNode, nil: 211 212 default: 213 panic(fmt.Sprintf("unknown node type: %T", n)) 214 } 215 } 216 217 // simplifyNode traverses the hierarchy of an expanded memory node and discards 218 // all the internal caches, returning a node that only contains the raw data. 219 func simplifyNode(n node) node { 220 switch n := n.(type) { 221 case *shortNode: 222 // Short nodes discard the flags and cascade 223 return &rawShortNode{Key: n.Key, Val: simplifyNode(n.Val)} 224 225 case *fullNode: 226 // Full nodes discard the flags and cascade 227 node := rawFullNode(n.Children) 228 for i := 0; i < len(node); i++ { 229 if node[i] != nil { 230 node[i] = simplifyNode(node[i]) 231 } 232 } 233 return node 234 235 case valueNode, hashNode, rawNode: 236 return n 237 238 default: 239 panic(fmt.Sprintf("unknown node type: %T", n)) 240 } 241 } 242 243 // expandNode traverses the node hierarchy of a collapsed storage node and converts 244 // all fields and keys into expanded memory form. 
245 func expandNode(hash hashNode, n node) node { 246 switch n := n.(type) { 247 case *rawShortNode: 248 // Short nodes need key and child expansion 249 return &shortNode{ 250 Key: compactToHex(n.Key), 251 Val: expandNode(nil, n.Val), 252 flags: nodeFlag{ 253 hash: hash, 254 }, 255 } 256 257 case rawFullNode: 258 // Full nodes need child expansion 259 node := &fullNode{ 260 flags: nodeFlag{ 261 hash: hash, 262 }, 263 } 264 for i := 0; i < len(node.Children); i++ { 265 if n[i] != nil { 266 node.Children[i] = expandNode(nil, n[i]) 267 } 268 } 269 return node 270 271 case valueNode, hashNode: 272 return n 273 274 default: 275 panic(fmt.Sprintf("unknown node type: %T", n)) 276 } 277 } 278 279 // trienodeHasher is a struct to be used with BigCache, which uses a Hasher to 280 // determine which shard to place an entry into. It's not a cryptographic hash, 281 // just to provide a bit of anti-collision (default is FNV64a). 282 // 283 // Since trie keys are already hashes, we can just use the key directly to 284 // map shard id. 285 type trienodeHasher struct{} 286 287 // Sum64 implements the bigcache.Hasher interface. 288 func (t trienodeHasher) Sum64(key string) uint64 { 289 return binary.BigEndian.Uint64([]byte(key)) 290 } 291 292 // NewDatabase creates a new trie database to store ephemeral trie content before 293 // its written out to disk or garbage collected. No read cache is created, so all 294 // data retrievals will hit the underlying disk database. 295 func NewDatabase(diskdb ethdb.KeyValueStore) *Database { 296 return NewDatabaseWithCache(diskdb, 0) 297 } 298 299 // NewDatabaseWithCache creates a new trie database to store ephemeral trie content 300 // before its written out to disk or garbage collected. It also acts as a read cache 301 // for nodes loaded from disk. 
302 func NewDatabaseWithCache(diskdb ethdb.KeyValueStore, cache int) *Database { 303 var cleans *bigcache.BigCache 304 if cache > 0 { 305 cleans, _ = bigcache.NewBigCache(bigcache.Config{ 306 Shards: 1024, 307 LifeWindow: time.Hour, 308 MaxEntriesInWindow: cache * 1024, 309 MaxEntrySize: 512, 310 HardMaxCacheSize: cache, 311 Hasher: trienodeHasher{}, 312 }) 313 } 314 return &Database{ 315 diskdb: diskdb, 316 cleans: cleans, 317 dirties: map[common.Hash]*cachedNode{{}: { 318 children: make(map[common.Hash]uint16), 319 }}, 320 preimages: make(map[common.Hash][]byte), 321 } 322 } 323 324 // DiskDB retrieves the persistent storage backing the trie database. 325 func (db *Database) DiskDB() ethdb.KeyValueReader { 326 return db.diskdb 327 } 328 329 // InsertBlob writes a new reference tracked blob to the memory database if it's 330 // yet unknown. This method should only be used for non-trie nodes that require 331 // reference counting, since trie nodes are garbage collected directly through 332 // their embedded children. 333 func (db *Database) InsertBlob(hash common.Hash, blob []byte) { 334 db.lock.Lock() 335 defer db.lock.Unlock() 336 337 db.insert(hash, blob, rawNode(blob)) 338 } 339 340 // insert inserts a collapsed trie node into the memory database. This method is 341 // a more generic version of InsertBlob, supporting both raw blob insertions as 342 // well ex trie node insertions. The blob must always be specified to allow proper 343 // size tracking. 
344 func (db *Database) insert(hash common.Hash, blob []byte, node node) { 345 // If the node's already cached, skip 346 if _, ok := db.dirties[hash]; ok { 347 return 348 } 349 // Create the cached entry for this node 350 entry := &cachedNode{ 351 node: simplifyNode(node), 352 size: uint16(len(blob)), 353 flushPrev: db.newest, 354 } 355 for _, child := range entry.childs() { 356 if c := db.dirties[child]; c != nil { 357 c.parents++ 358 } 359 } 360 db.dirties[hash] = entry 361 362 // Update the flush-list endpoints 363 if db.oldest == (common.Hash{}) { 364 db.oldest, db.newest = hash, hash 365 } else { 366 db.dirties[db.newest].flushNext, db.newest = hash, hash 367 } 368 db.dirtiesSize += common.StorageSize(common.HashLength + entry.size) 369 } 370 371 // insertPreimage writes a new trie node pre-image to the memory database if it's 372 // yet unknown. The method will make a copy of the slice. 373 // 374 // Note, this method assumes that the database's lock is held! 375 func (db *Database) insertPreimage(hash common.Hash, preimage []byte) { 376 if _, ok := db.preimages[hash]; ok { 377 return 378 } 379 db.preimages[hash] = common.CopyBytes(preimage) 380 db.preimagesSize += common.StorageSize(common.HashLength + len(preimage)) 381 } 382 383 // node retrieves a cached trie node from memory, or returns nil if none can be 384 // found in the memory cache. 
385 func (db *Database) node(hash common.Hash) node { 386 // Retrieve the node from the clean cache if available 387 if db.cleans != nil { 388 if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil { 389 memcacheCleanHitMeter.Mark(1) 390 memcacheCleanReadMeter.Mark(int64(len(enc))) 391 return mustDecodeNode(hash[:], enc) 392 } 393 } 394 // Retrieve the node from the dirty cache if available 395 db.lock.RLock() 396 dirty := db.dirties[hash] 397 db.lock.RUnlock() 398 399 if dirty != nil { 400 return dirty.obj(hash) 401 } 402 // Content unavailable in memory, attempt to retrieve from disk 403 enc, err := db.diskdb.Get(hash[:]) 404 if err != nil || enc == nil { 405 return nil 406 } 407 if db.cleans != nil { 408 db.cleans.Set(string(hash[:]), enc) 409 memcacheCleanMissMeter.Mark(1) 410 memcacheCleanWriteMeter.Mark(int64(len(enc))) 411 } 412 return mustDecodeNode(hash[:], enc) 413 } 414 415 // Node retrieves an encoded cached trie node from memory. If it cannot be found 416 // cached, the method queries the persistent database for the content. 
417 func (db *Database) Node(hash common.Hash) ([]byte, error) { 418 // It doens't make sense to retrieve the metaroot 419 if hash == (common.Hash{}) { 420 return nil, errors.New("not found") 421 } 422 // Retrieve the node from the clean cache if available 423 if db.cleans != nil { 424 if enc, err := db.cleans.Get(string(hash[:])); err == nil && enc != nil { 425 memcacheCleanHitMeter.Mark(1) 426 memcacheCleanReadMeter.Mark(int64(len(enc))) 427 return enc, nil 428 } 429 } 430 // Retrieve the node from the dirty cache if available 431 db.lock.RLock() 432 dirty := db.dirties[hash] 433 db.lock.RUnlock() 434 435 if dirty != nil { 436 return dirty.rlp(), nil 437 } 438 // Content unavailable in memory, attempt to retrieve from disk 439 enc, err := db.diskdb.Get(hash[:]) 440 if err == nil && enc != nil { 441 if db.cleans != nil { 442 db.cleans.Set(string(hash[:]), enc) 443 memcacheCleanMissMeter.Mark(1) 444 memcacheCleanWriteMeter.Mark(int64(len(enc))) 445 } 446 } 447 return enc, err 448 } 449 450 // preimage retrieves a cached trie node pre-image from memory. If it cannot be 451 // found cached, the method queries the persistent database for the content. 452 func (db *Database) preimage(hash common.Hash) ([]byte, error) { 453 // Retrieve the node from cache if available 454 db.lock.RLock() 455 preimage := db.preimages[hash] 456 db.lock.RUnlock() 457 458 if preimage != nil { 459 return preimage, nil 460 } 461 // Content unavailable in memory, attempt to retrieve from disk 462 return db.diskdb.Get(db.secureKey(hash[:])) 463 } 464 465 // secureKey returns the database key for the preimage of key, as an ephemeral 466 // buffer. The caller must not hold onto the return value because it will become 467 // invalid on the next call. 468 func (db *Database) secureKey(key []byte) []byte { 469 buf := append(db.seckeybuf[:0], secureKeyPrefix...) 470 buf = append(buf, key...) 471 return buf 472 } 473 474 // Nodes retrieves the hashes of all the nodes cached within the memory database. 
475 // This method is extremely expensive and should only be used to validate internal 476 // states in test code. 477 func (db *Database) Nodes() []common.Hash { 478 db.lock.RLock() 479 defer db.lock.RUnlock() 480 481 var hashes = make([]common.Hash, 0, len(db.dirties)) 482 for hash := range db.dirties { 483 if hash != (common.Hash{}) { // Special case for "root" references/nodes 484 hashes = append(hashes, hash) 485 } 486 } 487 return hashes 488 } 489 490 // Reference adds a new reference from a parent node to a child node. 491 func (db *Database) Reference(child common.Hash, parent common.Hash) { 492 db.lock.Lock() 493 defer db.lock.Unlock() 494 495 db.reference(child, parent) 496 } 497 498 // reference is the private locked version of Reference. 499 func (db *Database) reference(child common.Hash, parent common.Hash) { 500 // If the node does not exist, it's a node pulled from disk, skip 501 node, ok := db.dirties[child] 502 if !ok { 503 return 504 } 505 // If the reference already exists, only duplicate for roots 506 if db.dirties[parent].children == nil { 507 db.dirties[parent].children = make(map[common.Hash]uint16) 508 db.childrenSize += cachedNodeChildrenSize 509 } else if _, ok = db.dirties[parent].children[child]; ok && parent != (common.Hash{}) { 510 return 511 } 512 node.parents++ 513 db.dirties[parent].children[child]++ 514 if db.dirties[parent].children[child] == 1 { 515 db.childrenSize += common.HashLength + 2 // uint16 counter 516 } 517 } 518 519 // Dereference removes an existing reference from a root node. 
func (db *Database) Dereference(root common.Hash) {
	// Sanity check to ensure that the meta-root is not removed
	if root == (common.Hash{}) {
		log.Error("Attempted to dereference the trie cache meta root")
		return
	}
	db.lock.Lock()
	defer db.lock.Unlock()

	// Snapshot counters before the cascade to derive the GC statistics after
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	db.dereference(root, common.Hash{})

	db.gcnodes += uint64(nodes - len(db.dirties))
	db.gcsize += storage - db.dirtiesSize
	db.gctime += time.Since(start)

	memcacheGCTimeTimer.Update(time.Since(start))
	memcacheGCSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheGCNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Dereferenced trie from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)
}

// dereference is the private locked version of Dereference.
func (db *Database) dereference(child common.Hash, parent common.Hash) {
	// Dereference the parent-child
	node := db.dirties[parent]

	if node.children != nil && node.children[child] > 0 {
		node.children[child]--
		if node.children[child] == 0 {
			delete(node.children, child)
			db.childrenSize -= (common.HashLength + 2) // uint16 counter
		}
	}
	// If the child does not exist, it's a previously committed node.
	node, ok := db.dirties[child]
	if !ok {
		return
	}
	// If there are no more references to the child, delete it and cascade
	if node.parents > 0 {
		// This is a special cornercase where a node loaded from disk (i.e. not in the
		// memcache any more) gets reinjected as a new node (short node split into full,
		// then reverted into short), causing a cached node to have no parents. That is
		// no problem in itself, but don't make maxint parents out of it.
		node.parents--
	}
	if node.parents == 0 {
		// Remove the node from the flush-list, patching head/tail or unlinking
		// from the middle as appropriate
		switch child {
		case db.oldest:
			db.oldest = node.flushNext
			db.dirties[node.flushNext].flushPrev = common.Hash{}
		case db.newest:
			db.newest = node.flushPrev
			db.dirties[node.flushPrev].flushNext = common.Hash{}
		default:
			db.dirties[node.flushPrev].flushNext = node.flushNext
			db.dirties[node.flushNext].flushPrev = node.flushPrev
		}
		// Dereference all children and delete the node
		for _, hash := range node.childs() {
			db.dereference(hash, child)
		}
		delete(db.dirties, child)
		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= cachedNodeChildrenSize
		}
	}
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold.
//
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Cap(limit common.StorageSize) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	nodes, storage, start := len(db.dirties), db.dirtiesSize, time.Now()
	batch := db.diskdb.NewBatch()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. The -1 and metaroot subtractions exclude the zero-hash anchor entry.
	size := db.dirtiesSize + common.StorageSize((len(db.dirties)-1)*cachedNodeSize)
	size += db.childrenSize - common.StorageSize(len(db.dirties[common.Hash{}].children)*(common.HashLength+2))

	// If the preimage cache got large enough, push to disk. If it's still small
	// leave for later to deduplicate writes.
	flushPreimages := db.preimagesSize > 4*1024*1024
	if flushPreimages {
		for hash, preimage := range db.preimages {
			if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
				log.Error("Failed to commit preimage from trie database", "err", err)
				return err
			}
			if batch.ValueSize() > ethdb.IdealBatchSize {
				if err := batch.Write(); err != nil {
					return err
				}
				batch.Reset()
			}
		}
	}
	// Keep committing nodes from the flush-list until we're below allowance
	oldest := db.oldest
	for size > limit && oldest != (common.Hash{}) {
		// Fetch the oldest referenced node and push into the batch
		node := db.dirties[oldest]
		if err := batch.Put(oldest[:], node.rlp()); err != nil {
			return err
		}
		// If we exceeded the ideal batch size, commit and reset
		if batch.ValueSize() >= ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Error("Failed to write flush list to disk", "err", err)
				return err
			}
			batch.Reset()
		}
		// Iterate to the next flush item, or abort if the size cap was achieved. Size
		// is the total size, including the useful cached data (hash -> blob), the
		// cache item metadata, as well as external children mappings.
		size -= common.StorageSize(common.HashLength + int(node.size) + cachedNodeSize)
		if node.children != nil {
			size -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
		oldest = node.flushNext
	}
	// Flush out any remainder data from the last batch
	if err := batch.Write(); err != nil {
		log.Error("Failed to write flush list to disk", "err", err)
		return err
	}
	// Write successful, clear out the flushed data
	db.lock.Lock()
	defer db.lock.Unlock()

	if flushPreimages {
		db.preimages = make(map[common.Hash][]byte)
		db.preimagesSize = 0
	}
	// Walk the flush-list head forward to `oldest`, dropping every persisted
	// node from the dirty cache and adjusting the size accounting
	for db.oldest != oldest {
		node := db.dirties[db.oldest]
		delete(db.dirties, db.oldest)
		db.oldest = node.flushNext

		db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
		if node.children != nil {
			db.childrenSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
		}
	}
	if db.oldest != (common.Hash{}) {
		db.dirties[db.oldest].flushPrev = common.Hash{}
	}
	db.flushnodes += uint64(nodes - len(db.dirties))
	db.flushsize += storage - db.dirtiesSize
	db.flushtime += time.Since(start)

	memcacheFlushTimeTimer.Update(time.Since(start))
	memcacheFlushSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheFlushNodesMeter.Mark(int64(nodes - len(db.dirties)))

	log.Debug("Persisted nodes from memory database", "nodes", nodes-len(db.dirties), "size", storage-db.dirtiesSize, "time", time.Since(start),
		"flushnodes", db.flushnodes, "flushsize", db.flushsize, "flushtime", db.flushtime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	return nil
}

// Commit iterates over all the children of a particular node, writes them out
// to disk, forcefully tearing down all references in both directions. As a side
// effect, all pre-images accumulated up to this point are also written.
// Note, this method is a non-synchronized mutator. It is unsafe to call this
// concurrently with other mutators.
func (db *Database) Commit(node common.Hash, report bool) error {
	// Create a database batch to flush persistent data out. It is important that
	// outside code doesn't see an inconsistent state (referenced data removed from
	// memory cache during commit but not yet in persistent storage). This is ensured
	// by only uncaching existing data when the database write finalizes.
	start := time.Now()
	batch := db.diskdb.NewBatch()

	// Move all of the accumulated preimages into a write batch
	for hash, preimage := range db.preimages {
		if err := batch.Put(db.secureKey(hash[:]), preimage); err != nil {
			log.Error("Failed to commit preimage from trie database", "err", err)
			return err
		}
		// If the batch is too large, flush to disk
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
	}
	// Since we're going to replay trie node writes into the clean cache, flush out
	// any batched pre-images before continuing.
	if err := batch.Write(); err != nil {
		return err
	}
	batch.Reset()

	// Move the trie itself into the batch, flushing if enough data is accumulated
	nodes, storage := len(db.dirties), db.dirtiesSize

	uncacher := &cleaner{db}
	if err := db.commit(node, batch, uncacher); err != nil {
		log.Error("Failed to commit trie from trie database", "err", err)
		return err
	}
	// Trie mostly committed to disk, flush any batch leftovers
	if err := batch.Write(); err != nil {
		log.Error("Failed to write trie to disk", "err", err)
		return err
	}
	// Uncache any leftovers in the last batch
	db.lock.Lock()
	defer db.lock.Unlock()

	// Replaying the batch through the cleaner moves the persisted nodes from
	// the dirty cache into the clean cache
	batch.Replay(uncacher)
	batch.Reset()

	// Reset the storage counters and bumped metrics
	db.preimages = make(map[common.Hash][]byte)
	db.preimagesSize = 0

	memcacheCommitTimeTimer.Update(time.Since(start))
	memcacheCommitSizeMeter.Mark(int64(storage - db.dirtiesSize))
	memcacheCommitNodesMeter.Mark(int64(nodes - len(db.dirties)))

	logger := log.Info
	if !report {
		logger = log.Debug
	}
	logger("Persisted trie from memory database", "nodes", nodes-len(db.dirties)+int(db.flushnodes), "size", storage-db.dirtiesSize+db.flushsize, "time", time.Since(start)+db.flushtime,
		"gcnodes", db.gcnodes, "gcsize", db.gcsize, "gctime", db.gctime, "livenodes", len(db.dirties), "livesize", db.dirtiesSize)

	// Reset the garbage collection statistics
	db.gcnodes, db.gcsize, db.gctime = 0, 0, 0
	db.flushnodes, db.flushsize, db.flushtime = 0, 0, 0

	return nil
}

// commit is the private locked version of Commit.
773 func (db *Database) commit(hash common.Hash, batch ethdb.Batch, uncacher *cleaner) error { 774 // If the node does not exist, it's a previously committed node 775 node, ok := db.dirties[hash] 776 if !ok { 777 return nil 778 } 779 for _, child := range node.childs() { 780 if err := db.commit(child, batch, uncacher); err != nil { 781 return err 782 } 783 } 784 if err := batch.Put(hash[:], node.rlp()); err != nil { 785 return err 786 } 787 // If we've reached an optimal batch size, commit and start over 788 if batch.ValueSize() >= ethdb.IdealBatchSize { 789 if err := batch.Write(); err != nil { 790 return err 791 } 792 db.lock.Lock() 793 batch.Replay(uncacher) 794 batch.Reset() 795 db.lock.Unlock() 796 } 797 return nil 798 } 799 800 // cleaner is a database batch replayer that takes a batch of write operations 801 // and cleans up the trie database from anything written to disk. 802 type cleaner struct { 803 db *Database 804 } 805 806 // Put reacts to database writes and implements dirty data uncaching. This is the 807 // post-processing step of a commit operation where the already persisted trie is 808 // removed from the dirty cache and moved into the clean cache. The reason behind 809 // the two-phase commit is to ensure ensure data availability while moving from 810 // memory to disk. 
func (c *cleaner) Put(key []byte, rlp []byte) error {
	hash := common.BytesToHash(key)

	// If the node does not exist, we're done on this path
	node, ok := c.db.dirties[hash]
	if !ok {
		return nil
	}
	// Node still exists, remove it from the flush-list, patching the list
	// head/tail or unlinking from the middle as appropriate
	switch hash {
	case c.db.oldest:
		c.db.oldest = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = common.Hash{}
	case c.db.newest:
		c.db.newest = node.flushPrev
		c.db.dirties[node.flushPrev].flushNext = common.Hash{}
	default:
		c.db.dirties[node.flushPrev].flushNext = node.flushNext
		c.db.dirties[node.flushNext].flushPrev = node.flushPrev
	}
	// Remove the node from the dirty cache
	delete(c.db.dirties, hash)
	c.db.dirtiesSize -= common.StorageSize(common.HashLength + int(node.size))
	if node.children != nil {
		c.db.dirtiesSize -= common.StorageSize(cachedNodeChildrenSize + len(node.children)*(common.HashLength+2))
	}
	// Move the flushed node into the clean cache to prevent insta-reloads
	if c.db.cleans != nil {
		c.db.cleans.Set(string(hash[:]), rlp)
	}
	return nil
}

// Delete is part of the ethdb batch replay interface but is never expected
// during a commit replay, which only issues Put operations.
func (c *cleaner) Delete(key []byte) error {
	panic("Not implemented")
}

// Size returns the current storage size of the memory cache in front of the
// persistent database layer.
func (db *Database) Size() (common.StorageSize, common.StorageSize) {
	db.lock.RLock()
	defer db.lock.RUnlock()

	// db.dirtiesSize only contains the useful data in the cache, but when reporting
	// the total memory consumption, the maintenance metadata is also needed to be
	// counted. The -1 and metaroot-children subtractions exclude the zero-hash
	// anchor entry from the totals.
	var metadataSize = common.StorageSize((len(db.dirties) - 1) * cachedNodeSize)
	var metarootRefs = common.StorageSize(len(db.dirties[common.Hash{}].children) * (common.HashLength + 2))
	return db.dirtiesSize + db.childrenSize + metadataSize - metarootRefs, db.preimagesSize
}

// verifyIntegrity is a debug method to iterate over the entire trie stored in
// memory and check whether every node is reachable from the meta root. The goal
// is to find any errors that might cause memory leaks and or trie nodes to go
// missing.
//
// This method is extremely CPU and memory intensive, only use when must.
func (db *Database) verifyIntegrity() {
	// Iterate over all the cached nodes and accumulate them into a set
	reachable := map[common.Hash]struct{}{{}: {}}

	for child := range db.dirties[common.Hash{}].children {
		db.accumulate(child, reachable)
	}
	// Find any unreachable but cached nodes
	var unreachable []string
	for hash, node := range db.dirties {
		if _, ok := reachable[hash]; !ok {
			unreachable = append(unreachable, fmt.Sprintf("%x: {Node: %v, Parents: %d, Prev: %x, Next: %x}",
				hash, node.node, node.parents, node.flushPrev, node.flushNext))
		}
	}
	if len(unreachable) != 0 {
		panic(fmt.Sprintf("trie cache memory leak: %v", unreachable))
	}
}

// accumulate iterates over the trie defined by hash and accumulates all the
// cached children found in memory.
func (db *Database) accumulate(hash common.Hash, reachable map[common.Hash]struct{}) {
	// Mark the node reachable if present in the memory cache
	node, ok := db.dirties[hash]
	if !ok {
		return
	}
	reachable[hash] = struct{}{}

	// Iterate over all the children and accumulate them too
	for _, child := range node.childs() {
		db.accumulate(child, reachable)
	}
}