github.com/snowblossomcoin/go-ethereum@v1.9.25/core/state/snapshot/snapshot.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package snapshot implements a journalled, dynamic state dump.
package snapshot

import (
	"bytes"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/metrics"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

var (
	snapshotCleanAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil)
	snapshotCleanAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil)
	snapshotCleanAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil)
	snapshotCleanAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil)
	snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil)

	snapshotCleanStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil)
	snapshotCleanStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil)
	snapshotCleanStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil)
	snapshotCleanStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil)
	snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil)

	snapshotDirtyAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil)
	snapshotDirtyAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil)
	snapshotDirtyAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil)
	snapshotDirtyAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
	snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)

	snapshotDirtyStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
	snapshotDirtyStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
	snapshotDirtyStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
	snapshotDirtyStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil)
	snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil)

	snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
	snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))

	snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil)
	snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil)
	snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil)
	snapshotFlushStorageSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/size", nil)

	snapshotBloomIndexTimer = metrics.NewRegisteredResettingTimer("state/snapshot/bloom/index", nil)
	snapshotBloomErrorGauge = metrics.NewRegisteredGaugeFloat64("state/snapshot/bloom/error", nil)

	snapshotBloomAccountTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/account/truehit", nil)
	snapshotBloomAccountFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/falsehit", nil)
	snapshotBloomAccountMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/account/miss", nil)

	snapshotBloomStorageTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/truehit", nil)
	snapshotBloomStorageFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/falsehit", nil)
	snapshotBloomStorageMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/miss", nil)

	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
	// layer had been invalidated due to the chain progressing forward far enough
	// to not maintain the layer's original state.
	ErrSnapshotStale = errors.New("snapshot stale")

	// ErrNotCoveredYet is returned from data accessors if the underlying snapshot
	// is being generated currently and the requested data item is not yet in the
	// range of accounts covered.
	ErrNotCoveredYet = errors.New("not covered yet")

	// ErrNotConstructed is returned if a caller wants to iterate the snapshot
	// while its generation is not yet finished.
	ErrNotConstructed = errors.New("snapshot is not constructed")

	// errSnapshotCycle is returned if an attempt is made to insert a snapshot
	// that forms a cycle in the snapshot tree.
	errSnapshotCycle = errors.New("snapshot cycle")
)

// Snapshot represents the functionality supported by a snapshot storage layer.
type Snapshot interface {
	// Root returns the root hash for which this snapshot was made.
	Root() common.Hash

	// Account directly retrieves the account associated with a particular hash in
	// the snapshot slim data format.
	Account(hash common.Hash) (*Account, error)

	// AccountRLP directly retrieves the account RLP associated with a particular
	// hash in the snapshot slim data format.
	AccountRLP(hash common.Hash) ([]byte, error)

	// Storage directly retrieves the storage data associated with a particular hash,
	// within a particular account.
	Storage(accountHash, storageHash common.Hash) ([]byte, error)
}

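// NOTE: exampleReadThroughSnapshot is an illustrative sketch added for
// documentation purposes only; it is not part of the upstream file. It shows
// how a caller might use the read-only Snapshot interface above together with
// Tree.Snapshot (declared further down): resolve a layer by its state root and
// read an account plus one of its slots in the slim snapshot format. The
// function name and error wrapping are assumptions for the example.
func exampleReadThroughSnapshot(t *Tree, stateRoot, accountHash, slotHash common.Hash) (*Account, []byte, error) {
	snap := t.Snapshot(stateRoot)
	if snap == nil {
		return nil, nil, fmt.Errorf("no snapshot maintained for root %#x", stateRoot)
	}
	// Account and Storage may return ErrSnapshotStale or ErrNotCoveredYet;
	// callers are expected to fall back to plain trie lookups in those cases.
	account, err := snap.Account(accountHash)
	if err != nil {
		return nil, nil, err
	}
	slot, err := snap.Storage(accountHash, slotHash)
	if err != nil {
		return nil, nil, err
	}
	return account, slot, nil
}
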
// snapshot is the internal version of the snapshot data layer that supports some
// additional methods compared to the public API.
type snapshot interface {
	Snapshot

	// Parent returns the subsequent layer of a snapshot, or nil if the base was
	// reached.
	//
	// Note, the method is an internal helper to avoid type switching between the
	// disk and diff layers. There is no locking involved.
	Parent() snapshot

	// Update creates a new layer on top of the existing snapshot diff tree with
	// the specified data items.
	//
	// Note, the maps are retained by the method to avoid copying everything.
	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer

	// Journal commits an entire diff hierarchy to disk into a single journal entry.
	// This is meant to be used during shutdown to persist the snapshot without
	// flattening everything down (bad for reorgs).
	Journal(buffer *bytes.Buffer) (common.Hash, error)

	// LegacyJournal is basically identical to Journal. It's the legacy version for
	// flushing a legacy journal. Now the only purpose of this function is testing.
	LegacyJournal(buffer *bytes.Buffer) (common.Hash, error)

	// Stale returns whether this layer has become stale (was flattened across) or
	// if it's still live.
	Stale() bool

	// AccountIterator creates an account iterator over an arbitrary layer.
	AccountIterator(seek common.Hash) AccountIterator

	// StorageIterator creates a storage iterator over an arbitrary layer.
	StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
}

// Tree is an Ethereum state snapshot tree. It consists of one persistent base
// layer backed by a key-value store, on top of which arbitrarily many in-memory
// diff layers are stacked. The memory diffs can form a tree with branching, but
// the disk layer is singleton and common to all. If a reorg goes deeper than the
// disk layer, everything needs to be deleted.
//
// The goal of a state snapshot is twofold: to allow direct access to account and
// storage data to avoid expensive multi-level trie lookups; and to allow sorted,
// cheap iteration of the account/storage tries for sync aid.
type Tree struct {
	diskdb ethdb.KeyValueStore      // Persistent database to store the snapshot
	triedb *trie.Database           // In-memory cache to access the trie through
	cache  int                      // Megabytes permitted to use for read caches
	layers map[common.Hash]snapshot // Collection of all known layers
	lock   sync.RWMutex
}

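// Illustrative sketch (not part of the upstream file): how a node might open
// the snapshot tree defined above at startup using New, declared just below.
// The cache size, async flag and recovery flag are assumed values chosen only
// for the example.
func exampleOpenTree(diskdb ethdb.KeyValueStore, triedb *trie.Database, headRoot common.Hash) *Tree {
	// 256 MB of read cache, build asynchronously in the background and don't
	// assume we're recovering from a crash mid-generation.
	return New(diskdb, triedb, 256, headRoot, true, false)
}
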
// New attempts to load an already existing snapshot from a persistent key-value
// store (with a number of memory layers from a journal), ensuring that the head
// of the snapshot matches the expected one.
//
// If the snapshot is missing or the disk layer is broken, the entire snapshot is
// deleted and will be reconstructed from scratch based on the tries in the
// key-value store, on a background thread. If the memory layers from the journal
// are not continuous with the disk layer, or the journal is missing, all diffs
// will be discarded if it's in "recovery" mode; otherwise a rebuild is mandatory.
func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, recovery bool) *Tree {
	// Create a new, empty snapshot tree
	snap := &Tree{
		diskdb: diskdb,
		triedb: triedb,
		cache:  cache,
		layers: make(map[common.Hash]snapshot),
	}
	if !async {
		defer snap.waitBuild()
	}
	// Attempt to load a previously persisted snapshot and rebuild one if failed
	head, err := loadSnapshot(diskdb, triedb, cache, root, recovery)
	if err != nil {
		log.Warn("Failed to load snapshot, regenerating", "err", err)
		snap.Rebuild(root)
		return snap
	}
	// Existing snapshot loaded, seed all the layers
	for head != nil {
		snap.layers[head.Root()] = head
		head = head.Parent()
	}
	return snap
}

// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
// to be used by tests to ensure we're testing what we believe we are.
func (t *Tree) waitBuild() {
	// Find the rebuild termination channel
	var done chan struct{}

	t.lock.RLock()
	for _, layer := range t.layers {
		if layer, ok := layer.(*diskLayer); ok {
			done = layer.genPending
			break
		}
	}
	t.lock.RUnlock()

	// Wait until the snapshot is generated
	if done != nil {
		<-done
	}
}

// Snapshot retrieves a snapshot belonging to the given block root, or nil if no
// snapshot is maintained for that block.
func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.layers[blockRoot]
}

// Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all).
func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
	// special case that can only happen for Clique networks where empty blocks
	// don't modify the state (0 block subsidy).
	//
	// Although we could silently ignore this internally, it should be the caller's
	// responsibility to avoid even attempting to insert such a snapshot.
	if blockRoot == parentRoot {
		return errSnapshotCycle
	}
	// Generate a new snapshot on top of the parent
	parent := t.Snapshot(parentRoot).(snapshot)
	if parent == nil {
		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
	}
	snap := parent.Update(blockRoot, destructs, accounts, storage)

	// Save the new snapshot for later
	t.lock.Lock()
	defer t.lock.Unlock()

	t.layers[snap.root] = snap
	return nil
}

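// Illustrative sketch (assumed names, not part of the upstream file): how a
// block processor might translate one block's state changes into an Update
// call, with destructed accounts, updated account RLPs and dirty storage slots
// all keyed by their hashes.
func exampleApplyBlockDiff(t *Tree, blockRoot, parentRoot common.Hash, deleted []common.Hash, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
	destructs := make(map[common.Hash]struct{}, len(deleted))
	for _, hash := range deleted {
		destructs[hash] = struct{}{}
	}
	// Update retains the maps, so the caller must not mutate them afterwards.
	return t.Update(blockRoot, parentRoot, destructs, accounts, storage)
}
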
// Cap traverses downwards the snapshot tree from a head block hash until the
// number of allowed layers is crossed. All layers beyond the permitted number
// are flattened downwards.
func (t *Tree) Cap(root common.Hash, layers int) error {
	// Retrieve the head snapshot to cap from
	snap := t.Snapshot(root)
	if snap == nil {
		return fmt.Errorf("snapshot [%#x] missing", root)
	}
	diff, ok := snap.(*diffLayer)
	if !ok {
		return fmt.Errorf("snapshot [%#x] is disk layer", root)
	}
	// If the generator is still running, use a more aggressive cap
	diff.origin.lock.RLock()
	if diff.origin.genMarker != nil && layers > 8 {
		layers = 8
	}
	diff.origin.lock.RUnlock()

	// Run the internal capping and discard all stale layers
	t.lock.Lock()
	defer t.lock.Unlock()

	// Flattening the bottom-most diff layer requires special casing since there's
	// no child to rewire to the grandparent. In that case we can fake a temporary
	// child for the capping and then remove it.
	var persisted *diskLayer

	switch layers {
	case 0:
		// If full commit was requested, flatten the diffs and merge onto disk
		diff.lock.RLock()
		base := diffToDisk(diff.flatten().(*diffLayer))
		diff.lock.RUnlock()

		// Replace the entire snapshot tree with the flat base
		t.layers = map[common.Hash]snapshot{base.root: base}
		return nil

	case 1:
		// If full flattening was requested, flatten the diffs but only merge if the
		// memory limit was reached
		var (
			bottom *diffLayer
			base   *diskLayer
		)
		diff.lock.RLock()
		bottom = diff.flatten().(*diffLayer)
		if bottom.memory >= aggregatorMemoryLimit {
			base = diffToDisk(bottom)
		}
		diff.lock.RUnlock()

		// If all diff layers were removed, replace the entire snapshot tree
		if base != nil {
			t.layers = map[common.Hash]snapshot{base.root: base}
			return nil
		}
		// Merge the new aggregated layer into the snapshot tree, clean stales below
		t.layers[bottom.root] = bottom

	default:
		// Many layers requested to be retained, cap normally
		persisted = t.cap(diff, layers)
	}
	// Remove any layer that is stale or links into a stale layer
	children := make(map[common.Hash][]common.Hash)
	for root, snap := range t.layers {
		if diff, ok := snap.(*diffLayer); ok {
			parent := diff.parent.Root()
			children[parent] = append(children[parent], root)
		}
	}
	var remove func(root common.Hash)
	remove = func(root common.Hash) {
		delete(t.layers, root)
		for _, child := range children[root] {
			remove(child)
		}
		delete(children, root)
	}
	for root, snap := range t.layers {
		if snap.Stale() {
			remove(root)
		}
	}
	// If the disk layer was modified, regenerate all the cumulative blooms
	if persisted != nil {
		var rebloom func(root common.Hash)
		rebloom = func(root common.Hash) {
			if diff, ok := t.layers[root].(*diffLayer); ok {
				diff.rebloom(persisted)
			}
			for _, child := range children[root] {
				rebloom(child)
			}
		}
		rebloom(persisted.root)
	}
	return nil
}

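// Illustrative sketch (assumed names and retention depth, not part of the
// upstream file): the typical per-block follow-up to Update is a Cap call that
// bounds the number of in-memory diff layers; Cap(root, 0) would instead
// flatten everything into the disk layer.
func exampleCapAfterBlock(t *Tree, headRoot common.Hash) error {
	// Keep roughly 128 diff layers around for reorg handling; the exact limit
	// is a caller policy, not something this package mandates.
	return t.Cap(headRoot, 128)
}
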
// cap traverses downwards the diff tree until the number of allowed layers is
// crossed. All diffs beyond the permitted number are flattened downwards. If the
// layer limit is reached, the memory cap is also enforced (but not before).
//
// The method returns the new disk layer if diffs were persisted into it.
func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
	// Dive until we run out of layers or reach the persistent database
	for ; layers > 2; layers-- {
		// If we still have diff layers below, continue down
		if parent, ok := diff.parent.(*diffLayer); ok {
			diff = parent
		} else {
			// Diff stack too shallow, return without modifications
			return nil
		}
	}
	// We're out of layers, flatten anything below, stopping if it's the disk layer
	// or if the memory limit is not yet exceeded.
	switch parent := diff.parent.(type) {
	case *diskLayer:
		return nil

	case *diffLayer:
		// Flatten the parent into the grandparent. The flattening internally obtains a
		// write lock on the grandparent.
		flattened := parent.flatten().(*diffLayer)
		t.layers[flattened.root] = flattened

		diff.lock.Lock()
		defer diff.lock.Unlock()

		diff.parent = flattened
		if flattened.memory < aggregatorMemoryLimit {
			// Accumulator layer is smaller than the limit, so we can abort, unless
			// there's a snapshot being generated currently. In that case, the trie
			// will move from underneath the generator so we **must** merge all the
			// partial data down into the snapshot and restart the generation.
			if flattened.parent.(*diskLayer).genAbort == nil {
				return nil
			}
		}
	default:
		panic(fmt.Sprintf("unknown data layer: %T", parent))
	}
	// If the bottom-most layer is larger than our memory cap, persist to disk
	bottom := diff.parent.(*diffLayer)

	bottom.lock.RLock()
	base := diffToDisk(bottom)
	bottom.lock.RUnlock()

	t.layers[base.root] = base
	diff.parent = base
	return base
}

// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
// it. The method will panic if called onto a non-bottom-most diff layer.
//
// The disk layer persistence should be operated in an atomic way. All updates
// should be discarded if the whole transition is not finished.
func diffToDisk(bottom *diffLayer) *diskLayer {
	var (
		base  = bottom.parent.(*diskLayer)
		batch = base.diskdb.NewBatch()
		stats *generatorStats
	)
	// If the disk layer is running a snapshot generator, abort it
	if base.genAbort != nil {
		abort := make(chan *generatorStats)
		base.genAbort <- abort
		stats = <-abort
	}
	// Put the deletion in the batch writer, flush all updates in the final step.
	rawdb.DeleteSnapshotRoot(batch)

	// Mark the original base as stale as we're going to create a new wrapper
	base.lock.Lock()
	if base.stale {
		panic("parent disk layer is stale") // we've committed into the same base from two children, boo
	}
	base.stale = true
	base.lock.Unlock()

	// Destroy all the destructed accounts from the database
	for hash := range bottom.destructSet {
		// Skip any account not covered yet by the snapshot
		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
			continue
		}
		// Remove all storage slots
		rawdb.DeleteAccountSnapshot(batch, hash)
		base.cache.Set(hash[:], nil)

		it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
		for it.Next() {
			if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
				batch.Delete(key)
				base.cache.Del(key[1:])

				snapshotFlushStorageItemMeter.Mark(1)
			}
		}
		it.Release()
	}
	// Push all updated accounts into the database
	for hash, data := range bottom.accountData {
		// Skip any account not covered yet by the snapshot
		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
			continue
		}
		// Push the account to disk
		rawdb.WriteAccountSnapshot(batch, hash, data)
		base.cache.Set(hash[:], data)
		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))

		snapshotFlushAccountItemMeter.Mark(1)
		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
	}
	// Push all the storage slots into the database
	for accountHash, storage := range bottom.storageData {
		// Skip any account not covered yet by the snapshot
		if base.genMarker != nil && bytes.Compare(accountHash[:], base.genMarker) > 0 {
			continue
		}
		// Generation might be mid-account, track that case too
		midAccount := base.genMarker != nil && bytes.Equal(accountHash[:], base.genMarker[:common.HashLength])

		for storageHash, data := range storage {
			// Skip any slot not covered yet by the snapshot
			if midAccount && bytes.Compare(storageHash[:], base.genMarker[common.HashLength:]) > 0 {
				continue
			}
			if len(data) > 0 {
				rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
				base.cache.Set(append(accountHash[:], storageHash[:]...), data)
				snapshotCleanStorageWriteMeter.Mark(int64(len(data)))
			} else {
				rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
				base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
			}
			snapshotFlushStorageItemMeter.Mark(1)
			snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
		}
	}
	// Update the snapshot block marker and write any remainder data
	rawdb.WriteSnapshotRoot(batch, bottom.root)

	// Write out the generator marker
	entry := journalGenerator{
		Done:   base.genMarker == nil,
		Marker: base.genMarker,
	}
	if stats != nil {
		entry.Wiping = (stats.wiping != nil)
		entry.Accounts = stats.accounts
		entry.Slots = stats.slots
		entry.Storage = uint64(stats.storage)
	}
	blob, err := rlp.EncodeToBytes(entry)
	if err != nil {
		panic(fmt.Sprintf("Failed to RLP encode generator %v", err))
	}
	rawdb.WriteSnapshotGenerator(batch, blob)

	// Flush all the updates in the single db operation. Ensure the
	// disk layer transition is atomic.
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write leftover snapshot", "err", err)
	}
	log.Debug("Journalled disk layer", "root", bottom.root, "complete", base.genMarker == nil)
	res := &diskLayer{
		root:       bottom.root,
		cache:      base.cache,
		diskdb:     base.diskdb,
		triedb:     base.triedb,
		genMarker:  base.genMarker,
		genPending: base.genPending,
	}
	// If snapshot generation hasn't finished yet, port over all the stats and
	// continue where the previous round left off.
	//
	// Note, the `base.genAbort` comparison is not used normally, it's checked
	// to allow the tests to play with the marker without triggering this path.
	if base.genMarker != nil && base.genAbort != nil {
		res.genMarker = base.genMarker
		res.genAbort = make(chan chan *generatorStats)
		go res.generate(stats)
	}
	return res
}

// Journal commits an entire diff hierarchy to disk into a single journal entry.
// This is meant to be used during shutdown to persist the snapshot without
// flattening everything down (bad for reorgs).
//
// The method returns the root hash of the base layer that needs to be persisted
// to disk as a trie too, to allow continuing any pending generation op.
func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
	// Retrieve the head snapshot to journal from
	snap := t.Snapshot(root)
	if snap == nil {
		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
	}
	// Run the journaling
	t.lock.Lock()
	defer t.lock.Unlock()

	// Firstly write out the metadata of the journal
	journal := new(bytes.Buffer)
	if err := rlp.Encode(journal, journalVersion); err != nil {
		return common.Hash{}, err
	}
	diskroot := t.diskRoot()
	if diskroot == (common.Hash{}) {
		return common.Hash{}, errors.New("invalid disk root")
	}
	// Secondly write out the disk layer root, ensuring the
	// diff journal is continuous with the disk layer.
	if err := rlp.Encode(journal, diskroot); err != nil {
		return common.Hash{}, err
	}
	// Finally write out the journal of each layer in reverse order.
	base, err := snap.(snapshot).Journal(journal)
	if err != nil {
		return common.Hash{}, err
	}
	// Store the journal into the database and return
	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
	return base, nil
}

// LegacyJournal is basically identical to Journal. It's the legacy version for
// flushing a legacy journal. Now the only purpose of this function is testing.
func (t *Tree) LegacyJournal(root common.Hash) (common.Hash, error) {
	// Retrieve the head snapshot to journal from
	snap := t.Snapshot(root)
	if snap == nil {
		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
	}
	// Run the journaling
	t.lock.Lock()
	defer t.lock.Unlock()

	journal := new(bytes.Buffer)
	base, err := snap.(snapshot).LegacyJournal(journal)
	if err != nil {
		return common.Hash{}, err
	}
	// Store the journal into the database and return
	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
	return base, nil
}

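// Illustrative sketch (assumed name, not part of the upstream file): using
// Journal during shutdown to persist the whole diff hierarchy so that New can
// reload it on the next start without flattening the layers.
func examplePersistOnShutdown(t *Tree, headRoot common.Hash) error {
	base, err := t.Journal(headRoot)
	if err != nil {
		return err
	}
	// The returned root identifies the disk layer; the caller should make sure
	// the matching trie is persisted too so any pending generation can resume.
	log.Info("Persisted snapshot journal", "head", headRoot, "base", base)
	return nil
}
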
// Rebuild wipes all available snapshot data from the persistent database and
// discards all caches and diff layers. Afterwards, it starts a new snapshot
// generator with the given root hash.
func (t *Tree) Rebuild(root common.Hash) {
	t.lock.Lock()
	defer t.lock.Unlock()

	// Firstly delete any recovery flag in the database, because now we are
	// building a brand new snapshot.
	rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)

	// Track whether there's a wipe currently running and keep it alive if so
	var wiper chan struct{}

	// Iterate over and mark all layers stale
	for _, layer := range t.layers {
		switch layer := layer.(type) {
		case *diskLayer:
			// If the base layer is generating, abort it and save
			if layer.genAbort != nil {
				abort := make(chan *generatorStats)
				layer.genAbort <- abort

				if stats := <-abort; stats != nil {
					wiper = stats.wiping
				}
			}
			// Layer should be inactive now, mark it as stale
			layer.lock.Lock()
			layer.stale = true
			layer.lock.Unlock()

		case *diffLayer:
			// If the layer is a simple diff, simply mark as stale
			layer.lock.Lock()
			atomic.StoreUint32(&layer.stale, 1)
			layer.lock.Unlock()

		default:
			panic(fmt.Sprintf("unknown layer type: %T", layer))
		}
	}
	// Start generating a new snapshot from scratch on a background thread. The
	// generator will run a wiper first if there's not one running right now.
	log.Info("Rebuilding state snapshot")
	t.layers = map[common.Hash]snapshot{
		root: generateSnapshot(t.diskdb, t.triedb, t.cache, root, wiper),
	}
}

// AccountIterator creates a new account iterator for the specified root hash and
// seeks to a starting account hash.
func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
	ok, err := t.generating()
	if err != nil {
		return nil, err
	}
	if ok {
		return nil, ErrNotConstructed
	}
	return newFastAccountIterator(t, root, seek)
}

// StorageIterator creates a new storage iterator for the specified root hash and
// account. The iterator will be moved to the specified start position.
func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
	ok, err := t.generating()
	if err != nil {
		return nil, err
	}
	if ok {
		return nil, ErrNotConstructed
	}
	return newFastStorageIterator(t, root, account, seek)
}

// disklayer is an internal helper function to return the disk layer.
// The lock of the snapshot tree is assumed to be held already.
func (t *Tree) disklayer() *diskLayer {
	var snap snapshot
	for _, s := range t.layers {
		snap = s
		break
	}
	if snap == nil {
		return nil
	}
	switch layer := snap.(type) {
	case *diskLayer:
		return layer
	case *diffLayer:
		return layer.origin
	default:
		panic(fmt.Sprintf("%T: undefined layer", snap))
	}
}

// diskRoot is an internal helper function to return the disk layer root.
// The lock of the snapshot tree is assumed to be held already.
func (t *Tree) diskRoot() common.Hash {
	disklayer := t.disklayer()
	if disklayer == nil {
		return common.Hash{}
	}
	return disklayer.Root()
}

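// Illustrative sketch (assumed names, not part of the upstream file): walking
// all accounts of a fully generated snapshot in hash order via the
// AccountIterator constructor defined above, starting from the zero hash.
func exampleWalkAccounts(t *Tree, root common.Hash, visit func(hash common.Hash, accountRLP []byte) bool) error {
	it, err := t.AccountIterator(root, common.Hash{})
	if err != nil {
		return err // e.g. ErrNotConstructed while generation is still running
	}
	defer it.Release()

	for it.Next() {
		if !visit(it.Hash(), it.Account()) {
			break
		}
	}
	return it.Error()
}
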
// generating is an internal helper function which reports whether the snapshot
// is still under construction.
func (t *Tree) generating() (bool, error) {
	t.lock.Lock()
	defer t.lock.Unlock()

	layer := t.disklayer()
	if layer == nil {
		return false, errors.New("disk layer is missing")
	}
	layer.lock.RLock()
	defer layer.lock.RUnlock()
	return layer.genMarker != nil, nil
}

// DiskRoot is an external helper function to return the disk layer root.
func (t *Tree) DiskRoot() common.Hash {
	t.lock.Lock()
	defer t.lock.Unlock()

	return t.diskRoot()
}
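
// Illustrative sketch (assumed name, not part of the upstream file): if the
// tree has no layer for the expected head root, wipe everything with Rebuild
// and let the background generator reconstruct the snapshot from the trie at
// that root; DiskRoot then reports the freshly seeded disk layer.
func exampleEnsureSnapshot(t *Tree, headRoot common.Hash) {
	if t.Snapshot(headRoot) != nil {
		return // a layer already exists for the head, nothing to do
	}
	t.Rebuild(headRoot)
	log.Info("Snapshot rebuild triggered", "root", headRoot, "disk", t.DiskRoot())
}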