github.com/Debrief-BC/go-debrief@v0.0.0-20200420203408-0c26ca968123/core/state/snapshot/snapshot.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

// Package snapshot implements a journalled, dynamic state dump.
package snapshot

import (
	"bytes"
	"errors"
	"fmt"
	"sync"
	"sync/atomic"

	"github.com/Debrief-BC/go-debrief/common"
	"github.com/Debrief-BC/go-debrief/core/rawdb"
	"github.com/Debrief-BC/go-debrief/ethdb"
	"github.com/Debrief-BC/go-debrief/log"
	"github.com/Debrief-BC/go-debrief/metrics"
	"github.com/Debrief-BC/go-debrief/trie"
)

var (
	snapshotCleanAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil)
	snapshotCleanAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil)
	snapshotCleanAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil)
	snapshotCleanAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil)
	snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil)

	snapshotCleanStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil)
	snapshotCleanStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil)
	snapshotCleanStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil)
	snapshotCleanStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil)
	snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil)

	snapshotDirtyAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil)
	snapshotDirtyAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil)
	snapshotDirtyAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil)
	snapshotDirtyAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
	snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)

	snapshotDirtyStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
	snapshotDirtyStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
	snapshotDirtyStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
	snapshotDirtyStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil)
	snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil)

	snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
	snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))

	snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil)
	snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil)
	snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil)
	snapshotFlushStorageSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/size", nil)

	snapshotBloomIndexTimer = metrics.NewRegisteredResettingTimer("state/snapshot/bloom/index", nil)
	snapshotBloomErrorGauge = metrics.NewRegisteredGaugeFloat64("state/snapshot/bloom/error", nil)

	snapshotBloomAccountTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/account/truehit", nil)
	snapshotBloomAccountFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/falsehit", nil)
	snapshotBloomAccountMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/account/miss", nil)

	snapshotBloomStorageTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/truehit", nil)
	snapshotBloomStorageFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/falsehit", nil)
	snapshotBloomStorageMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/miss", nil)

	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
	// layer had been invalidated due to the chain progressing forward far enough
	// to not maintain the layer's original state.
	ErrSnapshotStale = errors.New("snapshot stale")

	// ErrNotCoveredYet is returned from data accessors if the underlying snapshot
	// is being generated currently and the requested data item is not yet in the
	// range of accounts covered.
	ErrNotCoveredYet = errors.New("not covered yet")

	// errSnapshotCycle is returned if a snapshot is attempted to be inserted
	// that forms a cycle in the snapshot tree.
	errSnapshotCycle = errors.New("snapshot cycle")
)
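
// readAccountWithRetry is a hypothetical helper, not part of the original
// file, sketching how a caller might react to the sentinel errors above:
// ErrSnapshotStale means the layer was flattened away while we held it, so
// the snapshot must be re-resolved from the tree; ErrNotCoveredYet means the
// background generator simply hasn't reached this account yet. The retry
// policy here is an illustrative assumption, not prescribed by this package.
func readAccountWithRetry(t *Tree, root, account common.Hash) (*Account, error) {
	for {
		snap := t.Snapshot(root)
		if snap == nil {
			return nil, fmt.Errorf("snapshot [%#x] missing", root)
		}
		acc, err := snap.Account(account)
		switch {
		case err == nil:
			return acc, nil
		case errors.Is(err, ErrSnapshotStale):
			continue // layer was flattened underneath us, re-resolve and retry
		case errors.Is(err, ErrNotCoveredYet):
			return nil, err // generation hasn't covered this account yet
		default:
			return nil, err
		}
	}
}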

// Snapshot represents the functionality supported by a snapshot storage layer.
type Snapshot interface {
	// Root returns the root hash for which this snapshot was made.
	Root() common.Hash

	// Account directly retrieves the account associated with a particular hash in
	// the snapshot slim data format.
	Account(hash common.Hash) (*Account, error)

	// AccountRLP directly retrieves the account RLP associated with a particular
	// hash in the snapshot slim data format.
	AccountRLP(hash common.Hash) ([]byte, error)

	// Storage directly retrieves the storage data associated with a particular hash,
	// within a particular account.
	Storage(accountHash, storageHash common.Hash) ([]byte, error)
}
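
// dumpSlot is an illustrative sketch, not part of the original file, showing
// how the read API above is addressed: both the account and the slot are
// identified by secure hashes (the hash of the address and of the storage
// key), matching the snapshot's on-disk layout. The hashes passed in are
// assumed to be precomputed by the caller.
func dumpSlot(snap Snapshot, accountHash, slotHash common.Hash) {
	if blob, err := snap.Storage(accountHash, slotHash); err == nil {
		fmt.Printf("slot %x of account %x = %x\n", slotHash, accountHash, blob)
	}
}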

// snapshot is the internal version of the snapshot data layer that supports some
// additional methods compared to the public API.
type snapshot interface {
	Snapshot

	// Parent returns the subsequent layer of a snapshot, or nil if the base was
	// reached.
	//
	// Note, the method is an internal helper to avoid type switching between the
	// disk and diff layers. There is no locking involved.
	Parent() snapshot

	// Update creates a new layer on top of the existing snapshot diff tree with
	// the specified data items.
	//
	// Note, the maps are retained by the method to avoid copying everything.
	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer

	// Journal commits an entire diff hierarchy to disk into a single journal entry.
	// This is meant to be used during shutdown to persist the snapshot without
	// flattening everything down (bad for reorgs).
	Journal(buffer *bytes.Buffer) (common.Hash, error)

	// Stale returns whether this layer has become stale (was flattened across) or
	// if it's still live.
	Stale() bool

	// AccountIterator creates an account iterator over an arbitrary layer.
	AccountIterator(seek common.Hash) AccountIterator
}

// Tree is an Ethereum state snapshot tree. It consists of one persistent base
// layer backed by a key-value store, on top of which arbitrarily many in-memory
// diff layers are stacked. The memory diffs can form a tree with branching, but
// the disk layer is singleton and common to all. If a reorg goes deeper than the
// disk layer, everything needs to be deleted.
//
// The goal of a state snapshot is twofold: to allow direct access to account and
// storage data to avoid expensive multi-level trie lookups; and to allow sorted,
// cheap iteration of the account/storage tries for sync aid.
type Tree struct {
	diskdb ethdb.KeyValueStore      // Persistent database to store the snapshot
	triedb *trie.Database           // In-memory cache to access the trie through
	cache  int                      // Megabytes permitted to use for read caches
	layers map[common.Hash]snapshot // Collection of all known layers
	lock   sync.RWMutex
}

// New attempts to load an already existing snapshot from a persistent key-value
// store (with a number of memory layers from a journal), ensuring that the head
// of the snapshot matches the expected one.
//
// If the snapshot is missing or inconsistent, the entirety is deleted and will
// be reconstructed from scratch based on the tries in the key-value store, on a
// background thread.
func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool) *Tree {
	// Create a new, empty snapshot tree
	snap := &Tree{
		diskdb: diskdb,
		triedb: triedb,
		cache:  cache,
		layers: make(map[common.Hash]snapshot),
	}
	if !async {
		defer snap.waitBuild()
	}
	// Attempt to load a previously persisted snapshot and rebuild one if failed
	head, err := loadSnapshot(diskdb, triedb, cache, root)
	if err != nil {
		log.Warn("Failed to load snapshot, regenerating", "err", err)
		snap.Rebuild(root)
		return snap
	}
	// Existing snapshot loaded, seed all the layers
	for head != nil {
		snap.layers[head.Root()] = head
		head = head.Parent()
	}
	return snap
}
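
// exampleNewTree is a hypothetical sketch, not part of the original file, of
// wiring up a snapshot tree over a throwaway in-memory database. It assumes
// rawdb.NewMemoryDatabase and trie.NewDatabase from this fork's sibling
// packages; the 256 MB cache size is an arbitrary choice. With async=false
// the constructor blocks until generation completes, which suits tests.
func exampleNewTree(stateRoot common.Hash) *Tree {
	diskdb := rawdb.NewMemoryDatabase() // ephemeral backing key-value store
	triedb := trie.NewDatabase(diskdb)  // trie access needed for regeneration
	return New(diskdb, triedb, 256, stateRoot, false /* async */)
}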

// waitBuild blocks until the snapshot finishes rebuilding. This method is meant
// to be used by tests to ensure we're testing what we believe we are.
func (t *Tree) waitBuild() {
	// Find the rebuild termination channel
	var done chan struct{}

	t.lock.RLock()
	for _, layer := range t.layers {
		if layer, ok := layer.(*diskLayer); ok {
			done = layer.genPending
			break
		}
	}
	t.lock.RUnlock()

	// Wait until the snapshot is generated
	if done != nil {
		<-done
	}
}

// Snapshot retrieves a snapshot belonging to the given block root, or nil if no
// snapshot is maintained for that block.
func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
	t.lock.RLock()
	defer t.lock.RUnlock()

	return t.layers[blockRoot]
}

// Update adds a new snapshot into the tree, if that can be linked to an existing
// old parent. It is disallowed to insert a disk layer (the origin of all).
func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
	// special case that can only happen for Clique networks where empty blocks
	// don't modify the state (0 block subsidy).
	//
	// Although we could silently ignore this internally, it should be the caller's
	// responsibility to avoid even attempting to insert such a snapshot.
	if blockRoot == parentRoot {
		return errSnapshotCycle
	}
	// Generate a new snapshot on top of the parent
	parent := t.Snapshot(parentRoot)
	if parent == nil {
		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
	}
	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage)

	// Save the new snapshot for later
	t.lock.Lock()
	defer t.lock.Unlock()

	t.layers[snap.root] = snap
	return nil
}
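
// exampleUpdate is a hypothetical sketch, not part of the original file,
// showing the shape of Update's inputs: everything is keyed by hash, account
// payloads use the slim snapshot format, and (per diffToDisk below) an empty
// or nil storage value marks a deleted slot. The hashes and payload passed in
// are placeholders supplied by the caller. Note the maps are retained by the
// layer, so callers must not reuse them afterwards.
func exampleUpdate(t *Tree, parentRoot, blockRoot, accHash common.Hash, accRLP []byte) error {
	destructs := map[common.Hash]struct{}{}             // accounts wiped in this block
	accounts := map[common.Hash][]byte{accHash: accRLP} // updated accounts, slim format
	storage := map[common.Hash]map[common.Hash][]byte{  // dirty slots, per account
		accHash: {common.Hash{}: nil}, // nil value = slot deleted (placeholder key)
	}
	return t.Update(blockRoot, parentRoot, destructs, accounts, storage)
}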

// Cap traverses downwards the snapshot tree from a head block hash until the
// number of allowed layers is crossed. All layers beyond the permitted number
// are flattened downwards.
func (t *Tree) Cap(root common.Hash, layers int) error {
	// Retrieve the head snapshot to cap from
	snap := t.Snapshot(root)
	if snap == nil {
		return fmt.Errorf("snapshot [%#x] missing", root)
	}
	diff, ok := snap.(*diffLayer)
	if !ok {
		return fmt.Errorf("snapshot [%#x] is disk layer", root)
	}
	// Run the internal capping and discard all stale layers
	t.lock.Lock()
	defer t.lock.Unlock()

	// Flattening the bottom-most diff layer requires special casing since there's
	// no child to rewire to the grandparent. In that case we can fake a temporary
	// child for the capping and then remove it.
	var persisted *diskLayer

	switch layers {
	case 0:
		// If full commit was requested, flatten the diffs and merge onto disk
		diff.lock.RLock()
		base := diffToDisk(diff.flatten().(*diffLayer))
		diff.lock.RUnlock()

		// Replace the entire snapshot tree with the flat base
		t.layers = map[common.Hash]snapshot{base.root: base}
		return nil

	case 1:
		// If full flattening was requested, flatten the diffs but only merge if the
		// memory limit was reached
		var (
			bottom *diffLayer
			base   *diskLayer
		)
		diff.lock.RLock()
		bottom = diff.flatten().(*diffLayer)
		if bottom.memory >= aggregatorMemoryLimit {
			base = diffToDisk(bottom)
		}
		diff.lock.RUnlock()

		// If all diff layers were removed, replace the entire snapshot tree
		if base != nil {
			t.layers = map[common.Hash]snapshot{base.root: base}
			return nil
		}
		// Merge the new aggregated layer into the snapshot tree, clean stales below
		t.layers[bottom.root] = bottom

	default:
		// Many layers requested to be retained, cap normally
		persisted = t.cap(diff, layers)
	}
	// Remove any layer that is stale or links into a stale layer
	children := make(map[common.Hash][]common.Hash)
	for root, snap := range t.layers {
		if diff, ok := snap.(*diffLayer); ok {
			parent := diff.parent.Root()
			children[parent] = append(children[parent], root)
		}
	}
	var remove func(root common.Hash)
	remove = func(root common.Hash) {
		delete(t.layers, root)
		for _, child := range children[root] {
			remove(child)
		}
		delete(children, root)
	}
	for root, snap := range t.layers {
		if snap.Stale() {
			remove(root)
		}
	}
	// If the disk layer was modified, regenerate all the cumulative blooms
	if persisted != nil {
		var rebloom func(root common.Hash)
		rebloom = func(root common.Hash) {
			if diff, ok := t.layers[root].(*diffLayer); ok {
				diff.rebloom(persisted)
			}
			for _, child := range children[root] {
				rebloom(child)
			}
		}
		rebloom(persisted.root)
	}
	return nil
}
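
// exampleCap is a hypothetical sketch, not part of the original file, of the
// post-block maintenance a chain processor might do: keep a bounded number of
// diff layers above the disk layer (128 here is an arbitrary illustrative
// choice) and let everything deeper get flattened towards disk.
func exampleCap(t *Tree, currentRoot common.Hash) {
	if err := t.Cap(currentRoot, 128); err != nil {
		log.Error("Failed to cap snapshot tree", "root", currentRoot, "err", err)
	}
}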

// cap traverses downwards the diff tree until the number of allowed layers is
// crossed. All diffs beyond the permitted number are flattened downwards. If the
// layer limit is reached, memory cap is also enforced (but not before).
//
// The method returns the new disk layer if diffs were persisted into it.
func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
	// Dive until we run out of layers or reach the persistent database
	for ; layers > 2; layers-- {
		// If we still have diff layers below, continue down
		if parent, ok := diff.parent.(*diffLayer); ok {
			diff = parent
		} else {
			// Diff stack too shallow, return without modifications
			return nil
		}
	}
	// We're out of layers, flatten anything below, stopping if it's the disk or if
	// the memory limit is not yet exceeded.
	switch parent := diff.parent.(type) {
	case *diskLayer:
		return nil

	case *diffLayer:
		// Flatten the parent into the grandparent. The flattening internally obtains a
		// write lock on grandparent.
		flattened := parent.flatten().(*diffLayer)
		t.layers[flattened.root] = flattened

		diff.lock.Lock()
		defer diff.lock.Unlock()

		diff.parent = flattened
		if flattened.memory < aggregatorMemoryLimit {
			// Accumulator layer is smaller than the limit, so we can abort, unless
			// there's a snapshot being generated currently. In that case, the trie
			// will move from underneath the generator so we **must** merge all the
			// partial data down into the snapshot and restart the generation.
			if flattened.parent.(*diskLayer).genAbort == nil {
				return nil
			}
		}
	default:
		panic(fmt.Sprintf("unknown data layer: %T", parent))
	}
	// If the bottom-most layer is larger than our memory cap, persist to disk
	bottom := diff.parent.(*diffLayer)

	bottom.lock.RLock()
	base := diffToDisk(bottom)
	bottom.lock.RUnlock()

	t.layers[base.root] = base
	diff.parent = base
	return base
}

// diffToDisk merges a bottom-most diff into the persistent disk layer underneath
// it. The method will panic if called on a non-bottom-most diff layer.
func diffToDisk(bottom *diffLayer) *diskLayer {
	var (
		base  = bottom.parent.(*diskLayer)
		batch = base.diskdb.NewBatch()
		stats *generatorStats
	)
	// If the disk layer is running a snapshot generator, abort it
	if base.genAbort != nil {
		abort := make(chan *generatorStats)
		base.genAbort <- abort
		stats = <-abort
	}
	// Start by temporarily deleting the current snapshot block marker. This
	// ensures that in the case of a crash, the entire snapshot is invalidated.
	rawdb.DeleteSnapshotRoot(batch)

	// Mark the original base as stale as we're going to create a new wrapper
	base.lock.Lock()
	if base.stale {
		panic("parent disk layer is stale") // we've committed into the same base from two children, boo
	}
	base.stale = true
	base.lock.Unlock()

	// Destroy all the destructed accounts from the database
	for hash := range bottom.destructSet {
		// Skip any account not covered yet by the snapshot
		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
			continue
		}
		// Remove all storage slots
		rawdb.DeleteAccountSnapshot(batch, hash)
		base.cache.Set(hash[:], nil)

		it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
		for it.Next() {
			if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
				batch.Delete(key)
				base.cache.Del(key[1:])

				snapshotFlushStorageItemMeter.Mark(1)
			}
		}
		it.Release()
	}
	// Push all updated accounts into the database
	for hash, data := range bottom.accountData {
		// Skip any account not covered yet by the snapshot
		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
			continue
		}
		// Push the account to disk
		rawdb.WriteAccountSnapshot(batch, hash, data)
		base.cache.Set(hash[:], data)
		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))

		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Crit("Failed to write account snapshot", "err", err)
			}
			batch.Reset()
		}
		snapshotFlushAccountItemMeter.Mark(1)
		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
	}
	// Push all the storage slots into the database
	for accountHash, storage := range bottom.storageData {
		// Skip any account not covered yet by the snapshot
		if base.genMarker != nil && bytes.Compare(accountHash[:], base.genMarker) > 0 {
			continue
		}
		// Generation might be mid-account, track that case too
		midAccount := base.genMarker != nil && bytes.Equal(accountHash[:], base.genMarker[:common.HashLength])

		for storageHash, data := range storage {
			// Skip any slot not covered yet by the snapshot
			if midAccount && bytes.Compare(storageHash[:], base.genMarker[common.HashLength:]) > 0 {
				continue
			}
			if len(data) > 0 {
				rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
				base.cache.Set(append(accountHash[:], storageHash[:]...), data)
				snapshotCleanStorageWriteMeter.Mark(int64(len(data)))
			} else {
				rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
				base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
			}
			snapshotFlushStorageItemMeter.Mark(1)
			snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				log.Crit("Failed to write storage snapshot", "err", err)
			}
			batch.Reset()
		}
	}
	// Update the snapshot block marker and write any remainder data
	rawdb.WriteSnapshotRoot(batch, bottom.root)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write leftover snapshot", "err", err)
	}
	res := &diskLayer{
		root:       bottom.root,
		cache:      base.cache,
		diskdb:     base.diskdb,
		triedb:     base.triedb,
		genMarker:  base.genMarker,
		genPending: base.genPending,
	}
	// If snapshot generation hasn't finished yet, port over the generator state
	// and continue where the previous round left off.
	//
	// Note, the `base.genAbort` comparison is not used normally, it's checked
	// to allow the tests to play with the marker without triggering this path.
	if base.genMarker != nil && base.genAbort != nil {
		res.genMarker = base.genMarker
		res.genAbort = make(chan chan *generatorStats)
		go res.generate(stats)
	}
	return res
}

// Journal commits an entire diff hierarchy to disk into a single journal entry.
// This is meant to be used during shutdown to persist the snapshot without
// flattening everything down (bad for reorgs).
//
// The method returns the root hash of the base layer that needs to be persisted
// to disk as a trie too to allow continuing any pending generation op.
func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
	// Retrieve the head snapshot to journal from
	snap := t.Snapshot(root)
	if snap == nil {
		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
	}
	// Run the journaling
	t.lock.Lock()
	defer t.lock.Unlock()

	journal := new(bytes.Buffer)
	base, err := snap.(snapshot).Journal(journal)
	if err != nil {
		return common.Hash{}, err
	}
	// Store the journal into the database and return
	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
	return base, nil
}
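
// exampleShutdown is a hypothetical sketch, not part of the original file, of
// the shutdown path: journal the diff hierarchy under the current head root
// and remember the returned base root, which (per the doc above) must also be
// persisted as a trie so any in-flight generation can resume after restart.
func exampleShutdown(t *Tree, headRoot common.Hash) {
	base, err := t.Journal(headRoot)
	if err != nil {
		log.Error("Failed to journal snapshot", "head", headRoot, "err", err)
		return
	}
	log.Info("Snapshot journalled", "head", headRoot, "base", base)
}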

// Rebuild wipes all available snapshot data from the persistent database and
// discards all caches and diff layers. Afterwards, it starts a new snapshot
// generator with the given root hash.
func (t *Tree) Rebuild(root common.Hash) {
	t.lock.Lock()
	defer t.lock.Unlock()

	// Track whether there's a wipe currently running and keep it alive if so
	var wiper chan struct{}

	// Iterate over and mark all layers stale
	for _, layer := range t.layers {
		switch layer := layer.(type) {
		case *diskLayer:
			// If the base layer is generating, abort it and save
			if layer.genAbort != nil {
				abort := make(chan *generatorStats)
				layer.genAbort <- abort

				if stats := <-abort; stats != nil {
					wiper = stats.wiping
				}
			}
			// Layer should be inactive now, mark it as stale
			layer.lock.Lock()
			layer.stale = true
			layer.lock.Unlock()

		case *diffLayer:
			// If the layer is a simple diff, simply mark as stale
			layer.lock.Lock()
			atomic.StoreUint32(&layer.stale, 1)
			layer.lock.Unlock()

		default:
			panic(fmt.Sprintf("unknown layer type: %T", layer))
		}
	}
	// Start generating a new snapshot from scratch on a background thread. The
	// generator will run a wiper first if there's not one running right now.
	log.Info("Rebuilding state snapshot")
	t.layers = map[common.Hash]snapshot{
		root: generateSnapshot(t.diskdb, t.triedb, t.cache, root, wiper),
	}
}

// AccountIterator creates a new account iterator for the specified root hash and
// seeks to a starting account hash.
func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
	return newFastAccountIterator(t, root, seek)
}
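
// exampleIterate is a hypothetical sketch, not part of the original file,
// showing a full sweep of the account space starting from the zero hash. It
// assumes the AccountIterator contract defined elsewhere in this package
// (Next/Hash/Account/Release/Error); the blank assignments stand in for real
// per-account processing.
func exampleIterate(t *Tree, root common.Hash) error {
	it, err := t.AccountIterator(root, common.Hash{})
	if err != nil {
		return err
	}
	defer it.Release()

	for it.Next() {
		_ = it.Hash()    // account hash at the current position
		_ = it.Account() // slim account payload at the current position
	}
	return it.Error()
}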