github.com/codysnider/go-ethereum@v1.10.18-0.20220420071915-14f4ae99222a/core/state/snapshot/snapshot.go

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package snapshot implements a journalled, dynamic state dump.
    18  package snapshot
    19  
    20  import (
    21  	"bytes"
    22  	"errors"
    23  	"fmt"
    24  	"sync"
    25  	"sync/atomic"
    26  
    27  	"github.com/ethereum/go-ethereum/common"
    28  	"github.com/ethereum/go-ethereum/core/rawdb"
    29  	"github.com/ethereum/go-ethereum/ethdb"
    30  	"github.com/ethereum/go-ethereum/log"
    31  	"github.com/ethereum/go-ethereum/metrics"
    32  	"github.com/ethereum/go-ethereum/rlp"
    33  	"github.com/ethereum/go-ethereum/trie"
    34  )
    35  
    36  var (
    37  	snapshotCleanAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil)
    38  	snapshotCleanAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil)
    39  	snapshotCleanAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil)
    40  	snapshotCleanAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil)
    41  	snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil)
    42  
    43  	snapshotCleanStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil)
    44  	snapshotCleanStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil)
    45  	snapshotCleanStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil)
    46  	snapshotCleanStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil)
    47  	snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil)
    48  
    49  	snapshotDirtyAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil)
    50  	snapshotDirtyAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil)
    51  	snapshotDirtyAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil)
    52  	snapshotDirtyAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
    53  	snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)
    54  
    55  	snapshotDirtyStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
    56  	snapshotDirtyStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
    57  	snapshotDirtyStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
    58  	snapshotDirtyStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil)
    59  	snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil)
    60  
    61  	snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
    62  	snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
    63  
    64  	snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil)
    65  	snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil)
    66  	snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil)
    67  	snapshotFlushStorageSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/size", nil)
    68  
    69  	snapshotBloomIndexTimer = metrics.NewRegisteredResettingTimer("state/snapshot/bloom/index", nil)
    70  	snapshotBloomErrorGauge = metrics.NewRegisteredGaugeFloat64("state/snapshot/bloom/error", nil)
    71  
    72  	snapshotBloomAccountTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/account/truehit", nil)
    73  	snapshotBloomAccountFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/falsehit", nil)
    74  	snapshotBloomAccountMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/account/miss", nil)
    75  
    76  	snapshotBloomStorageTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/truehit", nil)
    77  	snapshotBloomStorageFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/falsehit", nil)
    78  	snapshotBloomStorageMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/miss", nil)
    79  
    80  	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
    81  	// layer had been invalidated due to the chain progressing forward far enough
    82  	// to not maintain the layer's original state.
    83  	ErrSnapshotStale = errors.New("snapshot stale")
    84  
    85  	// ErrNotCoveredYet is returned from data accessors if the underlying snapshot
    86  	// is being generated currently and the requested data item is not yet in the
    87  	// range of accounts covered.
    88  	ErrNotCoveredYet = errors.New("not covered yet")
    89  
    90  	// ErrNotConstructed is returned if a caller wants to iterate the snapshot
    91  	// while the generation is not finished yet.
    92  	ErrNotConstructed = errors.New("snapshot is not constructed")
    93  
    94  	// errSnapshotCycle is returned if a snapshot is attempted to be inserted
    95  	// that forms a cycle in the snapshot tree.
    96  	errSnapshotCycle = errors.New("snapshot cycle")
    97  )
    98  
    99  // Snapshot represents the functionality supported by a snapshot storage layer.
   100  type Snapshot interface {
   101  	// Root returns the root hash for which this snapshot was made.
   102  	Root() common.Hash
   103  
   104  	// Account directly retrieves the account associated with a particular hash in
   105  	// the snapshot slim data format.
   106  	Account(hash common.Hash) (*Account, error)
   107  
   108  	// AccountRLP directly retrieves the account RLP associated with a particular
   109  	// hash in the snapshot slim data format.
   110  	AccountRLP(hash common.Hash) ([]byte, error)
   111  
   112  	// Storage directly retrieves the storage data associated with a particular hash,
   113  	// within a particular account.
   114  	Storage(accountHash, storageHash common.Hash) ([]byte, error)
   115  }
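
// A minimal read sketch against this interface (t, root, accHash and slotHash
// are placeholders; snapshot keys are the keccak256 hashes of the address and
// slot key). Accessors may fail with ErrSnapshotStale if the layer has been
// flattened away, or ErrNotCoveredYet while generation is still in progress.
//
//	if snap := t.Snapshot(root); snap != nil {
//		acc, err := snap.Account(accHash) // slim-format account, nil if absent
//		if err != nil {
//			return err // ErrSnapshotStale or ErrNotCoveredYet
//		}
//		_ = acc
//		slot, err := snap.Storage(accHash, slotHash) // raw slot value, nil if empty
//		if err != nil {
//			return err
//		}
//		_ = slot
//	}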
   116  
   117  // snapshot is the internal version of the snapshot data layer that supports some
   118  // additional methods compared to the public API.
   119  type snapshot interface {
   120  	Snapshot
   121  
   122  	// Parent returns the parent layer of a snapshot, or nil if the base layer was
   123  	// reached.
   124  	//
   125  	// Note, the method is an internal helper to avoid type switching between the
   126  	// disk and diff layers. There is no locking involved.
   127  	Parent() snapshot
   128  
   129  	// Update creates a new layer on top of the existing snapshot diff tree with
   130  	// the specified data items.
   131  	//
   132  	// Note, the maps are retained by the method to avoid copying everything.
   133  	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
   134  
   135  	// Journal commits an entire diff hierarchy to disk into a single journal entry.
   136  	// This is meant to be used during shutdown to persist the snapshot without
   137  	// flattening everything down (bad for reorgs).
   138  	Journal(buffer *bytes.Buffer) (common.Hash, error)
   139  
   140  	// Stale returns whether this layer has become stale (was flattened across) or
   141  	// if it's still live.
   142  	Stale() bool
   143  
   144  	// AccountIterator creates an account iterator over an arbitrary layer.
   145  	AccountIterator(seek common.Hash) AccountIterator
   146  
   147  	// StorageIterator creates a storage iterator over an arbitrary layer.
   148  	StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
   149  }
   150  
   151  // Tree is an Ethereum state snapshot tree. It consists of one persistent base
   152  // layer backed by a key-value store, on top of which arbitrarily many in-memory
   153  // diff layers are topped. The memory diffs can form a tree with branching, but
   154  // the disk layer is singleton and common to all. If a reorg goes deeper than the
   155  // disk layer, everything needs to be deleted.
   156  //
   157  // The goal of a state snapshot is twofold: to allow direct access to account and
   158  // storage data to avoid expensive multi-level trie lookups; and to allow sorted,
   159  // cheap iteration of the account/storage tries for sync aid.
   160  type Tree struct {
   161  	diskdb ethdb.KeyValueStore      // Persistent database to store the snapshot
   162  	triedb *trie.Database           // In-memory cache to access the trie through
   163  	cache  int                      // Megabytes permitted to use for read caches
   164  	layers map[common.Hash]snapshot // Collection of all known layers
   165  	lock   sync.RWMutex
   166  
   167  	// Test hooks
   168  	onFlatten func() // Hook invoked when the bottom most diff layers are flattened
   169  }
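
// A small sketch of the layering (rootA and rootB are placeholder roots of two
// competing blocks built on the same parent): every fork gets its own diff
// layer, but all of them resolve misses through any shared ancestor diffs and
// finally the one common disk layer.
//
//	snapA := t.Snapshot(rootA) // fork A: diff(rootA) -> shared diffs -> disk layer
//	snapB := t.Snapshot(rootB) // fork B: diff(rootB) -> shared diffs -> disk layer
//
// A reorg deeper than the disk layer cannot be served from memory; the snapshot
// then has to be rebuilt from the trie.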
   170  
   171  // New attempts to load an already existing snapshot from a persistent key-value
   172  // store (with a number of memory layers from a journal), ensuring that the head
   173  // of the snapshot matches the expected one.
   174  //
   175  // If the snapshot is missing or the disk layer is broken, the snapshot will be
   176  // reconstructed using both the existing data and the state trie.
   177  // The repair happens on a background thread.
   178  //
   179  // If the memory layers in the journal do not match the disk layer (e.g. there is
   180  // a gap) or the journal is missing, there are two repair cases:
   181  //
   182  // - if the 'recovery' parameter is true, all memory diff-layers will be discarded.
   183  //   This case happens when the snapshot is 'ahead' of the state trie.
   184  // - otherwise, the entire snapshot is considered invalid and will be recreated on
   185  //   a background thread.
   186  func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool) (*Tree, error) {
   187  	// Create a new, empty snapshot tree
   188  	snap := &Tree{
   189  		diskdb: diskdb,
   190  		triedb: triedb,
   191  		cache:  cache,
   192  		layers: make(map[common.Hash]snapshot),
   193  	}
   194  	if !async {
   195  		defer snap.waitBuild()
   196  	}
   197  	// Attempt to load a previously persisted snapshot and rebuild one if failed
   198  	head, disabled, err := loadSnapshot(diskdb, triedb, cache, root, recovery)
   199  	if disabled {
   200  		log.Warn("Snapshot maintenance disabled (syncing)")
   201  		return snap, nil
   202  	}
   203  	if err != nil {
   204  		if rebuild {
   205  			log.Warn("Failed to load snapshot, regenerating", "err", err)
   206  			snap.Rebuild(root)
   207  			return snap, nil
   208  		}
   209  		return nil, err // Bail out the error, don't rebuild automatically.
   210  	}
   211  	// Existing snapshot loaded, seed all the layers
   212  	for head != nil {
   213  		snap.layers[head.Root()] = head
   214  		head = head.Parent()
   215  	}
   216  	return snap, nil
   217  }
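
// A construction sketch from a caller's perspective (parameter values are
// illustrative): async decides whether New waits for a pending rebuild, rebuild
// permits regenerating a missing or broken snapshot, and recovery tolerates a
// snapshot that is ahead of the state trie.
//
//	snaps, err := snapshot.New(diskdb, triedb, 256 /* MB cache */, headRoot,
//		true /* async */, true /* rebuild */, false /* recovery */)
//	if err != nil {
//		return nil, err // journal unusable and rebuilding was not allowed
//	}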
   218  
   219  // waitBuild blocks until the snapshot finishes rebuilding. This method is meant
   220  // to be used by tests to ensure we're testing what we believe we are.
   221  func (t *Tree) waitBuild() {
   222  	// Find the rebuild termination channel
   223  	var done chan struct{}
   224  
   225  	t.lock.RLock()
   226  	for _, layer := range t.layers {
   227  		if layer, ok := layer.(*diskLayer); ok {
   228  			done = layer.genPending
   229  			break
   230  		}
   231  	}
   232  	t.lock.RUnlock()
   233  
   234  	// Wait until the snapshot is generated
   235  	if done != nil {
   236  		<-done
   237  	}
   238  }
   239  
   240  // Disable interrupts any pending snapshot generator, deletes all the snapshot
   241  // layers in memory and marks snapshots disabled globally. In order to resume
   242  // the snapshot functionality, the caller must invoke Rebuild.
   243  func (t *Tree) Disable() {
   244  	// Interrupt any live snapshot layers
   245  	t.lock.Lock()
   246  	defer t.lock.Unlock()
   247  
   248  	for _, layer := range t.layers {
   249  		switch layer := layer.(type) {
   250  		case *diskLayer:
   251  			// If the base layer is generating, abort it
   252  			if layer.genAbort != nil {
   253  				abort := make(chan *generatorStats)
   254  				layer.genAbort <- abort
   255  				<-abort
   256  			}
   257  			// Layer should be inactive now, mark it as stale
   258  			layer.lock.Lock()
   259  			layer.stale = true
   260  			layer.lock.Unlock()
   261  
   262  		case *diffLayer:
   263  			// If the layer is a simple diff, simply mark as stale
   264  			layer.lock.Lock()
   265  			atomic.StoreUint32(&layer.stale, 1)
   266  			layer.lock.Unlock()
   267  
   268  		default:
   269  			panic(fmt.Sprintf("unknown layer type: %T", layer))
   270  		}
   271  	}
   272  	t.layers = map[common.Hash]snapshot{}
   273  
   274  	// Delete all snapshot liveness information from the database
   275  	batch := t.diskdb.NewBatch()
   276  
   277  	rawdb.WriteSnapshotDisabled(batch)
   278  	rawdb.DeleteSnapshotRoot(batch)
   279  	rawdb.DeleteSnapshotJournal(batch)
   280  	rawdb.DeleteSnapshotGenerator(batch)
   281  	rawdb.DeleteSnapshotRecoveryNumber(batch)
   282  	// Note, we don't delete the sync progress
   283  
   284  	if err := batch.Write(); err != nil {
   285  		log.Crit("Failed to disable snapshots", "err", err)
   286  	}
   287  }
   288  
   289  // Snapshot retrieves a snapshot belonging to the given block root, or nil if no
   290  // snapshot is maintained for that block.
   291  func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
   292  	t.lock.RLock()
   293  	defer t.lock.RUnlock()
   294  
   295  	return t.layers[blockRoot]
   296  }
   297  
   298  // Snapshots returns the layers visited when traversing downward from the layer
   299  // with the specific root. The number of layers returned is limited by limits.
   300  // If nodisk is set, the disk layer is excluded.
   301  func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot {
   302  	t.lock.RLock()
   303  	defer t.lock.RUnlock()
   304  
   305  	if limits == 0 {
   306  		return nil
   307  	}
   308  	layer := t.layers[root]
   309  	if layer == nil {
   310  		return nil
   311  	}
   312  	var ret []Snapshot
   313  	for {
   314  		if _, isdisk := layer.(*diskLayer); isdisk && nodisk {
   315  			break
   316  		}
   317  		ret = append(ret, layer)
   318  		limits -= 1
   319  		if limits == 0 {
   320  			break
   321  		}
   322  		parent := layer.Parent()
   323  		if parent == nil {
   324  			break
   325  		}
   326  		layer = parent
   327  	}
   328  	return ret
   329  }
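
// A usage sketch (root and the limit are illustrative): collect at most 16
// in-memory diff layers above root, excluding the disk layer, e.g. to inspect
// how deep the current diff stack is.
//
//	for _, layer := range t.Snapshots(root, 16, true /* nodisk */) {
//		log.Debug("Diff layer", "root", layer.Root())
//	}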
   330  
   331  // Update adds a new snapshot into the tree, if it can be linked to an existing
   332  // old parent. It is disallowed to insert a disk layer (the origin of all).
   333  func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
   334  	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
   335  	// special case that can only happen for Clique networks where empty blocks
   336  	// don't modify the state (0 block subsidy).
   337  	//
   338  	// Although we could silently ignore this internally, it should be the caller's
   339  	// responsibility to avoid even attempting to insert such a snapshot.
   340  	if blockRoot == parentRoot {
   341  		return errSnapshotCycle
   342  	}
   343  	// Generate a new snapshot on top of the parent
   344  	parent := t.Snapshot(parentRoot)
   345  	if parent == nil {
   346  		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
   347  	}
   348  	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage)
   349  
   350  	// Save the new snapshot for later
   351  	t.lock.Lock()
   352  	defer t.lock.Unlock()
   353  
   354  	t.layers[snap.root] = snap
   355  	return nil
   356  }
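
// A per-block update sketch (all hashes and values are placeholders). Keys are
// hashed: self-destructed accounts get an entry in destructs, modified accounts
// map to their slim-RLP encoding, and storage maps slot hash to the new value,
// with an empty value marking a deleted slot.
//
//	destructs := map[common.Hash]struct{}{destructedHash: {}}
//	accounts := map[common.Hash][]byte{changedHash: slimAccountRLP}
//	storage := map[common.Hash]map[common.Hash][]byte{
//		changedHash: {slotHash: newValue},
//	}
//	if err := t.Update(blockRoot, parentRoot, destructs, accounts, storage); err != nil {
//		return err // parent snapshot missing, or blockRoot == parentRoot
//	}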
   357  
   358  // Cap traverses downwards the snapshot tree from a head block hash until the
   359  // number of allowed layers is crossed. All layers beyond the permitted number
   360  // are flattened downwards.
   361  //
   362  // Note, the final diff layer count in general will be one more than the amount
   363  // requested. This happens because the bottom-most diff layer is the accumulator
   364  // which may or may not overflow and cascade to disk. Since this last layer's
   365  // survival is only known *after* capping, we need to omit it from the count if
   366  // we want to ensure that *at least* the requested number of diff layers remain.
   367  func (t *Tree) Cap(root common.Hash, layers int) error {
   368  	// Retrieve the head snapshot to cap from
   369  	snap := t.Snapshot(root)
   370  	if snap == nil {
   371  		return fmt.Errorf("snapshot [%#x] missing", root)
   372  	}
   373  	diff, ok := snap.(*diffLayer)
   374  	if !ok {
   375  		return fmt.Errorf("snapshot [%#x] is disk layer", root)
   376  	}
   377  	// If the generator is still running, use a more aggressive cap
   378  	diff.origin.lock.RLock()
   379  	if diff.origin.genMarker != nil && layers > 8 {
   380  		layers = 8
   381  	}
   382  	diff.origin.lock.RUnlock()
   383  
   384  	// Run the internal capping and discard all stale layers
   385  	t.lock.Lock()
   386  	defer t.lock.Unlock()
   387  
   388  	// Flattening the bottom-most diff layer requires special casing since there's
   389  	// no child to rewire to the grandparent. In that case we can fake a temporary
   390  	// child for the capping and then remove it.
   391  	if layers == 0 {
   392  		// If full commit was requested, flatten the diffs and merge onto disk
   393  		diff.lock.RLock()
   394  		base := diffToDisk(diff.flatten().(*diffLayer))
   395  		diff.lock.RUnlock()
   396  
   397  		// Replace the entire snapshot tree with the flat base
   398  		t.layers = map[common.Hash]snapshot{base.root: base}
   399  		return nil
   400  	}
   401  	persisted := t.cap(diff, layers)
   402  
   403  	// Remove any layer that is stale or links into a stale layer
   404  	children := make(map[common.Hash][]common.Hash)
   405  	for root, snap := range t.layers {
   406  		if diff, ok := snap.(*diffLayer); ok {
   407  			parent := diff.parent.Root()
   408  			children[parent] = append(children[parent], root)
   409  		}
   410  	}
   411  	var remove func(root common.Hash)
   412  	remove = func(root common.Hash) {
   413  		delete(t.layers, root)
   414  		for _, child := range children[root] {
   415  			remove(child)
   416  		}
   417  		delete(children, root)
   418  	}
   419  	for root, snap := range t.layers {
   420  		if snap.Stale() {
   421  			remove(root)
   422  		}
   423  	}
   424  	// If the disk layer was modified, regenerate all the cumulative blooms
   425  	if persisted != nil {
   426  		var rebloom func(root common.Hash)
   427  		rebloom = func(root common.Hash) {
   428  			if diff, ok := t.layers[root].(*diffLayer); ok {
   429  				diff.rebloom(persisted)
   430  			}
   431  			for _, child := range children[root] {
   432  				rebloom(child)
   433  			}
   434  		}
   435  		rebloom(persisted.root)
   436  	}
   437  	return nil
   438  }
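
// A worked example of the note above (headRoot is a placeholder): asking for 128
// layers after importing a block may leave up to 129 diffs, because the
// bottom-most accumulator is only persisted once it outgrows
// aggregatorMemoryLimit, or once a running generator forces the merge.
//
//	if err := t.Cap(headRoot, 128); err != nil {
//		log.Error("Failed to cap snapshot tree", "root", headRoot, "err", err)
//	}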
   439  
   440  // cap traverses downwards the diff tree until the number of allowed layers is
   441  // crossed. All diffs beyond the permitted number are flattened downwards. If the
   442  // layer limit is reached, the memory cap is also enforced (but not before).
   443  //
   444  // The method returns the new disk layer if diffs were persisted into it.
   445  //
   446  // Note, the final diff layer count in general will be one more than the amount
   447  // requested. This happens because the bottom-most diff layer is the accumulator
   448  // which may or may not overflow and cascade to disk. Since this last layer's
   449  // survival is only known *after* capping, we need to omit it from the count if
   450  // we want to ensure that *at least* the requested number of diff layers remain.
   451  func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
   452  	// Dive until we run out of layers or reach the persistent database
   453  	for i := 0; i < layers-1; i++ {
   454  		// If we still have diff layers below, continue down
   455  		if parent, ok := diff.parent.(*diffLayer); ok {
   456  			diff = parent
   457  		} else {
   458  			// Diff stack too shallow, return without modifications
   459  			return nil
   460  		}
   461  	}
   462  	// We're out of layers, flatten anything below, stopping if it's the disk or if
   463  	// the memory limit is not yet exceeded.
   464  	switch parent := diff.parent.(type) {
   465  	case *diskLayer:
   466  		return nil
   467  
   468  	case *diffLayer:
   469  		// Hold the write lock until the flattened parent is linked correctly.
   470  		// Otherwise, the stale layer may be accessed by external reads in the
   471  		// meantime.
   472  		diff.lock.Lock()
   473  		defer diff.lock.Unlock()
   474  
   475  		// Flatten the parent into the grandparent. The flattening internally obtains a
   476  		// write lock on grandparent.
   477  		flattened := parent.flatten().(*diffLayer)
   478  		t.layers[flattened.root] = flattened
   479  
   480  		// Invoke the hook if it's registered. Ugly hack.
   481  		if t.onFlatten != nil {
   482  			t.onFlatten()
   483  		}
   484  		diff.parent = flattened
   485  		if flattened.memory < aggregatorMemoryLimit {
   486  			// Accumulator layer is smaller than the limit, so we can abort, unless
   487  			// there's a snapshot being generated currently. In that case, the trie
   488  			// will move from underneath the generator so we **must** merge all the
   489  			// partial data down into the snapshot and restart the generation.
   490  			if flattened.parent.(*diskLayer).genAbort == nil {
   491  				return nil
   492  			}
   493  		}
   494  	default:
   495  		panic(fmt.Sprintf("unknown data layer: %T", parent))
   496  	}
   497  	// If the bottom-most layer is larger than our memory cap, persist to disk
   498  	bottom := diff.parent.(*diffLayer)
   499  
   500  	bottom.lock.RLock()
   501  	base := diffToDisk(bottom)
   502  	bottom.lock.RUnlock()
   503  
   504  	t.layers[base.root] = base
   505  	diff.parent = base
   506  	return base
   507  }
   508  
   509  // diffToDisk merges a bottom-most diff into the persistent disk layer underneath
   510  // it. The method will panic if called onto a non-bottom-most diff layer.
   511  //
   512  // The disk layer persistence should be operated in an atomic way. All updates should
   513  // be discarded if the whole transition is not finished.
   514  func diffToDisk(bottom *diffLayer) *diskLayer {
   515  	var (
   516  		base  = bottom.parent.(*diskLayer)
   517  		batch = base.diskdb.NewBatch()
   518  		stats *generatorStats
   519  	)
   520  	// If the disk layer is running a snapshot generator, abort it
   521  	if base.genAbort != nil {
   522  		abort := make(chan *generatorStats)
   523  		base.genAbort <- abort
   524  		stats = <-abort
   525  	}
   526  	// Put the deletion in the batch writer, flush all updates in the final step.
   527  	rawdb.DeleteSnapshotRoot(batch)
   528  
   529  	// Mark the original base as stale as we're going to create a new wrapper
   530  	base.lock.Lock()
   531  	if base.stale {
   532  		panic("parent disk layer is stale") // we've committed into the same base from two children, boo
   533  	}
   534  	base.stale = true
   535  	base.lock.Unlock()
   536  
   537  	// Destroy all the destructed accounts from the database
   538  	for hash := range bottom.destructSet {
   539  		// Skip any account not covered yet by the snapshot
   540  		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
   541  			continue
   542  		}
   543  		// Remove all storage slots
   544  		rawdb.DeleteAccountSnapshot(batch, hash)
   545  		base.cache.Set(hash[:], nil)
   546  
   547  		it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
   548  		for it.Next() {
   549  			key := it.Key()
   550  			batch.Delete(key)
   551  			base.cache.Del(key[1:])
   552  			snapshotFlushStorageItemMeter.Mark(1)
   553  
   554  			// Ensure we don't delete too much data blindly (contract can be
   555  			// huge). It's ok to flush, the root will go missing in case of a
   556  			// crash and we'll detect and regenerate the snapshot.
   557  			if batch.ValueSize() > ethdb.IdealBatchSize {
   558  				if err := batch.Write(); err != nil {
   559  					log.Crit("Failed to write storage deletions", "err", err)
   560  				}
   561  				batch.Reset()
   562  			}
   563  		}
   564  		it.Release()
   565  	}
   566  	// Push all updated accounts into the database
   567  	for hash, data := range bottom.accountData {
   568  		// Skip any account not covered yet by the snapshot
   569  		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
   570  			continue
   571  		}
   572  		// Push the account to disk
   573  		rawdb.WriteAccountSnapshot(batch, hash, data)
   574  		base.cache.Set(hash[:], data)
   575  		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
   576  
   577  		snapshotFlushAccountItemMeter.Mark(1)
   578  		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
   579  
   580  		// Ensure we don't write too much data blindly. It's ok to flush, the
   581  		// root will go missing in case of a crash and we'll detect and regen
   582  		// the snapshot.
   583  		if batch.ValueSize() > ethdb.IdealBatchSize {
   584  			if err := batch.Write(); err != nil {
   585  				log.Crit("Failed to write account snapshot data", "err", err)
   586  			}
   587  			batch.Reset()
   588  		}
   589  	}
   590  	// Push all the storage slots into the database
   591  	for accountHash, storage := range bottom.storageData {
   592  		// Skip any account not covered yet by the snapshot
   593  		if base.genMarker != nil && bytes.Compare(accountHash[:], base.genMarker) > 0 {
   594  			continue
   595  		}
   596  		// Generation might be mid-account, track that case too
   597  		midAccount := base.genMarker != nil && bytes.Equal(accountHash[:], base.genMarker[:common.HashLength])
   598  
   599  		for storageHash, data := range storage {
   600  			// Skip any slot not covered yet by the snapshot
   601  			if midAccount && bytes.Compare(storageHash[:], base.genMarker[common.HashLength:]) > 0 {
   602  				continue
   603  			}
   604  			if len(data) > 0 {
   605  				rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
   606  				base.cache.Set(append(accountHash[:], storageHash[:]...), data)
   607  				snapshotCleanStorageWriteMeter.Mark(int64(len(data)))
   608  			} else {
   609  				rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
   610  				base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
   611  			}
   612  			snapshotFlushStorageItemMeter.Mark(1)
   613  			snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
   614  		}
   615  	}
   616  	// Update the snapshot block marker and write any remainder data
   617  	rawdb.WriteSnapshotRoot(batch, bottom.root)
   618  
   619  	// Write out the generator progress marker and report
   620  	journalProgress(batch, base.genMarker, stats)
   621  
   622  	// Flush all the updates in a single db operation. Ensure the
   623  	// disk layer transition is atomic.
   624  	if err := batch.Write(); err != nil {
   625  		log.Crit("Failed to write leftover snapshot", "err", err)
   626  	}
   627  	log.Debug("Journalled disk layer", "root", bottom.root, "complete", base.genMarker == nil)
   628  	res := &diskLayer{
   629  		root:       bottom.root,
   630  		cache:      base.cache,
   631  		diskdb:     base.diskdb,
   632  		triedb:     base.triedb,
   633  		genMarker:  base.genMarker,
   634  		genPending: base.genPending,
   635  	}
   636  	// If snapshot generation hasn't finished yet, port over all the stats and
   637  	// continue where the previous round left off.
   638  	//
   639  	// Note, the `base.genAbort` comparison is not used normally, it's checked
   640  	// to allow the tests to play with the marker without triggering this path.
   641  	if base.genMarker != nil && base.genAbort != nil {
   642  		res.genMarker = base.genMarker
   643  		res.genAbort = make(chan chan *generatorStats)
   644  		go res.generate(stats)
   645  	}
   646  	return res
   647  }
   648  
   649  // Journal commits an entire diff hierarchy to disk into a single journal entry.
   650  // This is meant to be used during shutdown to persist the snapshot without
   651  // flattening everything down (bad for reorgs).
   652  //
   653  // The method returns the root hash of the base layer that needs to be persisted
   654  // to disk as a trie too to allow continuing any pending generation op.
   655  func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
   656  	// Retrieve the head snapshot to journal from
   657  	snap := t.Snapshot(root)
   658  	if snap == nil {
   659  		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
   660  	}
   661  	// Run the journaling
   662  	t.lock.Lock()
   663  	defer t.lock.Unlock()
   664  
   665  	// Firstly, write out the metadata of the journal
   666  	journal := new(bytes.Buffer)
   667  	if err := rlp.Encode(journal, journalVersion); err != nil {
   668  		return common.Hash{}, err
   669  	}
   670  	diskroot := t.diskRoot()
   671  	if diskroot == (common.Hash{}) {
   672  		return common.Hash{}, errors.New("invalid disk root")
   673  	}
   674  	// Secondly, write out the disk layer root to ensure the
   675  	// diff journal is continuous with the disk layer.
   676  	if err := rlp.Encode(journal, diskroot); err != nil {
   677  		return common.Hash{}, err
   678  	}
   679  	// Finally write out the journal of each layer in reverse order.
   680  	base, err := snap.(snapshot).Journal(journal)
   681  	if err != nil {
   682  		return common.Hash{}, err
   683  	}
   684  	// Store the journal into the database and return
   685  	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
   686  	return base, nil
   687  }
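
// A shutdown sketch (headRoot is a placeholder): journal the diff hierarchy for
// the current head and keep the returned base root, since the trie for that root
// must also be persisted for any pending generation to resume after a restart.
//
//	base, err := t.Journal(headRoot)
//	if err != nil {
//		log.Error("Failed to journal state snapshot", "err", err)
//	} else {
//		log.Info("Journalled state snapshot", "base", base)
//	}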
   688  
   689  // Rebuild wipes all available snapshot data from the persistent database and
   690  // discards all caches and diff layers. Afterwards, it starts a new snapshot
   691  // generator with the given root hash.
   692  func (t *Tree) Rebuild(root common.Hash) {
   693  	t.lock.Lock()
   694  	defer t.lock.Unlock()
   695  
   696  	// Firstly, delete any recovery flag in the database, since we are now
   697  	// building a brand new snapshot. Also re-enable the snapshot feature.
   698  	rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
   699  	rawdb.DeleteSnapshotDisabled(t.diskdb)
   700  
   701  	// Iterate over and mark all layers stale
   702  	for _, layer := range t.layers {
   703  		switch layer := layer.(type) {
   704  		case *diskLayer:
   705  			// If the base layer is generating, abort it
   706  			if layer.genAbort != nil {
   707  				abort := make(chan *generatorStats)
   708  				layer.genAbort <- abort
   709  				<-abort
   710  			}
   711  			// Layer should be inactive now, mark it as stale
   712  			layer.lock.Lock()
   713  			layer.stale = true
   714  			layer.lock.Unlock()
   715  
   716  		case *diffLayer:
   717  			// If the layer is a simple diff, simply mark as stale
   718  			layer.lock.Lock()
   719  			atomic.StoreUint32(&layer.stale, 1)
   720  			layer.lock.Unlock()
   721  
   722  		default:
   723  			panic(fmt.Sprintf("unknown layer type: %T", layer))
   724  		}
   725  	}
   726  	// Start generating a new snapshot from scratch on a background thread. The
   727  	// generator will run a wiper first if there's not one running right now.
   728  	log.Info("Rebuilding state snapshot")
   729  	t.layers = map[common.Hash]snapshot{
   730  		root: generateSnapshot(t.diskdb, t.triedb, t.cache, root),
   731  	}
   732  }
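
// Disable and Rebuild act as a pair; a minimal sketch (newRoot is a placeholder):
//
//	t.Disable()        // drop every layer and persist the disabled marker
//	// ... state is rewritten outside of the snapshot, e.g. during sync ...
//	t.Rebuild(newRoot) // wipe stale snapshot data and regenerate from the trie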
   733  
   734  // AccountIterator creates a new account iterator for the specified root hash and
   735  // seeks to a starting account hash.
   736  func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
   737  	ok, err := t.generating()
   738  	if err != nil {
   739  		return nil, err
   740  	}
   741  	if ok {
   742  		return nil, ErrNotConstructed
   743  	}
   744  	return newFastAccountIterator(t, root, seek)
   745  }
   746  
   747  // StorageIterator creates a new storage iterator for the specified root hash and
   748  // account. The iterator will be moved to the specific start position.
   749  func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
   750  	ok, err := t.generating()
   751  	if err != nil {
   752  		return nil, err
   753  	}
   754  	if ok {
   755  		return nil, ErrNotConstructed
   756  	}
   757  	return newFastStorageIterator(t, root, account, seek)
   758  }
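
// An iteration sketch over both iterators (root is a placeholder; generation
// must have finished, otherwise ErrNotConstructed is returned):
//
//	accIt, err := t.AccountIterator(root, common.Hash{})
//	if err != nil {
//		return err
//	}
//	defer accIt.Release()
//	for accIt.Next() {
//		stIt, err := t.StorageIterator(root, accIt.Hash(), common.Hash{})
//		if err != nil {
//			return err
//		}
//		for stIt.Next() {
//			_ = stIt.Slot() // raw slot value under accIt.Hash()
//		}
//		stIt.Release()
//	}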
   759  
   760  // Verify iterates the whole state (all the accounts as well as the corresponding storages)
   761  // with the specific root and compares the re-computed hash with the original one.
   762  func (t *Tree) Verify(root common.Hash) error {
   763  	acctIt, err := t.AccountIterator(root, common.Hash{})
   764  	if err != nil {
   765  		return err
   766  	}
   767  	defer acctIt.Release()
   768  
   769  	got, err := generateTrieRoot(nil, acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
   770  		storageIt, err := t.StorageIterator(root, accountHash, common.Hash{})
   771  		if err != nil {
   772  			return common.Hash{}, err
   773  		}
   774  		defer storageIt.Release()
   775  
   776  		hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
   777  		if err != nil {
   778  			return common.Hash{}, err
   779  		}
   780  		return hash, nil
   781  	}, newGenerateStats(), true)
   782  
   783  	if err != nil {
   784  		return err
   785  	}
   786  	if got != root {
   787  		return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root)
   788  	}
   789  	return nil
   790  }
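
// Verify walks the entire state and is therefore expensive; a minimal sketch
// (headRoot is a placeholder), typically only useful in tests or for debugging:
//
//	if err := t.Verify(headRoot); err != nil {
//		log.Error("State snapshot verification failed", "root", headRoot, "err", err)
//	}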
   791  
   792  // disklayer is an internal helper function to return the disk layer.
   793  // The lock of snapTree is assumed to be held already.
   794  func (t *Tree) disklayer() *diskLayer {
   795  	var snap snapshot
   796  	for _, s := range t.layers {
   797  		snap = s
   798  		break
   799  	}
   800  	if snap == nil {
   801  		return nil
   802  	}
   803  	switch layer := snap.(type) {
   804  	case *diskLayer:
   805  		return layer
   806  	case *diffLayer:
   807  		return layer.origin
   808  	default:
   809  		panic(fmt.Sprintf("%T: undefined layer", snap))
   810  	}
   811  }
   812  
   813  // diskRoot is an internal helper function to return the disk layer root.
   814  // The lock of snapTree is assumed to be held already.
   815  func (t *Tree) diskRoot() common.Hash {
   816  	disklayer := t.disklayer()
   817  	if disklayer == nil {
   818  		return common.Hash{}
   819  	}
   820  	return disklayer.Root()
   821  }
   822  
   823  // generating is an internal helper function which reports whether the snapshot
   824  // is still under construction.
   825  func (t *Tree) generating() (bool, error) {
   826  	t.lock.Lock()
   827  	defer t.lock.Unlock()
   828  
   829  	layer := t.disklayer()
   830  	if layer == nil {
   831  		return false, errors.New("disk layer is missing")
   832  	}
   833  	layer.lock.RLock()
   834  	defer layer.lock.RUnlock()
   835  	return layer.genMarker != nil, nil
   836  }
   837  
   838  // DiskRoot is an external helper function to return the disk layer root.
   839  func (t *Tree) DiskRoot() common.Hash {
   840  	t.lock.Lock()
   841  	defer t.lock.Unlock()
   842  
   843  	return t.diskRoot()
   844  }