github.com/aidoskuneen/adk-node@v0.0.0-20220315131952-2e32567cb7f4/core/state/snapshot/snapshot.go

     1  // Copyright 2021 The adkgo Authors
     2  // This file is part of the adkgo library (adapted for adkgo from go-ethereum v1.10.8).
     3  //
     4  // the adkgo library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // the adkgo library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the adkgo library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package snapshot implements a journalled, dynamic state dump.
    18  package snapshot
    19  
    20  import (
    21  	"bytes"
    22  	"errors"
    23  	"fmt"
    24  	"sync"
    25  	"sync/atomic"
    26  
    27  	"github.com/aidoskuneen/adk-node/common"
    28  	"github.com/aidoskuneen/adk-node/core/rawdb"
    29  	"github.com/aidoskuneen/adk-node/ethdb"
    30  	"github.com/aidoskuneen/adk-node/log"
    31  	"github.com/aidoskuneen/adk-node/metrics"
    32  	"github.com/aidoskuneen/adk-node/rlp"
    33  	"github.com/aidoskuneen/adk-node/trie"
    34  )
    35  
    36  var (
    37  	snapshotCleanAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil)
    38  	snapshotCleanAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil)
    39  	snapshotCleanAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil)
    40  	snapshotCleanAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil)
    41  	snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil)
    42  
    43  	snapshotCleanStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil)
    44  	snapshotCleanStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil)
    45  	snapshotCleanStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil)
    46  	snapshotCleanStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil)
    47  	snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil)
    48  
    49  	snapshotDirtyAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil)
    50  	snapshotDirtyAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil)
    51  	snapshotDirtyAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil)
    52  	snapshotDirtyAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
    53  	snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)
    54  
    55  	snapshotDirtyStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
    56  	snapshotDirtyStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
    57  	snapshotDirtyStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
    58  	snapshotDirtyStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil)
    59  	snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil)
    60  
    61  	snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
    62  	snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
    63  
    64  	snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil)
    65  	snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil)
    66  	snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil)
    67  	snapshotFlushStorageSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/size", nil)
    68  
    69  	snapshotBloomIndexTimer = metrics.NewRegisteredResettingTimer("state/snapshot/bloom/index", nil)
    70  	snapshotBloomErrorGauge = metrics.NewRegisteredGaugeFloat64("state/snapshot/bloom/error", nil)
    71  
    72  	snapshotBloomAccountTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/account/truehit", nil)
    73  	snapshotBloomAccountFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/falsehit", nil)
    74  	snapshotBloomAccountMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/account/miss", nil)
    75  
    76  	snapshotBloomStorageTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/truehit", nil)
    77  	snapshotBloomStorageFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/falsehit", nil)
    78  	snapshotBloomStorageMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/miss", nil)
    79  
    80  	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
     81  	// layer has been invalidated due to the chain progressing forward far enough
    82  	// to not maintain the layer's original state.
    83  	ErrSnapshotStale = errors.New("snapshot stale")
    84  
    85  	// ErrNotCoveredYet is returned from data accessors if the underlying snapshot
    86  	// is being generated currently and the requested data item is not yet in the
    87  	// range of accounts covered.
    88  	ErrNotCoveredYet = errors.New("not covered yet")
    89  
     90  	// ErrNotConstructed is returned if a caller wants to iterate the snapshot
     91  	// while generation is not yet finished.
    92  	ErrNotConstructed = errors.New("snapshot is not constructed")
    93  
    94  	// errSnapshotCycle is returned if a snapshot is attempted to be inserted
    95  	// that forms a cycle in the snapshot tree.
    96  	errSnapshotCycle = errors.New("snapshot cycle")
    97  )
    98  
    99  // Snapshot represents the functionality supported by a snapshot storage layer.
   100  type Snapshot interface {
   101  	// Root returns the root hash for which this snapshot was made.
   102  	Root() common.Hash
   103  
   104  	// Account directly retrieves the account associated with a particular hash in
   105  	// the snapshot slim data format.
   106  	Account(hash common.Hash) (*Account, error)
   107  
   108  	// AccountRLP directly retrieves the account RLP associated with a particular
   109  	// hash in the snapshot slim data format.
   110  	AccountRLP(hash common.Hash) ([]byte, error)
   111  
   112  	// Storage directly retrieves the storage data associated with a particular hash,
   113  	// within a particular account.
   114  	Storage(accountHash, storageHash common.Hash) ([]byte, error)
   115  }
   116  
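// An illustrative, non-normative sketch of reading through the Snapshot
// interface; `snap`, `accountHash` and `storageHash` are assumed to be
// supplied by the caller:
//
//	acc, err := snap.Account(accountHash) // account in the slim format
//	if err != nil {
//		// ErrSnapshotStale or ErrNotCoveredYet may surface here.
//	}
//	slot, _ := snap.Storage(accountHash, storageHash) // raw slot bytes
//	_, _ = acc, slot
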
   117  // snapshot is the internal version of the snapshot data layer that supports some
   118  // additional methods compared to the public API.
   119  type snapshot interface {
   120  	Snapshot
   121  
    122  	// Parent returns the parent layer directly below this snapshot, or nil if the
    123  	// base was reached.
   124  	//
   125  	// Note, the method is an internal helper to avoid type switching between the
   126  	// disk and diff layers. There is no locking involved.
   127  	Parent() snapshot
   128  
   129  	// Update creates a new layer on top of the existing snapshot diff tree with
   130  	// the specified data items.
   131  	//
   132  	// Note, the maps are retained by the method to avoid copying everything.
   133  	Update(blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
   134  
   135  	// Journal commits an entire diff hierarchy to disk into a single journal entry.
   136  	// This is meant to be used during shutdown to persist the snapshot without
   137  	// flattening everything down (bad for reorgs).
   138  	Journal(buffer *bytes.Buffer) (common.Hash, error)
   139  
    140  	// Stale reports whether this layer has become stale (was flattened across) or
    141  	// whether it is still live.
   142  	Stale() bool
   143  
   144  	// AccountIterator creates an account iterator over an arbitrary layer.
   145  	AccountIterator(seek common.Hash) AccountIterator
   146  
   147  	// StorageIterator creates a storage iterator over an arbitrary layer.
   148  	StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
   149  }
   150  
   151  // Tree is an Ethereum state snapshot tree. It consists of one persistent base
    152  // layer backed by a key-value store, on top of which arbitrarily many in-memory
    153  // diff layers are stacked. The memory diffs can form a tree with branching, but
    154  // the disk layer is a singleton common to all. If a reorg goes deeper than the
   155  // disk layer, everything needs to be deleted.
   156  //
   157  // The goal of a state snapshot is twofold: to allow direct access to account and
   158  // storage data to avoid expensive multi-level trie lookups; and to allow sorted,
   159  // cheap iteration of the account/storage tries for sync aid.
   160  type Tree struct {
   161  	diskdb ethdb.KeyValueStore      // Persistent database to store the snapshot
   162  	triedb *trie.Database           // In-memory cache to access the trie through
   163  	cache  int                      // Megabytes permitted to use for read caches
   164  	layers map[common.Hash]snapshot // Collection of all known layers
   165  	lock   sync.RWMutex
   166  }
   167  
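// For intuition only, the layer collection usually forms a shape like the
// sketch below (the roots are placeholders): a single disk layer at the
// bottom, with diff layers chained above it via their parent pointers and
// possibly branching after mini reorgs:
//
//	diskLayer(rootD) <- diffLayer(root1) <- diffLayer(root2a)
//	                                     <- diffLayer(root2b)
//
// Every one of these layers is indexed in t.layers by its root hash.
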
   168  // New attempts to load an already existing snapshot from a persistent key-value
   169  // store (with a number of memory layers from a journal), ensuring that the head
   170  // of the snapshot matches the expected one.
   171  //
   172  // If the snapshot is missing or the disk layer is broken, the snapshot will be
   173  // reconstructed using both the existing data and the state trie.
   174  // The repair happens on a background thread.
   175  //
   176  // If the memory layers in the journal do not match the disk layer (e.g. there is
   177  // a gap) or the journal is missing, there are two repair cases:
   178  //
   179  // - if the 'recovery' parameter is true, all memory diff-layers will be discarded.
   180  //   This case happens when the snapshot is 'ahead' of the state trie.
   181  // - otherwise, the entire snapshot is considered invalid and will be recreated on
   182  //   a background thread.
   183  func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, async bool, rebuild bool, recovery bool) (*Tree, error) {
   184  	// Create a new, empty snapshot tree
   185  	snap := &Tree{
   186  		diskdb: diskdb,
   187  		triedb: triedb,
   188  		cache:  cache,
   189  		layers: make(map[common.Hash]snapshot),
   190  	}
   191  	if !async {
   192  		defer snap.waitBuild()
   193  	}
   194  	// Attempt to load a previously persisted snapshot and rebuild one if failed
   195  	head, disabled, err := loadSnapshot(diskdb, triedb, cache, root, recovery)
   196  	if disabled {
   197  		log.Warn("Snapshot maintenance disabled (syncing)")
   198  		return snap, nil
   199  	}
   200  	if err != nil {
   201  		if rebuild {
   202  			log.Warn("Failed to load snapshot, regenerating", "err", err)
   203  			snap.Rebuild(root)
   204  			return snap, nil
   205  		}
   206  		return nil, err // Bail out the error, don't rebuild automatically.
   207  	}
   208  	// Existing snapshot loaded, seed all the layers
   209  	for head != nil {
   210  		snap.layers[head.Root()] = head
   211  		head = head.Parent()
   212  	}
   213  	return snap, nil
   214  }
   215  
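// A hedged usage sketch of New; the parameter values below are illustrative
// only. With async set to false, the call blocks until any regeneration has
// finished:
//
//	snaps, err := New(diskdb, triedb, 256, headRoot, false, true, false)
//	if err != nil {
//		// No snapshot could be loaded and rebuilding was not requested.
//	}
//	_ = snaps
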
   216  // waitBuild blocks until the snapshot finishes rebuilding. This method is meant
   217  // to be used by tests to ensure we're testing what we believe we are.
   218  func (t *Tree) waitBuild() {
   219  	// Find the rebuild termination channel
   220  	var done chan struct{}
   221  
   222  	t.lock.RLock()
   223  	for _, layer := range t.layers {
   224  		if layer, ok := layer.(*diskLayer); ok {
   225  			done = layer.genPending
   226  			break
   227  		}
   228  	}
   229  	t.lock.RUnlock()
   230  
   231  	// Wait until the snapshot is generated
   232  	if done != nil {
   233  		<-done
   234  	}
   235  }
   236  
   237  // Disable interrupts any pending snapshot generator, deletes all the snapshot
   238  // layers in memory and marks snapshots disabled globally. In order to resume
   239  // the snapshot functionality, the caller must invoke Rebuild.
   240  func (t *Tree) Disable() {
   241  	// Interrupt any live snapshot layers
   242  	t.lock.Lock()
   243  	defer t.lock.Unlock()
   244  
   245  	for _, layer := range t.layers {
   246  		switch layer := layer.(type) {
   247  		case *diskLayer:
   248  			// If the base layer is generating, abort it
   249  			if layer.genAbort != nil {
   250  				abort := make(chan *generatorStats)
   251  				layer.genAbort <- abort
   252  				<-abort
   253  			}
   254  			// Layer should be inactive now, mark it as stale
   255  			layer.lock.Lock()
   256  			layer.stale = true
   257  			layer.lock.Unlock()
   258  
   259  		case *diffLayer:
   260  			// If the layer is a simple diff, simply mark as stale
   261  			layer.lock.Lock()
   262  			atomic.StoreUint32(&layer.stale, 1)
   263  			layer.lock.Unlock()
   264  
   265  		default:
   266  			panic(fmt.Sprintf("unknown layer type: %T", layer))
   267  		}
   268  	}
   269  	t.layers = map[common.Hash]snapshot{}
   270  
   271  	// Delete all snapshot liveness information from the database
   272  	batch := t.diskdb.NewBatch()
   273  
   274  	rawdb.WriteSnapshotDisabled(batch)
   275  	rawdb.DeleteSnapshotRoot(batch)
   276  	rawdb.DeleteSnapshotJournal(batch)
   277  	rawdb.DeleteSnapshotGenerator(batch)
   278  	rawdb.DeleteSnapshotRecoveryNumber(batch)
   279  	// Note, we don't delete the sync progress
   280  
   281  	if err := batch.Write(); err != nil {
   282  		log.Crit("Failed to disable snapshots", "err", err)
   283  	}
   284  }
   285  
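// A minimal sketch of the pause/resume cycle described above; snapshots stay
// disabled until Rebuild is invoked with a new head root (newRoot is a
// placeholder):
//
//	snaps.Disable() // e.g. while the node is still syncing
//	// ... later, once the state trie for newRoot is available ...
//	snaps.Rebuild(newRoot)
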
   286  // Snapshot retrieves a snapshot belonging to the given block root, or nil if no
   287  // snapshot is maintained for that block.
   288  func (t *Tree) Snapshot(blockRoot common.Hash) Snapshot {
   289  	t.lock.RLock()
   290  	defer t.lock.RUnlock()
   291  
   292  	return t.layers[blockRoot]
   293  }
   294  
    295  // Snapshots returns the layers visited when traversing downward from the
    296  // topmost layer with the specific root. The number of layers returned is
    297  // capped by the given limit. If nodisk is set, the disk layer is excluded.
   298  func (t *Tree) Snapshots(root common.Hash, limits int, nodisk bool) []Snapshot {
   299  	t.lock.RLock()
   300  	defer t.lock.RUnlock()
   301  
   302  	if limits == 0 {
   303  		return nil
   304  	}
   305  	layer := t.layers[root]
   306  	if layer == nil {
   307  		return nil
   308  	}
   309  	var ret []Snapshot
   310  	for {
   311  		if _, isdisk := layer.(*diskLayer); isdisk && nodisk {
   312  			break
   313  		}
   314  		ret = append(ret, layer)
   315  		limits -= 1
   316  		if limits == 0 {
   317  			break
   318  		}
   319  		parent := layer.Parent()
   320  		if parent == nil {
   321  			break
   322  		}
   323  		layer = parent
   324  	}
   325  	return ret
   326  }
   327  
   328  // Update adds a new snapshot into the tree, if that can be linked to an existing
   329  // old parent. It is disallowed to insert a disk layer (the origin of all).
   330  func (t *Tree) Update(blockRoot common.Hash, parentRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
   331  	// Reject noop updates to avoid self-loops in the snapshot tree. This is a
   332  	// special case that can only happen for Clique networks where empty blocks
   333  	// don't modify the state (0 block subsidy).
   334  	//
   335  	// Although we could silently ignore this internally, it should be the caller's
   336  	// responsibility to avoid even attempting to insert such a snapshot.
   337  	if blockRoot == parentRoot {
   338  		return errSnapshotCycle
   339  	}
   340  	// Generate a new snapshot on top of the parent
   341  	parent := t.Snapshot(parentRoot)
   342  	if parent == nil {
   343  		return fmt.Errorf("parent [%#x] snapshot missing", parentRoot)
   344  	}
   345  	snap := parent.(snapshot).Update(blockRoot, destructs, accounts, storage)
   346  
   347  	// Save the new snapshot for later
   348  	t.lock.Lock()
   349  	defer t.lock.Unlock()
   350  
   351  	t.layers[snap.root] = snap
   352  	return nil
   353  }
   354  
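// An illustrative sketch of feeding one block's state changes into the tree;
// all hashes and values below are placeholders:
//
//	destructs := map[common.Hash]struct{}{deletedAccountHash: {}}
//	accounts := map[common.Hash][]byte{updatedAccountHash: slimAccountRLP}
//	storage := map[common.Hash]map[common.Hash][]byte{
//		updatedAccountHash: {slotHash: slotValue},
//	}
//	if err := snaps.Update(childRoot, parentRoot, destructs, accounts, storage); err != nil {
//		// Either errSnapshotCycle or the parent snapshot is missing.
//	}
//
// Since the maps are retained by the new diff layer, the caller must not
// mutate or reuse them afterwards.
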
    355  // Cap traverses the snapshot tree downwards from a head block hash until the
    356  // number of allowed layers is crossed. All layers beyond the permitted number
   357  // are flattened downwards.
   358  //
   359  // Note, the final diff layer count in general will be one more than the amount
   360  // requested. This happens because the bottom-most diff layer is the accumulator
   361  // which may or may not overflow and cascade to disk. Since this last layer's
   362  // survival is only known *after* capping, we need to omit it from the count if
   363  // we want to ensure that *at least* the requested number of diff layers remain.
   364  func (t *Tree) Cap(root common.Hash, layers int) error {
   365  	// Retrieve the head snapshot to cap from
   366  	snap := t.Snapshot(root)
   367  	if snap == nil {
   368  		return fmt.Errorf("snapshot [%#x] missing", root)
   369  	}
   370  	diff, ok := snap.(*diffLayer)
   371  	if !ok {
   372  		return fmt.Errorf("snapshot [%#x] is disk layer", root)
   373  	}
   374  	// If the generator is still running, use a more aggressive cap
   375  	diff.origin.lock.RLock()
   376  	if diff.origin.genMarker != nil && layers > 8 {
   377  		layers = 8
   378  	}
   379  	diff.origin.lock.RUnlock()
   380  
   381  	// Run the internal capping and discard all stale layers
   382  	t.lock.Lock()
   383  	defer t.lock.Unlock()
   384  
   385  	// Flattening the bottom-most diff layer requires special casing since there's
   386  	// no child to rewire to the grandparent. In that case we can fake a temporary
   387  	// child for the capping and then remove it.
   388  	if layers == 0 {
   389  		// If full commit was requested, flatten the diffs and merge onto disk
   390  		diff.lock.RLock()
   391  		base := diffToDisk(diff.flatten().(*diffLayer))
   392  		diff.lock.RUnlock()
   393  
   394  		// Replace the entire snapshot tree with the flat base
   395  		t.layers = map[common.Hash]snapshot{base.root: base}
   396  		return nil
   397  	}
   398  	persisted := t.cap(diff, layers)
   399  
   400  	// Remove any layer that is stale or links into a stale layer
   401  	children := make(map[common.Hash][]common.Hash)
   402  	for root, snap := range t.layers {
   403  		if diff, ok := snap.(*diffLayer); ok {
   404  			parent := diff.parent.Root()
   405  			children[parent] = append(children[parent], root)
   406  		}
   407  	}
   408  	var remove func(root common.Hash)
   409  	remove = func(root common.Hash) {
   410  		delete(t.layers, root)
   411  		for _, child := range children[root] {
   412  			remove(child)
   413  		}
   414  		delete(children, root)
   415  	}
   416  	for root, snap := range t.layers {
   417  		if snap.Stale() {
   418  			remove(root)
   419  		}
   420  	}
   421  	// If the disk layer was modified, regenerate all the cumulative blooms
   422  	if persisted != nil {
   423  		var rebloom func(root common.Hash)
   424  		rebloom = func(root common.Hash) {
   425  			if diff, ok := t.layers[root].(*diffLayer); ok {
   426  				diff.rebloom(persisted)
   427  			}
   428  			for _, child := range children[root] {
   429  				rebloom(child)
   430  			}
   431  		}
   432  		rebloom(persisted.root)
   433  	}
   434  	return nil
   435  }
   436  
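// A worked example of the note above (the numbers are illustrative): after
//
//	_ = snaps.Cap(headRoot, 128)
//
// up to 128 diff layers plus the bottom-most accumulator layer may remain in
// memory, i.e. 129 diff layers in total, because the accumulator only cascades
// into the disk layer once it outgrows the aggregator memory limit.
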
    437  // cap traverses the diff tree downwards until the number of allowed layers is
    438  // crossed. All diffs beyond the permitted number are flattened downwards. If the
   439  // layer limit is reached, memory cap is also enforced (but not before).
   440  //
   441  // The method returns the new disk layer if diffs were persisted into it.
   442  //
   443  // Note, the final diff layer count in general will be one more than the amount
   444  // requested. This happens because the bottom-most diff layer is the accumulator
   445  // which may or may not overflow and cascade to disk. Since this last layer's
   446  // survival is only known *after* capping, we need to omit it from the count if
   447  // we want to ensure that *at least* the requested number of diff layers remain.
   448  func (t *Tree) cap(diff *diffLayer, layers int) *diskLayer {
   449  	// Dive until we run out of layers or reach the persistent database
   450  	for i := 0; i < layers-1; i++ {
   451  		// If we still have diff layers below, continue down
   452  		if parent, ok := diff.parent.(*diffLayer); ok {
   453  			diff = parent
   454  		} else {
   455  			// Diff stack too shallow, return without modifications
   456  			return nil
   457  		}
   458  	}
   459  	// We're out of layers, flatten anything below, stopping if it's the disk or if
   460  	// the memory limit is not yet exceeded.
   461  	switch parent := diff.parent.(type) {
   462  	case *diskLayer:
   463  		return nil
   464  
   465  	case *diffLayer:
   466  		// Flatten the parent into the grandparent. The flattening internally obtains a
   467  		// write lock on grandparent.
   468  		flattened := parent.flatten().(*diffLayer)
   469  		t.layers[flattened.root] = flattened
   470  
   471  		diff.lock.Lock()
   472  		defer diff.lock.Unlock()
   473  
   474  		diff.parent = flattened
   475  		if flattened.memory < aggregatorMemoryLimit {
   476  			// Accumulator layer is smaller than the limit, so we can abort, unless
   477  			// there's a snapshot being generated currently. In that case, the trie
   478  			// will move from underneath the generator so we **must** merge all the
   479  			// partial data down into the snapshot and restart the generation.
   480  			if flattened.parent.(*diskLayer).genAbort == nil {
   481  				return nil
   482  			}
   483  		}
   484  	default:
   485  		panic(fmt.Sprintf("unknown data layer: %T", parent))
   486  	}
   487  	// If the bottom-most layer is larger than our memory cap, persist to disk
   488  	bottom := diff.parent.(*diffLayer)
   489  
   490  	bottom.lock.RLock()
   491  	base := diffToDisk(bottom)
   492  	bottom.lock.RUnlock()
   493  
   494  	t.layers[base.root] = base
   495  	diff.parent = base
   496  	return base
   497  }
   498  
   499  // diffToDisk merges a bottom-most diff into the persistent disk layer underneath
    500  // it. The method will panic if called on a non-bottom-most diff layer.
   501  //
   502  // The disk layer persistence should be operated in an atomic way. All updates should
    503  // be discarded if the whole transition is not finished.
   504  func diffToDisk(bottom *diffLayer) *diskLayer {
   505  	var (
   506  		base  = bottom.parent.(*diskLayer)
   507  		batch = base.diskdb.NewBatch()
   508  		stats *generatorStats
   509  	)
   510  	// If the disk layer is running a snapshot generator, abort it
   511  	if base.genAbort != nil {
   512  		abort := make(chan *generatorStats)
   513  		base.genAbort <- abort
   514  		stats = <-abort
   515  	}
   516  	// Put the deletion in the batch writer, flush all updates in the final step.
   517  	rawdb.DeleteSnapshotRoot(batch)
   518  
   519  	// Mark the original base as stale as we're going to create a new wrapper
   520  	base.lock.Lock()
   521  	if base.stale {
   522  		panic("parent disk layer is stale") // we've committed into the same base from two children, boo
   523  	}
   524  	base.stale = true
   525  	base.lock.Unlock()
   526  
   527  	// Destroy all the destructed accounts from the database
   528  	for hash := range bottom.destructSet {
   529  		// Skip any account not covered yet by the snapshot
   530  		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
   531  			continue
   532  		}
   533  		// Remove all storage slots
   534  		rawdb.DeleteAccountSnapshot(batch, hash)
   535  		base.cache.Set(hash[:], nil)
   536  
   537  		it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
   538  		for it.Next() {
   539  			if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
   540  				batch.Delete(key)
   541  				base.cache.Del(key[1:])
   542  				snapshotFlushStorageItemMeter.Mark(1)
   543  
   544  				// Ensure we don't delete too much data blindly (contract can be
   545  				// huge). It's ok to flush, the root will go missing in case of a
   546  				// crash and we'll detect and regenerate the snapshot.
   547  				if batch.ValueSize() > ethdb.IdealBatchSize {
   548  					if err := batch.Write(); err != nil {
   549  						log.Crit("Failed to write storage deletions", "err", err)
   550  					}
   551  					batch.Reset()
   552  				}
   553  			}
   554  		}
   555  		it.Release()
   556  	}
   557  	// Push all updated accounts into the database
   558  	for hash, data := range bottom.accountData {
   559  		// Skip any account not covered yet by the snapshot
   560  		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
   561  			continue
   562  		}
   563  		// Push the account to disk
   564  		rawdb.WriteAccountSnapshot(batch, hash, data)
   565  		base.cache.Set(hash[:], data)
   566  		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
   567  
   568  		snapshotFlushAccountItemMeter.Mark(1)
   569  		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
   570  
   571  		// Ensure we don't write too much data blindly. It's ok to flush, the
   572  		// root will go missing in case of a crash and we'll detect and regen
   573  		// the snapshot.
   574  		if batch.ValueSize() > ethdb.IdealBatchSize {
   575  			if err := batch.Write(); err != nil {
    576  				log.Crit("Failed to write account snapshots", "err", err)
   577  			}
   578  			batch.Reset()
   579  		}
   580  	}
   581  	// Push all the storage slots into the database
   582  	for accountHash, storage := range bottom.storageData {
   583  		// Skip any account not covered yet by the snapshot
   584  		if base.genMarker != nil && bytes.Compare(accountHash[:], base.genMarker) > 0 {
   585  			continue
   586  		}
   587  		// Generation might be mid-account, track that case too
   588  		midAccount := base.genMarker != nil && bytes.Equal(accountHash[:], base.genMarker[:common.HashLength])
   589  
   590  		for storageHash, data := range storage {
   591  			// Skip any slot not covered yet by the snapshot
   592  			if midAccount && bytes.Compare(storageHash[:], base.genMarker[common.HashLength:]) > 0 {
   593  				continue
   594  			}
   595  			if len(data) > 0 {
   596  				rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
   597  				base.cache.Set(append(accountHash[:], storageHash[:]...), data)
   598  				snapshotCleanStorageWriteMeter.Mark(int64(len(data)))
   599  			} else {
   600  				rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
   601  				base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
   602  			}
   603  			snapshotFlushStorageItemMeter.Mark(1)
   604  			snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
   605  		}
   606  	}
   607  	// Update the snapshot block marker and write any remainder data
   608  	rawdb.WriteSnapshotRoot(batch, bottom.root)
   609  
   610  	// Write out the generator progress marker and report
   611  	journalProgress(batch, base.genMarker, stats)
   612  
   613  	// Flush all the updates in the single db operation. Ensure the
   614  	// disk layer transition is atomic.
   615  	if err := batch.Write(); err != nil {
   616  		log.Crit("Failed to write leftover snapshot", "err", err)
   617  	}
   618  	log.Debug("Journalled disk layer", "root", bottom.root, "complete", base.genMarker == nil)
   619  	res := &diskLayer{
   620  		root:       bottom.root,
   621  		cache:      base.cache,
   622  		diskdb:     base.diskdb,
   623  		triedb:     base.triedb,
   624  		genMarker:  base.genMarker,
   625  		genPending: base.genPending,
   626  	}
    627  	// If snapshot generation hasn't finished yet, port over all the stats and
   628  	// continue where the previous round left off.
   629  	//
   630  	// Note, the `base.genAbort` comparison is not used normally, it's checked
   631  	// to allow the tests to play with the marker without triggering this path.
   632  	if base.genMarker != nil && base.genAbort != nil {
   633  		res.genMarker = base.genMarker
   634  		res.genAbort = make(chan chan *generatorStats)
   635  		go res.generate(stats)
   636  	}
   637  	return res
   638  }
   639  
   640  // Journal commits an entire diff hierarchy to disk into a single journal entry.
   641  // This is meant to be used during shutdown to persist the snapshot without
   642  // flattening everything down (bad for reorgs).
   643  //
   644  // The method returns the root hash of the base layer that needs to be persisted
   645  // to disk as a trie too to allow continuing any pending generation op.
   646  func (t *Tree) Journal(root common.Hash) (common.Hash, error) {
    647  	// Retrieve the head snapshot to journal from
   648  	snap := t.Snapshot(root)
   649  	if snap == nil {
   650  		return common.Hash{}, fmt.Errorf("snapshot [%#x] missing", root)
   651  	}
   652  	// Run the journaling
   653  	t.lock.Lock()
   654  	defer t.lock.Unlock()
   655  
    656  	// Firstly write out the metadata of the journal
   657  	journal := new(bytes.Buffer)
   658  	if err := rlp.Encode(journal, journalVersion); err != nil {
   659  		return common.Hash{}, err
   660  	}
   661  	diskroot := t.diskRoot()
   662  	if diskroot == (common.Hash{}) {
   663  		return common.Hash{}, errors.New("invalid disk root")
   664  	}
    665  	// Secondly write out the disk layer root, ensuring the
    666  	// diff journal is continuous with the disk layer.
   667  	if err := rlp.Encode(journal, diskroot); err != nil {
   668  		return common.Hash{}, err
   669  	}
   670  	// Finally write out the journal of each layer in reverse order.
   671  	base, err := snap.(snapshot).Journal(journal)
   672  	if err != nil {
   673  		return common.Hash{}, err
   674  	}
   675  	// Store the journal into the database and return
   676  	rawdb.WriteSnapshotJournal(t.diskdb, journal.Bytes())
   677  	return base, nil
   678  }
   679  
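// A hedged shutdown sketch: persist the whole diff hierarchy as a journal so
// that New can reload it on the next startup (headRoot is assumed to be the
// current head state root):
//
//	base, err := snaps.Journal(headRoot)
//	if err == nil {
//		// base is the disk layer root that must also be persisted as a trie
//		// so that any pending generation can resume after the restart.
//	}
//	_ = base
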
   680  // Rebuild wipes all available snapshot data from the persistent database and
    681  // discards all caches and diff layers. Afterwards, it starts a new snapshot
   682  // generator with the given root hash.
   683  func (t *Tree) Rebuild(root common.Hash) {
   684  	t.lock.Lock()
   685  	defer t.lock.Unlock()
   686  
    687  	// Firstly delete any recovery flag from the database, since we are now
    688  	// building a brand new snapshot. Also re-enable the snapshot feature.
   689  	rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
   690  	rawdb.DeleteSnapshotDisabled(t.diskdb)
   691  
   692  	// Iterate over and mark all layers stale
   693  	for _, layer := range t.layers {
   694  		switch layer := layer.(type) {
   695  		case *diskLayer:
   696  			// If the base layer is generating, abort it and save
   697  			if layer.genAbort != nil {
   698  				abort := make(chan *generatorStats)
   699  				layer.genAbort <- abort
   700  				<-abort
   701  			}
   702  			// Layer should be inactive now, mark it as stale
   703  			layer.lock.Lock()
   704  			layer.stale = true
   705  			layer.lock.Unlock()
   706  
   707  		case *diffLayer:
   708  			// If the layer is a simple diff, simply mark as stale
   709  			layer.lock.Lock()
   710  			atomic.StoreUint32(&layer.stale, 1)
   711  			layer.lock.Unlock()
   712  
   713  		default:
   714  			panic(fmt.Sprintf("unknown layer type: %T", layer))
   715  		}
   716  	}
   717  	// Start generating a new snapshot from scratch on a background thread. The
   718  	// generator will run a wiper first if there's not one running right now.
   719  	log.Info("Rebuilding state snapshot")
   720  	t.layers = map[common.Hash]snapshot{
   721  		root: generateSnapshot(t.diskdb, t.triedb, t.cache, root),
   722  	}
   723  }
   724  
   725  // AccountIterator creates a new account iterator for the specified root hash and
   726  // seeks to a starting account hash.
   727  func (t *Tree) AccountIterator(root common.Hash, seek common.Hash) (AccountIterator, error) {
   728  	ok, err := t.generating()
   729  	if err != nil {
   730  		return nil, err
   731  	}
   732  	if ok {
   733  		return nil, ErrNotConstructed
   734  	}
   735  	return newFastAccountIterator(t, root, seek)
   736  }
   737  
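// An illustrative iteration sketch, assuming the AccountIterator API exposed
// elsewhere in this package (Next/Hash/Account/Release):
//
//	it, err := snaps.AccountIterator(headRoot, common.Hash{})
//	if err == nil {
//		defer it.Release()
//		for it.Next() {
//			accountHash, slimRLP := it.Hash(), it.Account()
//			_, _ = accountHash, slimRLP
//		}
//	}
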
   738  // StorageIterator creates a new storage iterator for the specified root hash and
    739  // account. The iterator will be moved to the specific start position.
   740  func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (StorageIterator, error) {
   741  	ok, err := t.generating()
   742  	if err != nil {
   743  		return nil, err
   744  	}
   745  	if ok {
   746  		return nil, ErrNotConstructed
   747  	}
   748  	return newFastStorageIterator(t, root, account, seek)
   749  }
   750  
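// A similar sketch for storage slots, assuming the StorageIterator API
// (Next/Hash/Slot/Release); accountHash is a placeholder:
//
//	it, err := snaps.StorageIterator(headRoot, accountHash, common.Hash{})
//	if err == nil {
//		defer it.Release()
//		for it.Next() {
//			slotHash, slotValue := it.Hash(), it.Slot()
//			_, _ = slotHash, slotValue
//		}
//	}
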
    751  // Verify iterates the whole state (all the accounts as well as the corresponding storages)
   752  // with the specific root and compares the re-computed hash with the original one.
   753  func (t *Tree) Verify(root common.Hash) error {
   754  	acctIt, err := t.AccountIterator(root, common.Hash{})
   755  	if err != nil {
   756  		return err
   757  	}
   758  	defer acctIt.Release()
   759  
   760  	got, err := generateTrieRoot(nil, acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
   761  		storageIt, err := t.StorageIterator(root, accountHash, common.Hash{})
   762  		if err != nil {
   763  			return common.Hash{}, err
   764  		}
   765  		defer storageIt.Release()
   766  
   767  		hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
   768  		if err != nil {
   769  			return common.Hash{}, err
   770  		}
   771  		return hash, nil
   772  	}, newGenerateStats(), true)
   773  
   774  	if err != nil {
   775  		return err
   776  	}
   777  	if got != root {
   778  		return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root)
   779  	}
   780  	return nil
   781  }
   782  
   783  // disklayer is an internal helper function to return the disk layer.
   784  // The lock of snapTree is assumed to be held already.
   785  func (t *Tree) disklayer() *diskLayer {
   786  	var snap snapshot
   787  	for _, s := range t.layers {
   788  		snap = s
   789  		break
   790  	}
   791  	if snap == nil {
   792  		return nil
   793  	}
   794  	switch layer := snap.(type) {
   795  	case *diskLayer:
   796  		return layer
   797  	case *diffLayer:
   798  		return layer.origin
   799  	default:
   800  		panic(fmt.Sprintf("%T: undefined layer", snap))
   801  	}
   802  }
   803  
    804  // diskRoot is an internal helper function to return the disk layer root.
   805  // The lock of snapTree is assumed to be held already.
   806  func (t *Tree) diskRoot() common.Hash {
   807  	disklayer := t.disklayer()
   808  	if disklayer == nil {
   809  		return common.Hash{}
   810  	}
   811  	return disklayer.Root()
   812  }
   813  
   814  // generating is an internal helper function which reports whether the snapshot
    815  // is still under construction.
   816  func (t *Tree) generating() (bool, error) {
   817  	t.lock.Lock()
   818  	defer t.lock.Unlock()
   819  
   820  	layer := t.disklayer()
   821  	if layer == nil {
   822  		return false, errors.New("disk layer is missing")
   823  	}
   824  	layer.lock.RLock()
   825  	defer layer.lock.RUnlock()
   826  	return layer.genMarker != nil, nil
   827  }
   828  
    829  // DiskRoot is an external helper function to return the disk layer root.
   830  func (t *Tree) DiskRoot() common.Hash {
   831  	t.lock.Lock()
   832  	defer t.lock.Unlock()
   833  
   834  	return t.diskRoot()
   835  }