gitlab.com/flarenetwork/coreth@v0.1.1/core/state/snapshot/snapshot.go

     1  // (c) 2019-2020, Ava Labs, Inc.
     2  //
     3  // This file is a derived work, based on the go-ethereum library whose original
     4  // notices appear below.
     5  //
     6  // It is distributed under a license compatible with the licensing terms of the
     7  // original code from which it is derived.
     8  //
     9  // Much love to the original authors for their work.
    10  // **********
    11  // Copyright 2019 The go-ethereum Authors
    12  // This file is part of the go-ethereum library.
    13  //
    14  // The go-ethereum library is free software: you can redistribute it and/or modify
    15  // it under the terms of the GNU Lesser General Public License as published by
    16  // the Free Software Foundation, either version 3 of the License, or
    17  // (at your option) any later version.
    18  //
    19  // The go-ethereum library is distributed in the hope that it will be useful,
    20  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    21  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    22  // GNU Lesser General Public License for more details.
    23  //
    24  // You should have received a copy of the GNU Lesser General Public License
    25  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    26  
    27  // Package snapshot implements a journalled, dynamic state dump.
    28  package snapshot
    29  
    30  import (
    31  	"bytes"
    32  	"errors"
    33  	"fmt"
    34  	"sync"
    35  	"sync/atomic"
    36  	"time"
    37  
    38  	"github.com/VictoriaMetrics/fastcache"
    39  	"github.com/ethereum/go-ethereum/common"
    40  	"github.com/ethereum/go-ethereum/ethdb"
    41  	"github.com/ethereum/go-ethereum/log"
    42  	"github.com/ethereum/go-ethereum/metrics"
    43  	"github.com/ethereum/go-ethereum/trie"
    44  	"gitlab.com/flarenetwork/coreth/core/rawdb"
    45  )
    46  
    47  const (
    48  	// skipGenThreshold is the minimum time that must have elapsed since the
    49  	// creation of the previous disk layer to start snapshot generation on a new
    50  	// disk layer.
    51  	//
    52  	// If disk layers are being discarded more often than this threshold allows,
    53  	// starting snapshot generation is not worth it (it will be aborted before meaningful
    54  	// work can be done).
    55  	skipGenThreshold = 500 * time.Millisecond
    56  )
    57  
    58  var (
    59  	snapshotCleanAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/account/hit", nil)
    60  	snapshotCleanAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/miss", nil)
    61  	snapshotCleanAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/inex", nil)
    62  	snapshotCleanAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/account/read", nil)
    63  	snapshotCleanAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/account/write", nil)
    64  
    65  	snapshotCleanStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/clean/storage/hit", nil)
    66  	snapshotCleanStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/miss", nil)
    67  	snapshotCleanStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/inex", nil)
    68  	snapshotCleanStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/clean/storage/read", nil)
    69  	snapshotCleanStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/clean/storage/write", nil)
    70  
    71  	snapshotDirtyAccountHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/account/hit", nil)
    72  	snapshotDirtyAccountMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/miss", nil)
    73  	snapshotDirtyAccountInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/inex", nil)
    74  	snapshotDirtyAccountReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/account/read", nil)
    75  	snapshotDirtyAccountWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/account/write", nil)
    76  
    77  	snapshotDirtyStorageHitMeter   = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/hit", nil)
    78  	snapshotDirtyStorageMissMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/miss", nil)
    79  	snapshotDirtyStorageInexMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/inex", nil)
    80  	snapshotDirtyStorageReadMeter  = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/read", nil)
    81  	snapshotDirtyStorageWriteMeter = metrics.NewRegisteredMeter("state/snapshot/dirty/storage/write", nil)
    82  
    83  	snapshotDirtyAccountHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/account/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
    84  	snapshotDirtyStorageHitDepthHist = metrics.NewRegisteredHistogram("state/snapshot/dirty/storage/hit/depth", nil, metrics.NewExpDecaySample(1028, 0.015))
    85  
    86  	snapshotFlushAccountItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/item", nil)
    87  	snapshotFlushAccountSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/account/size", nil)
    88  	snapshotFlushStorageItemMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/item", nil)
    89  	snapshotFlushStorageSizeMeter = metrics.NewRegisteredMeter("state/snapshot/flush/storage/size", nil)
    90  
    91  	snapshotBloomIndexTimer = metrics.NewRegisteredResettingTimer("state/snapshot/bloom/index", nil)
    92  	snapshotBloomErrorGauge = metrics.NewRegisteredGaugeFloat64("state/snapshot/bloom/error", nil)
    93  
    94  	snapshotBloomAccountTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/account/truehit", nil)
    95  	snapshotBloomAccountFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/account/falsehit", nil)
    96  	snapshotBloomAccountMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/account/miss", nil)
    97  
    98  	snapshotBloomStorageTrueHitMeter  = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/truehit", nil)
    99  	snapshotBloomStorageFalseHitMeter = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/falsehit", nil)
   100  	snapshotBloomStorageMissMeter     = metrics.NewRegisteredMeter("state/snapshot/bloom/storage/miss", nil)
   101  
   102  	// ErrSnapshotStale is returned from data accessors if the underlying snapshot
   103  	// layer had been invalidated due to the chain progressing forward far enough
   104  	// to not maintain the layer's original state.
   105  	ErrSnapshotStale = errors.New("snapshot stale")
   106  
   107  	// ErrStaleParentLayer is returned when Flatten attempts to flatten a diff layer into
   108  	// a stale parent.
   109  	ErrStaleParentLayer = errors.New("parent disk layer is stale")
   110  
   111  	// ErrNotCoveredYet is returned from data accessors if the underlying snapshot
   112  	// is being generated currently and the requested data item is not yet in the
   113  	// range of accounts covered.
   114  	ErrNotCoveredYet = errors.New("not covered yet")
   115  
   116  	// ErrNotConstructed is returned if a caller wants to iterate the snapshot
   117  	// while generation is not yet finished.
   118  	ErrNotConstructed = errors.New("snapshot is not constructed")
   119  )
   120  
   121  // Snapshot represents the functionality supported by a snapshot storage layer.
   122  type Snapshot interface {
   123  	// Root returns the root hash for which this snapshot was made.
   124  	Root() common.Hash
   125  
   126  	// Account directly retrieves the account associated with a particular hash in
   127  	// the snapshot slim data format.
   128  	Account(hash common.Hash) (*Account, error)
   129  
   130  	// AccountRLP directly retrieves the account RLP associated with a particular
   131  	// hash in the snapshot slim data format.
   132  	AccountRLP(hash common.Hash) ([]byte, error)
   133  
   134  	// Storage directly retrieves the storage data associated with a particular hash,
   135  	// within a particular account.
   136  	Storage(accountHash, storageHash common.Hash) ([]byte, error)
   137  }
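
        // Illustrative sketch (not part of the original file): reading state through a
        // read-only Snapshot handle served by a Tree. The tree, state root and account
        // hash below are placeholders supplied by the caller.
        //
        //	snap := tree.Snapshot(stateRoot)
        //	if snap == nil {
        //		return fmt.Errorf("no snapshot maintained for root %s", stateRoot)
        //	}
        //	acc, err := snap.Account(accountHash) // slim-format account, nil if it does not exist
        //	if err != nil {
        //		return err
        //	}
        //	_ = acc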
   138  
   139  // snapshot is the internal version of the snapshot data layer that supports some
   140  // additional methods compared to the public API.
   141  type snapshot interface {
   142  	Snapshot
   143  
   144  	BlockHash() common.Hash
   145  
   146  	// Parent returns the parent layer of a snapshot, or nil if the base was
   147  	// reached.
   148  	//
   149  	// Note, the method is an internal helper to avoid type switching between the
   150  	// disk and diff layers. There is no locking involved.
   151  	Parent() snapshot
   152  
   153  	// Update creates a new layer on top of the existing snapshot diff tree with
   154  	// the specified data items.
   155  	//
   156  	// Note, the maps are retained by the method to avoid copying everything.
   157  	Update(blockHash, blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer
   158  
   159  	// Stale returns whether this layer has become stale (was flattened across) or
   160  	// if it's still live.
   161  	Stale() bool
   162  
   163  	// AccountIterator creates an account iterator over an arbitrary layer.
   164  	AccountIterator(seek common.Hash) AccountIterator
   165  
   166  	// StorageIterator creates a storage iterator over an arbitrary layer.
   167  	StorageIterator(account common.Hash, seek common.Hash) (StorageIterator, bool)
   168  }
   169  
   170  // Tree is an Ethereum state snapshot tree. It consists of one persistent base
   171  // layer backed by a key-value store, on top of which arbitrarily many in-memory
   172  // diff layers are stacked. The memory diffs can form a tree with branching, but
   173  // the disk layer is singleton and common to all. If a reorg goes deeper than the
   174  // disk layer, everything needs to be deleted.
   175  //
   176  // The goal of a state snapshot is twofold: to allow direct access to account and
   177  // storage data to avoid expensive multi-level trie lookups; and to allow sorted,
   178  // cheap iteration of the account/storage tries for sync aid.
   179  type Tree struct {
   180  	diskdb ethdb.KeyValueStore // Persistent database to store the snapshot
   181  	triedb *trie.Database      // In-memory cache to access the trie through
   182  	cache  int                 // Megabytes permitted to use for read caches
   183  	// Collection of all known layers
   184  	// blockHash -> snapshot
   185  	blockLayers map[common.Hash]snapshot
   186  	// stateRoot -> blockHash -> snapshot
   187  	// Update creates a new block layer whose parent is taken from the blockHash -> snapshot map.
   188  	// A read-only Snapshot can be served by grabbing any entry from the stateRoot-based map.
   189  	stateLayers map[common.Hash]map[common.Hash]snapshot
   190  	verified    bool // Indicates if snapshot integrity has been verified
   191  	lock        sync.RWMutex
   192  }
   193  
   194  // New attempts to load an already existing snapshot from a persistent key-value
   195  // store (with a number of memory layers from a journal), ensuring that the head
   196  // of the snapshot matches the expected one.
   197  //
   198  // If the snapshot is missing or the disk layer is broken, the snapshot will be
   199  // reconstructed using both the existing data and the state trie.
   200  // The repair happens on a background thread.
   201  func New(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, blockHash, root common.Hash, async bool, rebuild bool, verify bool) (*Tree, error) {
   202  	// Create a new, empty snapshot tree
   203  	snap := &Tree{
   204  		diskdb:      diskdb,
   205  		triedb:      triedb,
   206  		cache:       cache,
   207  		blockLayers: make(map[common.Hash]snapshot),
   208  		stateLayers: make(map[common.Hash]map[common.Hash]snapshot),
   209  		verified:    !verify, // if verify is false, all verification will be bypassed
   210  	}
   211  
   212  	// Attempt to load a previously persisted snapshot and rebuild one if failed
   213  	head, generated, err := loadSnapshot(diskdb, triedb, cache, blockHash, root)
   214  	if err != nil {
   215  		if rebuild {
   216  			log.Warn("Failed to load snapshot, regenerating", "err", err)
   217  			snap.Rebuild(blockHash, root)
   218  			if !async {
   219  				if err := snap.verifyIntegrity(snap.disklayer(), true); err != nil {
   220  					return nil, err
   221  				}
   222  			}
   223  			return snap, nil
   224  		}
   225  		return nil, err // Bail out the error, don't rebuild automatically.
   226  	}
   227  
   228  	// Existing snapshot loaded, seed all the layers
   229  	// It is unnecessary to grab the lock here, since the tree was just created within this function
   230  	// call, but we grab it nevertheless to follow the spec for insertSnap.
   231  	snap.lock.Lock()
   232  	defer snap.lock.Unlock()
   233  	for head != nil {
   234  		snap.insertSnap(head)
   235  		head = head.Parent()
   236  	}
   237  
   238  	// Verify any synchronously generated or loaded snapshot from disk
   239  	if !async || generated {
   240  		if err := snap.verifyIntegrity(snap.disklayer(), !async && !generated); err != nil {
   241  			return nil, err
   242  		}
   243  	}
   244  
   245  	return snap, nil
   246  }
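
        // Illustrative sketch (not part of the original file): opening a snapshot tree over
        // existing databases. [diskdb], [triedb], [headBlockHash] and [headRoot] are
        // placeholders supplied by the caller; async=false forces synchronous verification,
        // rebuild=true allows regeneration when the persisted snapshot is missing or broken,
        // and verify=true enables the one-time integrity check.
        //
        //	snaps, err := snapshot.New(diskdb, triedb, 256, headBlockHash, headRoot, false, true, true)
        //	if err != nil {
        //		log.Crit("Failed to open snapshot tree", "err", err)
        //	}
        //	_ = snaps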
   247  
   248  // insertSnap inserts [snap] into the tree.
   249  // Assumes the lock is held.
   250  func (t *Tree) insertSnap(snap snapshot) {
   251  	t.blockLayers[snap.BlockHash()] = snap
   252  	blockSnaps, ok := t.stateLayers[snap.Root()]
   253  	if !ok {
   254  		blockSnaps = make(map[common.Hash]snapshot)
   255  		t.stateLayers[snap.Root()] = blockSnaps
   256  	}
   257  	blockSnaps[snap.BlockHash()] = snap
   258  }
   259  
   260  // Snapshot retrieves a snapshot belonging to the given state root, or nil if no
   261  // snapshot is maintained for that state root.
   262  func (t *Tree) Snapshot(stateRoot common.Hash) Snapshot {
   263  	return t.getSnapshot(stateRoot, false)
   264  }
   265  
   266  // getSnapshot retrieves a Snapshot by its state root. If the caller already holds the
   267  // snapTree lock when calling this function, [holdsTreeLock] should be set to true.
   268  func (t *Tree) getSnapshot(stateRoot common.Hash, holdsTreeLock bool) snapshot {
   269  	if !holdsTreeLock {
   270  		t.lock.RLock()
   271  		defer t.lock.RUnlock()
   272  	}
   273  
   274  	layers := t.stateLayers[stateRoot]
   275  	for _, layer := range layers {
   276  		return layer
   277  	}
   278  	return nil
   279  }
   280  
   281  // Snapshots returns the layers visited when starting from the layer with the given
   282  // block hash and traversing downward. The number of layers returned is capped by
   283  // [limits]. If [nodisk] is set, the disk layer is excluded.
   284  func (t *Tree) Snapshots(blockHash common.Hash, limits int, nodisk bool) []Snapshot {
   285  	t.lock.RLock()
   286  	defer t.lock.RUnlock()
   287  
   288  	if limits == 0 {
   289  		return nil
   290  	}
   291  	layer, ok := t.blockLayers[blockHash]
   292  	if !ok {
   293  		return nil
   294  	}
   295  	var ret []Snapshot
   296  	for {
   297  		if _, isdisk := layer.(*diskLayer); isdisk && nodisk {
   298  			break
   299  		}
   300  		ret = append(ret, layer)
   301  		limits -= 1
   302  		if limits == 0 {
   303  			break
   304  		}
   305  		parent := layer.Parent()
   306  		if parent == nil {
   307  			break
   308  		}
   309  		layer = parent
   310  	}
   311  	return ret
   312  }
   313  
   314  // Update adds a new snapshot into the tree, if that can be linked to an existing
   315  // old parent. It is disallowed to insert a disk layer (the origin of all).
   316  func (t *Tree) Update(blockHash, blockRoot, parentBlockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) error {
   317  	t.lock.Lock()
   318  	defer t.lock.Unlock()
   319  
   320  	// Grab the parent snapshot based on the parent block hash, not the parent state root
   321  	parent := t.blockLayers[parentBlockHash]
   322  	if parent == nil {
   323  		return fmt.Errorf("parent [%#x] snapshot missing", parentBlockHash)
   324  	}
   325  
   326  	snap := t.blockLayers[blockHash]
   327  	if snap != nil {
   328  		log.Warn("Attempted to insert a snapshot layer for an existing block",
   329  			"blockHash", blockHash, "blockRoot", blockRoot, "parentHash", parentBlockHash,
   330  			"existingBlockRoot", snap.Root(),
   331  		)
   332  	}
   333  
   334  	snap = parent.Update(blockHash, blockRoot, destructs, accounts, storage)
   335  	t.insertSnap(snap)
   336  	return nil
   337  }
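
        // Illustrative sketch (not part of the original file): layering a block's state
        // changes on top of its parent and either flattening the layer into the disk layer
        // once the block is accepted, or discarding it if the block is rejected. All hashes
        // and change sets below are placeholders, and [accepted] stands in for the caller's
        // consensus decision.
        //
        //	if err := snaps.Update(blockHash, stateRoot, parentBlockHash, destructs, accounts, storage); err != nil {
        //		return err
        //	}
        //	if accepted {
        //		return snaps.Flatten(blockHash) // parent must already be the disk layer
        //	}
        //	return snaps.Discard(blockHash)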
   338  
   339  // verifyIntegrity performs an integrity check on the current snapshot using
   340  // verify. Most importantly, verifyIntegrity ensures verify is called at
   341  // most once during the entire lifetime of [Tree], returning immediately if
   342  // already invoked. If [waitBuild] is true, verifyIntegrity will wait for
   343  // generation of the snapshot to finish before verifying.
   344  //
   345  // It is assumed that the caller holds the [snapTree] lock
   346  // when calling this function.
   347  func (t *Tree) verifyIntegrity(base *diskLayer, waitBuild bool) error {
   348  	// Find the rebuild termination channel and wait until
   349  	// the snapshot is generated
   350  	if done := base.genPending; waitBuild && done != nil {
   351  		log.Info("Waiting for snapshot generation", "root", base.root)
   352  		<-done
   353  	}
   354  
   355  	if t.verified {
   356  		return nil
   357  	}
   358  
   359  	if base.genMarker != nil {
   360  		return errors.New("cannot verify integrity of an unfinished snapshot")
   361  	}
   362  
   363  	start := time.Now()
   364  	log.Info("Verifying snapshot integrity", "root", base.root)
   365  	if err := t.verify(base.root, true); err != nil {
   366  		return fmt.Errorf("unable to verify snapshot integrity: %w", err)
   367  	}
   368  
   369  	log.Info("Verified snapshot integrity", "root", base.root, "elapsed", time.Since(start))
   370  	t.verified = true
   371  	return nil
   372  }
   373  
   374  // Flatten flattens the snapshot for [blockHash] into its parent. If its
   375  // parent is not a disk layer, Flatten will return an error.
   376  // Note: a blockHash is used instead of a state root so that the exact state
   377  // transition between the two states is well defined. This is intended to
   378  // prevent the following edge case:
   379  //    A
   380  //   /  \
   381  //  B    C
   382  //       |
   383  //       D
   384  // In this scenario, it's possible for (A, B) and (A, C, D) to be two
   385  // different paths to the resulting state. We use block hashes and parent
   386  // block hashes to ensure that the exact path through which we flatten
   387  // diffLayers is well defined.
   388  func (t *Tree) Flatten(blockHash common.Hash) error {
   389  	t.lock.Lock()
   390  	defer t.lock.Unlock()
   391  
   392  	start := time.Now()
   393  	snap, ok := t.blockLayers[blockHash]
   394  	if !ok {
   395  		return fmt.Errorf("cannot flatten missing snapshot: %s", blockHash)
   396  	}
   397  	diff, ok := snap.(*diffLayer)
   398  	if !ok {
   399  		return fmt.Errorf("cannot flatten disk layer: (%s, %s)", blockHash, snap.Root())
   400  	}
   401  	if diff.parent == nil {
   402  		return fmt.Errorf("cannot flatten snapshot with missing parent (%s, %s)", blockHash, diff.root)
   403  	}
   404  	if parentDiff, ok := diff.parent.(*diffLayer); ok {
   405  		return fmt.Errorf("cannot flatten snapshot (%s, %s) into diff layer parent (%s, %s)", blockHash, diff.root, parentDiff.blockHash, parentDiff.root)
   406  	}
   407  	parentLayer := t.blockLayers[diff.parent.BlockHash()]
   408  	if parentLayer == nil {
   409  		return fmt.Errorf("snapshot missing parent layer: %s", diff.parent.BlockHash())
   410  	}
   411  
   412  	diff.lock.RLock()
   413  	base, snapshotGenerated, err := diffToDisk(diff)
   414  	diff.lock.RUnlock()
   415  	if err != nil {
   416  		return err
   417  	}
   418  
   419  	// Remove parent layer
   420  	if err := t.discard(diff.parent.BlockHash(), true); err != nil {
   421  		return fmt.Errorf("failed to discard parent layer while flattening (%s, %s): %w", blockHash, diff.root, err)
   422  	}
   423  	// We created a new diskLayer [base] to replace [diff], so we need to replace
   424  	// it in both maps and replace all pointers to it.
   425  	t.blockLayers[base.blockHash] = base
   426  	stateSnaps := t.stateLayers[base.root]
   427  	// stateSnaps must already be initialized here, since we are replacing
   428  	// an existing snapshot instead of adding a new one.
   429  	stateSnaps[base.blockHash] = base
   430  
   431  	// Replace the parent pointers for any snapshot that referenced
   432  	// the replaced diffLayer.
   433  	for _, snap := range t.blockLayers {
   434  		if diff, ok := snap.(*diffLayer); ok {
   435  			if base.blockHash == diff.parent.BlockHash() {
   436  				diff.parent = base
   437  			}
   438  		}
   439  	}
   440  
   441  	// TODO add tracking of children to the snapshots to reduce overhead here.
   442  	children := make(map[common.Hash][]common.Hash)
   443  	for blockHash, snap := range t.blockLayers {
   444  		if diff, ok := snap.(*diffLayer); ok {
   445  			parent := diff.parent.BlockHash()
   446  			children[parent] = append(children[parent], blockHash)
   447  		}
   448  	}
   449  	var remove func(blockHash common.Hash)
   450  	remove = func(blockHash common.Hash) {
   451  		t.discard(blockHash, false)
   452  		for _, child := range children[blockHash] {
   453  			remove(child)
   454  		}
   455  		delete(children, blockHash)
   456  	}
   457  	for blockHash, snap := range t.blockLayers {
   458  		if snap.Stale() {
   459  			remove(blockHash)
   460  		}
   461  	}
   462  	// If the disk layer was modified, regenerate all the cumulative blooms
   463  	var rebloom func(blockHash common.Hash)
   464  	rebloom = func(blockHash common.Hash) {
   465  		if diff, ok := t.blockLayers[blockHash].(*diffLayer); ok {
   466  			diff.rebloom(base)
   467  		}
   468  		for _, child := range children[blockHash] {
   469  			rebloom(child)
   470  		}
   471  	}
   472  	rebloom(base.blockHash)
   473  	log.Debug("Flattened snapshot tree", "blockHash", blockHash, "root", base.root, "size", len(t.blockLayers), "elapsed", common.PrettyDuration(time.Since(start)))
   474  
   475  	if !snapshotGenerated {
   476  		return nil
   477  	}
   478  	return t.verifyIntegrity(base, false)
   479  }
   480  
   481  // NumStateLayers returns the number of state layers currently being maintained.
   482  func (t *Tree) NumStateLayers() int {
   483  	t.lock.RLock()
   484  	defer t.lock.RUnlock()
   485  
   486  	return len(t.stateLayers)
   487  }
   488  
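        // NumBlockLayers returns the number of block layers currently being maintained.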
   489  func (t *Tree) NumBlockLayers() int {
   490  	t.lock.RLock()
   491  	defer t.lock.RUnlock()
   492  
   493  	return len(t.blockLayers)
   494  }
   495  
   496  // Discard removes a snapshot layer that is no longer needed.
   497  func (t *Tree) Discard(blockHash common.Hash) error {
   498  	t.lock.Lock()
   499  	defer t.lock.Unlock()
   500  
   501  	return t.discard(blockHash, false)
   502  }
   503  
   504  // discard removes the snapshot associated with [blockHash] from the
   505  // snapshot tree.
   506  // If [force] is true, discard may delete the disk layer. This should
   507  // only be called within Flatten, when a new disk layer is being created.
   508  // Assumes the lock is held.
   509  func (t *Tree) discard(blockHash common.Hash, force bool) error {
   510  	snap := t.blockLayers[blockHash]
   511  	if snap == nil {
   512  		return fmt.Errorf("cannot discard missing snapshot: %s", blockHash)
   513  	}
   514  	_, ok := snap.(*diffLayer)
   515  	// Never discard the disk layer
   516  	if !ok && !force {
   517  		return fmt.Errorf("cannot discard the disk layer: %s", blockHash)
   518  	}
   519  	snaps, ok := t.stateLayers[snap.Root()]
   520  	if !ok {
   521  		return fmt.Errorf("cannot discard snapshot %s missing from state: %s", blockHash, snap.Root())
   522  	}
   523  	// Discard the block from the map. If there are no more blocks
   524  	// mapping to the same state remove it from [stateLayers] as well.
   525  	delete(snaps, blockHash)
   526  	if len(snaps) == 0 {
   527  		delete(t.stateLayers, snap.Root())
   528  	}
   529  	delete(t.blockLayers, blockHash)
   530  	return nil
   531  }
   532  
   533  // AbortGeneration aborts an ongoing snapshot generation process (if it hasn't
   534  // stopped already).
   535  //
   536  // It is not required to manually abort snapshot generation. If generation has not
   537  // been manually aborted prior to invoking [diffToDisk], it will be aborted anyway.
   538  //
   539  // It is safe to call this method multiple times and when there is no snapshot
   540  // generation currently underway.
   541  func (t *Tree) AbortGeneration() {
   542  	t.lock.Lock()
   543  	defer t.lock.Unlock()
   544  
   545  	dl := t.disklayer()
   546  	dl.abortGeneration()
   547  }
   548  
   549  // abortGeneration sends an abort message to the generate goroutine and waits
   550  // for it to shut down before returning (if it is running). This call should not
   551  // be made concurrently.
   552  func (dl *diskLayer) abortGeneration() bool {
   553  	// Record when the abort was requested to get a better estimate of load.
   554  	//
   555  	// Note that we set this time regardless of whether the abort was skipped; otherwise
   556  	// we would never restart generation (the age would always be negative).
   557  	if dl.abortStarted.IsZero() {
   558  		dl.abortStarted = time.Now()
   559  	}
   560  
   561  	// If the disk layer is running a snapshot generator, abort it
   562  	if dl.genAbort != nil && dl.genStats == nil {
   563  		abort := make(chan struct{})
   564  		dl.genAbort <- abort
   565  		<-abort
   566  		return true
   567  	}
   568  
   569  	return false
   570  }
   571  
   572  // diffToDisk merges a bottom-most diff into the persistent disk layer underneath
   573  // it. The method will panic if called onto a non-bottom-most diff layer.
   574  //
   575  // The disk layer persistence should be operated in an atomic way: all updates should
   576  // be discarded if the whole transition is not finished.
   577  func diffToDisk(bottom *diffLayer) (*diskLayer, bool, error) {
   578  	var (
   579  		base  = bottom.parent.(*diskLayer)
   580  		batch = base.diskdb.NewBatch()
   581  	)
   582  
   583  	// Attempt to abort generation (if not already aborted)
   584  	base.abortGeneration()
   585  
   586  	// Put the deletion in the batch writer, flush all updates in the final step.
   587  	rawdb.DeleteSnapshotBlockHash(batch)
   588  	rawdb.DeleteSnapshotRoot(batch)
   589  
   590  	// Mark the original base as stale as we're going to create a new wrapper
   591  	base.lock.Lock()
   592  	if base.stale {
        		base.lock.Unlock()
   593  		return nil, false, ErrStaleParentLayer // we've committed into the same base from two children, boo
   594  	}
   595  	base.stale = true
   596  	base.lock.Unlock()
   597  
   598  	// Destroy all the destructed accounts from the database
   599  	for hash := range bottom.destructSet {
   600  		// Skip any account not covered yet by the snapshot
   601  		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
   602  			continue
   603  		}
   604  		// Remove all storage slots
   605  		rawdb.DeleteAccountSnapshot(batch, hash)
   606  		base.cache.Set(hash[:], nil)
   607  
   608  		it := rawdb.IterateStorageSnapshots(base.diskdb, hash)
   609  		for it.Next() {
   610  			if key := it.Key(); len(key) == 65 { // TODO(karalabe): Yuck, we should move this into the iterator
   611  				batch.Delete(key)
   612  				base.cache.Del(key[1:])
   613  				snapshotFlushStorageItemMeter.Mark(1)
   614  
   615  				// Ensure we don't delete too much data blindly (contract can be
   616  				// huge). It's ok to flush, the root will go missing in case of a
   617  				// crash and we'll detect and regenerate the snapshot.
   618  				if batch.ValueSize() > ethdb.IdealBatchSize {
   619  					if err := batch.Write(); err != nil {
   620  						log.Crit("Failed to write storage deletions", "err", err)
   621  					}
   622  					batch.Reset()
   623  				}
   624  			}
   625  		}
   626  		it.Release()
   627  	}
   628  	// Push all updated accounts into the database
   629  	for hash, data := range bottom.accountData {
   630  		// Skip any account not covered yet by the snapshot
   631  		if base.genMarker != nil && bytes.Compare(hash[:], base.genMarker) > 0 {
   632  			continue
   633  		}
   634  		// Push the account to disk
   635  		rawdb.WriteAccountSnapshot(batch, hash, data)
   636  		base.cache.Set(hash[:], data)
   637  		snapshotCleanAccountWriteMeter.Mark(int64(len(data)))
   638  
   639  		snapshotFlushAccountItemMeter.Mark(1)
   640  		snapshotFlushAccountSizeMeter.Mark(int64(len(data)))
   641  
   642  		// Ensure we don't write too much data blindly. It's ok to flush, the
   643  		// root will go missing in case of a crash and we'll detect and regen
   644  		// the snapshot.
   645  		if batch.ValueSize() > ethdb.IdealBatchSize {
   646  			if err := batch.Write(); err != nil {
   647  				log.Crit("Failed to write account data", "err", err)
   648  			}
   649  			batch.Reset()
   650  		}
   651  	}
   652  	// Push all the storage slots into the database
   653  	for accountHash, storage := range bottom.storageData {
   654  		// Skip any account not covered yet by the snapshot
   655  		if base.genMarker != nil && bytes.Compare(accountHash[:], base.genMarker) > 0 {
   656  			continue
   657  		}
   658  		// Generation might be mid-account, track that case too
   659  		midAccount := base.genMarker != nil && bytes.Equal(accountHash[:], base.genMarker[:common.HashLength])
   660  
   661  		for storageHash, data := range storage {
   662  			// Skip any slot not covered yet by the snapshot
   663  			if midAccount && bytes.Compare(storageHash[:], base.genMarker[common.HashLength:]) > 0 {
   664  				continue
   665  			}
   666  			if len(data) > 0 {
   667  				rawdb.WriteStorageSnapshot(batch, accountHash, storageHash, data)
   668  				base.cache.Set(append(accountHash[:], storageHash[:]...), data)
   669  				snapshotCleanStorageWriteMeter.Mark(int64(len(data)))
   670  			} else {
   671  				rawdb.DeleteStorageSnapshot(batch, accountHash, storageHash)
   672  				base.cache.Set(append(accountHash[:], storageHash[:]...), nil)
   673  			}
   674  			snapshotFlushStorageItemMeter.Mark(1)
   675  			snapshotFlushStorageSizeMeter.Mark(int64(len(data)))
   676  		}
   677  	}
   678  	// Update the snapshot block marker and write any remainder data
   679  	rawdb.WriteSnapshotBlockHash(batch, bottom.blockHash)
   680  	rawdb.WriteSnapshotRoot(batch, bottom.root)
   681  
   682  	// Write out the generator progress marker and report
   683  	journalProgress(batch, base.genMarker, base.genStats)
   684  
   685  	// Flush all the updates in the single db operation. Ensure the
   686  	// disk layer transition is atomic.
   687  	if err := batch.Write(); err != nil {
   688  		log.Crit("Failed to write leftover snapshot", "err", err)
   689  	}
   690  	log.Debug("Journalled disk layer", "root", bottom.root, "complete", base.genMarker == nil)
   691  	res := &diskLayer{
   692  		root:       bottom.root,
   693  		blockHash:  bottom.blockHash,
   694  		cache:      base.cache,
   695  		diskdb:     base.diskdb,
   696  		triedb:     base.triedb,
   697  		genMarker:  base.genMarker,
   698  		genPending: base.genPending,
   699  		created:    time.Now(),
   700  	}
   701  	// If snapshot generation hasn't finished yet, port over the generator state and
   702  	// continue where the previous round left off.
   703  	//
   704  	// Note, the `base.genAbort` comparison is not used normally, it's checked
   705  	// to allow the tests to play with the marker without triggering this path.
   706  	if base.genMarker != nil && base.genAbort != nil {
   707  		res.genMarker = base.genMarker
   708  		res.genAbort = make(chan chan struct{})
   709  
   710  		// If the diskLayer we are about to discard is not very old, we skip
   711  		// generation on the next layer (assuming generation will just get canceled
   712  		// before doing meaningful work anyway).
   713  		diskLayerAge := base.abortStarted.Sub(base.created)
   714  		if diskLayerAge < skipGenThreshold {
   715  			log.Debug("Skipping snapshot generation", "previous disk layer age", diskLayerAge)
   716  			res.genStats = base.genStats
   717  		} else {
   718  			go res.generate(base.genStats)
   719  		}
   720  	}
   721  	return res, base.genMarker == nil, nil
   722  }
   723  
   724  // Rebuild wipes all available snapshot data from the persistent database and
   725  // discard all caches and diff layers. Afterwards, it starts a new snapshot
   726  // generator with the given root hash.
   727  func (t *Tree) Rebuild(blockHash, root common.Hash) {
   728  	t.lock.Lock()
   729  	defer t.lock.Unlock()
   730  
   731  	// First, delete any recovery flag from the database, since we are building
   732  	// a brand-new snapshot. Also re-enable the snapshot feature.
   733  	rawdb.DeleteSnapshotRecoveryNumber(t.diskdb)
   734  
   735  	// Track whether there's a wipe currently running and keep it alive if so
   736  	var wiper chan struct{}
   737  
   738  	// Iterate over and mark all layers stale
   739  	for _, layer := range t.blockLayers {
   740  		switch layer := layer.(type) {
   741  		case *diskLayer:
   742  			// If the base layer is generating, abort it and save
   743  			if layer.genAbort != nil {
   744  				abort := make(chan struct{})
   745  				layer.genAbort <- abort
   746  				<-abort
   747  
   748  				if stats := layer.genStats; stats != nil {
   749  					wiper = stats.wiping
   750  				}
   751  
   752  			}
   753  			// Layer should be inactive now, mark it as stale
   754  			layer.lock.Lock()
   755  			layer.stale = true
   756  			layer.lock.Unlock()
   757  
   758  		case *diffLayer:
   759  			// If the layer is a simple diff, simply mark as stale
   760  			layer.lock.Lock()
   761  			atomic.StoreUint32(&layer.stale, 1)
   762  			layer.lock.Unlock()
   763  
   764  		default:
   765  			panic(fmt.Sprintf("unknown layer type: %T", layer))
   766  		}
   767  	}
   768  	// Start generating a new snapshot from scratch on a background thread. The
   769  	// generator will run a wiper first if there's not one running right now.
   770  	log.Info("Rebuilding state snapshot")
   771  	base := generateSnapshot(t.diskdb, t.triedb, t.cache, blockHash, root, wiper)
   772  	t.blockLayers = map[common.Hash]snapshot{
   773  		blockHash: base,
   774  	}
   775  	t.stateLayers = map[common.Hash]map[common.Hash]snapshot{
   776  		root: {
   777  			blockHash: base,
   778  		},
   779  	}
   780  }
   781  
   782  // AccountIterator creates a new account iterator for the specified root hash and
   783  // seeks to a starting account hash. When [force] is true, a new account
   784  // iterator is created without acquiring the [snapTree] lock and without
   785  // confirming that the snapshot on the disk layer is fully generated.
   786  func (t *Tree) AccountIterator(root common.Hash, seek common.Hash, force bool) (AccountIterator, error) {
   787  	if !force {
   788  		ok, err := t.generating()
   789  		if err != nil {
   790  			return nil, err
   791  		}
   792  		if ok {
   793  			return nil, ErrNotConstructed
   794  		}
   795  	}
   796  	return newFastAccountIterator(t, root, seek, force)
   797  }
   798  
   799  // StorageIterator creates a new storage iterator for the specified root hash and
   800  // account. The iterator will be moved to the specified start position. When [force]
   801  // is true, a new account iterator is created without acquiring the [snapTree]
   802  // lock and without confirming that the snapshot on the disk layer is fully generated.
   803  func (t *Tree) StorageIterator(root common.Hash, account common.Hash, seek common.Hash, force bool) (StorageIterator, error) {
   804  	if !force {
   805  		ok, err := t.generating()
   806  		if err != nil {
   807  			return nil, err
   808  		}
   809  		if ok {
   810  			return nil, ErrNotConstructed
   811  		}
   812  	}
   813  	return newFastStorageIterator(t, root, account, seek, force)
   814  }
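
        // Illustrative sketch (not part of the original file): walking every account of a
        // fully generated snapshot in hash order. [snaps] and [stateRoot] are placeholders;
        // force is left false so the call fails with ErrNotConstructed while generation is
        // still in progress.
        //
        //	it, err := snaps.AccountIterator(stateRoot, common.Hash{}, false)
        //	if err != nil {
        //		return err
        //	}
        //	defer it.Release()
        //	for it.Next() {
        //		hash, blob := it.Hash(), it.Account() // account in the slim RLP format
        //		_, _ = hash, blob
        //	}
        //	return it.Error()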
   815  
   816  // Verify iterates the whole state (all accounts as well as their corresponding storage)
   817  // with the specified root and compares the re-computed hash with the original one.
   818  func (t *Tree) Verify(root common.Hash) error {
   819  	return t.verify(root, false)
   820  }
   821  
   822  // verify iterates the whole state (all accounts as well as their corresponding storage)
   823  // with the specified root and compares the re-computed hash with the original one.
   824  // When [force] is true, it is assumed that the caller has confirmed that the
   825  // snapshot is generated and that they hold the snapTree lock.
   826  func (t *Tree) verify(root common.Hash, force bool) error {
   827  	acctIt, err := t.AccountIterator(root, common.Hash{}, force)
   828  	if err != nil {
   829  		return err
   830  	}
   831  	defer acctIt.Release()
   832  
   833  	got, err := generateTrieRoot(nil, acctIt, common.Hash{}, stackTrieGenerate, func(db ethdb.KeyValueWriter, accountHash, codeHash common.Hash, stat *generateStats) (common.Hash, error) {
   834  		storageIt, err := t.StorageIterator(root, accountHash, common.Hash{}, force)
   835  		if err != nil {
   836  			return common.Hash{}, err
   837  		}
   838  		defer storageIt.Release()
   839  
   840  		hash, err := generateTrieRoot(nil, storageIt, accountHash, stackTrieGenerate, nil, stat, false)
   841  		if err != nil {
   842  			return common.Hash{}, err
   843  		}
   844  		return hash, nil
   845  	}, newGenerateStats(), true)
   846  
   847  	if err != nil {
   848  		return err
   849  	}
   850  	if got != root {
   851  		return fmt.Errorf("state root hash mismatch: got %x, want %x", got, root)
   852  	}
   853  	return nil
   854  }
   855  
   856  // disklayer is an internal helper function to return the disk layer.
   857  // The lock of snapTree is assumed to be held already.
   858  func (t *Tree) disklayer() *diskLayer {
   859  	var snap snapshot
   860  	for _, s := range t.blockLayers {
   861  		snap = s
   862  		break
   863  	}
   864  	if snap == nil {
   865  		return nil
   866  	}
   867  	switch layer := snap.(type) {
   868  	case *diskLayer:
   869  		return layer
   870  	case *diffLayer:
   871  		return layer.origin
   872  	default:
   873  		panic(fmt.Sprintf("%T: undefined layer", snap))
   874  	}
   875  }
   876  
   877  // diskRoot is an internal helper function to return the disk layer root.
   878  // The lock of snapTree is assumed to be held already.
   879  func (t *Tree) diskRoot() common.Hash {
   880  	disklayer := t.disklayer()
   881  	if disklayer == nil {
   882  		return common.Hash{}
   883  	}
   884  	return disklayer.Root()
   885  }
   886  
   887  // generating is an internal helper function which reports whether the snapshot
   888  // is still under construction.
   889  func (t *Tree) generating() (bool, error) {
   890  	t.lock.Lock()
   891  	defer t.lock.Unlock()
   892  
   893  	layer := t.disklayer()
   894  	if layer == nil {
   895  		return false, errors.New("disk layer is missing")
   896  	}
   897  	layer.lock.RLock()
   898  	defer layer.lock.RUnlock()
   899  	return layer.genMarker != nil, nil
   900  }
   901  
   902  // DiskRoot is an external helper function to return the disk layer root.
   903  func (t *Tree) DiskRoot() common.Hash {
   904  	t.lock.Lock()
   905  	defer t.lock.Unlock()
   906  
   907  	return t.diskRoot()
   908  }
   909  
   910  // NewTestTree creates a *Tree with a pre-populated diskLayer
   911  func NewTestTree(diskdb ethdb.KeyValueStore, blockHash, root common.Hash) *Tree {
   912  	base := &diskLayer{
   913  		diskdb:    diskdb,
   914  		root:      root,
   915  		blockHash: blockHash,
   916  		cache:     fastcache.New(128 * 256),
   917  		created:   time.Now(),
   918  	}
   919  	return &Tree{
   920  		blockLayers: map[common.Hash]snapshot{
   921  			blockHash: base,
   922  		},
   923  		stateLayers: map[common.Hash]map[common.Hash]snapshot{
   924  			root: {
   925  				blockHash: base,
   926  			},
   927  		},
   928  	}
   929  }
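
        // Illustrative sketch (not part of the original file): using NewTestTree in a test to
        // obtain a Tree whose only layer is a pre-built disk layer, then stacking a diff layer
        // on top of it. [diskdb] is a placeholder key-value store (e.g. an in-memory database),
        // and the hashes and account map are arbitrary test fixtures.
        //
        //	tree := NewTestTree(diskdb, blockHashA, rootA)
        //	if err := tree.Update(blockHashB, rootB, blockHashA, nil, accounts, nil); err != nil {
        //		t.Fatal(err)
        //	}
        //	if n := tree.NumBlockLayers(); n != 2 {
        //		t.Fatalf("unexpected number of block layers: %d", n)
        //	}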