github.com/klaytn/klaytn@v1.12.1/snapshot/generate.go (about)

     1  // Modifications Copyright 2021 The klaytn Authors
     2  // Copyright 2019 The go-ethereum Authors
     3  // This file is part of the go-ethereum library.
     4  //
     5  // The go-ethereum library is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU Lesser General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // The go-ethereum library is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU Lesser General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU Lesser General Public License
    16  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from core/state/snapshot/generate.go (2021/10/21).
    19  // Modified and improved for the klaytn development.
    20  
    21  package snapshot
    22  
    23  import (
    24  	"bytes"
    25  	"encoding/binary"
    26  	"errors"
    27  	"fmt"
    28  	"math"
    29  	"time"
    30  
    31  	"github.com/klaytn/klaytn/blockchain/types/account"
    32  
    33  	"github.com/VictoriaMetrics/fastcache"
    34  	"github.com/klaytn/klaytn/common"
    35  	"github.com/klaytn/klaytn/common/hexutil"
    36  	"github.com/klaytn/klaytn/rlp"
    37  	"github.com/klaytn/klaytn/storage/database"
    38  	"github.com/klaytn/klaytn/storage/statedb"
    39  	"github.com/rcrowley/go-metrics"
    40  )
    41  
    42  var (
    43  	// accountCheckRange is the upper limit of the number of accounts involved in
    44  	// each range check. This is a value estimated based on experience. If this
     45  	// value is too large, the failure rate of the range proof will increase. Conversely,
     46  	// if the value is too small, the efficiency of the state recovery will decrease.
    47  	accountCheckRange = 128
    48  
    49  	// storageCheckRange is the upper limit of the number of storage slots involved
    50  	// in each range check. This is a value estimated based on experience. If this
     51  	// value is too large, the failure rate of the range proof will increase. Conversely,
     52  	// if the value is too small, the efficiency of the state recovery will decrease.
    53  	storageCheckRange = 1024
    54  
    55  	// errMissingTrie is returned if the target trie is missing while the generation
     56  	// is running. In this case the generation is aborted and waits for a new signal.
    57  	errMissingTrie = errors.New("missing trie")
    58  )
    59  
    60  var (
    61  	snapGeneratedAccountMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/account/generated", nil)
    62  	snapRecoveredAccountMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/account/recovered", nil)
    63  	snapWipedAccountMeter         = metrics.NewRegisteredMeter("state/snapshot/generation/account/wiped", nil)
    64  	snapMissallAccountMeter       = metrics.NewRegisteredMeter("state/snapshot/generation/account/missall", nil)
    65  	snapGeneratedStorageMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/storage/generated", nil)
    66  	snapRecoveredStorageMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/storage/recovered", nil)
    67  	snapWipedStorageMeter         = metrics.NewRegisteredMeter("state/snapshot/generation/storage/wiped", nil)
    68  	snapMissallStorageMeter       = metrics.NewRegisteredMeter("state/snapshot/generation/storage/missall", nil)
    69  	snapSuccessfulRangeProofMeter = metrics.NewRegisteredMeter("state/snapshot/generation/proof/success", nil)
    70  	snapFailedRangeProofMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/proof/failure", nil)
    71  
    72  	// snapAccountProveCounter measures time spent on the account proving
    73  	snapAccountProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/prove", nil)
    74  	// snapAccountTrieReadCounter measures time spent on the account trie iteration
    75  	snapAccountTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/trieread", nil)
    76  	// snapAccountSnapReadCounter measues time spent on the snapshot account iteration
    77  	snapAccountSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/snapread", nil)
    78  	// snapAccountWriteCounter measures time spent on writing/updating/deleting accounts
    79  	snapAccountWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/write", nil)
    80  	// snapStorageProveCounter measures time spent on storage proving
    81  	snapStorageProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/prove", nil)
    82  	// snapStorageTrieReadCounter measures time spent on the storage trie iteration
    83  	snapStorageTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/trieread", nil)
    84  	// snapStorageSnapReadCounter measures time spent on the snapshot storage iteration
    85  	snapStorageSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/snapread", nil)
    86  	// snapStorageWriteCounter measures time spent on writing/updating/deleting storages
    87  	snapStorageWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/write", nil)
    88  )
    89  
    90  // generatorStats is a collection of statistics gathered by the snapshot generator
    91  // for logging purposes.
    92  type generatorStats struct {
    93  	origin   uint64             // Origin prefix where generation started
    94  	start    time.Time          // Timestamp when generation started
     95  	accounts uint64             // Number of accounts indexed (generated or recovered)
     96  	slots    uint64             // Number of storage slots indexed (generated or recovered)
     97  	storage  common.StorageSize // Total account and storage slot size (generation or recovery)
    98  }
    99  
    100  // Log creates a contextual log with the given message and the context pulled
   101  // from the internally maintained statistics.
   102  func (gs *generatorStats) Log(msg string, root common.Hash, marker []byte) {
   103  	var ctx []interface{}
   104  	if root != (common.Hash{}) {
   105  		ctx = append(ctx, []interface{}{"root", root}...)
   106  	}
   107  	// Figure out whether we're after or within an account
   108  	switch len(marker) {
   109  	case common.HashLength:
   110  		ctx = append(ctx, []interface{}{"at", common.BytesToHash(marker)}...)
   111  	case 2 * common.HashLength:
   112  		ctx = append(ctx, []interface{}{
   113  			"in", common.BytesToHash(marker[:common.HashLength]),
   114  			"at", common.BytesToHash(marker[common.HashLength:]),
   115  		}...)
   116  	}
   117  	// Add the usual measurements
   118  	ctx = append(ctx, []interface{}{
   119  		"accounts", gs.accounts,
   120  		"slots", gs.slots,
   121  		"storage", gs.storage,
   122  		"elapsed", common.PrettyDuration(time.Since(gs.start)),
   123  	}...)
   124  	// Calculate the estimated indexing time based on current stats
   125  	if len(marker) > 0 {
   126  		if done := binary.BigEndian.Uint64(marker[:8]) - gs.origin; done > 0 {
   127  			left := math.MaxUint64 - binary.BigEndian.Uint64(marker[:8])
   128  
   129  			speed := done/uint64(time.Since(gs.start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
   130  			ctx = append(ctx, []interface{}{
   131  				"eta", common.PrettyDuration(time.Duration(left/speed) * time.Millisecond),
   132  			}...)
   133  		}
   134  	}
   135  	logger.Info(msg, ctx...)
   136  }
   137  
   138  // generateSnapshot regenerates a brand new snapshot based on an existing state
   139  // database and head block asynchronously. The snapshot is returned immediately
   140  // and generation is continued in the background until done.
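         // The generator marker records progress: a nil marker means generation has
         // completed, an empty (non-nil) marker means generation starts from scratch,
         // a 32-byte marker holds the account hash reached so far, and a 64-byte marker
         // additionally carries the storage slot hash within that account.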
   141  func generateSnapshot(db database.DBManager, triedb *statedb.Database, cache int, root common.Hash) *diskLayer {
   142  	// Create a new disk layer with an initialized state marker at zero
   143  	var (
   144  		stats     = &generatorStats{start: time.Now()}
   145  		batch     = db.NewSnapshotDBBatch()
   146  		genMarker = []byte{} // Initialized but empty!
   147  	)
   148  	defer batch.Release()
   149  
   150  	batch.WriteSnapshotRoot(root)
   151  	journalProgress(batch, genMarker, stats)
   152  	if err := batch.Write(); err != nil {
   153  		logger.Crit("Failed to write initialized state marker", "err", err)
   154  	}
   155  	base := &diskLayer{
   156  		diskdb:     db,
   157  		triedb:     triedb,
   158  		root:       root,
   159  		cache:      fastcache.New(cache * 1024 * 1024),
   160  		genMarker:  genMarker,
   161  		genPending: make(chan struct{}),
   162  		genAbort:   make(chan chan *generatorStats),
   163  	}
   164  	go base.generate(stats)
   165  	logger.Debug("Start snapshot generation", "root", root)
   166  	return base
   167  }
   168  
   169  // journalProgress persists the generator stats into the database to resume later.
   170  func journalProgress(db database.KeyValueWriter, marker []byte, stats *generatorStats) {
    171  	// Write out the generator marker. Note it's a standalone disk layer generator
    172  	// which is not mixed with the journal. It's OK if the generator is persisted while
    173  	// the journal is not.
   174  	entry := journalGenerator{
   175  		Done:   marker == nil,
   176  		Marker: marker,
   177  	}
   178  	if stats != nil {
   179  		entry.Accounts = stats.accounts
   180  		entry.Slots = stats.slots
   181  		entry.Storage = uint64(stats.storage)
   182  	}
   183  	blob, err := rlp.EncodeToBytes(entry)
   184  	if err != nil {
   185  		panic(err) // Cannot happen, here to catch dev errors
   186  	}
   187  	var logstr string
   188  	switch {
   189  	case marker == nil:
   190  		logstr = "done"
   191  	case bytes.Equal(marker, []byte{}):
   192  		logstr = "empty"
   193  	case len(marker) == common.HashLength:
   194  		logstr = fmt.Sprintf("%#x", marker)
   195  	default:
   196  		logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:])
   197  	}
   198  	logger.Debug("Journalled generator progress", "progress", logstr)
   199  
   200  	// TODO-Klaytn-Snapshot refactor the following db write
   201  	if err := db.Put(database.SnapshotGeneratorKey, blob); err != nil {
   202  		logger.Crit("Failed to store snapshot generator", "err", err)
   203  	}
   204  }
   205  
    206  // proofResult contains the output of range proving which can be used
    207  // for further processing regardless of whether it is successful or not.
    208  type proofResult struct {
    209  	keys     [][]byte      // The key set of all elements being iterated, even if proving failed
    210  	vals     [][]byte      // The val set of all elements being iterated, even if proving failed
    211  	diskMore bool          // Set when the database has extra snapshot states since the last iteration
    212  	trieMore bool          // Set when the trie has extra snapshot states (only meaningful for successful proving)
   213  	proofErr error         // Indicator whether the given state range is valid or not
   214  	tr       *statedb.Trie // The trie, in case the trie was resolved by the prover (may be nil)
   215  }
   216  
    217  // valid returns whether the range proof was successful.
   218  func (result *proofResult) valid() bool {
   219  	return result.proofErr == nil
   220  }
   221  
    222  // last returns the last verified element key regardless of whether the range proof
    223  // was successful. Nil is returned if nothing was involved in the proving.
   224  func (result *proofResult) last() []byte {
   225  	var last []byte
   226  	if len(result.keys) > 0 {
   227  		last = result.keys[len(result.keys)-1]
   228  	}
   229  	return last
   230  }
   231  
   232  // forEach iterates all the visited elements and applies the given callback on them.
    233  // The iteration is aborted if the callback returns a non-nil error.
   234  func (result *proofResult) forEach(callback func(key []byte, val []byte) error) error {
   235  	for i := 0; i < len(result.keys); i++ {
   236  		key, val := result.keys[i], result.vals[i]
   237  		if err := callback(key, val); err != nil {
   238  			return err
   239  		}
   240  	}
   241  	return nil
   242  }
   243  
    244  // proveRange proves that the snapshot segment with a particular prefix is "valid".
   245  // The iteration start point will be assigned if the iterator is restored from
   246  // the last interruption. Max will be assigned in order to limit the maximum
   247  // amount of data involved in each iteration.
   248  //
   249  // The proof result will be returned if the range proving is finished, otherwise
   250  // the error will be returned to abort the entire procedure.
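         // valueConvertFn, if non-nil, is applied to each raw snapshot value before it is
         // proved; when a conversion fails the original value is kept so that the fallback
         // generation can heal it later (the generators in this file currently pass nil).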
   251  func (dl *diskLayer) proveRange(stats *generatorStats, root common.Hash, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
   252  	var (
   253  		keys     [][]byte
   254  		vals     [][]byte
   255  		proof    = database.NewMemoryDBManager()
   256  		diskMore = false
   257  	)
   258  
   259  	iter := dl.diskdb.NewSnapshotDBIterator(prefix, origin)
   260  	defer iter.Release()
   261  
   262  	start := time.Now()
   263  	for iter.Next() {
   264  		key := iter.Key()
   265  		if len(key) != len(prefix)+common.HashLength {
   266  			continue
   267  		}
   268  		if len(keys) == max {
   269  			// Break if we've reached the max size, and signal that we're not
   270  			// done yet.
   271  			diskMore = true
   272  			break
   273  		}
   274  		keys = append(keys, common.CopyBytes(key[len(prefix):]))
   275  
   276  		if valueConvertFn == nil {
   277  			vals = append(vals, common.CopyBytes(iter.Value()))
   278  		} else {
   279  			val, err := valueConvertFn(iter.Value())
   280  			if err != nil {
   281  				// Special case, the state data is corrupted (invalid slim-format account),
   282  				// don't abort the entire procedure directly. Instead, let the fallback
    283  			// generation heal the invalid data.
   284  				//
    285  			// Here, append the original value to ensure that the numbers of keys and
    286  			// values stay the same.
   287  				vals = append(vals, common.CopyBytes(iter.Value()))
   288  				logger.Error("Failed to convert account state data", "err", err)
   289  			} else {
   290  				vals = append(vals, val)
   291  			}
   292  		}
   293  	}
   294  	// Update metrics for database iteration and merkle proving
   295  	if kind == "storage" {
   296  		snapStorageSnapReadCounter.Inc(time.Since(start).Nanoseconds())
   297  	} else {
   298  		snapAccountSnapReadCounter.Inc(time.Since(start).Nanoseconds())
   299  	}
   300  	defer func(start time.Time) {
   301  		if kind == "storage" {
   302  			snapStorageProveCounter.Inc(time.Since(start).Nanoseconds())
   303  		} else {
   304  			snapAccountProveCounter.Inc(time.Since(start).Nanoseconds())
   305  		}
   306  	}(time.Now())
   307  
   308  	// The snap state is exhausted, pass the entire key/val set for verification
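         	// No edge proofs are needed in this case: rebuild a trie from the iterated
         	// key/value pairs entirely in memory and compare its root hash against the
         	// expected root.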
   309  	if origin == nil && !diskMore {
   310  		var (
   311  			dbm    = database.NewMemoryDBManager()
   312  			triedb = statedb.NewDatabase(dbm)
   313  		)
   314  		tr, _ := statedb.NewTrie(common.Hash{}, triedb, nil)
   315  		for i, key := range keys {
   316  			tr.TryUpdate(key, vals[i])
   317  		}
   318  		if gotRoot := tr.Hash(); gotRoot != root {
   319  			return &proofResult{
   320  				keys:     keys,
   321  				vals:     vals,
   322  				proofErr: fmt.Errorf("wrong root: have %#x want %#x", gotRoot, root),
   323  			}, nil
   324  		}
   325  		return &proofResult{keys: keys, vals: vals}, nil
   326  	}
   327  	// Snap state is chunked, generate edge proofs for verification.
   328  	tr, err := statedb.NewTrie(root, dl.triedb, nil)
   329  	if err != nil {
   330  		stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
   331  		return nil, errMissingTrie
   332  	}
    333  	// First, find out the key of the last iterated element.
   334  	var last []byte
   335  	if len(keys) > 0 {
   336  		last = keys[len(keys)-1]
   337  	}
   338  	// Generate the Merkle proofs for the first and last element
   339  	if origin == nil {
   340  		origin = common.Hash{}.Bytes()
   341  	}
   342  	if err := tr.Prove(origin, 0, proof); err != nil {
   343  		logger.Debug("Failed to prove range", "kind", kind, "origin", origin, "err", err)
   344  		return &proofResult{
   345  			keys:     keys,
   346  			vals:     vals,
   347  			diskMore: diskMore,
   348  			proofErr: err,
   349  			tr:       tr,
   350  		}, nil
   351  	}
   352  	if last != nil {
   353  		if err := tr.Prove(last, 0, proof); err != nil {
   354  			logger.Debug("Failed to prove range", "kind", kind, "last", last, "err", err)
   355  			return &proofResult{
   356  				keys:     keys,
   357  				vals:     vals,
   358  				diskMore: diskMore,
   359  				proofErr: err,
   360  				tr:       tr,
   361  			}, nil
   362  		}
   363  	}
    364  	// Verify the snapshot segment with the range prover, ensuring that all flat states
    365  	// in this range correspond to the merkle trie.
   366  	cont, err := statedb.VerifyRangeProof(root, origin, last, keys, vals, proof)
   367  	return &proofResult{
   368  			keys:     keys,
   369  			vals:     vals,
   370  			diskMore: diskMore,
   371  			trieMore: cont,
   372  			proofErr: err,
   373  			tr:       tr,
   374  		},
   375  		nil
   376  }
   377  
    378  // onStateCallback is a function that is called by generateRange when processing a range of
   379  // accounts or storage slots. For each element, the callback is invoked.
   380  // If 'delete' is true, then this element (and potential slots) needs to be deleted from the snapshot.
   381  // If 'write' is true, then this element needs to be updated with the 'val'.
   382  // If 'write' is false, then this element is already correct, and needs no update. However,
   383  // for accounts, the storage trie of the account needs to be checked.
   384  // The 'val' is the canonical encoding of the value (not the slim format for accounts)
   385  type onStateCallback func(key []byte, val []byte, write bool, delete bool) error
   386  
    387  // generateRange generates the state segment with a particular prefix. Generation can
   388  // either verify the correctness of existing state through rangeproof and skip
   389  // generation, or iterate trie to regenerate state on demand.
   390  func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string, origin []byte, max int, stats *generatorStats, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
   391  	// Use range prover to check the validity of the flat state in the range
   392  	result, err := dl.proveRange(stats, root, prefix, kind, origin, max, valueConvertFn)
   393  	if err != nil {
   394  		return false, nil, err
   395  	}
   396  	last := result.last()
   397  
   398  	// Construct contextual logger
   399  	logCtx := []interface{}{"kind", kind, "prefix", hexutil.Encode(prefix)}
   400  	if len(origin) > 0 {
   401  		logCtx = append(logCtx, "origin", hexutil.Encode(origin))
   402  	}
   403  	localLogger := logger.NewWith(logCtx...)
   404  
   405  	// The range prover says the range is correct, skip trie iteration
   406  	if result.valid() {
   407  		snapSuccessfulRangeProofMeter.Mark(1)
   408  		localLogger.Trace("Proved state range", "last", hexutil.Encode(last))
   409  
    410  		// The verification passed, so process each state with the given
   411  		// callback function. If this state represents a contract, the
   412  		// corresponding storage check will be performed in the callback
   413  		if err := result.forEach(func(key []byte, val []byte) error { return onState(key, val, false, false) }); err != nil {
   414  			return false, nil, err
   415  		}
   416  		// Only abort the iteration when both database and trie are exhausted
   417  		return !result.diskMore && !result.trieMore, last, nil
   418  	}
   419  	localLogger.Trace("Detected outdated state range", "last", hexutil.Encode(last), "err", result.proofErr)
   420  	snapFailedRangeProofMeter.Mark(1)
   421  
    422  	// Special case, the entire trie is missing. In the original trie scheme, all
    423  	// the duplicated subtries are filtered out (only one copy of the data is
    424  	// stored). In the snapshot model, however, the storage tries belonging to
    425  	// different contracts are all kept even if they are duplicated. Track this
    426  	// case separately to remove some of the noise from the statistics.
   427  	if origin == nil && last == nil {
   428  		meter := snapMissallAccountMeter
   429  		if kind == "storage" {
   430  			meter = snapMissallStorageMeter
   431  		}
   432  		meter.Mark(1)
   433  	}
   434  
   435  	// We use the snap data to build up a cache which can be used by the
   436  	// main account trie as a primary lookup when resolving hashes
   437  	var snapNodeCache database.DBManager
   438  	if len(result.keys) > 0 {
   439  		snapNodeCache = database.NewMemoryDBManager()
   440  		snapTrieDb := statedb.NewDatabase(snapNodeCache)
   441  		snapTrie, _ := statedb.NewTrie(common.Hash{}, snapTrieDb, nil)
   442  		for i, key := range result.keys {
   443  			snapTrie.Update(key, result.vals[i])
   444  		}
   445  		root, _ := snapTrie.Commit(nil)
   446  		// TODO-Klaytn update proper block number
   447  		snapTrieDb.Commit(root, false, 0)
   448  	}
   449  	tr := result.tr
   450  	if tr == nil {
   451  		tr, err = statedb.NewTrie(root, dl.triedb, nil)
   452  		if err != nil {
   453  			stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
   454  			return false, nil, errMissingTrie
   455  		}
   456  	}
   457  
   458  	var (
   459  		trieMore       bool
   460  		nodeIt         = tr.NodeIterator(origin)
   461  		iter           = statedb.NewIterator(nodeIt)
   462  		kvkeys, kvvals = result.keys, result.vals
   463  
   464  		// counters
   465  		count     = 0 // number of states delivered by iterator
   466  		created   = 0 // states created from the trie
   467  		updated   = 0 // states updated from the trie
   468  		deleted   = 0 // states not in trie, but were in snapshot
   469  		untouched = 0 // states already correct
   470  
   471  		// timers
   472  		start    = time.Now()
   473  		internal time.Duration
   474  	)
   475  	nodeIt.AddResolver(snapNodeCache)
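         	// Merge the trie iterator with the previously iterated snapshot keys (both
         	// are sorted): snapshot keys smaller than the current trie key are stale and
         	// get deleted, an equal key is kept or overwritten depending on whether the
         	// values match, and trie keys without a snapshot counterpart are created.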
   476  	for iter.Next() {
   477  		if last != nil && bytes.Compare(iter.Key, last) > 0 {
   478  			trieMore = true
   479  			break
   480  		}
   481  		count++
   482  		write := true
   483  		created++
   484  		for len(kvkeys) > 0 {
   485  			if cmp := bytes.Compare(kvkeys[0], iter.Key); cmp < 0 {
   486  				// delete the key
   487  				istart := time.Now()
   488  				if err := onState(kvkeys[0], nil, false, true); err != nil {
   489  					return false, nil, err
   490  				}
   491  				kvkeys = kvkeys[1:]
   492  				kvvals = kvvals[1:]
   493  				deleted++
   494  				internal += time.Since(istart)
   495  				continue
   496  			} else if cmp == 0 {
   497  				// the snapshot key can be overwritten
   498  				created--
   499  				if write = !bytes.Equal(kvvals[0], iter.Value); write {
   500  					updated++
   501  				} else {
   502  					untouched++
   503  				}
   504  				kvkeys = kvkeys[1:]
   505  				kvvals = kvvals[1:]
   506  			}
   507  			break
   508  		}
   509  		istart := time.Now()
   510  		if err := onState(iter.Key, iter.Value, write, false); err != nil {
   511  			return false, nil, err
   512  		}
   513  		internal += time.Since(istart)
   514  	}
   515  	if iter.Err != nil {
   516  		return false, nil, iter.Err
   517  	}
   518  	// Delete all stale snapshot states remaining
   519  	istart := time.Now()
   520  	for _, key := range kvkeys {
   521  		if err := onState(key, nil, false, true); err != nil {
   522  			return false, nil, err
   523  		}
   524  		deleted += 1
   525  	}
   526  	internal += time.Since(istart)
   527  
   528  	// Update metrics for counting trie iteration
   529  	if kind == "storage" {
   530  		snapStorageTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
   531  	} else {
   532  		snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
   533  	}
   534  	localLogger.Debug("Regenerated state range", "root", root, "last", hexutil.Encode(last),
   535  		"count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)
   536  
   537  	// If there are either more trie items, or there are more snap items
   538  	// (in the next segment), then we need to keep working
   539  	return !trieMore && !result.diskMore, last, nil
   540  }
   541  
   542  // generate is a background thread that iterates over the state and storage tries,
   543  // constructing the state snapshot. All the arguments are purely for statistics
   544  // gathering and logging, since the method surfs the blocks as they arrive, often
   545  // being restarted.
   546  func (dl *diskLayer) generate(stats *generatorStats) {
   547  	var (
   548  		accMarker    []byte
   549  		accountRange = accountCheckRange
   550  	)
   551  	if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
    552  		// Always reset the initial account range to 1
    553  		// whenever recovering from an interruption.
   554  		accMarker, accountRange = dl.genMarker[:common.HashLength], 1
   555  	}
   556  
   557  	var (
   558  		batch     = dl.diskdb.NewSnapshotDBBatch()
   559  		logged    = time.Now()
   560  		accOrigin = common.CopyBytes(accMarker)
   561  		abort     chan *generatorStats
   562  	)
   563  	defer batch.Release()
   564  	stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)
   565  
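         	// checkAndFlush journals the given progress marker and commits the write batch
         	// once the batch grows past IdealBatchSize or an abort has been requested; on
         	// abort it returns an error so that the caller unwinds. It also emits a
         	// progress log roughly every 8 seconds.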
   566  	checkAndFlush := func(currentLocation []byte) error {
   567  		select {
   568  		case abort = <-dl.genAbort:
   569  		default:
   570  		}
   571  		if batch.ValueSize() > database.IdealBatchSize || abort != nil {
   572  			if bytes.Compare(currentLocation, dl.genMarker) < 0 {
   573  				logger.Error("Snapshot generator went backwards",
   574  					"currentLocation", fmt.Sprintf("%x", currentLocation),
   575  					"genMarker", fmt.Sprintf("%x", dl.genMarker))
   576  			}
   577  
    578  			// Flush out the batch whether it's empty or not. It's possible
    579  			// that all the states were recovered, in which case the
    580  			// generation still made progress.
   581  			journalProgress(batch, currentLocation, stats)
   582  
   583  			if err := batch.Write(); err != nil {
   584  				return err
   585  			}
   586  			batch.Reset()
   587  
   588  			dl.lock.Lock()
   589  			dl.genMarker = currentLocation
   590  			dl.lock.Unlock()
   591  
   592  			if abort != nil {
   593  				stats.Log("Aborting state snapshot generation", dl.root, currentLocation)
   594  				return errors.New("aborted")
   595  			}
   596  		}
   597  		if time.Since(logged) > 8*time.Second {
   598  			stats.Log("Generating state snapshot", dl.root, currentLocation)
   599  			logged = time.Now()
   600  		}
   601  		return nil
   602  	}
   603  
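         	// onAccount is invoked for every account surfaced by generateRange: stale
         	// entries are deleted along with their storage snapshot, new or updated
         	// accounts are written out, and for smart contract accounts with a non-empty
         	// storage root the storage snapshot is generated or verified range by range.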
   604  	onAccount := func(key []byte, val []byte, write bool, delete bool) error {
   605  		var (
   606  			start       = time.Now()
   607  			accountHash = common.BytesToHash(key)
   608  		)
   609  		if delete {
   610  			batch.DeleteAccountSnapshot(accountHash)
   611  			snapWipedAccountMeter.Mark(1)
   612  
   613  			// Ensure that any previous snapshot storage values are cleared
   614  			prefix := append(database.SnapshotStoragePrefix, accountHash.Bytes()...)
   615  			keyLen := len(database.SnapshotStoragePrefix) + 2*common.HashLength
   616  			if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, snapWipedStorageMeter, false); err != nil {
   617  				return err
   618  			}
   619  			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
   620  			return nil
   621  		}
   622  		serializer := account.NewAccountSerializer()
   623  		if err := rlp.DecodeBytes(val, serializer); err != nil {
   624  			logger.Crit("Invalid account encountered during snapshot creation", "err", err)
   625  		}
   626  		acc := serializer.GetAccount()
   627  		// If the account is not yet in-progress, write it out
   628  		if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) {
   629  			dataLen := len(val) // Approximate size, saves us a round of RLP-encoding
   630  			if !write {
   631  				snapRecoveredAccountMeter.Mark(1)
   632  			} else {
   633  				batch.WriteAccountSnapshot(accountHash, val)
   634  				snapGeneratedAccountMeter.Mark(1)
   635  			}
   636  			stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
   637  			stats.accounts++
   638  		}
   639  		marker := accountHash[:]
    640  		// If snapshot generation reaches here after an interruption, genMarker may go backwards
    641  		// if the last genMarker consisted of an accountHash and a storageHash
   642  		if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
   643  			marker = dl.genMarker[:]
   644  		}
   645  		// If we've exceeded our batch allowance or termination was requested, flush to disk
   646  		if err := checkAndFlush(marker); err != nil {
   647  			return err
   648  		}
    649  		// If the iterated account is a contract, create a further loop to
   650  		// verify or regenerate the contract storage.
   651  		contractAcc, ok := acc.(*account.SmartContractAccount)
   652  		if !ok {
    653  			// Even though the account has no storage trie, we still need to ensure that
    654  			// any previous snapshot storage values are cleared.
    655  			// TODO: investigate if this can be avoided, as it will be very costly since it
    656  			// affects every single EOA account
    657  			//  - Perhaps we can avoid it where codeHash is emptyCode
   658  			prefix := append(database.SnapshotStoragePrefix, accountHash.Bytes()...)
   659  			keyLen := len(database.SnapshotStoragePrefix) + 2*common.HashLength
   660  			if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, snapWipedStorageMeter, false); err != nil {
   661  				return err
   662  			}
   663  			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
   664  
   665  			accMarker = nil
   666  			return nil
   667  		}
   668  
   669  		rootHash := contractAcc.GetStorageRoot().Unextend()
   670  		if rootHash == emptyRoot {
   671  			prefix := append(database.SnapshotStoragePrefix, accountHash.Bytes()...)
   672  			keyLen := len(database.SnapshotStoragePrefix) + 2*common.HashLength
   673  			if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, snapWipedStorageMeter, false); err != nil {
   674  				return err
   675  			}
   676  			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
   677  		} else {
   678  			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
   679  
   680  			var storeMarker []byte
   681  			if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength {
   682  				storeMarker = dl.genMarker[common.HashLength:]
   683  			}
   684  			onStorage := func(key []byte, val []byte, write bool, delete bool) error {
   685  				defer func(start time.Time) {
   686  					snapStorageWriteCounter.Inc(time.Since(start).Nanoseconds())
   687  				}(time.Now())
   688  
   689  				if delete {
   690  					batch.DeleteStorageSnapshot(accountHash, common.BytesToHash(key))
   691  					snapWipedStorageMeter.Mark(1)
   692  					return nil
   693  				}
   694  				if write {
   695  					batch.WriteStorageSnapshot(accountHash, common.BytesToHash(key), val)
   696  					snapGeneratedStorageMeter.Mark(1)
   697  				} else {
   698  					snapRecoveredStorageMeter.Mark(1)
   699  				}
   700  				stats.storage += common.StorageSize(1 + 2*common.HashLength + len(val))
   701  				stats.slots++
   702  
   703  				// If we've exceeded our batch allowance or termination was requested, flush to disk
   704  				if err := checkAndFlush(append(accountHash[:], key...)); err != nil {
   705  					return err
   706  				}
   707  				return nil
   708  			}
   709  			storeOrigin := common.CopyBytes(storeMarker)
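         			// Walk the contract storage in chunks of at most storageCheckRange
         			// slots, advancing the origin just past the last verified key until
         			// the storage range is exhausted.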
   710  			for {
   711  				exhausted, last, err := dl.generateRange(rootHash, append(database.SnapshotStoragePrefix, accountHash.Bytes()...), "storage", storeOrigin, storageCheckRange, stats, onStorage, nil)
   712  				if err != nil {
   713  					return err
   714  				}
   715  				if exhausted {
   716  					break
   717  				}
   718  				if storeOrigin = increaseKey(last); storeOrigin == nil {
   719  					break // special case, the last is 0xffffffff...fff
   720  				}
   721  			}
   722  		}
    723  		// An account has been processed, so clear the account marker
   724  		accMarker = nil
   725  		return nil
   726  	}
   727  
    728  	// Global loop for regenerating the entire state trie + all layered storage tries.
   729  	for {
   730  		exhausted, last, err := dl.generateRange(dl.root, database.SnapshotAccountPrefix, "account", accOrigin, accountRange, stats, onAccount, nil)
    731  		// The procedure is aborted, either by an external signal or an internal error
   732  		if err != nil {
    733  			if abort == nil { // aborted by internal error, wait for the signal
   734  				abort = <-dl.genAbort
   735  			}
   736  			abort <- stats
   737  			return
   738  		}
    739  		// Stop the loop if the entire snapshot has been generated
   740  		if exhausted {
   741  			break
   742  		}
   743  		if accOrigin = increaseKey(last); accOrigin == nil {
   744  			break // special case, the last is 0xffffffff...fff
   745  		}
   746  		accountRange = accountCheckRange
   747  	}
   748  	// Snapshot fully generated, set the marker to nil.
    749  	// Note that even if there is nothing to commit, persist the
    750  	// generator anyway to mark the snapshot as complete.
   751  	journalProgress(batch, nil, stats)
   752  	if err := batch.Write(); err != nil {
   753  		logger.Error("Failed to flush batch", "err", err)
   754  
   755  		abort = <-dl.genAbort
   756  		abort <- stats
   757  		return
   758  	}
   759  	batch.Reset()
   760  
   761  	logger.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
   762  		"storage", stats.storage, "elapsed", common.PrettyDuration(time.Since(stats.start)))
   763  
   764  	dl.lock.Lock()
   765  	dl.genMarker = nil
   766  	close(dl.genPending)
   767  	dl.lock.Unlock()
   768  
   769  	// Someone will be looking for us, wait it out
   770  	abort = <-dl.genAbort
   771  	abort <- nil
   772  }
   773  
    774  // increaseKey increases the input key by one. Nil is returned if the entire
    775  // addition operation overflows.
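         // For example, increaseKey([]byte{0x12, 0xff}) yields []byte{0x13, 0x00}, while
         // increaseKey([]byte{0xff, 0xff}) yields nil. Note that the key is modified in place.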
   776  func increaseKey(key []byte) []byte {
   777  	for i := len(key) - 1; i >= 0; i-- {
   778  		key[i]++
   779  		if key[i] != 0x0 {
   780  			return key
   781  		}
   782  	}
   783  	return nil
   784  }