github.com/snowblossomcoin/go-ethereum@v1.9.25/core/state/snapshot/generate.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snapshot
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/binary"
    22  	"math/big"
    23  	"time"
    24  
    25  	"github.com/VictoriaMetrics/fastcache"
    26  	"github.com/ethereum/go-ethereum/common"
    27  	"github.com/ethereum/go-ethereum/common/math"
    28  	"github.com/ethereum/go-ethereum/core/rawdb"
    29  	"github.com/ethereum/go-ethereum/crypto"
    30  	"github.com/ethereum/go-ethereum/ethdb"
    31  	"github.com/ethereum/go-ethereum/log"
    32  	"github.com/ethereum/go-ethereum/rlp"
    33  	"github.com/ethereum/go-ethereum/trie"
    34  )
    35  
var (
	// emptyRoot is the known root hash of an empty trie. Accounts whose
	// storage root equals this value have no storage slots to iterate.
	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

	// emptyCode is the known hash of the empty EVM bytecode (Keccak256 of nil).
	emptyCode = crypto.Keccak256Hash(nil)
)
    43  
// generatorStats is a collection of statistics gathered by the snapshot generator
// for logging purposes.
type generatorStats struct {
	wiping   chan struct{}      // Notification channel if wiping is in progress (closed when done)
	origin   uint64             // Origin prefix where generation started (first 8 bytes of the start marker)
	start    time.Time          // Timestamp when generation started
	accounts uint64             // Number of accounts indexed so far
	slots    uint64             // Number of storage slots indexed so far
	storage  common.StorageSize // Account and storage slot size written to disk
}
    54  
    55  // Log creates an contextual log with the given message and the context pulled
    56  // from the internally maintained statistics.
    57  func (gs *generatorStats) Log(msg string, root common.Hash, marker []byte) {
    58  	var ctx []interface{}
    59  	if root != (common.Hash{}) {
    60  		ctx = append(ctx, []interface{}{"root", root}...)
    61  	}
    62  	// Figure out whether we're after or within an account
    63  	switch len(marker) {
    64  	case common.HashLength:
    65  		ctx = append(ctx, []interface{}{"at", common.BytesToHash(marker)}...)
    66  	case 2 * common.HashLength:
    67  		ctx = append(ctx, []interface{}{
    68  			"in", common.BytesToHash(marker[:common.HashLength]),
    69  			"at", common.BytesToHash(marker[common.HashLength:]),
    70  		}...)
    71  	}
    72  	// Add the usual measurements
    73  	ctx = append(ctx, []interface{}{
    74  		"accounts", gs.accounts,
    75  		"slots", gs.slots,
    76  		"storage", gs.storage,
    77  		"elapsed", common.PrettyDuration(time.Since(gs.start)),
    78  	}...)
    79  	// Calculate the estimated indexing time based on current stats
    80  	if len(marker) > 0 {
    81  		if done := binary.BigEndian.Uint64(marker[:8]) - gs.origin; done > 0 {
    82  			left := math.MaxUint64 - binary.BigEndian.Uint64(marker[:8])
    83  
    84  			speed := done/uint64(time.Since(gs.start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
    85  			ctx = append(ctx, []interface{}{
    86  				"eta", common.PrettyDuration(time.Duration(left/speed) * time.Millisecond),
    87  			}...)
    88  		}
    89  	}
    90  	log.Info(msg, ctx...)
    91  }
    92  
    93  // generateSnapshot regenerates a brand new snapshot based on an existing state
    94  // database and head block asynchronously. The snapshot is returned immediately
    95  // and generation is continued in the background until done.
    96  func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, wiper chan struct{}) *diskLayer {
    97  	// Wipe any previously existing snapshot from the database if no wiper is
    98  	// currently in progress.
    99  	if wiper == nil {
   100  		wiper = wipeSnapshot(diskdb, true)
   101  	}
   102  	// Create a new disk layer with an initialized state marker at zero
   103  	rawdb.WriteSnapshotRoot(diskdb, root)
   104  
   105  	base := &diskLayer{
   106  		diskdb:     diskdb,
   107  		triedb:     triedb,
   108  		root:       root,
   109  		cache:      fastcache.New(cache * 1024 * 1024),
   110  		genMarker:  []byte{}, // Initialized but empty!
   111  		genPending: make(chan struct{}),
   112  		genAbort:   make(chan chan *generatorStats),
   113  	}
   114  	go base.generate(&generatorStats{wiping: wiper, start: time.Now()})
   115  	log.Debug("Start snapshot generation", "root", root)
   116  	return base
   117  }
   118  
// generate is a background thread that iterates over the state and storage tries,
// constructing the state snapshot. All the arguments are purely for statistics
// gathering and logging, since the method surfs the blocks as they arrive, often
// being restarted.
//
// Progress is tracked via dl.genMarker (mutated only under dl.lock): an account
// hash while iterating accounts, account hash + slot hash while inside storage.
// Abort requests arrive on dl.genAbort as a channel to send the stats back on.
func (dl *diskLayer) generate(stats *generatorStats) {
	// If a database wipe is in operation, wait until it's done
	if stats.wiping != nil {
		stats.Log("Wiper running, state snapshotting paused", common.Hash{}, dl.genMarker)
		select {
		// If wiper is done, resume normal mode of operation
		case <-stats.wiping:
			stats.wiping = nil
			stats.start = time.Now()

		// If generator was aborted during wipe, return
		case abort := <-dl.genAbort:
			abort <- stats
			return
		}
	}
	// Create an account and state iterator pointing to the current generator marker
	accTrie, err := trie.NewSecure(dl.root, dl.triedb)
	if err != nil {
		// The account trie is missing (GC), surf the chain until one becomes available
		stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)

		// Block until someone asks us to abort; hand the stats back so the
		// generator can be resumed later from the same marker.
		abort := <-dl.genAbort
		abort <- stats
		return
	}
	stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)

	var accMarker []byte
	if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
		accMarker = dl.genMarker[:common.HashLength]
	}
	accIt := trie.NewIterator(accTrie.NodeIterator(accMarker))
	batch := dl.diskdb.NewBatch()

	// Iterate from the previous marker and continue generating the state snapshot
	logged := time.Now()
	for accIt.Next() {
		// Retrieve the current account and flatten it into the internal format
		accountHash := common.BytesToHash(accIt.Key)

		// Full consensus account encoding; re-encoded below into the slimmer
		// snapshot format (omitting empty roots/code hashes).
		var acc struct {
			Nonce    uint64
			Balance  *big.Int
			Root     common.Hash
			CodeHash []byte
		}
		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
			log.Crit("Invalid account encountered during snapshot creation", "err", err)
		}
		data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)

		// If the account is not yet in-progress, write it out
		// (skipped when resuming exactly at this account: it was already
		// persisted before the previous interruption).
		if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) {
			rawdb.WriteAccountSnapshot(batch, accountHash, data)
			stats.storage += common.StorageSize(1 + common.HashLength + len(data))
			stats.accounts++
		}
		// If we've exceeded our batch allowance or termination was requested, flush to disk
		// (non-blocking poll: abort stays nil if nobody asked us to stop).
		var abort chan *generatorStats
		select {
		case abort = <-dl.genAbort:
		default:
		}
		if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
			// Only write and set the marker if we actually did something useful
			if batch.ValueSize() > 0 {
				batch.Write()
				batch.Reset()

				// Marker is account-only here; a resume will re-iterate this
				// account's storage from scratch.
				dl.lock.Lock()
				dl.genMarker = accountHash[:]
				dl.lock.Unlock()
			}
			if abort != nil {
				stats.Log("Aborting state snapshot generation", dl.root, accountHash[:])
				abort <- stats
				return
			}
		}
		// If the account is in-progress, continue where we left off (otherwise iterate all)
		if acc.Root != emptyRoot {
			storeTrie, err := trie.NewSecure(acc.Root, dl.triedb)
			if err != nil {
				log.Error("Generator failed to access storage trie", "accroot", dl.root, "acchash", common.BytesToHash(accIt.Key), "stroot", acc.Root, "err", err)
				abort := <-dl.genAbort
				abort <- stats
				return
			}
			// Resume mid-storage only when the saved marker points inside
			// exactly this account (marker longer than one hash).
			var storeMarker []byte
			if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength {
				storeMarker = dl.genMarker[common.HashLength:]
			}
			storeIt := trie.NewIterator(storeTrie.NodeIterator(storeMarker))
			for storeIt.Next() {
				rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(storeIt.Key), storeIt.Value)
				stats.storage += common.StorageSize(1 + 2*common.HashLength + len(storeIt.Value))
				stats.slots++

				// If we've exceeded our batch allowance or termination was requested, flush to disk
				var abort chan *generatorStats
				select {
				case abort = <-dl.genAbort:
				default:
				}
				if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
					// Only write and set the marker if we actually did something useful
					if batch.ValueSize() > 0 {
						batch.Write()
						batch.Reset()

						// Composite marker: account hash + slot hash, so a
						// resume continues from within this account's storage.
						dl.lock.Lock()
						dl.genMarker = append(accountHash[:], storeIt.Key...)
						dl.lock.Unlock()
					}
					if abort != nil {
						stats.Log("Aborting state snapshot generation", dl.root, append(accountHash[:], storeIt.Key...))
						abort <- stats
						return
					}
				}
			}
			if err := storeIt.Err; err != nil {
				log.Error("Generator failed to iterate storage trie", "accroot", dl.root, "acchash", common.BytesToHash(accIt.Key), "stroot", acc.Root, "err", err)
				abort := <-dl.genAbort
				abort <- stats
				return
			}
		}
		if time.Since(logged) > 8*time.Second {
			stats.Log("Generating state snapshot", dl.root, accIt.Key)
			logged = time.Now()
		}
		// Some account processed, unmark the marker
		accMarker = nil
	}
	if err := accIt.Err; err != nil {
		log.Error("Generator failed to iterate account trie", "root", dl.root, "err", err)
		abort := <-dl.genAbort
		abort <- stats
		return
	}
	// Snapshot fully generated, set the marker to nil
	if batch.ValueSize() > 0 {
		batch.Write()
	}
	log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
		"storage", stats.storage, "elapsed", common.PrettyDuration(time.Since(stats.start)))

	// nil marker + closed genPending signal completion to readers.
	dl.lock.Lock()
	dl.genMarker = nil
	close(dl.genPending)
	dl.lock.Unlock()

	// Someone will be looking for us, wait it out
	abort := <-dl.genAbort
	abort <- nil
}