github.com/matthieu/go-ethereum@v1.13.2/core/state/snapshot/journal.go (about)

     1  // Copyright 2019 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package snapshot
    18  
    19  import (
    20  	"bytes"
    21  	"encoding/binary"
    22  	"errors"
    23  	"fmt"
    24  	"io"
    25  	"time"
    26  
    27  	"github.com/VictoriaMetrics/fastcache"
    28  	"github.com/matthieu/go-ethereum/common"
    29  	"github.com/matthieu/go-ethereum/core/rawdb"
    30  	"github.com/matthieu/go-ethereum/ethdb"
    31  	"github.com/matthieu/go-ethereum/log"
    32  	"github.com/matthieu/go-ethereum/rlp"
    33  	"github.com/matthieu/go-ethereum/trie"
    34  )
    35  
// journalGenerator is a disk layer entry containing the generator progress marker.
// Field order is significant: it defines the RLP encoding of the journal entry.
type journalGenerator struct {
	Wiping   bool   // Whether the database was in progress of being wiped
	Done     bool   // Whether the generator finished creating the snapshot
	Marker   []byte // Last position processed by the generator; nil/empty restarts from the beginning (see loadSnapshot)
	Accounts uint64 // Number of accounts generated so far, restored into generatorStats on resume
	Slots    uint64 // Number of storage slots generated so far, restored into generatorStats on resume
	Storage  uint64 // Generated storage size so far (converted to common.StorageSize on resume)
}
    45  
// journalDestruct is an account deletion entry in a diffLayer's disk journal.
// Field order is significant: it defines the RLP encoding of the journal entry.
type journalDestruct struct {
	Hash common.Hash // Hash of the destructed account
}
    50  
// journalAccount is an account entry in a diffLayer's disk journal.
// Field order is significant: it defines the RLP encoding of the journal entry.
type journalAccount struct {
	Hash common.Hash // Hash of the account
	Blob []byte      // Account data; empty means deleted (nil-ness is lost in RLP, see loadDiffLayer)
}
    56  
// journalStorage is an account's storage map in a diffLayer's disk journal.
// Field order is significant: it defines the RLP encoding of the journal entry.
type journalStorage struct {
	Hash common.Hash   // Hash of the account owning the storage slots
	Keys []common.Hash // Storage slot keys; parallel to Vals
	Vals [][]byte      // Storage slot values; empty means deleted (nil-ness is lost in RLP, see loadDiffLayer)
}
    63  
// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
// It reconstructs the disk layer from the persisted root, replays all diff
// layers from the journal on top of it, verifies that the resulting head
// matches the requested root, and resumes any interrupted generation work.
func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) (snapshot, error) {
	// Retrieve the block number and hash of the snapshot, failing if no snapshot
	// is present in the database (or crashed mid-update).
	baseRoot := rawdb.ReadSnapshotRoot(diskdb)
	if baseRoot == (common.Hash{}) {
		return nil, errors.New("missing or corrupted snapshot")
	}
	base := &diskLayer{
		diskdb: diskdb,
		triedb: triedb,
		cache:  fastcache.New(cache * 1024 * 1024), // cache is given in megabytes
		root:   baseRoot,
	}
	// Retrieve the journal, it must exist since even for 0 layer it stores whether
	// we've already generated the snapshot or are in progress only
	journal := rawdb.ReadSnapshotJournal(diskdb)
	if len(journal) == 0 {
		return nil, errors.New("missing or corrupted snapshot journal")
	}
	r := rlp.NewStream(bytes.NewReader(journal), 0)

	// Read the snapshot generation progress for the disk layer
	var generator journalGenerator
	if err := r.Decode(&generator); err != nil {
		return nil, fmt.Errorf("failed to load snapshot progress marker: %v", err)
	}
	// Load all the snapshot diffs from the journal
	snapshot, err := loadDiffLayer(base, r)
	if err != nil {
		return nil, err
	}
	// Entire snapshot journal loaded, sanity check the head against the
	// requested root before accepting it
	if head := snapshot.Root(); head != root {
		return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
	}
	// Everything loaded correctly, resume any suspended operations
	if !generator.Done {
		// If the generator was still wiping, restart one from scratch (fine for
		// now as it's rare and the wiper deletes the stuff it touches anyway, so
		// restarting won't incur a lot of extra database hops).
		var wiper chan struct{}
		if generator.Wiping {
			log.Info("Resuming previous snapshot wipe")
			wiper = wipeSnapshot(diskdb, false)
		}
		// Whether or not wiping was in progress, load any generator progress too.
		// A nil marker would mean "done", so normalize it to an empty (but
		// non-nil) marker meaning "start from the beginning".
		base.genMarker = generator.Marker
		if base.genMarker == nil {
			base.genMarker = []byte{}
		}
		base.genPending = make(chan struct{})
		base.genAbort = make(chan chan *generatorStats)

		// The first 8 bytes of the marker (if present) encode the account
		// counter to resume progress reporting from
		var origin uint64
		if len(generator.Marker) >= 8 {
			origin = binary.BigEndian.Uint64(generator.Marker)
		}
		go base.generate(&generatorStats{
			wiping:   wiper,
			origin:   origin,
			start:    time.Now(),
			accounts: generator.Accounts,
			slots:    generator.Slots,
			storage:  common.StorageSize(generator.Storage),
		})
	}
	return snapshot, nil
}
   134  
   135  // loadDiffLayer reads the next sections of a snapshot journal, reconstructing a new
   136  // diff and verifying that it can be linked to the requested parent.
   137  func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) {
   138  	// Read the next diff journal entry
   139  	var root common.Hash
   140  	if err := r.Decode(&root); err != nil {
   141  		// The first read may fail with EOF, marking the end of the journal
   142  		if err == io.EOF {
   143  			return parent, nil
   144  		}
   145  		return nil, fmt.Errorf("load diff root: %v", err)
   146  	}
   147  	var destructs []journalDestruct
   148  	if err := r.Decode(&destructs); err != nil {
   149  		return nil, fmt.Errorf("load diff destructs: %v", err)
   150  	}
   151  	destructSet := make(map[common.Hash]struct{})
   152  	for _, entry := range destructs {
   153  		destructSet[entry.Hash] = struct{}{}
   154  	}
   155  	var accounts []journalAccount
   156  	if err := r.Decode(&accounts); err != nil {
   157  		return nil, fmt.Errorf("load diff accounts: %v", err)
   158  	}
   159  	accountData := make(map[common.Hash][]byte)
   160  	for _, entry := range accounts {
   161  		if len(entry.Blob) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
   162  			accountData[entry.Hash] = entry.Blob
   163  		} else {
   164  			accountData[entry.Hash] = nil
   165  		}
   166  	}
   167  	var storage []journalStorage
   168  	if err := r.Decode(&storage); err != nil {
   169  		return nil, fmt.Errorf("load diff storage: %v", err)
   170  	}
   171  	storageData := make(map[common.Hash]map[common.Hash][]byte)
   172  	for _, entry := range storage {
   173  		slots := make(map[common.Hash][]byte)
   174  		for i, key := range entry.Keys {
   175  			if len(entry.Vals[i]) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that
   176  				slots[key] = entry.Vals[i]
   177  			} else {
   178  				slots[key] = nil
   179  			}
   180  		}
   181  		storageData[entry.Hash] = slots
   182  	}
   183  	return loadDiffLayer(newDiffLayer(parent, root, destructSet, accountData, storageData), r)
   184  }
   185  
   186  // Journal writes the persistent layer generator stats into a buffer to be stored
   187  // in the database as the snapshot journal.
   188  func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
   189  	// If the snapshot is currently being generated, abort it
   190  	var stats *generatorStats
   191  	if dl.genAbort != nil {
   192  		abort := make(chan *generatorStats)
   193  		dl.genAbort <- abort
   194  
   195  		if stats = <-abort; stats != nil {
   196  			stats.Log("Journalling in-progress snapshot", dl.genMarker)
   197  		}
   198  	}
   199  	// Ensure the layer didn't get stale
   200  	dl.lock.RLock()
   201  	defer dl.lock.RUnlock()
   202  
   203  	if dl.stale {
   204  		return common.Hash{}, ErrSnapshotStale
   205  	}
   206  	// Write out the generator marker
   207  	entry := journalGenerator{
   208  		Done:   dl.genMarker == nil,
   209  		Marker: dl.genMarker,
   210  	}
   211  	if stats != nil {
   212  		entry.Wiping = (stats.wiping != nil)
   213  		entry.Accounts = stats.accounts
   214  		entry.Slots = stats.slots
   215  		entry.Storage = uint64(stats.storage)
   216  	}
   217  	if err := rlp.Encode(buffer, entry); err != nil {
   218  		return common.Hash{}, err
   219  	}
   220  	return dl.root, nil
   221  }
   222  
   223  // Journal writes the memory layer contents into a buffer to be stored in the
   224  // database as the snapshot journal.
   225  func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) {
   226  	// Journal the parent first
   227  	base, err := dl.parent.Journal(buffer)
   228  	if err != nil {
   229  		return common.Hash{}, err
   230  	}
   231  	// Ensure the layer didn't get stale
   232  	dl.lock.RLock()
   233  	defer dl.lock.RUnlock()
   234  
   235  	if dl.Stale() {
   236  		return common.Hash{}, ErrSnapshotStale
   237  	}
   238  	// Everything below was journalled, persist this layer too
   239  	if err := rlp.Encode(buffer, dl.root); err != nil {
   240  		return common.Hash{}, err
   241  	}
   242  	destructs := make([]journalDestruct, 0, len(dl.destructSet))
   243  	for hash := range dl.destructSet {
   244  		destructs = append(destructs, journalDestruct{Hash: hash})
   245  	}
   246  	if err := rlp.Encode(buffer, destructs); err != nil {
   247  		return common.Hash{}, err
   248  	}
   249  	accounts := make([]journalAccount, 0, len(dl.accountData))
   250  	for hash, blob := range dl.accountData {
   251  		accounts = append(accounts, journalAccount{Hash: hash, Blob: blob})
   252  	}
   253  	if err := rlp.Encode(buffer, accounts); err != nil {
   254  		return common.Hash{}, err
   255  	}
   256  	storage := make([]journalStorage, 0, len(dl.storageData))
   257  	for hash, slots := range dl.storageData {
   258  		keys := make([]common.Hash, 0, len(slots))
   259  		vals := make([][]byte, 0, len(slots))
   260  		for key, val := range slots {
   261  			keys = append(keys, key)
   262  			vals = append(vals, val)
   263  		}
   264  		storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals})
   265  	}
   266  	if err := rlp.Encode(buffer, storage); err != nil {
   267  		return common.Hash{}, err
   268  	}
   269  	return base, nil
   270  }