github.com/klaytn/klaytn@v1.10.2/snapshot/disklayer.go

// Modifications Copyright 2021 The klaytn Authors
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from core/state/snapshot/disklayer.go (2021/10/21).
// Modified and improved for the klaytn development.

package snapshot

import (
	"bytes"
	"sync"

	"github.com/VictoriaMetrics/fastcache"

	"github.com/klaytn/klaytn/blockchain/types/account"
	"github.com/klaytn/klaytn/common"
	"github.com/klaytn/klaytn/rlp"
	"github.com/klaytn/klaytn/storage/database"
	"github.com/klaytn/klaytn/storage/statedb"
)

// diskLayer is a low level persistent snapshot built on top of a key-value store.
type diskLayer struct {
	diskdb database.DBManager // Key-value store containing the base snapshot
	triedb *statedb.Database  // Trie node cache for reconstruction purposes
	cache  *fastcache.Cache   // Cache to avoid hitting the disk for direct access

	root  common.Hash // Root hash of the base snapshot
	stale bool        // Signals that the layer became stale (state progressed)

	genMarker  []byte                    // Marker for the state that's indexed during initial layer generation
	genPending chan struct{}             // Notification channel when generation is done (test synchronicity)
	genAbort   chan chan *generatorStats // Notification channel to abort generating the snapshot in this layer

	lock sync.RWMutex
}
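
// Added note (not part of the original file): genMarker records how far the
// background generator has progressed through the snapshot key space. Account
// reads are keyed by the 32-byte account hash and storage reads by the 64-byte
// accountHash++storageHash key, so a single byte-wise comparison against
// genMarker gates both kinds of lookups, as done in AccountRLP and Storage
// below:
//
//	if dl.genMarker != nil && bytes.Compare(key, dl.genMarker) > 0 {
//		return nil, ErrNotCoveredYet // not generated yet, caller must fall back
//	}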

// Root returns the root hash for which this snapshot was made.
func (dl *diskLayer) Root() common.Hash {
	return dl.root
}

// Parent always returns nil as there's no layer below the disk.
func (dl *diskLayer) Parent() snapshot {
	return nil
}

// Stale returns whether this layer has become stale (was flattened across) or
// if it's still live.
func (dl *diskLayer) Stale() bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.stale
}

// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
func (dl *diskLayer) Account(hash common.Hash) (account.Account, error) {
	data, err := dl.AccountRLP(hash)
	if err != nil {
		return nil, err
	}
	if len(data) == 0 { // can be both nil and []byte{}
		return nil, nil
	}
	serializer := account.NewAccountSerializer()
	if err := rlp.DecodeBytes(data, serializer); err != nil {
		panic(err)
	}
	return serializer.GetAccount(), nil
}
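
// Hypothetical usage sketch (added, not part of the original file): a caller
// has to distinguish three outcomes when reading an account through this layer.
//
//	acc, err := dl.Account(hash)
//	switch {
//	case err != nil:
//		// ErrSnapshotStale or ErrNotCoveredYet: retry against a fresher layer
//	case acc == nil:
//		// the account does not exist in the snapshot
//	default:
//		// acc holds the decoded account
//	}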

// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
func (dl *diskLayer) AccountRLP(hash common.Hash) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// If the layer was flattened into, consider it invalid (any live reference to
	// the original should be marked as unusable).
	if dl.stale {
		return nil, ErrSnapshotStale
	}
	// If the layer is being generated, ensure the requested hash has already been
	// covered by the generator.
	if dl.genMarker != nil && bytes.Compare(hash[:], dl.genMarker) > 0 {
		return nil, ErrNotCoveredYet
	}
	// If we're in the disk layer, all diff layers missed
	snapshotDirtyAccountMissMeter.Mark(1)

	// Try to retrieve the account from the memory cache
	if blob, found := dl.cache.HasGet(nil, hash[:]); found {
		snapshotCleanAccountHitMeter.Mark(1)
		snapshotCleanAccountReadMeter.Mark(int64(len(blob)))
		return blob, nil
	}
	// Cache doesn't contain account, pull from disk and cache for later
	blob := dl.diskdb.ReadAccountSnapshot(hash)
	dl.cache.Set(hash[:], blob)

	snapshotCleanAccountMissMeter.Mark(1)
	if n := len(blob); n > 0 {
		snapshotCleanAccountWriteMeter.Mark(int64(n))
	} else {
		snapshotCleanAccountInexMeter.Mark(1)
	}
	return blob, nil
}
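
// Added note (not part of the original file): the cache.Set above stores the
// blob even when it is empty, so accounts that are absent on disk are cached
// negatively as well. Because lookups use HasGet rather than Get, a cached
// empty entry still counts as a hit and repeated reads of a non-existent
// account never go back to the database.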

// Storage directly retrieves the storage data associated with a particular hash,
// within a particular account.
func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// If the layer was flattened into, consider it invalid (any live reference to
	// the original should be marked as unusable).
	if dl.stale {
		return nil, ErrSnapshotStale
	}
	key := append(accountHash[:], storageHash[:]...)

	// If the layer is being generated, ensure the requested hash has already been
	// covered by the generator.
	if dl.genMarker != nil && bytes.Compare(key, dl.genMarker) > 0 {
		return nil, ErrNotCoveredYet
	}
	// If we're in the disk layer, all diff layers missed
	snapshotDirtyStorageMissMeter.Mark(1)

	// Try to retrieve the storage slot from the memory cache
	if blob, found := dl.cache.HasGet(nil, key); found {
		snapshotCleanStorageHitMeter.Mark(1)
		snapshotCleanStorageReadMeter.Mark(int64(len(blob)))
		return blob, nil
	}
	// Cache doesn't contain storage slot, pull from disk and cache for later
	blob := dl.diskdb.ReadStorageSnapshot(accountHash, storageHash)
	dl.cache.Set(key, blob)

	snapshotCleanStorageMissMeter.Mark(1)
	if n := len(blob); n > 0 {
		snapshotCleanStorageWriteMeter.Mark(int64(n))
	} else {
		snapshotCleanStorageInexMeter.Mark(1)
	}
	return blob, nil
}
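
// Added note (not part of the original file): storage slots share the same
// fastcache instance as accounts. The two key spaces cannot collide because
// account entries use 32-byte keys while storage entries use the 64-byte
// accountHash++storageHash key built above.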

// Update creates a new layer on top of the existing snapshot diff tree with
// the specified data items. Note, the maps are retained by the method to avoid
// copying everything.
func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
	return newDiffLayer(dl, blockHash, destructs, accounts, storage)
}
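
// Hypothetical usage sketch (added, not part of the original file): after
// executing a block, its state changes can be layered on top of the disk layer.
//
//	diff := dl.Update(blockHash, destructs, accounts, storage)
//	// diff serves reads for the new state and falls back to dl on misses;
//	// dl stays the persistent base until the diff tree is flattened into it.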