github.com/fff-chain/go-fff@v0.0.0-20220726032732-1c84420b8a99/core/state/snapshot/disklayer.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"sync"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/fff-chain/go-fff/common"
	"github.com/fff-chain/go-fff/core/rawdb"
	"github.com/fff-chain/go-fff/ethdb"
	"github.com/fff-chain/go-fff/rlp"
	"github.com/fff-chain/go-fff/trie"
)

// diskLayer is a low level persistent snapshot built on top of a key-value store.
type diskLayer struct {
	diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot
	triedb *trie.Database      // Trie node cache for reconstruction purposes
	cache  *fastcache.Cache    // Cache to avoid hitting the disk for direct access

	root  common.Hash // Root hash of the base snapshot
	stale bool        // Signals that the layer became stale (state progressed)

	genMarker  []byte                    // Marker for the state that's indexed during initial layer generation
	genPending chan struct{}             // Notification channel when generation is done (test synchronicity)
	genAbort   chan chan *generatorStats // Notification channel to abort generating the snapshot in this layer

	lock sync.RWMutex
}

// Root returns the root hash for which this snapshot was made.
func (dl *diskLayer) Root() common.Hash {
	return dl.root
}

// WaitAndGetVerifyRes returns the verification result of the layer; the disk
// layer is always considered verified.
func (dl *diskLayer) WaitAndGetVerifyRes() bool {
	return true
}

// MarkValid is a no-op for the disk layer, which is always valid.
func (dl *diskLayer) MarkValid() {}

// Verified reports whether the layer has been verified; always true for the
// disk layer.
func (dl *diskLayer) Verified() bool {
	return true
}

// Parent always returns nil as there's no layer below the disk.
func (dl *diskLayer) Parent() snapshot {
	return nil
}

// Stale returns whether this layer has become stale (was flattened across) or
// if it's still live.
func (dl *diskLayer) Stale() bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.stale
}

// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
func (dl *diskLayer) Account(hash common.Hash) (*Account, error) {
	data, err := dl.AccountRLP(hash)
	if err != nil {
		return nil, err
	}
	if len(data) == 0 { // can be both nil and []byte{}
		return nil, nil
	}
	account := new(Account)
	if err := rlp.DecodeBytes(data, account); err != nil {
		panic(err)
	}
	return account, nil
}

// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
func (dl *diskLayer) AccountRLP(hash common.Hash) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// If the layer was flattened into, consider it invalid (any live reference to
	// the original should be marked as unusable).
	if dl.stale {
		return nil, ErrSnapshotStale
	}
	// If the layer is being generated, ensure the requested hash has already been
	// covered by the generator.
	if dl.genMarker != nil && bytes.Compare(hash[:], dl.genMarker) > 0 {
		return nil, ErrNotCoveredYet
	}
	// If we're in the disk layer, all diff layers missed
	snapshotDirtyAccountMissMeter.Mark(1)

	// Try to retrieve the account from the memory cache
	if blob, found := dl.cache.HasGet(nil, hash[:]); found {
		snapshotCleanAccountHitMeter.Mark(1)
		snapshotCleanAccountReadMeter.Mark(int64(len(blob)))
		return blob, nil
	}
	// Cache doesn't contain account, pull from disk and cache for later
	blob := rawdb.ReadAccountSnapshot(dl.diskdb, hash)
	dl.cache.Set(hash[:], blob)

	snapshotCleanAccountMissMeter.Mark(1)
	if n := len(blob); n > 0 {
		snapshotCleanAccountWriteMeter.Mark(int64(n))
	} else {
		snapshotCleanAccountInexMeter.Mark(1)
	}
	return blob, nil
}

// Storage directly retrieves the storage data associated with a particular hash,
// within a particular account.
func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// If the layer was flattened into, consider it invalid (any live reference to
	// the original should be marked as unusable).
	if dl.stale {
		return nil, ErrSnapshotStale
	}
	key := append(accountHash[:], storageHash[:]...)

	// If the layer is being generated, ensure the requested hash has already been
	// covered by the generator.
	if dl.genMarker != nil && bytes.Compare(key, dl.genMarker) > 0 {
		return nil, ErrNotCoveredYet
	}
	// If we're in the disk layer, all diff layers missed
	snapshotDirtyStorageMissMeter.Mark(1)

	// Try to retrieve the storage slot from the memory cache
	if blob, found := dl.cache.HasGet(nil, key); found {
		snapshotCleanStorageHitMeter.Mark(1)
		snapshotCleanStorageReadMeter.Mark(int64(len(blob)))
		return blob, nil
	}
	// Cache doesn't contain storage slot, pull from disk and cache for later
	blob := rawdb.ReadStorageSnapshot(dl.diskdb, accountHash, storageHash)
	dl.cache.Set(key, blob)

	snapshotCleanStorageMissMeter.Mark(1)
	if n := len(blob); n > 0 {
		snapshotCleanStorageWriteMeter.Mark(int64(n))
	} else {
		snapshotCleanStorageInexMeter.Mark(1)
	}
	return blob, nil
}

// Update creates a new layer on top of the existing snapshot diff tree with
// the specified data items. Note, the maps are retained by the method to avoid
// copying everything.
func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte, verified chan struct{}) *diffLayer {
	return newDiffLayer(dl, blockHash, destructs, accounts, storage, verified)
}
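For illustration, a minimal standalone sketch (hypothetical helper names, not part of the package above) of the coverage check that AccountRLP and Storage apply against genMarker: storage entries are keyed by accountHash ++ storageHash, and a lookup is only served once the generator's marker has moved at or past that key; otherwise the calls above return ErrNotCoveredYet.

package main

import (
	"bytes"
	"fmt"
)

// covered mirrors the genMarker comparison used above: a nil marker means
// generation has finished; otherwise only keys sorting at or below the marker
// have been indexed so far.
func covered(key, genMarker []byte) bool {
	return genMarker == nil || bytes.Compare(key, genMarker) <= 0
}

func main() {
	accountHash := bytes.Repeat([]byte{0x11}, 32)
	storageHash := bytes.Repeat([]byte{0xee}, 32)

	// Storage entries are keyed by the concatenation accountHash ++ storageHash.
	key := append(append([]byte{}, accountHash...), storageHash...)

	// Hypothetical marker: the generator is midway through this account's
	// storage, having indexed slots up to 0x55...55.
	genMarker := append(append([]byte{}, accountHash...), bytes.Repeat([]byte{0x55}, 32)...)

	fmt.Println(covered(accountHash, genMarker)) // true:  the account itself is already generated
	fmt.Println(covered(key, genMarker))         // false: this slot would yield ErrNotCoveredYet
}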