github.com/dim4egster/coreth@v0.10.2/core/state/snapshot/disklayer.go

// (c) 2019-2020, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
//
// It is distributed under a license compatible with the licensing terms of the
// original code from which it is derived.
//
// Much love to the original authors for their work.
// **********
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"sync"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/dim4egster/coreth/core/rawdb"
	"github.com/dim4egster/coreth/ethdb"
	"github.com/dim4egster/coreth/trie"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/rlp"
)

// diskLayer is a low-level persistent snapshot built on top of a key-value store.
type diskLayer struct {
	diskdb ethdb.KeyValueStore // Key-value store containing the base snapshot
	triedb *trie.Database      // Trie node cache for reconstruction purposes
	cache  *fastcache.Cache    // Cache to avoid hitting the disk for direct access

	blockHash common.Hash // Block hash of the base snapshot
	root      common.Hash // Root hash of the base snapshot
	stale     bool        // Signals that the layer became stale (state progressed)

	genMarker  []byte             // Marker for the state that's indexed during initial layer generation
	genPending chan struct{}      // Notification channel when generation is done (test synchronicity)
	genAbort   chan chan struct{} // Notification channel to abort generating the snapshot in this layer

	genStats *generatorStats // Stats for snapshot generation (generation aborted/finished if non-nil)

	created      time.Time // Time at which the disk layer was created
	logged       time.Time // Time at which generation progress was last logged
	abortStarted time.Time // Time at which the disk layer abort was started

	lock sync.RWMutex
}

// Root returns the root hash for which this snapshot was made.
func (dl *diskLayer) Root() common.Hash {
	return dl.root
}

// BlockHash returns the block hash for which this snapshot was made.
func (dl *diskLayer) BlockHash() common.Hash {
	return dl.blockHash
}

// Parent always returns nil as there's no layer below the disk.
func (dl *diskLayer) Parent() snapshot {
	return nil
}

// Stale returns whether this layer has become stale (was flattened across) or
// if it's still live.
func (dl *diskLayer) Stale() bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.stale
}
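
// Every read served by this layer walks the same gauntlet in order: the
// staleness flag, the generator coverage marker, the fastcache clean cache,
// and finally the on-disk snapshot. A minimal caller-side sketch, assuming a
// hypothetical snapshot handle `snap` (not a name defined in this file):
//
//	if _, err := snap.Account(accountHash); err == ErrNotCoveredYet {
//		// the background generator hasn't reached this key yet;
//		// fall back to a direct trie lookup instead
//	}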

// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
func (dl *diskLayer) Account(hash common.Hash) (*Account, error) {
	data, err := dl.AccountRLP(hash)
	if err != nil {
		return nil, err
	}
	if len(data) == 0 { // can be both nil and []byte{}
		return nil, nil
	}
	account := new(Account)
	if err := rlp.DecodeBytes(data, account); err != nil {
		panic(err)
	}
	return account, nil
}

// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
func (dl *diskLayer) AccountRLP(hash common.Hash) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// If the layer was flattened into, consider it invalid (any live reference to
	// the original should be marked as unusable).
	if dl.stale {
		return nil, ErrSnapshotStale
	}
	// If the layer is being generated, ensure the requested hash has already been
	// covered by the generator.
	if dl.genMarker != nil && bytes.Compare(hash[:], dl.genMarker) > 0 {
		return nil, ErrNotCoveredYet
	}
	// If we're in the disk layer, all diff layers missed
	snapshotDirtyAccountMissMeter.Mark(1)

	// Try to retrieve the account from the memory cache
	if blob, found := dl.cache.HasGet(nil, hash[:]); found {
		snapshotCleanAccountHitMeter.Mark(1)
		snapshotCleanAccountReadMeter.Mark(int64(len(blob)))
		return blob, nil
	}
	// Cache doesn't contain account, pull from disk and cache for later
	blob := rawdb.ReadAccountSnapshot(dl.diskdb, hash)
	dl.cache.Set(hash[:], blob)

	snapshotCleanAccountMissMeter.Mark(1)
	if n := len(blob); n > 0 {
		snapshotCleanAccountWriteMeter.Mark(int64(n))
	} else {
		snapshotCleanAccountInexMeter.Mark(1)
	}
	return blob, nil
}

// Storage directly retrieves the storage data associated with a particular hash,
// within a particular account.
func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// If the layer was flattened into, consider it invalid (any live reference to
	// the original should be marked as unusable).
	if dl.stale {
		return nil, ErrSnapshotStale
	}
	key := append(accountHash[:], storageHash[:]...)

	// If the layer is being generated, ensure the requested hash has already been
	// covered by the generator.
	if dl.genMarker != nil && bytes.Compare(key, dl.genMarker) > 0 {
		return nil, ErrNotCoveredYet
	}
	// If we're in the disk layer, all diff layers missed
	snapshotDirtyStorageMissMeter.Mark(1)

	// Try to retrieve the storage slot from the memory cache
	if blob, found := dl.cache.HasGet(nil, key); found {
		snapshotCleanStorageHitMeter.Mark(1)
		snapshotCleanStorageReadMeter.Mark(int64(len(blob)))
		return blob, nil
	}
	// Cache doesn't contain storage slot, pull from disk and cache for later
	blob := rawdb.ReadStorageSnapshot(dl.diskdb, accountHash, storageHash)
	dl.cache.Set(key, blob)

	snapshotCleanStorageMissMeter.Mark(1)
	if n := len(blob); n > 0 {
		snapshotCleanStorageWriteMeter.Mark(int64(n))
	} else {
		snapshotCleanStorageInexMeter.Mark(1)
	}
	return blob, nil
}
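
// Both lookups above gate on genMarker before touching the cache: the
// generator sweeps the key space in sorted order, so any key that compares
// greater than the marker has not been written yet. A standalone sketch of
// that gate, using illustrative local variables rather than the layer's state:
//
//	key := append(accountHash[:], storageHash[:]...)
//	if genMarker != nil && bytes.Compare(key, genMarker) > 0 {
//		return nil, ErrNotCoveredYet // key sorts after the generated region
//	}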

// Update creates a new layer on top of the existing snapshot diff tree with
// the specified data items. Note, the maps are retained by the method to avoid
// copying everything.
func (dl *diskLayer) Update(blockHash, blockRoot common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
	return newDiffLayer(dl, blockHash, blockRoot, destructs, accounts, storage)
}
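
// Update never mutates the disk layer itself; it only wraps it in a fresh
// in-memory diff layer. A usage sketch with hypothetical inputs (since the
// maps are retained by the new layer, callers must not mutate them afterwards):
//
//	diff := dl.Update(blockHash, blockRoot, destructs, accounts, storage)
//	_ = diff.Root() // the new top of the snapshot tree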