github.com/core-coin/go-core/v2@v2.1.9/core/state/snapshot/disklayer.go

// Copyright 2019 by the Authors
// This file is part of the go-core library.
//
// The go-core library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-core library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-core library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"sync"

	"github.com/VictoriaMetrics/fastcache"

	"github.com/core-coin/go-core/v2/xcbdb"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/core/rawdb"
	"github.com/core-coin/go-core/v2/rlp"
	"github.com/core-coin/go-core/v2/trie"
)

// diskLayer is a low-level persistent snapshot built on top of a key-value store.
type diskLayer struct {
	diskdb xcbdb.KeyValueStore // Key-value store containing the base snapshot
	triedb *trie.Database      // Trie node cache for reconstruction purposes
	cache  *fastcache.Cache    // Cache to avoid hitting the disk for direct access

	root  common.Hash // Root hash of the base snapshot
	stale bool        // Signals that the layer became stale (state progressed)

	genMarker  []byte                    // Marker for the state that's indexed during initial layer generation
	genPending chan struct{}             // Notification channel when generation is done (test synchronicity)
	genAbort   chan chan *generatorStats // Notification channel to abort generating the snapshot in this layer

	lock sync.RWMutex
}

// Root returns the root hash for which this snapshot was made.
func (dl *diskLayer) Root() common.Hash {
	return dl.root
}

// Parent always returns nil as there's no layer below the disk.
func (dl *diskLayer) Parent() snapshot {
	return nil
}

// Stale returns whether this layer has become stale (was flattened across) or
// if it's still live.
func (dl *diskLayer) Stale() bool {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	return dl.stale
}

// Account directly retrieves the account associated with a particular hash in
// the snapshot slim data format.
func (dl *diskLayer) Account(hash common.Hash) (*Account, error) {
	data, err := dl.AccountRLP(hash)
	if err != nil {
		return nil, err
	}
	if len(data) == 0 { // can be both nil and []byte{}
		return nil, nil
	}
	account := new(Account)
	if err := rlp.DecodeBytes(data, account); err != nil {
		panic(err)
	}
	return account, nil
}

// AccountRLP directly retrieves the account RLP associated with a particular
// hash in the snapshot slim data format.
func (dl *diskLayer) AccountRLP(hash common.Hash) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// If the layer was flattened into, consider it invalid (any live reference to
	// the original should be marked as unusable).
	if dl.stale {
		return nil, ErrSnapshotStale
	}
	// If the layer is being generated, ensure the requested hash has already been
	// covered by the generator.
	if dl.genMarker != nil && bytes.Compare(hash[:], dl.genMarker) > 0 {
		return nil, ErrNotCoveredYet
	}
	// If we're in the disk layer, all diff layers missed
	snapshotDirtyAccountMissMeter.Mark(1)

	// Try to retrieve the account from the memory cache
	if blob, found := dl.cache.HasGet(nil, hash[:]); found {
		snapshotCleanAccountHitMeter.Mark(1)
		snapshotCleanAccountReadMeter.Mark(int64(len(blob)))
		return blob, nil
	}
	// Cache doesn't contain account, pull from disk and cache for later
	blob := rawdb.ReadAccountSnapshot(dl.diskdb, hash)
	dl.cache.Set(hash[:], blob)

	snapshotCleanAccountMissMeter.Mark(1)
	if n := len(blob); n > 0 {
		snapshotCleanAccountWriteMeter.Mark(int64(n))
	} else {
		snapshotCleanAccountInexMeter.Mark(1)
	}
	return blob, nil
}

// Storage directly retrieves the storage data associated with a particular hash,
// within a particular account.
func (dl *diskLayer) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
	dl.lock.RLock()
	defer dl.lock.RUnlock()

	// If the layer was flattened into, consider it invalid (any live reference to
	// the original should be marked as unusable).
	if dl.stale {
		return nil, ErrSnapshotStale
	}
	key := append(accountHash[:], storageHash[:]...)

	// If the layer is being generated, ensure the requested hash has already been
	// covered by the generator.
	if dl.genMarker != nil && bytes.Compare(key, dl.genMarker) > 0 {
		return nil, ErrNotCoveredYet
	}
	// If we're in the disk layer, all diff layers missed
	snapshotDirtyStorageMissMeter.Mark(1)

	// Try to retrieve the storage slot from the memory cache
	if blob, found := dl.cache.HasGet(nil, key); found {
		snapshotCleanStorageHitMeter.Mark(1)
		snapshotCleanStorageReadMeter.Mark(int64(len(blob)))
		return blob, nil
	}
	// Cache doesn't contain storage slot, pull from disk and cache for later
	blob := rawdb.ReadStorageSnapshot(dl.diskdb, accountHash, storageHash)
	dl.cache.Set(key, blob)

	snapshotCleanStorageMissMeter.Mark(1)
	if n := len(blob); n > 0 {
		snapshotCleanStorageWriteMeter.Mark(int64(n))
	} else {
		snapshotCleanStorageInexMeter.Mark(1)
	}
	return blob, nil
}

// Update creates a new layer on top of the existing snapshot diff tree with
// the specified data items. Note, the maps are retained by the method to avoid
// copying everything.
func (dl *diskLayer) Update(blockHash common.Hash, destructs map[common.Hash]struct{}, accounts map[common.Hash][]byte, storage map[common.Hash]map[common.Hash][]byte) *diffLayer {
	return newDiffLayer(dl, blockHash, destructs, accounts, storage)
}
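
// exampleDiskLayerRead is an illustrative, in-package usage sketch of the disk
// layer's read path, not part of the production API: it builds a diskLayer over
// an existing snapshot database and resolves an account and one of its storage
// slots through the cache-then-disk lookups implemented above. The database
// handle, trie database, and hashes are assumed inputs; the fastcache size
// (512 MiB) is an arbitrary example value.
func exampleDiskLayerRead(diskdb xcbdb.KeyValueStore, triedb *trie.Database, root, accountHash, slotHash common.Hash) error {
	base := &diskLayer{
		diskdb: diskdb,
		triedb: triedb,
		cache:  fastcache.New(512 * 1024 * 1024),
		root:   root,
	}
	// Account decodes the slim RLP returned by AccountRLP; a nil account with a
	// nil error means the account is absent from the snapshot.
	account, err := base.Account(accountHash)
	if err != nil {
		return err // e.g. ErrSnapshotStale or ErrNotCoveredYet
	}
	if account == nil {
		return nil
	}
	// Storage keys the lookup on accountHash ++ slotHash, again consulting the
	// clean cache before falling back to the on-disk snapshot.
	_, err = base.Storage(accountHash, slotHash)
	return err
}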