// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

// journalGenerator is a disk layer entry containing the generator progress marker.
// It is the first entry serialized into the snapshot journal, allowing an
// interrupted snapshot generation to be resumed after a restart.
type journalGenerator struct {
	Wiping bool // Whether the database was in progress of being wiped
	Done   bool // Whether the generator finished creating the snapshot
	// Marker is the generator's resume position. A nil marker together with
	// Done == true means generation completed; otherwise its first 8 bytes
	// are interpreted as the big-endian resume origin (see loadSnapshot).
	Marker []byte
	// Progress counters carried across restarts; they seed the resumed
	// generator's stats (see generatorStats in loadSnapshot).
	Accounts uint64 // Accounts processed so far
	Slots    uint64 // Storage slots processed so far
	Storage  uint64 // Bytes of account/storage data processed so far
}

// journalDestruct is an account deletion entry in a diffLayer's disk journal.
type journalDestruct struct {
	Hash common.Hash // Hash of the destructed account
}

// journalAccount is an account entry in a diffLayer's disk journal.
type journalAccount struct {
	Hash common.Hash // Hash of the account
	Blob []byte      // Account data blob, journalled verbatim
}

// journalStorage is an account's storage map in a diffLayer's disk journal.
type journalStorage struct {
	Hash common.Hash   // Hash of the account owning the storage slots
	Keys []common.Hash // Storage slot keys, index-aligned with Vals
	Vals [][]byte      // Storage slot values, index-aligned with Keys
}

// loadSnapshot loads a pre-existing state snapshot backed by a key-value store.
// The cache parameter is the read-cache size in megabytes, and root is the
// state root the reconstructed snapshot head must match. If the journalled
// generator had not finished, snapshot generation is resumed in the background.
func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) (snapshot, error) {
	// Retrieve the block number and hash of the snapshot, failing if no snapshot
	// is present in the database (or crashed mid-update).
	baseRoot := rawdb.ReadSnapshotRoot(diskdb)
	if baseRoot == (common.Hash{}) {
		return nil, errors.New("missing or corrupted snapshot")
	}
	base := &diskLayer{
		diskdb: diskdb,
		triedb: triedb,
		cache:  fastcache.New(cache * 1024 * 1024), // cache is given in megabytes
		root:   baseRoot,
	}
	// Retrieve the journal, it must exist since even for 0 layer it stores whether
	// we've already generated the snapshot or are in progress only
	journal := rawdb.ReadSnapshotJournal(diskdb)
	if len(journal) == 0 {
		return nil, errors.New("missing or corrupted snapshot journal")
	}
	r := rlp.NewStream(bytes.NewReader(journal), 0)

	// Read the snapshot generation progress for the disk layer
	var generator journalGenerator
	if err := r.Decode(&generator); err != nil {
		return nil, fmt.Errorf("failed to load snapshot progress marker: %v", err)
	}
	// Load all the snapshot diffs from the journal
	snapshot, err := loadDiffLayer(base, r)
	if err != nil {
		return nil, err
	}
	// Entire snapshot journal loaded, sanity check that the head layer matches
	// the root the caller expects before handing the snapshot out
	if head := snapshot.Root(); head != root {
		return nil, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root)
	}
	// Everything loaded correctly, resume any suspended operations
	if !generator.Done {
		// If the generator was still wiping, restart one from scratch (fine for
		// now as it's rare and the wiper deletes the stuff it touches anyway, so
		// restarting won't incur a lot of extra database hops).
		var wiper chan struct{}
		if generator.Wiping {
			log.Info("Resuming previous snapshot wipe")
			wiper = wipeSnapshot(diskdb, false)
		}
		// Whether or not wiping was in progress, load any generator progress too
		base.genMarker = generator.Marker
		if base.genMarker == nil {
			base.genMarker = []byte{}
		}
		base.genPending = make(chan struct{})
		base.genAbort = make(chan chan *generatorStats)

		// Recover the resume origin: the first 8 bytes of the marker are read
		// as a big-endian integer position
		var origin uint64
		if len(generator.Marker) >= 8 {
			origin = binary.BigEndian.Uint64(generator.Marker)
		}
		go base.generate(&generatorStats{
			wiping:   wiper,
			origin:   origin,
			start:    time.Now(),
			accounts: generator.Accounts,
			slots:    generator.Slots,
			storage:  common.StorageSize(generator.Storage),
		})
	}
	return snapshot, nil
}

// loadDiffLayer reads the next sections of a snapshot journal, reconstructing a new
// diff and verifying that it can be linked to the requested parent.
137 func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) { 138 // Read the next diff journal entry 139 var root common.Hash 140 if err := r.Decode(&root); err != nil { 141 // The first read may fail with EOF, marking the end of the journal 142 if err == io.EOF { 143 return parent, nil 144 } 145 return nil, fmt.Errorf("load diff root: %v", err) 146 } 147 var destructs []journalDestruct 148 if err := r.Decode(&destructs); err != nil { 149 return nil, fmt.Errorf("load diff destructs: %v", err) 150 } 151 destructSet := make(map[common.Hash]struct{}) 152 for _, entry := range destructs { 153 destructSet[entry.Hash] = struct{}{} 154 } 155 var accounts []journalAccount 156 if err := r.Decode(&accounts); err != nil { 157 return nil, fmt.Errorf("load diff accounts: %v", err) 158 } 159 accountData := make(map[common.Hash][]byte) 160 for _, entry := range accounts { 161 accountData[entry.Hash] = entry.Blob 162 } 163 var storage []journalStorage 164 if err := r.Decode(&storage); err != nil { 165 return nil, fmt.Errorf("load diff storage: %v", err) 166 } 167 storageData := make(map[common.Hash]map[common.Hash][]byte) 168 for _, entry := range storage { 169 slots := make(map[common.Hash][]byte) 170 for i, key := range entry.Keys { 171 slots[key] = entry.Vals[i] 172 } 173 storageData[entry.Hash] = slots 174 } 175 return loadDiffLayer(newDiffLayer(parent, root, destructSet, accountData, storageData), r) 176 } 177 178 // Journal writes the persistent layer generator stats into a buffer to be stored 179 // in the database as the snapshot journal. 
180 func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) { 181 // If the snapshot is currently being generated, abort it 182 var stats *generatorStats 183 if dl.genAbort != nil { 184 abort := make(chan *generatorStats) 185 dl.genAbort <- abort 186 187 if stats = <-abort; stats != nil { 188 stats.Log("Journalling in-progress snapshot", dl.genMarker) 189 } 190 } 191 // Ensure the layer didn't get stale 192 dl.lock.RLock() 193 defer dl.lock.RUnlock() 194 195 if dl.stale { 196 return common.Hash{}, ErrSnapshotStale 197 } 198 // Write out the generator marker 199 entry := journalGenerator{ 200 Done: dl.genMarker == nil, 201 Marker: dl.genMarker, 202 } 203 if stats != nil { 204 entry.Wiping = (stats.wiping != nil) 205 entry.Accounts = stats.accounts 206 entry.Slots = stats.slots 207 entry.Storage = uint64(stats.storage) 208 } 209 if err := rlp.Encode(buffer, entry); err != nil { 210 return common.Hash{}, err 211 } 212 return dl.root, nil 213 } 214 215 // Journal writes the memory layer contents into a buffer to be stored in the 216 // database as the snapshot journal. 
217 func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) { 218 // Journal the parent first 219 base, err := dl.parent.Journal(buffer) 220 if err != nil { 221 return common.Hash{}, err 222 } 223 // Ensure the layer didn't get stale 224 dl.lock.RLock() 225 defer dl.lock.RUnlock() 226 227 if dl.Stale() { 228 return common.Hash{}, ErrSnapshotStale 229 } 230 // Everything below was journalled, persist this layer too 231 if err := rlp.Encode(buffer, dl.root); err != nil { 232 return common.Hash{}, err 233 } 234 destructs := make([]journalDestruct, 0, len(dl.destructSet)) 235 for hash := range dl.destructSet { 236 destructs = append(destructs, journalDestruct{Hash: hash}) 237 } 238 if err := rlp.Encode(buffer, destructs); err != nil { 239 return common.Hash{}, err 240 } 241 accounts := make([]journalAccount, 0, len(dl.accountData)) 242 for hash, blob := range dl.accountData { 243 accounts = append(accounts, journalAccount{Hash: hash, Blob: blob}) 244 } 245 if err := rlp.Encode(buffer, accounts); err != nil { 246 return common.Hash{}, err 247 } 248 storage := make([]journalStorage, 0, len(dl.storageData)) 249 for hash, slots := range dl.storageData { 250 keys := make([]common.Hash, 0, len(slots)) 251 vals := make([][]byte, 0, len(slots)) 252 for key, val := range slots { 253 keys = append(keys, key) 254 vals = append(vals, val) 255 } 256 storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals}) 257 } 258 if err := rlp.Encode(buffer, storage); err != nil { 259 return common.Hash{}, err 260 } 261 return base, nil 262 }