// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"io"
	"time"

	"github.com/DxChainNetwork/dxc/common"
	"github.com/DxChainNetwork/dxc/core/rawdb"
	"github.com/DxChainNetwork/dxc/ethdb"
	"github.com/DxChainNetwork/dxc/log"
	"github.com/DxChainNetwork/dxc/rlp"
	"github.com/DxChainNetwork/dxc/trie"
	"github.com/VictoriaMetrics/fastcache"
)

// journalVersion is the supported on-disk format version of the diff journal.
// Journals written with any other version are discarded on load.
const journalVersion uint64 = 0

// journalGenerator is a disk layer entry containing the generator progress marker.
type journalGenerator struct {
	// Indicator that whether the database was in progress of being wiped.
	// It's deprecated but keep it here for backward compatibility.
	Wiping bool

	Done     bool   // Whether the generator finished creating the snapshot
	Marker   []byte // Last account/slot position reached; nil means not started
	Accounts uint64 // Number of accounts indexed so far
	Slots    uint64 // Number of storage slots indexed so far
	Storage  uint64 // Total account and storage slot size indexed, in bytes
}
52 type journalDestruct struct { 53 Hash common.Hash 54 } 55 56 // journalAccount is an account entry in a diffLayer's disk journal. 57 type journalAccount struct { 58 Hash common.Hash 59 Blob []byte 60 } 61 62 // journalStorage is an account's storage map in a diffLayer's disk journal. 63 type journalStorage struct { 64 Hash common.Hash 65 Keys []common.Hash 66 Vals [][]byte 67 } 68 69 // loadAndParseJournal tries to parse the snapshot journal in latest format. 70 func loadAndParseJournal(db ethdb.KeyValueStore, base *diskLayer) (snapshot, journalGenerator, error) { 71 // Retrieve the disk layer generator. It must exist, no matter the 72 // snapshot is fully generated or not. Otherwise the entire disk 73 // layer is invalid. 74 generatorBlob := rawdb.ReadSnapshotGenerator(db) 75 if len(generatorBlob) == 0 { 76 return nil, journalGenerator{}, errors.New("missing snapshot generator") 77 } 78 var generator journalGenerator 79 if err := rlp.DecodeBytes(generatorBlob, &generator); err != nil { 80 return nil, journalGenerator{}, fmt.Errorf("failed to decode snapshot generator: %v", err) 81 } 82 // Retrieve the diff layer journal. It's possible that the journal is 83 // not existent, e.g. the disk layer is generating while that the Geth 84 // crashes without persisting the diff journal. 85 // So if there is no journal, or the journal is invalid(e.g. the journal 86 // is not matched with disk layer; or the it's the legacy-format journal, 87 // etc.), we just discard all diffs and try to recover them later. 
88 journal := rawdb.ReadSnapshotJournal(db) 89 if len(journal) == 0 { 90 log.Warn("Loaded snapshot journal", "diskroot", base.root, "diffs", "missing") 91 return base, generator, nil 92 } 93 r := rlp.NewStream(bytes.NewReader(journal), 0) 94 95 // Firstly, resolve the first element as the journal version 96 version, err := r.Uint() 97 if err != nil { 98 log.Warn("Failed to resolve the journal version", "error", err) 99 return base, generator, nil 100 } 101 if version != journalVersion { 102 log.Warn("Discarded the snapshot journal with wrong version", "required", journalVersion, "got", version) 103 return base, generator, nil 104 } 105 // Secondly, resolve the disk layer root, ensure it's continuous 106 // with disk layer. Note now we can ensure it's the snapshot journal 107 // correct version, so we expect everything can be resolved properly. 108 var root common.Hash 109 if err := r.Decode(&root); err != nil { 110 return nil, journalGenerator{}, errors.New("missing disk layer root") 111 } 112 // The diff journal is not matched with disk, discard them. 113 // It can happen that Geth crashes without persisting the latest 114 // diff journal. 115 if !bytes.Equal(root.Bytes(), base.root.Bytes()) { 116 log.Warn("Loaded snapshot journal", "diskroot", base.root, "diffs", "unmatched") 117 return base, generator, nil 118 } 119 // Load all the snapshot diffs from the journal 120 snapshot, err := loadDiffLayer(base, r) 121 if err != nil { 122 return nil, journalGenerator{}, err 123 } 124 log.Debug("Loaded snapshot journal", "diskroot", base.root, "diffhead", snapshot.Root()) 125 return snapshot, generator, nil 126 } 127 128 // loadSnapshot loads a pre-existing state snapshot backed by a key-value store. 
129 func loadSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash, recovery bool) (snapshot, bool, error) { 130 // If snapshotting is disabled (initial sync in progress), don't do anything, 131 // wait for the chain to permit us to do something meaningful 132 if rawdb.ReadSnapshotDisabled(diskdb) { 133 return nil, true, nil 134 } 135 // Retrieve the block number and hash of the snapshot, failing if no snapshot 136 // is present in the database (or crashed mid-update). 137 baseRoot := rawdb.ReadSnapshotRoot(diskdb) 138 if baseRoot == (common.Hash{}) { 139 return nil, false, errors.New("missing or corrupted snapshot") 140 } 141 base := &diskLayer{ 142 diskdb: diskdb, 143 triedb: triedb, 144 cache: fastcache.New(cache * 1024 * 1024), 145 root: baseRoot, 146 } 147 snapshot, generator, err := loadAndParseJournal(diskdb, base) 148 if err != nil { 149 log.Warn("Failed to load new-format journal", "error", err) 150 return nil, false, err 151 } 152 // Entire snapshot journal loaded, sanity check the head. If the loaded 153 // snapshot is not matched with current state root, print a warning log 154 // or discard the entire snapshot it's legacy snapshot. 155 // 156 // Possible scenario: Geth was crashed without persisting journal and then 157 // restart, the head is rewound to the point with available state(trie) 158 // which is below the snapshot. In this case the snapshot can be recovered 159 // by re-executing blocks but right now it's unavailable. 160 if head := snapshot.Root(); head != root { 161 // If it's legacy snapshot, or it's new-format snapshot but 162 // it's not in recovery mode, returns the error here for 163 // rebuilding the entire snapshot forcibly. 164 if !recovery { 165 return nil, false, fmt.Errorf("head doesn't match snapshot: have %#x, want %#x", head, root) 166 } 167 // It's in snapshot recovery, the assumption is held that 168 // the disk layer is always higher than chain head. 
It can 169 // be eventually recovered when the chain head beyonds the 170 // disk layer. 171 log.Warn("Snapshot is not continuous with chain", "snaproot", head, "chainroot", root) 172 } 173 // Everything loaded correctly, resume any suspended operations 174 if !generator.Done { 175 // Whether or not wiping was in progress, load any generator progress too 176 base.genMarker = generator.Marker 177 if base.genMarker == nil { 178 base.genMarker = []byte{} 179 } 180 base.genPending = make(chan struct{}) 181 base.genAbort = make(chan chan *generatorStats) 182 183 var origin uint64 184 if len(generator.Marker) >= 8 { 185 origin = binary.BigEndian.Uint64(generator.Marker) 186 } 187 go base.generate(&generatorStats{ 188 origin: origin, 189 start: time.Now(), 190 accounts: generator.Accounts, 191 slots: generator.Slots, 192 storage: common.StorageSize(generator.Storage), 193 }) 194 } 195 return snapshot, false, nil 196 } 197 198 // loadDiffLayer reads the next sections of a snapshot journal, reconstructing a new 199 // diff and verifying that it can be linked to the requested parent. 
200 func loadDiffLayer(parent snapshot, r *rlp.Stream) (snapshot, error) { 201 // Read the next diff journal entry 202 var root common.Hash 203 if err := r.Decode(&root); err != nil { 204 // The first read may fail with EOF, marking the end of the journal 205 if err == io.EOF { 206 return parent, nil 207 } 208 return nil, fmt.Errorf("load diff root: %v", err) 209 } 210 var destructs []journalDestruct 211 if err := r.Decode(&destructs); err != nil { 212 return nil, fmt.Errorf("load diff destructs: %v", err) 213 } 214 destructSet := make(map[common.Hash]struct{}) 215 for _, entry := range destructs { 216 destructSet[entry.Hash] = struct{}{} 217 } 218 var accounts []journalAccount 219 if err := r.Decode(&accounts); err != nil { 220 return nil, fmt.Errorf("load diff accounts: %v", err) 221 } 222 accountData := make(map[common.Hash][]byte) 223 for _, entry := range accounts { 224 if len(entry.Blob) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that 225 accountData[entry.Hash] = entry.Blob 226 } else { 227 accountData[entry.Hash] = nil 228 } 229 } 230 var storage []journalStorage 231 if err := r.Decode(&storage); err != nil { 232 return nil, fmt.Errorf("load diff storage: %v", err) 233 } 234 storageData := make(map[common.Hash]map[common.Hash][]byte) 235 for _, entry := range storage { 236 slots := make(map[common.Hash][]byte) 237 for i, key := range entry.Keys { 238 if len(entry.Vals[i]) > 0 { // RLP loses nil-ness, but `[]byte{}` is not a valid item, so reinterpret that 239 slots[key] = entry.Vals[i] 240 } else { 241 slots[key] = nil 242 } 243 } 244 storageData[entry.Hash] = slots 245 } 246 return loadDiffLayer(newDiffLayer(parent, root, destructSet, accountData, storageData), r) 247 } 248 249 // Journal terminates any in-progress snapshot generation, also implicitly pushing 250 // the progress into the database. 
251 func (dl *diskLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) { 252 // If the snapshot is currently being generated, abort it 253 var stats *generatorStats 254 if dl.genAbort != nil { 255 abort := make(chan *generatorStats) 256 dl.genAbort <- abort 257 258 if stats = <-abort; stats != nil { 259 stats.Log("Journalling in-progress snapshot", dl.root, dl.genMarker) 260 } 261 } 262 // Ensure the layer didn't get stale 263 dl.lock.RLock() 264 defer dl.lock.RUnlock() 265 266 if dl.stale { 267 return common.Hash{}, ErrSnapshotStale 268 } 269 // Ensure the generator stats is written even if none was ran this cycle 270 journalProgress(dl.diskdb, dl.genMarker, stats) 271 272 log.Debug("Journalled disk layer", "root", dl.root) 273 return dl.root, nil 274 } 275 276 // Journal writes the memory layer contents into a buffer to be stored in the 277 // database as the snapshot journal. 278 func (dl *diffLayer) Journal(buffer *bytes.Buffer) (common.Hash, error) { 279 // Journal the parent first 280 base, err := dl.parent.Journal(buffer) 281 if err != nil { 282 return common.Hash{}, err 283 } 284 // Ensure the layer didn't get stale 285 dl.lock.RLock() 286 defer dl.lock.RUnlock() 287 288 if dl.Stale() { 289 return common.Hash{}, ErrSnapshotStale 290 } 291 // Everything below was journalled, persist this layer too 292 if err := rlp.Encode(buffer, dl.root); err != nil { 293 return common.Hash{}, err 294 } 295 destructs := make([]journalDestruct, 0, len(dl.destructSet)) 296 for hash := range dl.destructSet { 297 destructs = append(destructs, journalDestruct{Hash: hash}) 298 } 299 if err := rlp.Encode(buffer, destructs); err != nil { 300 return common.Hash{}, err 301 } 302 accounts := make([]journalAccount, 0, len(dl.accountData)) 303 for hash, blob := range dl.accountData { 304 accounts = append(accounts, journalAccount{Hash: hash, Blob: blob}) 305 } 306 if err := rlp.Encode(buffer, accounts); err != nil { 307 return common.Hash{}, err 308 } 309 storage := 
make([]journalStorage, 0, len(dl.storageData)) 310 for hash, slots := range dl.storageData { 311 keys := make([]common.Hash, 0, len(slots)) 312 vals := make([][]byte, 0, len(slots)) 313 for key, val := range slots { 314 keys = append(keys, key) 315 vals = append(vals, val) 316 } 317 storage = append(storage, journalStorage{Hash: hash, Keys: keys, Vals: vals}) 318 } 319 if err := rlp.Encode(buffer, storage); err != nil { 320 return common.Hash{}, err 321 } 322 log.Debug("Journalled diff layer", "root", dl.root, "parent", dl.parent.Root()) 323 return base, nil 324 }