gitlab.com/flarenetwork/coreth@v0.1.1/core/state/snapshot/generate.go

// (c) 2019-2020, Ava Labs, Inc.
//
// This file is a derived work, based on the go-ethereum library whose original
// notices appear below.
//
// It is distributed under a license compatible with the licensing terms of the
// original code from which it is derived.
//
// Much love to the original authors for their work.
// **********
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"encoding/binary"
	"fmt"
	"math/big"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/math"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"gitlab.com/flarenetwork/coreth/core/rawdb"
)

var (
	// emptyRoot is the known root hash of an empty trie.
	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

	// emptyCode is the known hash of the empty EVM bytecode.
	emptyCode = crypto.Keccak256Hash(nil)
)

// generatorStats is a collection of statistics gathered by the snapshot generator
// for logging purposes.
type generatorStats struct {
	wiping   chan struct{}      // Notification channel if wiping is in progress
	origin   uint64             // Origin prefix where generation started
	start    time.Time          // Timestamp when generation started
	accounts uint64             // Number of accounts indexed (generated or recovered)
	slots    uint64             // Number of storage slots indexed (generated or recovered)
	storage  common.StorageSize // Total account and storage slot size (generation or recovery)
}

// Log creates a contextual log with the given message and the context pulled
// from the internally maintained statistics.
func (gs *generatorStats) Log(msg string, root common.Hash, marker []byte) {
	var ctx []interface{}
	if root != (common.Hash{}) {
		ctx = append(ctx, []interface{}{"root", root}...)
	}
	// Figure out whether we're after or within an account
	switch len(marker) {
	case common.HashLength:
		ctx = append(ctx, []interface{}{"at", common.BytesToHash(marker)}...)
	case 2 * common.HashLength:
		ctx = append(ctx, []interface{}{
			"in", common.BytesToHash(marker[:common.HashLength]),
			"at", common.BytesToHash(marker[common.HashLength:]),
		}...)
	}
	// Add the usual measurements
	ctx = append(ctx, []interface{}{
		"accounts", gs.accounts,
		"slots", gs.slots,
		"storage", gs.storage,
		"elapsed", common.PrettyDuration(time.Since(gs.start)),
	}...)
	// Calculate the estimated indexing time based on current stats
	if len(marker) > 0 {
		if done := binary.BigEndian.Uint64(marker[:8]) - gs.origin; done > 0 {
			left := math.MaxUint64 - binary.BigEndian.Uint64(marker[:8])

			speed := done/uint64(time.Since(gs.start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
			ctx = append(ctx, []interface{}{
				"eta", common.PrettyDuration(time.Duration(left/speed) * time.Millisecond),
			}...)
		}
	}
	log.Info(msg, ctx...)
}

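// Generator markers used throughout this file follow a simple layout:
//
//	nil           - generation has finished
//	[]byte{}      - generation has not started yet (initialized but empty)
//	32-byte value - hash of the last fully processed account
//	64-byte value - account hash followed by the last processed storage slot hash
//
// Log, journalProgress and generate below all branch on the marker length.
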
// generateSnapshot regenerates a brand new snapshot based on an existing state
// database and head block asynchronously. The snapshot is returned immediately
// and generation is continued in the background until done.
func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, blockHash, root common.Hash, wiper chan struct{}) *diskLayer {
	// Wipe any previously existing snapshot from the database if no wiper is
	// currently in progress.
	if wiper == nil {
		wiper = wipeSnapshot(diskdb, true)
	}
	// Create a new disk layer with an initialized state marker at zero
	var (
		stats     = &generatorStats{wiping: wiper, start: time.Now()}
		batch     = diskdb.NewBatch()
		genMarker = []byte{} // Initialized but empty!
	)
	rawdb.WriteSnapshotBlockHash(batch, blockHash)
	rawdb.WriteSnapshotRoot(batch, root)
	journalProgress(batch, genMarker, stats)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write initialized state marker", "err", err)
	}
	base := &diskLayer{
		diskdb:     diskdb,
		triedb:     triedb,
		blockHash:  blockHash,
		root:       root,
		cache:      fastcache.New(cache * 1024 * 1024),
		genMarker:  genMarker,
		genPending: make(chan struct{}),
		genAbort:   make(chan chan struct{}),
		created:    time.Now(),
	}
	go base.generate(stats)
	log.Debug("Start snapshot generation", "root", root)
	return base
}

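// Illustrative usage sketch (not part of the original file): a caller in this
// package constructs the disk layer and may wait on genPending, which generate
// closes once the snapshot is fully built. The cache size (256 MB) is an
// arbitrary example value.
//
//	base := generateSnapshot(diskdb, triedb, 256, blockHash, root, nil)
//	<-base.genPending // blocks until background generation completes
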
// journalProgress persists the generator stats into the database to resume later.
func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorStats) {
	// Write out the generator marker. Note that the generator is a standalone
	// disk-layer entry which is not mixed into the journal, so it is fine for
	// the generator to be persisted while the journal is not.
	entry := journalGenerator{
		Done:   marker == nil,
		Marker: marker,
	}
	if stats != nil {
		entry.Wiping = (stats.wiping != nil)
		entry.Accounts = stats.accounts
		entry.Slots = stats.slots
		entry.Storage = uint64(stats.storage)
	}
	blob, err := rlp.EncodeToBytes(entry)
	if err != nil {
		panic(err) // Cannot happen, here to catch dev errors
	}
	var logstr string
	switch {
	case marker == nil:
		logstr = "done"
	case bytes.Equal(marker, []byte{}):
		logstr = "empty"
	case len(marker) == common.HashLength:
		logstr = fmt.Sprintf("%#x", marker)
	default:
		logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:])
	}
	log.Debug("Journalled generator progress", "progress", logstr)
	rawdb.WriteSnapshotGenerator(db, blob)
}

// checkAndFlush checks whether snapshot generation has been aborted or whether
// the current batch has grown beyond ethdb.IdealBatchSize. If either holds, it
// persists the current progress to disk. It returns true if generation should
// stop (an abort was requested or the flush failed) and false otherwise, in
// which case it may simply log the current progress.
func (dl *diskLayer) checkAndFlush(batch ethdb.Batch, stats *generatorStats, currentLocation []byte) bool {
	// If we've exceeded our batch allowance or termination was requested, flush to disk
	var abort chan struct{}
	select {
	case abort = <-dl.genAbort:
	default:
	}
	if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
		// Flush out the batch regardless of whether it is empty. It is
		// possible that all the states were recovered, in which case the
		// generation still made progress.
		journalProgress(batch, currentLocation, stats)

		if err := batch.Write(); err != nil {
			log.Error("Failed to flush batch", "err", err)
			if abort == nil {
				abort = <-dl.genAbort
			}
			dl.genStats = stats
			close(abort)
			return true
		}
		batch.Reset()

		dl.lock.Lock()
		dl.genMarker = currentLocation
		dl.lock.Unlock()

		if abort != nil {
			stats.Log("Aborting state snapshot generation", dl.root, currentLocation)
			dl.genStats = stats
			close(abort)
			return true
		}
	}
	if time.Since(dl.logged) > 8*time.Second {
		stats.Log("Generating state snapshot", dl.root, currentLocation)
		dl.logged = time.Now()
	}
	return false
}

// generate is a background thread that iterates over the state and storage tries,
// constructing the state snapshot. All the arguments are purely for statistics
// gathering and logging, since the method surfs the blocks as they arrive, often
// being restarted.
func (dl *diskLayer) generate(stats *generatorStats) {
	// If a database wipe is in operation, wait until it's done
	if stats.wiping != nil {
		stats.Log("Wiper running, state snapshotting paused", common.Hash{}, dl.genMarker)
		select {
		// If wiper is done, resume normal mode of operation
		case <-stats.wiping:
			stats.wiping = nil
			stats.start = time.Now()

		// If generator was aborted during wipe, return
		case abort := <-dl.genAbort:
			stats.Log("Aborting state snapshot generation", dl.root, dl.genMarker)
			dl.genStats = stats
			close(abort)
			return
		}
	}
	// Create an account and state iterator pointing to the current generator marker
	accTrie, err := trie.NewSecure(dl.root, dl.triedb)
	if err != nil {
		// The account trie is missing (GC), surf the chain until one becomes available
		stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
		abort := <-dl.genAbort
		dl.genStats = stats
		close(abort)
		return
	}
	stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)

	var accMarker []byte
	if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
		accMarker = dl.genMarker[:common.HashLength]
	}
	accIt := trie.NewIterator(accTrie.NodeIterator(accMarker))
	batch := dl.diskdb.NewBatch()

	// Iterate from the previous marker and continue generating the state snapshot
	dl.logged = time.Now()
	for accIt.Next() {
		// Retrieve the current account and flatten it into the internal format
		accountHash := common.BytesToHash(accIt.Key)

		var acc struct {
			Nonce       uint64
			Balance     *big.Int
			Root        common.Hash
			CodeHash    []byte
			IsMultiCoin bool
		}
		if err := rlp.DecodeBytes(accIt.Value, &acc); err != nil {
			log.Crit("Invalid account encountered during snapshot creation", "err", err)
		}
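		// Re-encode the account into the slim snapshot representation used on
		// disk (SlimAccountRLP), carrying coreth's IsMultiCoin flag along; in
		// the upstream go-ethereum slim format, empty storage roots and empty
		// code hashes are elided to save space.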
		data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash, acc.IsMultiCoin)

		// If the account is not yet in-progress, write it out
		if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) {
			rawdb.WriteAccountSnapshot(batch, accountHash, data)
			stats.storage += common.StorageSize(1 + common.HashLength + len(data))
			stats.accounts++
		}
		if dl.checkAndFlush(batch, stats, accountHash[:]) {
			// checkAndFlush handles abort
			return
		}
		// If the iterated account is a contract, iterate through corresponding contract
		// storage to generate snapshot entries.
		if acc.Root != emptyRoot {
			storeTrie, err := trie.NewSecure(acc.Root, dl.triedb)
			if err != nil {
				log.Error("Generator failed to access storage trie", "root", dl.root, "account", accountHash, "stroot", acc.Root, "err", err)
				abort := <-dl.genAbort
				dl.genStats = stats
				close(abort)
				return
			}
			var storeMarker []byte
			if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength {
				storeMarker = dl.genMarker[common.HashLength:]
			}
			storeIt := trie.NewIterator(storeTrie.NodeIterator(storeMarker))
			for storeIt.Next() {
				rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(storeIt.Key), storeIt.Value)
				stats.storage += common.StorageSize(1 + 2*common.HashLength + len(storeIt.Value))
				stats.slots++

				if dl.checkAndFlush(batch, stats, append(accountHash[:], storeIt.Key...)) {
					// checkAndFlush handles abort
					return
				}
			}
			if err := storeIt.Err; err != nil {
				log.Error("Generator failed to iterate storage trie", "accroot", dl.root, "acchash", common.BytesToHash(accIt.Key), "stroot", acc.Root, "err", err)
				abort := <-dl.genAbort
				dl.genStats = stats
				close(abort)
				return
			}
		}
		if time.Since(dl.logged) > 8*time.Second {
			stats.Log("Generating state snapshot", dl.root, accIt.Key)
			dl.logged = time.Now()
		}
		// Some account processed, unmark the marker
		accMarker = nil
	}
	if err := accIt.Err; err != nil {
		log.Error("Generator failed to iterate account trie", "root", dl.root, "err", err)
		abort := <-dl.genAbort
		dl.genStats = stats
		close(abort)
		return
	}
	// Snapshot fully generated, set the marker to nil.
	// Note that even if there is nothing to commit, the generator is
	// persisted anyway to mark the snapshot as complete.
	journalProgress(batch, nil, stats)
	if err := batch.Write(); err != nil {
		log.Error("Failed to flush batch", "err", err)
		abort := <-dl.genAbort
		dl.genStats = stats
		close(abort)
		return
	}

	log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
		"storage", stats.storage, "elapsed", common.PrettyDuration(time.Since(stats.start)))

	dl.lock.Lock()
	dl.genMarker = nil
	dl.genStats = stats
	close(dl.genPending)
	dl.lock.Unlock()

	// Someone will be looking for us, wait it out
	abort := <-dl.genAbort
	close(abort)
}