github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/core/state/snapshot/generate.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
    "bytes"
    "encoding/binary"
    "errors"
    "fmt"
    "math/big"
    "time"

    "github.com/VictoriaMetrics/fastcache"

    "github.com/scroll-tech/go-ethereum/common"
    "github.com/scroll-tech/go-ethereum/common/hexutil"
    "github.com/scroll-tech/go-ethereum/common/math"
    "github.com/scroll-tech/go-ethereum/core/rawdb"
    "github.com/scroll-tech/go-ethereum/crypto/codehash"
    "github.com/scroll-tech/go-ethereum/ethdb"
    "github.com/scroll-tech/go-ethereum/ethdb/memorydb"
    "github.com/scroll-tech/go-ethereum/log"
    "github.com/scroll-tech/go-ethereum/metrics"
    "github.com/scroll-tech/go-ethereum/rlp"
    "github.com/scroll-tech/go-ethereum/trie"
)

var (
    // emptyRoot is the known root hash of an empty trie.
    emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

    // emptyPoseidonCode and emptyKeccakCode are the known hashes of the empty EVM bytecode.
    emptyPoseidonCode = codehash.EmptyPoseidonCodeHash
    emptyKeccakCode   = codehash.EmptyKeccakCodeHash

    // accountCheckRange is the upper limit of the number of accounts involved in
    // each range check. This value is estimated based on experience. If it is too
    // large, the failure rate of range proofs will increase; if it is too small,
    // the efficiency of state recovery will decrease.
    accountCheckRange = 128

    // storageCheckRange is the upper limit of the number of storage slots involved
    // in each range check. This value is estimated based on experience. If it is
    // too large, the failure rate of range proofs will increase; if it is too
    // small, the efficiency of state recovery will decrease.
    storageCheckRange = 1024
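    // Both limits above are passed as the `max` argument of proveRange below:
    // each pass verifies at most that many flat entries against the trie
    // before either accepting the range or falling back to trie regeneration.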
    // errMissingTrie is returned if the target trie is missing while the
    // generation is running. In this case the generation is aborted and
    // waits for a new signal.
    errMissingTrie = errors.New("missing trie")
)

// Metrics in generation
var (
    snapGeneratedAccountMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/account/generated", nil)
    snapRecoveredAccountMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/account/recovered", nil)
    snapWipedAccountMeter         = metrics.NewRegisteredMeter("state/snapshot/generation/account/wiped", nil)
    snapMissallAccountMeter       = metrics.NewRegisteredMeter("state/snapshot/generation/account/missall", nil)
    snapGeneratedStorageMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/storage/generated", nil)
    snapRecoveredStorageMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/storage/recovered", nil)
    snapWipedStorageMeter         = metrics.NewRegisteredMeter("state/snapshot/generation/storage/wiped", nil)
    snapMissallStorageMeter       = metrics.NewRegisteredMeter("state/snapshot/generation/storage/missall", nil)
    snapSuccessfulRangeProofMeter = metrics.NewRegisteredMeter("state/snapshot/generation/proof/success", nil)
    snapFailedRangeProofMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/proof/failure", nil)

    // snapAccountProveCounter measures time spent on the account proving
    snapAccountProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/prove", nil)
    // snapAccountTrieReadCounter measures time spent on the account trie iteration
    snapAccountTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/trieread", nil)
    // snapAccountSnapReadCounter measures time spent on the snapshot account iteration
    snapAccountSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/snapread", nil)
    // snapAccountWriteCounter measures time spent on writing/updating/deleting accounts
    snapAccountWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/write", nil)
    // snapStorageProveCounter measures time spent on storage proving
    snapStorageProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/prove", nil)
    // snapStorageTrieReadCounter measures time spent on the storage trie iteration
    snapStorageTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/trieread", nil)
    // snapStorageSnapReadCounter measures time spent on the snapshot storage iteration
    snapStorageSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/snapread", nil)
    // snapStorageWriteCounter measures time spent on writing/updating/deleting storages
    snapStorageWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/write", nil)
)

// generatorStats is a collection of statistics gathered by the snapshot generator
// for logging purposes.
type generatorStats struct {
    origin   uint64             // Origin prefix where generation started
    start    time.Time          // Timestamp when generation started
    accounts uint64             // Number of accounts indexed (generated or recovered)
    slots    uint64             // Number of storage slots indexed (generated or recovered)
    storage  common.StorageSize // Total account and storage slot size (generated or recovered)
}
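// The generation marker used throughout this file takes one of the following
// shapes (as consumed by the switch in Log below and the slicing in generate):
//
//	marker == nil                       generation is complete
//	len(marker) == 0                    generation has not started yet
//	len(marker) == common.HashLength    paused after the account with this hash
//	len(marker) == 2*common.HashLength  paused inside the storage trie of the
//	                                    account marker[:32], at slot marker[32:]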
// Log creates a contextual log with the given message and the context pulled
// from the internally maintained statistics.
func (gs *generatorStats) Log(msg string, root common.Hash, marker []byte) {
    var ctx []interface{}
    if root != (common.Hash{}) {
        ctx = append(ctx, []interface{}{"root", root}...)
    }
    // Figure out whether we're after or within an account
    switch len(marker) {
    case common.HashLength:
        ctx = append(ctx, []interface{}{"at", common.BytesToHash(marker)}...)
    case 2 * common.HashLength:
        ctx = append(ctx, []interface{}{
            "in", common.BytesToHash(marker[:common.HashLength]),
            "at", common.BytesToHash(marker[common.HashLength:]),
        }...)
    }
    // Add the usual measurements
    ctx = append(ctx, []interface{}{
        "accounts", gs.accounts,
        "slots", gs.slots,
        "storage", gs.storage,
        "elapsed", common.PrettyDuration(time.Since(gs.start)),
    }...)
    // Calculate the estimated indexing time based on current stats
    if len(marker) > 0 {
        if done := binary.BigEndian.Uint64(marker[:8]) - gs.origin; done > 0 {
            left := math.MaxUint64 - binary.BigEndian.Uint64(marker[:8])

            speed := done/uint64(time.Since(gs.start)/time.Millisecond+1) + 1 // +1 to avoid division by zero
            ctx = append(ctx, []interface{}{
                "eta", common.PrettyDuration(time.Duration(left/speed) * time.Millisecond),
            }...)
        }
    }
    log.Info(msg, ctx...)
}

// generateSnapshot regenerates a brand new snapshot based on an existing state
// database and head block asynchronously. The snapshot is returned immediately
// and generation is continued in the background until done.
func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) *diskLayer {
    // Create a new disk layer with an initialized state marker at zero
    var (
        stats     = &generatorStats{start: time.Now()}
        batch     = diskdb.NewBatch()
        genMarker = []byte{} // Initialized but empty!
    )
    rawdb.WriteSnapshotRoot(batch, root)
    journalProgress(batch, genMarker, stats)
    if err := batch.Write(); err != nil {
        log.Crit("Failed to write initialized state marker", "err", err)
    }
    base := &diskLayer{
        diskdb:     diskdb,
        triedb:     triedb,
        root:       root,
        cache:      fastcache.New(cache * 1024 * 1024),
        genMarker:  genMarker,
        genPending: make(chan struct{}),
        genAbort:   make(chan chan *generatorStats),
    }
    go base.generate(stats)
    log.Debug("Start snapshot generation", "root", root)
    return base
}
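// Illustrative call site (a sketch, not code from this file; generateSnapshot
// is invoked by the snapshot tree construction in this package when no usable
// journal exists):
//
//	base := generateSnapshot(diskdb, triedb, cache, headRoot) // cache in MiB
//	<-base.genPending                                         // optionally block until generation completes
//
// The returned disk layer is usable immediately; generation keeps filling it
// in the background.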
// journalProgress persists the generator stats into the database to resume later.
func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorStats) {
    // Write out the generator marker. Note it's a standalone disk layer generator
    // which is not mixed with the journal. It's ok if the generator is persisted
    // while the journal is not.
    entry := journalGenerator{
        Done:   marker == nil,
        Marker: marker,
    }
    if stats != nil {
        entry.Accounts = stats.accounts
        entry.Slots = stats.slots
        entry.Storage = uint64(stats.storage)
    }
    blob, err := rlp.EncodeToBytes(entry)
    if err != nil {
        panic(err) // Cannot happen, here to catch dev errors
    }
    var logstr string
    switch {
    case marker == nil:
        logstr = "done"
    case bytes.Equal(marker, []byte{}):
        logstr = "empty"
    case len(marker) == common.HashLength:
        logstr = fmt.Sprintf("%#x", marker)
    default:
        logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:])
    }
    log.Debug("Journalled generator progress", "progress", logstr)
    rawdb.WriteSnapshotGenerator(db, blob)
}

// proofResult contains the output of range proving, which can be used
// for further processing regardless of whether it succeeded or not.
type proofResult struct {
    keys     [][]byte   // The key set of all elements being iterated, even if the proving failed
    vals     [][]byte   // The val set of all elements being iterated, even if the proving failed
    diskMore bool       // Set when the database has extra snapshot states since the last iteration
    trieMore bool       // Set when the trie has extra snapshot states (only meaningful for successful proving)
    proofErr error      // Indicator whether the given state range is valid or not
    tr       *trie.Trie // The trie, in case the trie was resolved by the prover (may be nil)
}

// valid returns whether the range proof was successful.
func (result *proofResult) valid() bool {
    return result.proofErr == nil
}

// last returns the last verified element key regardless of whether the range
// proof was successful. Nil is returned if nothing was involved in the proving.
func (result *proofResult) last() []byte {
    var last []byte
    if len(result.keys) > 0 {
        last = result.keys[len(result.keys)-1]
    }
    return last
}

// forEach iterates over all the visited elements and applies the given callback
// on them. The iteration is aborted if the callback returns a non-nil error.
func (result *proofResult) forEach(callback func(key []byte, val []byte) error) error {
    for i := 0; i < len(result.keys); i++ {
        key, val := result.keys[i], result.vals[i]
        if err := callback(key, val); err != nil {
            return err
        }
    }
    return nil
}
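// A sketch of how the prover is driven (mirroring the account pass in generate
// below): the first chunk starts at a nil origin, and every later chunk resumes
// just past the previous last key, e.g.
//
//	result, err := dl.proveRange(stats, root, rawdb.SnapshotAccountPrefix,
//		"account", accOrigin, accountCheckRange, FullAccountRLP)
//
// With a nil origin and no further disk entries the whole set is re-hashed
// with a stack trie; otherwise edge proofs are generated for the chunk.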
// proveRange proves that the snapshot segment with a particular prefix is "valid".
// The iteration start point will be assigned if the iterator is restored from
// the last interruption. Max will be assigned in order to limit the maximum
// amount of data involved in each iteration.
//
// The proof result will be returned if the range proving is finished, otherwise
// the error will be returned to abort the entire procedure.
func (dl *diskLayer) proveRange(stats *generatorStats, root common.Hash, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
    var (
        keys     [][]byte
        vals     [][]byte
        proof    = rawdb.NewMemoryDatabase()
        diskMore = false
    )
    iter := dl.diskdb.NewIterator(prefix, origin)
    defer iter.Release()

    var start = time.Now()
    for iter.Next() {
        key := iter.Key()
        if len(key) != len(prefix)+common.HashLength {
            continue
        }
        if len(keys) == max {
            // Break if we've reached the max size, and signal that we're not
            // done yet.
            diskMore = true
            break
        }
        keys = append(keys, common.CopyBytes(key[len(prefix):]))

        if valueConvertFn == nil {
            vals = append(vals, common.CopyBytes(iter.Value()))
        } else {
            val, err := valueConvertFn(iter.Value())
            if err != nil {
                // Special case: the state data is corrupted (invalid slim-format
                // account). Don't abort the entire procedure directly; instead,
                // let the fallback generation heal the invalid data.
                //
                // Append the original value here to ensure that the numbers of
                // keys and values stay the same.
                vals = append(vals, common.CopyBytes(iter.Value()))
                log.Error("Failed to convert account state data", "err", err)
            } else {
                vals = append(vals, val)
            }
        }
    }
    // Update metrics for database iteration and merkle proving
    if kind == "storage" {
        snapStorageSnapReadCounter.Inc(time.Since(start).Nanoseconds())
    } else {
        snapAccountSnapReadCounter.Inc(time.Since(start).Nanoseconds())
    }
    defer func(start time.Time) {
        if kind == "storage" {
            snapStorageProveCounter.Inc(time.Since(start).Nanoseconds())
        } else {
            snapAccountProveCounter.Inc(time.Since(start).Nanoseconds())
        }
    }(time.Now())

    // The snap state is exhausted, pass the entire key/val set for verification
    if origin == nil && !diskMore {
        stackTr := trie.NewStackTrie(nil)
        for i, key := range keys {
            stackTr.TryUpdate(key, vals[i])
        }
        if gotRoot := stackTr.Hash(); gotRoot != root {
            return &proofResult{
                keys:     keys,
                vals:     vals,
                proofErr: fmt.Errorf("wrong root: have %#x want %#x", gotRoot, root),
            }, nil
        }
        return &proofResult{keys: keys, vals: vals}, nil
    }
    // Snap state is chunked, generate edge proofs for verification.
    tr, err := trie.New(root, dl.triedb)
    if err != nil {
        stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
        return nil, errMissingTrie
    }
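    // The two edge proofs generated below (for origin and for the last key)
    // are what allow trie.VerifyRangeProof to check a chunk of the keyspace in
    // isolation: they anchor the boundaries of [origin, last] to the committed
    // root without materializing the rest of the trie.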
    // First, find out the key of the last iterated element.
    var last []byte
    if len(keys) > 0 {
        last = keys[len(keys)-1]
    }
    // Generate the Merkle proofs for the first and last element
    if origin == nil {
        origin = common.Hash{}.Bytes()
    }
    if err := tr.Prove(origin, 0, proof); err != nil {
        log.Debug("Failed to prove range", "kind", kind, "origin", origin, "err", err)
        return &proofResult{
            keys:     keys,
            vals:     vals,
            diskMore: diskMore,
            proofErr: err,
            tr:       tr,
        }, nil
    }
    if last != nil {
        if err := tr.Prove(last, 0, proof); err != nil {
            log.Debug("Failed to prove range", "kind", kind, "last", last, "err", err)
            return &proofResult{
                keys:     keys,
                vals:     vals,
                diskMore: diskMore,
                proofErr: err,
                tr:       tr,
            }, nil
        }
    }
    // Verify the snapshot segment with the range prover, ensuring that all flat
    // states in this range correspond to the merkle trie.
    cont, err := trie.VerifyRangeProof(root, origin, last, keys, vals, proof)
    return &proofResult{
        keys:     keys,
        vals:     vals,
        diskMore: diskMore,
        trieMore: cont,
        proofErr: err,
        tr:       tr,
    }, nil
}

// onStateCallback is a function that is called by generateRange when processing
// a range of accounts or storage slots. For each element, the callback is invoked.
// If 'delete' is true, then this element (and potential slots) needs to be deleted from the snapshot.
// If 'write' is true, then this element needs to be updated with the 'val'.
// If 'write' is false, then this element is already correct and needs no update. However,
// for accounts, the storage trie of the account needs to be checked.
// The 'val' is the canonical encoding of the value (not the slim format for accounts).
type onStateCallback func(key []byte, val []byte, write bool, delete bool) error

// generateRange generates the state segment with a particular prefix. Generation
// can either verify the correctness of existing state through a range proof and
// skip generation, or iterate the trie to regenerate the state on demand.
func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string, origin []byte, max int, stats *generatorStats, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
    // Use the range prover to check the validity of the flat state in the range
    result, err := dl.proveRange(stats, root, prefix, kind, origin, max, valueConvertFn)
    if err != nil {
        return false, nil, err
    }
    last := result.last()

    // Construct contextual logger
    logCtx := []interface{}{"kind", kind, "prefix", hexutil.Encode(prefix)}
    if len(origin) > 0 {
        logCtx = append(logCtx, "origin", hexutil.Encode(origin))
    }
    logger := log.New(logCtx...)

    // The range prover says the range is correct, skip trie iteration
    if result.valid() {
        snapSuccessfulRangeProofMeter.Mark(1)
        logger.Trace("Proved state range", "last", hexutil.Encode(last))

        // The verification passed, process each state with the given
        // callback function. If this state represents a contract, the
        // corresponding storage check will be performed in the callback.
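        // Every proven element is replayed with write=false and delete=false,
        // i.e. "already correct": the onAccount/onStorage callbacks below only
        // bump the recovery counters and, for contracts, descend into storage.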
        if err := result.forEach(func(key []byte, val []byte) error { return onState(key, val, false, false) }); err != nil {
            return false, nil, err
        }
        // Only abort the iteration when both database and trie are exhausted
        return !result.diskMore && !result.trieMore, last, nil
    }
    logger.Trace("Detected outdated state range", "last", hexutil.Encode(last), "err", result.proofErr)
    snapFailedRangeProofMeter.Mark(1)

    // Special case: the entire trie is missing. In the original trie scheme,
    // all the duplicated subtries are filtered out (only one copy of the data
    // is stored), while in the snapshot model all the storage tries belonging
    // to different contracts are kept even if they are duplicated. Track this
    // to some extent to remove the noise from the statistics.
    if origin == nil && last == nil {
        meter := snapMissallAccountMeter
        if kind == "storage" {
            meter = snapMissallStorageMeter
        }
        meter.Mark(1)
    }

    // We use the snap data to build up a cache which can be used by the
    // main account trie as a primary lookup when resolving hashes
    var snapNodeCache ethdb.KeyValueStore
    if len(result.keys) > 0 {
        snapNodeCache = memorydb.New()
        snapTrieDb := trie.NewDatabase(snapNodeCache)
        snapTrie, _ := trie.New(common.Hash{}, snapTrieDb)
        for i, key := range result.keys {
            snapTrie.Update(key, result.vals[i])
        }
        root, _, _ := snapTrie.Commit(nil)
        snapTrieDb.Commit(root, false, nil)
    }
    tr := result.tr
    if tr == nil {
        tr, err = trie.New(root, dl.triedb)
        if err != nil {
            stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
            return false, nil, errMissingTrie
        }
    }

    var (
        trieMore       bool
        nodeIt         = tr.NodeIterator(origin)
        iter           = trie.NewIterator(nodeIt)
        kvkeys, kvvals = result.keys, result.vals

        // counters
        count     = 0 // number of states delivered by the iterator
        created   = 0 // states created from the trie
        updated   = 0 // states updated from the trie
        deleted   = 0 // states not in the trie, but present in the snapshot
        untouched = 0 // states already correct

        // timers
        start    = time.Now()
        internal time.Duration
    )
    nodeIt.AddResolver(snapNodeCache)
    for iter.Next() {
        if last != nil && bytes.Compare(iter.Key, last) > 0 {
            trieMore = true
            break
        }
        count++
        write := true
        created++
        for len(kvkeys) > 0 {
            if cmp := bytes.Compare(kvkeys[0], iter.Key); cmp < 0 {
                // delete the key
                istart := time.Now()
                if err := onState(kvkeys[0], nil, false, true); err != nil {
                    return false, nil, err
                }
                kvkeys = kvkeys[1:]
                kvvals = kvvals[1:]
                deleted++
                internal += time.Since(istart)
                continue
            } else if cmp == 0 {
                // the snapshot key can be overwritten
                created--
                if write = !bytes.Equal(kvvals[0], iter.Value); write {
                    updated++
                } else {
                    untouched++
                }
                kvkeys = kvkeys[1:]
                kvvals = kvvals[1:]
            }
            break
        }
        istart := time.Now()
        if err := onState(iter.Key, iter.Value, write, false); err != nil {
            return false, nil, err
        }
        internal += time.Since(istart)
    }
    if iter.Err != nil {
        return false, nil, iter.Err
    }
    // Delete all remaining stale snapshot states
    istart := time.Now()
    for _, key := range kvkeys {
        if err := onState(key, nil, false, true); err != nil {
            return false, nil, err
        }
        deleted++
    }
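    // Worked example of the reconciliation above (illustrative keys): with
    // snapshot keys {a, b, d} and trie keys {b, c}, `a` was deleted in the
    // merge loop (snap key < trie key), `b` was compared in place (updated or
    // untouched), `c` was created from the trie, and the trailing `d` was
    // removed by the stale-state sweep just above.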
    internal += time.Since(istart)

    // Update metrics for counting trie iteration
    if kind == "storage" {
        snapStorageTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
    } else {
        snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
    }
    logger.Debug("Regenerated state range", "root", root, "last", hexutil.Encode(last),
        "count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)

    // If there are either more trie items, or more snap items
    // (in the next segment), then we need to keep working
    return !trieMore && !result.diskMore, last, nil
}

// generate is a background thread that iterates over the state and storage tries,
// constructing the state snapshot. All the arguments are purely for statistics
// gathering and logging, since the method surfs the blocks as they arrive, often
// being restarted.
func (dl *diskLayer) generate(stats *generatorStats) {
    var (
        accMarker    []byte
        accountRange = accountCheckRange
    )
    if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
        // Always reset the initial account range to 1
        // whenever recovering from an interruption.
        accMarker, accountRange = dl.genMarker[:common.HashLength], 1
    }
    var (
        batch     = dl.diskdb.NewBatch()
        logged    = time.Now()
        accOrigin = common.CopyBytes(accMarker)
        abort     chan *generatorStats
    )
    stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)

    checkAndFlush := func(currentLocation []byte) error {
        select {
        case abort = <-dl.genAbort:
        default:
        }
        if batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
            if bytes.Compare(currentLocation, dl.genMarker) < 0 {
                log.Error("Snapshot generator went backwards",
                    "currentLocation", fmt.Sprintf("%x", currentLocation),
                    "genMarker", fmt.Sprintf("%x", dl.genMarker))
            }

            // Flush out the batch whether it's empty or not.
            // It's possible that all the states were recovered and the
            // generation indeed made progress.
            journalProgress(batch, currentLocation, stats)

            if err := batch.Write(); err != nil {
                return err
            }
            batch.Reset()

            dl.lock.Lock()
            dl.genMarker = currentLocation
            dl.lock.Unlock()

            if abort != nil {
                stats.Log("Aborting state snapshot generation", dl.root, currentLocation)
                return errors.New("aborted")
            }
        }
        if time.Since(logged) > 8*time.Second {
            stats.Log("Generating state snapshot", dl.root, currentLocation)
            logged = time.Now()
        }
        return nil
    }

    onAccount := func(key []byte, val []byte, write bool, delete bool) error {
        var (
            start       = time.Now()
            accountHash = common.BytesToHash(key)
        )
        if delete {
            rawdb.DeleteAccountSnapshot(batch, accountHash)
            snapWipedAccountMeter.Mark(1)

            // Ensure that any previous snapshot storage values are cleared
            prefix := append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...)
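            // Storage snapshot keys are laid out as
            // SnapshotStoragePrefix ++ accountHash ++ storageHash, so iterating
            // `prefix` with keyLen (the prefix plus two hashes) below removes
            // every slot that belonged to the deleted account.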
            keyLen := len(rawdb.SnapshotStoragePrefix) + 2*common.HashLength
            if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, snapWipedStorageMeter, false); err != nil {
                return err
            }
            snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
            return nil
        }
        // Retrieve the current account and flatten it into the internal format
        var acc struct {
            Nonce            uint64
            Balance          *big.Int
            Root             common.Hash
            KeccakCodeHash   []byte
            PoseidonCodeHash []byte
            CodeSize         uint64
        }
        if err := rlp.DecodeBytes(val, &acc); err != nil {
            log.Crit("Invalid account encountered during snapshot creation", "err", err)
        }
        // If the account is not yet in-progress, write it out
        if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) {
            dataLen := len(val) // Approximate size, saves us a round of RLP-encoding
            if !write {
                if bytes.Equal(acc.KeccakCodeHash, emptyKeccakCode[:]) {
                    // account for KeccakCodeHash, PoseidonCodeHash and CodeSize
                    dataLen = dataLen - 32 - 32 - 8
                }
                if acc.Root == emptyRoot {
                    dataLen -= 32
                }
                snapRecoveredAccountMeter.Mark(1)
            } else {
                data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.KeccakCodeHash, acc.PoseidonCodeHash, acc.CodeSize)
                dataLen = len(data)
                rawdb.WriteAccountSnapshot(batch, accountHash, data)
                snapGeneratedAccountMeter.Mark(1)
            }
            stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
            stats.accounts++
        }
        marker := accountHash[:]
        // If snapshot generation resumes here after an interruption, genMarker may
        // go backward when the last genMarker consisted of accountHash and storageHash
        if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
            marker = dl.genMarker[:]
        }
        // If we've exceeded our batch allowance or termination was requested, flush to disk
        if err := checkAndFlush(marker); err != nil {
            return err
        }
        // If the iterated account is a contract, run a further loop to
        // verify or regenerate the contract storage.
        if acc.Root == emptyRoot {
            // If the root is empty, we still need to ensure that any previous snapshot
            // storage values are cleared
            // TODO: investigate if this can be avoided, this will be very costly since it
            // affects every single EOA account
            //  - Perhaps we can avoid this when the codeHash is emptyCode
            prefix := append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...)
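            // This mirrors the wipe in the delete branch above: an account whose
            // storage root is now empty may still have stale slots on disk from
            // an earlier incarnation, so its whole slot range is cleared as well.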
            keyLen := len(rawdb.SnapshotStoragePrefix) + 2*common.HashLength
            if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, snapWipedStorageMeter, false); err != nil {
                return err
            }
            snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
        } else {
            snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())

            var storeMarker []byte
            if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength {
                storeMarker = dl.genMarker[common.HashLength:]
            }
            onStorage := func(key []byte, val []byte, write bool, delete bool) error {
                defer func(start time.Time) {
                    snapStorageWriteCounter.Inc(time.Since(start).Nanoseconds())
                }(time.Now())

                if delete {
                    rawdb.DeleteStorageSnapshot(batch, accountHash, common.BytesToHash(key))
                    snapWipedStorageMeter.Mark(1)
                    return nil
                }
                if write {
                    rawdb.WriteStorageSnapshot(batch, accountHash, common.BytesToHash(key), val)
                    snapGeneratedStorageMeter.Mark(1)
                } else {
                    snapRecoveredStorageMeter.Mark(1)
                }
                stats.storage += common.StorageSize(1 + 2*common.HashLength + len(val))
                stats.slots++

                // If we've exceeded our batch allowance or termination was requested, flush to disk
                if err := checkAndFlush(append(accountHash[:], key...)); err != nil {
                    return err
                }
                return nil
            }
            var storeOrigin = common.CopyBytes(storeMarker)
            for {
                exhausted, last, err := dl.generateRange(acc.Root, append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...), "storage", storeOrigin, storageCheckRange, stats, onStorage, nil)
                if err != nil {
                    return err
                }
                if exhausted {
                    break
                }
                if storeOrigin = increaseKey(last); storeOrigin == nil {
                    break // special case, the last is 0xffffffff...fff
                }
            }
        }
        // Some account processed, unmark the marker
        accMarker = nil
        return nil
    }

    // Global loop for regenerating the entire state trie + all layered storage tries.
    for {
        exhausted, last, err := dl.generateRange(dl.root, rawdb.SnapshotAccountPrefix, "account", accOrigin, accountRange, stats, onAccount, FullAccountRLP)
        // The procedure is aborted, either by an external signal or an internal error
        if err != nil {
            if abort == nil { // aborted by internal error, wait for the signal
                abort = <-dl.genAbort
            }
            abort <- stats
            return
        }
        // Abort the procedure if the entire snapshot is generated
        if exhausted {
            break
        }
        if accOrigin = increaseKey(last); accOrigin == nil {
            break // special case, the last is 0xffffffff...fff
        }
        accountRange = accountCheckRange
    }
    // Snapshot fully generated, set the marker to nil.
    // Note that even if there is nothing to commit, we persist the
    // generator anyway to mark the snapshot as complete.
    journalProgress(batch, nil, stats)
    if err := batch.Write(); err != nil {
        log.Error("Failed to flush batch", "err", err)

        abort = <-dl.genAbort
        abort <- stats
        return
    }
    batch.Reset()

    log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
        "storage", stats.storage, "elapsed", common.PrettyDuration(time.Since(stats.start)))

    dl.lock.Lock()
    dl.genMarker = nil
    close(dl.genPending)
    dl.lock.Unlock()

    // Someone will be looking for us, wait it out
    abort = <-dl.genAbort
    abort <- nil
}
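// Illustrative behavior of increaseKey below (a sketch; note that the input
// slice is mutated in place):
//
//	increaseKey([]byte{0x01, 0xff}) -> []byte{0x02, 0x00}
//	increaseKey([]byte{0xff, 0xff}) -> nil (the key space is exhausted)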
// increaseKey increments the input key by one. It returns nil if the entire
// addition overflows (i.e. the key consists solely of 0xff bytes).
func increaseKey(key []byte) []byte {
    for i := len(key) - 1; i >= 0; i-- {
        key[i]++
        if key[i] != 0x0 {
            return key
        }
    }
    return nil
}
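// Usage sketch of the abort handshake (illustrative, based on the channel
// protocol in generate above): a caller stops or drains a running generator
// by handing it a reply channel over genAbort.
//
//	dl := generateSnapshot(diskdb, triedb, 16, headRoot)
//	...
//	reply := make(chan *generatorStats)
//	dl.genAbort <- reply
//	stats := <-reply // non-nil stats if aborted mid-run, nil once finished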