github.com/authcall/reference-optimistic-geth@v0.0.0-20220816224302-06313bfeb8d2/core/state/snapshot/generate.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"errors"
	"fmt"
	"math/big"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

var (
	// emptyRoot is the known root hash of an empty trie.
	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

	// emptyCode is the known hash of the empty EVM bytecode.
	emptyCode = crypto.Keccak256Hash(nil)

	// accountCheckRange is the upper limit of the number of accounts involved in
	// each range check. This is a value estimated based on experience. If this
	// range is too large, the failure rate of the range proof will increase.
	// Conversely, if the range is too small, the efficiency of state recovery
	// will decrease.
	accountCheckRange = 128

	// storageCheckRange is the upper limit of the number of storage slots involved
	// in each range check. This is a value estimated based on experience. If this
	// range is too large, the failure rate of the range proof will increase.
	// Conversely, if the range is too small, the efficiency of state recovery
	// will decrease.
	storageCheckRange = 1024

	// errMissingTrie is returned if the target trie is missing while the generation
	// is running. In this case the generation is aborted and waits for the new signal.
	errMissingTrie = errors.New("missing trie")
)

// generateSnapshot regenerates a brand new snapshot based on an existing state
// database and head block asynchronously. The snapshot is returned immediately
// and generation is continued in the background until done.
func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) *diskLayer {
	// Create a new disk layer with an initialized state marker at zero
	var (
		stats     = &generatorStats{start: time.Now()}
		batch     = diskdb.NewBatch()
		genMarker = []byte{} // Initialized but empty!
	)
	rawdb.WriteSnapshotRoot(batch, root)
	journalProgress(batch, genMarker, stats)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write initialized state marker", "err", err)
	}
	base := &diskLayer{
		diskdb:     diskdb,
		triedb:     triedb,
		root:       root,
		cache:      fastcache.New(cache * 1024 * 1024),
		genMarker:  genMarker,
		genPending: make(chan struct{}),
		genAbort:   make(chan chan *generatorStats),
	}
	go base.generate(stats)
	log.Debug("Start snapshot generation", "root", root)
	return base
}
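
// What follows is an illustrative, editorial sketch (not part of the upstream
// code path) of how the asynchronously returned layer can be driven, inferred
// from the channel protocol in dl.generate below: genPending is closed once
// generation finishes, while genAbort accepts a reply channel and answers with
// the progress statistics (or nil if generation had already completed). The
// cache size and headRoot are placeholder values:
//
//	layer := generateSnapshot(diskdb, triedb, 256, headRoot)
//	select {
//	case <-layer.genPending:
//		// Generation finished; layer.genMarker is now nil.
//	case <-time.After(time.Minute):
//		// Interrupt generation and collect the progress made so far.
//		stop := make(chan *generatorStats)
//		layer.genAbort <- stop
//		stats := <-stop
//		_ = stats
//	}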

// journalProgress persists the generator stats into the database to resume later.
func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorStats) {
	// Write out the generator marker. Note it's a standalone disk layer generator
	// which is not mixed with journal. It's ok if the generator is persisted while
	// journal is not.
	entry := journalGenerator{
		Done:   marker == nil,
		Marker: marker,
	}
	if stats != nil {
		entry.Accounts = stats.accounts
		entry.Slots = stats.slots
		entry.Storage = uint64(stats.storage)
	}
	blob, err := rlp.EncodeToBytes(entry)
	if err != nil {
		panic(err) // Cannot happen, here to catch dev errors
	}
	var logstr string
	switch {
	case marker == nil:
		logstr = "done"
	case bytes.Equal(marker, []byte{}):
		logstr = "empty"
	case len(marker) == common.HashLength:
		logstr = fmt.Sprintf("%#x", marker)
	default:
		logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:])
	}
	log.Debug("Journalled generator progress", "progress", logstr)
	rawdb.WriteSnapshotGenerator(db, blob)
}
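
// An editorial sketch of the marker shapes journalProgress can be handed,
// mirroring the switch above and the way checkAndFlush composes markers
// further down (accountHash and slotHash are placeholder names):
//
//	journalProgress(db, nil, stats)                                     // "done": generation complete
//	journalProgress(db, []byte{}, stats)                                // "empty": generation not started yet
//	journalProgress(db, accountHash[:], stats)                          // paused at an account boundary
//	journalProgress(db, append(accountHash[:], slotHash[:]...), stats)  // paused inside a contract's storage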

// proofResult contains the output of range proving which can be used
// for further processing regardless of whether it succeeded or not.
type proofResult struct {
	keys     [][]byte   // The key set of all elements being iterated, even if proving failed
	vals     [][]byte   // The val set of all elements being iterated, even if proving failed
	diskMore bool       // Set when the database has extra snapshot states since the last iteration
	trieMore bool       // Set when the trie has extra snapshot states (only meaningful for successful proving)
	proofErr error      // Indicator whether the given state range is valid or not
	tr       *trie.Trie // The trie, in case the trie was resolved by the prover (may be nil)
}

// valid returns whether the range proof was successful.
func (result *proofResult) valid() bool {
	return result.proofErr == nil
}

// last returns the last verified element key regardless of whether the range proof
// was successful or not. Nil is returned if nothing was involved in the proving.
func (result *proofResult) last() []byte {
	var last []byte
	if len(result.keys) > 0 {
		last = result.keys[len(result.keys)-1]
	}
	return last
}

// forEach iterates all the visited elements and applies the given callback on them.
// The iteration is aborted if the callback returns a non-nil error.
func (result *proofResult) forEach(callback func(key []byte, val []byte) error) error {
	for i := 0; i < len(result.keys); i++ {
		key, val := result.keys[i], result.vals[i]
		if err := callback(key, val); err != nil {
			return err
		}
	}
	return nil
}

// proveRange proves that the snapshot segment with a particular prefix is "valid".
// The iteration start point will be assigned if the iterator is restored from
// the last interruption. Max will be assigned in order to limit the maximum
// amount of data involved in each iteration.
//
// The proof result will be returned if the range proving is finished, otherwise
// the error will be returned to abort the entire procedure.
func (dl *diskLayer) proveRange(ctx *generatorContext, owner common.Hash, root common.Hash, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
	var (
		keys     [][]byte
		vals     [][]byte
		proof    = rawdb.NewMemoryDatabase()
		diskMore = false
		iter     = ctx.iterator(kind)
		start    = time.Now()
		min      = append(prefix, origin...)
	)
	for iter.Next() {
		// Ensure the iterated item is always equal or larger than the given origin.
		key := iter.Key()
		if bytes.Compare(key, min) < 0 {
			return nil, errors.New("invalid iteration position")
		}
		// Ensure the iterated item still falls in the specified prefix. If
		// not, the items in the specified area have all been visited.
		// Move the iterator a step back since we iterate one extra element
		// out.
		if !bytes.Equal(key[:len(prefix)], prefix) {
			iter.Hold()
			break
		}
		// Break if we've reached the max size, and signal that we're not
		// done yet. Move the iterator a step back since we iterate one
		// extra element out.
		if len(keys) == max {
			iter.Hold()
			diskMore = true
			break
		}
		keys = append(keys, common.CopyBytes(key[len(prefix):]))

		if valueConvertFn == nil {
			vals = append(vals, common.CopyBytes(iter.Value()))
		} else {
			val, err := valueConvertFn(iter.Value())
			if err != nil {
				// Special case: the state data is corrupted (invalid slim-format account),
				// don't abort the entire procedure directly. Instead, let the fallback
				// generation heal the invalid data.
				//
				// Here append the original value to ensure that the numbers of keys and
				// values stay aligned.
				vals = append(vals, common.CopyBytes(iter.Value()))
				log.Error("Failed to convert account state data", "err", err)
			} else {
				vals = append(vals, val)
			}
		}
	}
	// Update metrics for database iteration and merkle proving
	if kind == snapStorage {
		snapStorageSnapReadCounter.Inc(time.Since(start).Nanoseconds())
	} else {
		snapAccountSnapReadCounter.Inc(time.Since(start).Nanoseconds())
	}
	defer func(start time.Time) {
		if kind == snapStorage {
			snapStorageProveCounter.Inc(time.Since(start).Nanoseconds())
		} else {
			snapAccountProveCounter.Inc(time.Since(start).Nanoseconds())
		}
	}(time.Now())

	// The snap state is exhausted, pass the entire key/val set for verification
	if origin == nil && !diskMore {
		stackTr := trie.NewStackTrieWithOwner(nil, owner)
		for i, key := range keys {
			stackTr.TryUpdate(key, vals[i])
		}
		if gotRoot := stackTr.Hash(); gotRoot != root {
			return &proofResult{
				keys:     keys,
				vals:     vals,
				proofErr: fmt.Errorf("wrong root: have %#x want %#x", gotRoot, root),
			}, nil
		}
		return &proofResult{keys: keys, vals: vals}, nil
	}
	// Snap state is chunked, generate edge proofs for verification.
	tr, err := trie.New(owner, root, dl.triedb)
	if err != nil {
		ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
		return nil, errMissingTrie
	}
	// First find out the key of the last iterated element.
	var last []byte
	if len(keys) > 0 {
		last = keys[len(keys)-1]
	}
	// Generate the Merkle proofs for the first and last element
	if origin == nil {
		origin = common.Hash{}.Bytes()
	}
	if err := tr.Prove(origin, 0, proof); err != nil {
		log.Debug("Failed to prove range", "kind", kind, "origin", origin, "err", err)
		return &proofResult{
			keys:     keys,
			vals:     vals,
			diskMore: diskMore,
			proofErr: err,
			tr:       tr,
		}, nil
	}
	if last != nil {
		if err := tr.Prove(last, 0, proof); err != nil {
			log.Debug("Failed to prove range", "kind", kind, "last", last, "err", err)
			return &proofResult{
				keys:     keys,
				vals:     vals,
				diskMore: diskMore,
				proofErr: err,
				tr:       tr,
			}, nil
		}
	}
	// Verify the snapshot segment with the range prover, ensuring that all flat
	// states in this range correspond to the merkle trie.
	cont, err := trie.VerifyRangeProof(root, origin, last, keys, vals, proof)
	return &proofResult{
			keys:     keys,
			vals:     vals,
			diskMore: diskMore,
			trieMore: cont,
			proofErr: err,
			tr:       tr,
		},
		nil
}

// onStateCallback is a function that is called by generateRange when processing a range of
// accounts or storage slots. For each element, the callback is invoked.
//
//   - If 'delete' is true, then this element (and potential slots) needs to be deleted from the snapshot.
//   - If 'write' is true, then this element needs to be updated with the 'val'.
//   - If 'write' is false, then this element is already correct, and needs no update.
//
// The 'val' is the canonical encoding of the value (not the slim format for accounts).
//
// However, for accounts, the storage trie of the account needs to be checked. Also,
// dangling storages (storage exists but the corresponding account is missing) need to
// be cleaned up.
type onStateCallback func(key []byte, val []byte, write bool, delete bool) error
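
// An editorial, minimal sketch of an onStateCallback that only tallies what the
// generator asks for; the real callbacks (onStorage and onAccount below) write
// or delete the corresponding snapshot entries instead:
//
//	var updated, removed, untouched int
//	onState := func(key []byte, val []byte, write bool, delete bool) error {
//		switch {
//		case delete:
//			removed++ // stale flat state, no longer present in the trie
//		case write:
//			updated++ // missing or outdated flat state, 'val' is the new canonical encoding
//		default:
//			untouched++ // flat state already matches the trie
//		}
//		return nil
//	}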

// generateRange generates the state segment with a particular prefix. Generation can
// either verify the correctness of existing state through range-proof and skip
// generation, or iterate the trie to regenerate state on demand.
func (dl *diskLayer) generateRange(ctx *generatorContext, owner common.Hash, root common.Hash, prefix []byte, kind string, origin []byte, max int, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
	// Use the range prover to check the validity of the flat state in the range
	result, err := dl.proveRange(ctx, owner, root, prefix, kind, origin, max, valueConvertFn)
	if err != nil {
		return false, nil, err
	}
	last := result.last()

	// Construct contextual logger
	logCtx := []interface{}{"kind", kind, "prefix", hexutil.Encode(prefix)}
	if len(origin) > 0 {
		logCtx = append(logCtx, "origin", hexutil.Encode(origin))
	}
	logger := log.New(logCtx...)

	// The range prover says the range is correct, skip trie iteration
	if result.valid() {
		snapSuccessfulRangeProofMeter.Mark(1)
		logger.Trace("Proved state range", "last", hexutil.Encode(last))

		// The verification passed, process each state with the given
		// callback function. If this state represents a contract, the
		// corresponding storage check will be performed in the callback.
		if err := result.forEach(func(key []byte, val []byte) error { return onState(key, val, false, false) }); err != nil {
			return false, nil, err
		}
		// Only abort the iteration when both database and trie are exhausted
		return !result.diskMore && !result.trieMore, last, nil
	}
	logger.Trace("Detected outdated state range", "last", hexutil.Encode(last), "err", result.proofErr)
	snapFailedRangeProofMeter.Mark(1)

	// Special case: the entire trie is missing. In the original trie scheme,
	// all the duplicated subtries will be filtered out (only one copy of the data
	// will be stored). In the snapshot model, however, the storage tries belonging
	// to different contracts are all kept even if they are duplicated.
	// Track it to some extent to remove the noise from the statistics.
	if origin == nil && last == nil {
		meter := snapMissallAccountMeter
		if kind == snapStorage {
			meter = snapMissallStorageMeter
		}
		meter.Mark(1)
	}
	// We use the snap data to build up a cache which can be used by the
	// main account trie as a primary lookup when resolving hashes
	var snapNodeCache ethdb.KeyValueStore
	if len(result.keys) > 0 {
		snapNodeCache = memorydb.New()
		snapTrieDb := trie.NewDatabase(snapNodeCache)
		snapTrie, _ := trie.New(owner, common.Hash{}, snapTrieDb)
		for i, key := range result.keys {
			snapTrie.Update(key, result.vals[i])
		}
		root, _, _ := snapTrie.Commit(nil)
		snapTrieDb.Commit(root, false, nil)
	}
	// Construct the trie for state iteration, reuse the trie
	// if it's already opened with some nodes resolved.
	tr := result.tr
	if tr == nil {
		tr, err = trie.New(owner, root, dl.triedb)
		if err != nil {
			ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
			return false, nil, errMissingTrie
		}
	}
	var (
		trieMore       bool
		nodeIt         = tr.NodeIterator(origin)
		iter           = trie.NewIterator(nodeIt)
		kvkeys, kvvals = result.keys, result.vals

		// counters
		count     = 0 // number of states delivered by iterator
		created   = 0 // states created from the trie
		updated   = 0 // states updated from the trie
		deleted   = 0 // states not in trie, but were in snapshot
		untouched = 0 // states already correct

		// timers
		start    = time.Now()
		internal time.Duration
	)
	nodeIt.AddResolver(snapNodeCache)

	for iter.Next() {
		if last != nil && bytes.Compare(iter.Key, last) > 0 {
			trieMore = true
			break
		}
		count++
		write := true
		created++
		for len(kvkeys) > 0 {
			if cmp := bytes.Compare(kvkeys[0], iter.Key); cmp < 0 {
				// delete the key
				istart := time.Now()
				if err := onState(kvkeys[0], nil, false, true); err != nil {
					return false, nil, err
				}
				kvkeys = kvkeys[1:]
				kvvals = kvvals[1:]
				deleted++
				internal += time.Since(istart)
				continue
			} else if cmp == 0 {
				// the snapshot key can be overwritten
				created--
				if write = !bytes.Equal(kvvals[0], iter.Value); write {
					updated++
				} else {
					untouched++
				}
				kvkeys = kvkeys[1:]
				kvvals = kvvals[1:]
			}
			break
		}
		istart := time.Now()
		if err := onState(iter.Key, iter.Value, write, false); err != nil {
			return false, nil, err
		}
		internal += time.Since(istart)
	}
	if iter.Err != nil {
		return false, nil, iter.Err
	}
	// Delete all remaining stale snapshot states
	istart := time.Now()
	for _, key := range kvkeys {
		if err := onState(key, nil, false, true); err != nil {
			return false, nil, err
		}
		deleted += 1
	}
	internal += time.Since(istart)

	// Update metrics for counting trie iteration
	if kind == snapStorage {
		snapStorageTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
	} else {
		snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
	}
	logger.Debug("Regenerated state range", "root", root, "last", hexutil.Encode(last),
		"count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)

	// If there are either more trie items, or there are more snap items
	// (in the next segment), then we need to keep working
	return !trieMore && !result.diskMore, last, nil
}

// checkAndFlush checks if an interruption signal has been received or the
// batch size has exceeded the allowance.
func (dl *diskLayer) checkAndFlush(ctx *generatorContext, current []byte) error {
	var abort chan *generatorStats
	select {
	case abort = <-dl.genAbort:
	default:
	}
	if ctx.batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
		if bytes.Compare(current, dl.genMarker) < 0 {
			log.Error("Snapshot generator went backwards", "current", fmt.Sprintf("%x", current), "genMarker", fmt.Sprintf("%x", dl.genMarker))
		}
		// Flush out the batch no matter whether it's empty or not.
		// It's possible that all the states are recovered and the
		// generation indeed makes progress.
		journalProgress(ctx.batch, current, ctx.stats)

		if err := ctx.batch.Write(); err != nil {
			return err
		}
		ctx.batch.Reset()

		dl.lock.Lock()
		dl.genMarker = current
		dl.lock.Unlock()

		if abort != nil {
			ctx.stats.Log("Aborting state snapshot generation", dl.root, current)
			return newAbortErr(abort) // bubble up an error for interruption
		}
		// Don't hold the iterators too long, release them to let the compactor work
		ctx.reopenIterator(snapAccount)
		ctx.reopenIterator(snapStorage)
	}
	if time.Since(ctx.logged) > 8*time.Second {
		ctx.stats.Log("Generating state snapshot", dl.root, current)
		ctx.logged = time.Now()
	}
	return nil
}
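
// An editorial sketch of the interruption plumbing around checkAndFlush: when
// a caller pushes a reply channel into genAbort, checkAndFlush wraps it in an
// abortErr, the error bubbles up through generateStorages/generateAccounts,
// and generate() unwraps it to hand the statistics back on that same channel:
//
//	if err := dl.checkAndFlush(ctx, marker); err != nil {
//		return err // possibly a *abortErr carrying the reply channel
//	}
//	// ... later, in generate():
//	if aerr, ok := err.(*abortErr); ok {
//		aerr.abort <- stats
//	}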

// generateStorages generates the missing storage slots of the specific contract.
// It's supposed to restart the generation from the given origin position.
func generateStorages(ctx *generatorContext, dl *diskLayer, account common.Hash, storageRoot common.Hash, storeMarker []byte) error {
	onStorage := func(key []byte, val []byte, write bool, delete bool) error {
		defer func(start time.Time) {
			snapStorageWriteCounter.Inc(time.Since(start).Nanoseconds())
		}(time.Now())

		if delete {
			rawdb.DeleteStorageSnapshot(ctx.batch, account, common.BytesToHash(key))
			snapWipedStorageMeter.Mark(1)
			return nil
		}
		if write {
			rawdb.WriteStorageSnapshot(ctx.batch, account, common.BytesToHash(key), val)
			snapGeneratedStorageMeter.Mark(1)
		} else {
			snapRecoveredStorageMeter.Mark(1)
		}
		ctx.stats.storage += common.StorageSize(1 + 2*common.HashLength + len(val))
		ctx.stats.slots++

		// If we've exceeded our batch allowance or termination was requested, flush to disk
		if err := dl.checkAndFlush(ctx, append(account[:], key...)); err != nil {
			return err
		}
		return nil
	}
	// Loop for re-generating the missing storage slots.
	var origin = common.CopyBytes(storeMarker)
	for {
		exhausted, last, err := dl.generateRange(ctx, account, storageRoot, append(rawdb.SnapshotStoragePrefix, account.Bytes()...), snapStorage, origin, storageCheckRange, onStorage, nil)
		if err != nil {
			return err // The procedure was aborted, either by an external signal or an internal error.
		}
		// Abort the procedure if the entire contract storage is generated
		if exhausted {
			break
		}
		if origin = increaseKey(last); origin == nil {
			break // special case, the last is 0xffffffff...fff
		}
	}
	return nil
}
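
// An editorial sketch of the flat key layout assumed by generateStorages:
// storage snapshot entries are keyed by prefix + account hash + slot hash, so
// the per-account prefix handed to generateRange restricts iteration to one
// contract and proveRange strips it before proving against the storage root:
//
//	prefix := append(rawdb.SnapshotStoragePrefix, accountHash.Bytes()...)
//	dbKey := append(prefix, slotHash.Bytes()...) // full database key of a single slot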

// generateAccounts generates the missing snapshot accounts as well as their
// storage slots in the main trie. It's supposed to restart the generation
// from the given origin position.
func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) error {
	onAccount := func(key []byte, val []byte, write bool, delete bool) error {
		// Make sure to clear all dangling storages before this account
		account := common.BytesToHash(key)
		ctx.removeStorageBefore(account)

		start := time.Now()
		if delete {
			rawdb.DeleteAccountSnapshot(ctx.batch, account)
			snapWipedAccountMeter.Mark(1)
			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())

			ctx.removeStorageAt(account)
			return nil
		}
		// Retrieve the current account and flatten it into the internal format
		var acc struct {
			Nonce    uint64
			Balance  *big.Int
			Root     common.Hash
			CodeHash []byte
		}
		if err := rlp.DecodeBytes(val, &acc); err != nil {
			log.Crit("Invalid account encountered during snapshot creation", "err", err)
		}
		// If the account is not yet in-progress, write it out
		if accMarker == nil || !bytes.Equal(account[:], accMarker) {
			dataLen := len(val) // Approximate size, saves us a round of RLP-encoding
			if !write {
				if bytes.Equal(acc.CodeHash, emptyCode[:]) {
					dataLen -= 32
				}
				if acc.Root == emptyRoot {
					dataLen -= 32
				}
				snapRecoveredAccountMeter.Mark(1)
			} else {
				data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
				dataLen = len(data)
				rawdb.WriteAccountSnapshot(ctx.batch, account, data)
				snapGeneratedAccountMeter.Mark(1)
			}
			ctx.stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
			ctx.stats.accounts++
		}
		// If the snap generation reaches here after being interrupted, genMarker may go
		// backwards when the last genMarker is composed of accountHash and storageHash
		marker := account[:]
		if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
			marker = dl.genMarker[:]
		}
		// If we've exceeded our batch allowance or termination was requested, flush to disk
		if err := dl.checkAndFlush(ctx, marker); err != nil {
			return err
		}
		snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds()) // let's count flush time as well

		// If the iterated account is a contract, create a further loop to
		// verify or regenerate the contract storage.
		if acc.Root == emptyRoot {
			ctx.removeStorageAt(account)
		} else {
			var storeMarker []byte
			if accMarker != nil && bytes.Equal(account[:], accMarker) && len(dl.genMarker) > common.HashLength {
				storeMarker = dl.genMarker[common.HashLength:]
			}
			if err := generateStorages(ctx, dl, account, acc.Root, storeMarker); err != nil {
				return err
			}
		}
		// Some account processed, unmark the marker
		accMarker = nil
		return nil
	}
	// Always reset the initial account range to 1 whenever recovering from an
	// interruption. TODO(rjl493456442) can we remove it?
	var accountRange = accountCheckRange
	if len(accMarker) > 0 {
		accountRange = 1
	}
	origin := common.CopyBytes(accMarker)
	for {
		exhausted, last, err := dl.generateRange(ctx, common.Hash{}, dl.root, rawdb.SnapshotAccountPrefix, snapAccount, origin, accountRange, onAccount, FullAccountRLP)
		if err != nil {
			return err // The procedure was aborted, either by an external signal or an internal error.
		}
		origin = increaseKey(last)

		// Last step, clean up the storages after the last account.
		// All the storages left should be treated as dangling.
		if origin == nil || exhausted {
			ctx.removeStorageLeft()
			break
		}
		accountRange = accountCheckRange
	}
	return nil
}
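
// An editorial sketch of the two account encodings involved above, assuming
// the helpers behave as their call sites suggest: the snapshot persists
// accounts in the "slim" format, where an empty storage root and empty code
// hash are elided, while proveRange and the account trie operate on the full
// consensus RLP (hence FullAccountRLP is passed as valueConvertFn above):
//
//	slim := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash) // what onAccount writes to disk
//	full, err := FullAccountRLP(slim)                                      // what proveRange compares against the trie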

// generate is a background thread that iterates over the state and storage tries,
// constructing the state snapshot. All the arguments are purely for statistics
// gathering and logging, since the method surfs the blocks as they arrive, often
// being restarted.
func (dl *diskLayer) generate(stats *generatorStats) {
	var (
		accMarker []byte
		abort     chan *generatorStats
	)
	if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
		accMarker = dl.genMarker[:common.HashLength]
	}
	stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)

	// Initialize the global generator context. The snapshot iterators are
	// opened at the interrupted position because the assumption is held
	// that all the snapshot data are generated correctly before the marker.
	// Even if the snapshot data is updated during the interruption (before
	// or at the marker), the assumption still holds.
	// The account or storage slot at the interruption point will be
	// processed twice by the generator (it was already processed in the
	// last run), but that's fine.
	ctx := newGeneratorContext(stats, dl.diskdb, accMarker, dl.genMarker)
	defer ctx.close()

	if err := generateAccounts(ctx, dl, accMarker); err != nil {
		// Extract the received interruption signal if it exists
		if aerr, ok := err.(*abortErr); ok {
			abort = aerr.abort
		}
		// Aborted by internal error, wait for the signal
		if abort == nil {
			abort = <-dl.genAbort
		}
		abort <- stats
		return
	}
	// Snapshot fully generated, set the marker to nil.
	// Note even if there is nothing to commit, persist the
	// generator anyway to mark the snapshot as complete.
	journalProgress(ctx.batch, nil, stats)
	if err := ctx.batch.Write(); err != nil {
		log.Error("Failed to flush batch", "err", err)

		abort = <-dl.genAbort
		abort <- stats
		return
	}
	ctx.batch.Reset()

	log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
		"storage", stats.storage, "dangling", stats.dangling, "elapsed", common.PrettyDuration(time.Since(stats.start)))

	dl.lock.Lock()
	dl.genMarker = nil
	close(dl.genPending)
	dl.lock.Unlock()

	// Someone will be looking for us, wait it out
	abort = <-dl.genAbort
	abort <- nil
}

// increaseKey increases the input key by one. Nil is returned if the entire
// addition operation overflows.
func increaseKey(key []byte) []byte {
	for i := len(key) - 1; i >= 0; i-- {
		key[i]++
		if key[i] != 0x0 {
			return key
		}
	}
	return nil
}

// abortErr wraps an interruption signal received to represent that the
// generation was aborted by external processes.
type abortErr struct {
	abort chan *generatorStats
}

func newAbortErr(abort chan *generatorStats) error {
	return &abortErr{abort: abort}
}

func (err *abortErr) Error() string {
	return "aborted"
}
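
// An editorial note on increaseKey above: the increment is done in place and
// rolls over into higher-order bytes; a key of all 0xff bytes returns nil,
// which callers treat as the iteration space being exhausted:
//
//	increaseKey([]byte{0x01, 0xff}) // -> []byte{0x02, 0x00}
//	increaseKey([]byte{0xff, 0xff}) // -> nil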