github.com/carter-ya/go-ethereum@v0.0.0-20230628080049-d2309be3983b/core/state/snapshot/generate.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"errors"
	"fmt"
	"math/big"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
)

var (
	// emptyRoot is the known root hash of an empty trie.
	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

	// emptyCode is the known hash of the empty EVM bytecode.
	emptyCode = crypto.Keccak256Hash(nil)

	// accountCheckRange is the upper limit of the number of accounts involved in
	// each range check. This is a value estimated based on experience. If this
	// range is too large, the failure rate of the range proof will increase.
	// Conversely, if the range is too small, the efficiency of state recovery
	// will decrease.
	accountCheckRange = 128

	// storageCheckRange is the upper limit of the number of storage slots involved
	// in each range check. This is a value estimated based on experience. If this
	// range is too large, the failure rate of the range proof will increase.
	// Conversely, if the range is too small, the efficiency of state recovery
	// will decrease.
	storageCheckRange = 1024

	// errMissingTrie is returned if the target trie is missing while the generation
	// is running. In this case the generation is aborted and waits for the new signal.
	errMissingTrie = errors.New("missing trie")
)

// generateSnapshot regenerates a brand new snapshot based on an existing state
// database and head block asynchronously. The snapshot is returned immediately
// and generation is continued in the background until done.
func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) *diskLayer {
	// Create a new disk layer with an initialized state marker at zero
	var (
		stats     = &generatorStats{start: time.Now()}
		batch     = diskdb.NewBatch()
		genMarker = []byte{} // Initialized but empty!
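		// Note: a nil genMarker would mean generation is already complete;
		// the empty but non-nil marker above means generation starts from
		// the very first account.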
	)
	rawdb.WriteSnapshotRoot(batch, root)
	journalProgress(batch, genMarker, stats)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write initialized state marker", "err", err)
	}
	base := &diskLayer{
		diskdb:     diskdb,
		triedb:     triedb,
		root:       root,
		cache:      fastcache.New(cache * 1024 * 1024),
		genMarker:  genMarker,
		genPending: make(chan struct{}),
		genAbort:   make(chan chan *generatorStats),
	}
	go base.generate(stats)
	log.Debug("Start snapshot generation", "root", root)
	return base
}

// journalProgress persists the generator stats into the database to resume later.
func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorStats) {
	// Write out the generator marker. Note it's a standalone disk layer generator
	// which is not mixed with the journal. It's ok if the generator is persisted
	// while the journal is not.
	entry := journalGenerator{
		Done:   marker == nil,
		Marker: marker,
	}
	if stats != nil {
		entry.Accounts = stats.accounts
		entry.Slots = stats.slots
		entry.Storage = uint64(stats.storage)
	}
	blob, err := rlp.EncodeToBytes(entry)
	if err != nil {
		panic(err) // Cannot happen, here to catch dev errors
	}
	var logstr string
	switch {
	case marker == nil:
		logstr = "done"
	case bytes.Equal(marker, []byte{}):
		logstr = "empty"
	case len(marker) == common.HashLength:
		logstr = fmt.Sprintf("%#x", marker)
	default:
		logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:])
	}
	log.Debug("Journalled generator progress", "progress", logstr)
	rawdb.WriteSnapshotGenerator(db, blob)
}

// proofResult contains the output of range proving which can be used
// for further processing regardless of whether it is successful or not.
type proofResult struct {
	keys     [][]byte   // The key set of all elements being iterated, even if proving failed
	vals     [][]byte   // The val set of all elements being iterated, even if proving failed
	diskMore bool       // Set when the database has extra snapshot states since the last iteration
	trieMore bool       // Set when the trie has extra snapshot states (only meaningful for successful proving)
	proofErr error      // Indicator of whether the given state range is valid or not
	tr       *trie.Trie // The trie, in case the trie was resolved by the prover (may be nil)
}

// valid returns whether the range proof was successful.
func (result *proofResult) valid() bool {
	return result.proofErr == nil
}

// last returns the last verified element key regardless of whether the range
// proof was successful. Nil is returned if nothing was involved in the proving.
func (result *proofResult) last() []byte {
	var last []byte
	if len(result.keys) > 0 {
		last = result.keys[len(result.keys)-1]
	}
	return last
}

// forEach iterates over all the visited elements and applies the given callback
// on them. The iteration is aborted if the callback returns a non-nil error.
func (result *proofResult) forEach(callback func(key []byte, val []byte) error) error {
	for i := 0; i < len(result.keys); i++ {
		key, val := result.keys[i], result.vals[i]
		if err := callback(key, val); err != nil {
			return err
		}
	}
	return nil
}
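
// Illustrative note (summarising the switch in journalProgress above): the
// journalled marker is nil once generation is done, empty while it has not
// started, a 32-byte account hash while mid-way through the account range,
// and a 64-byte accountHash ++ storageHash once descended into a contract's
// storage.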

// proveRange proves that the snapshot segment with a particular prefix is
// "valid". The iteration start point will be assigned if the iterator is
// restored from the last interruption. Max will be assigned in order to limit
// the maximum amount of data involved in each iteration.
//
// The proof result will be returned if the range proving is finished, otherwise
// an error will be returned to abort the entire procedure.
func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
	var (
		keys     [][]byte
		vals     [][]byte
		proof    = rawdb.NewMemoryDatabase()
		diskMore = false
		iter     = ctx.iterator(kind)
		start    = time.Now()
		min      = append(prefix, origin...)
	)
	for iter.Next() {
		// Ensure the iterated item is always equal to or larger than the given origin.
		key := iter.Key()
		if bytes.Compare(key, min) < 0 {
			return nil, errors.New("invalid iteration position")
		}
		// Ensure the iterated item still falls within the specified prefix. If
		// not, all the items in the specified range have been visited. Move the
		// iterator a step back since we iterated one extra element out.
		if !bytes.Equal(key[:len(prefix)], prefix) {
			iter.Hold()
			break
		}
		// Break if we've reached the max size, and signal that we're not
		// done yet. Move the iterator a step back since we iterated one
		// extra element out.
		if len(keys) == max {
			iter.Hold()
			diskMore = true
			break
		}
		keys = append(keys, common.CopyBytes(key[len(prefix):]))

		if valueConvertFn == nil {
			vals = append(vals, common.CopyBytes(iter.Value()))
		} else {
			val, err := valueConvertFn(iter.Value())
			if err != nil {
				// Special case, the state data is corrupted (invalid slim-format account),
				// don't abort the entire procedure directly. Instead, let the fallback
				// generation heal the invalid data.
				//
				// Here append the original value to ensure that the numbers of keys
				// and values stay aligned.
				vals = append(vals, common.CopyBytes(iter.Value()))
				log.Error("Failed to convert account state data", "err", err)
			} else {
				vals = append(vals, val)
			}
		}
	}
	// Update metrics for database iteration and merkle proving
	if kind == snapStorage {
		snapStorageSnapReadCounter.Inc(time.Since(start).Nanoseconds())
	} else {
		snapAccountSnapReadCounter.Inc(time.Since(start).Nanoseconds())
	}
	defer func(start time.Time) {
		if kind == snapStorage {
			snapStorageProveCounter.Inc(time.Since(start).Nanoseconds())
		} else {
			snapAccountProveCounter.Inc(time.Since(start).Nanoseconds())
		}
	}(time.Now())

	// The snap state is exhausted, pass the entire key/val set for verification
	root := trieId.Root
	if origin == nil && !diskMore {
		stackTr := trie.NewStackTrie(nil)
		for i, key := range keys {
			stackTr.TryUpdate(key, vals[i])
		}
		if gotRoot := stackTr.Hash(); gotRoot != root {
			return &proofResult{
				keys:     keys,
				vals:     vals,
				proofErr: fmt.Errorf("wrong root: have %#x want %#x", gotRoot, root),
			}, nil
		}
		return &proofResult{keys: keys, vals: vals}, nil
	}
	// Snap state is chunked, generate edge proofs for verification.
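	// Only the boundary elements of the chunk need Merkle proofs: with proofs
	// for the first and last keys, trie.VerifyRangeProof below can check every
	// element in between against the trie root.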
	tr, err := trie.New(trieId, dl.triedb)
	if err != nil {
		ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
		return nil, errMissingTrie
	}
	// First find out the key of the last iterated element.
	var last []byte
	if len(keys) > 0 {
		last = keys[len(keys)-1]
	}
	// Generate the Merkle proofs for the first and last element
	if origin == nil {
		origin = common.Hash{}.Bytes()
	}
	if err := tr.Prove(origin, 0, proof); err != nil {
		log.Debug("Failed to prove range", "kind", kind, "origin", origin, "err", err)
		return &proofResult{
			keys:     keys,
			vals:     vals,
			diskMore: diskMore,
			proofErr: err,
			tr:       tr,
		}, nil
	}
	if last != nil {
		if err := tr.Prove(last, 0, proof); err != nil {
			log.Debug("Failed to prove range", "kind", kind, "last", last, "err", err)
			return &proofResult{
				keys:     keys,
				vals:     vals,
				diskMore: diskMore,
				proofErr: err,
				tr:       tr,
			}, nil
		}
	}
	// Verify the snapshot segment with the range prover, ensuring that all flat
	// states in this range correspond to the merkle trie.
	cont, err := trie.VerifyRangeProof(root, origin, last, keys, vals, proof)
	return &proofResult{
			keys:     keys,
			vals:     vals,
			diskMore: diskMore,
			trieMore: cont,
			proofErr: err,
			tr:       tr},
		nil
}

// onStateCallback is a function that is called by generateRange when processing
// a range of accounts or storage slots. For each element, the callback is invoked.
//
// - If 'delete' is true, then this element (and potential slots) needs to be deleted from the snapshot.
// - If 'write' is true, then this element needs to be updated with the 'val'.
// - If 'write' is false, then this element is already correct, and needs no update.
// The 'val' is the canonical encoding of the value (not the slim format for accounts).
//
// However, for accounts, the storage trie of the account needs to be checked. Also,
// dangling storages (storage exists but the corresponding account is missing) need
// to be cleaned up.
type onStateCallback func(key []byte, val []byte, write bool, delete bool) error
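
// Illustrative sketch (not part of the original source): a minimal callback
// reacting to the flags, mirroring what generateStorages does further down:
//
//	var onState onStateCallback = func(key, val []byte, write, delete bool) error {
//		if delete {
//			// stale snapshot entry, remove it from the snapshot
//			return nil
//		}
//		if write {
//			// missing or outdated entry, (re)write val
//		}
//		// otherwise the entry is already correct, leave it untouched
//		return nil
//	}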

// generateRange generates the state segment with a particular prefix. Generation
// can either verify the correctness of existing state through range-proof and
// skip generation, or iterate the trie to regenerate state on demand.
func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefix []byte, kind string, origin []byte, max int, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
	// Use the range prover to check the validity of the flat state in the range
	result, err := dl.proveRange(ctx, trieId, prefix, kind, origin, max, valueConvertFn)
	if err != nil {
		return false, nil, err
	}
	last := result.last()

	// Construct contextual logger
	logCtx := []interface{}{"kind", kind, "prefix", hexutil.Encode(prefix)}
	if len(origin) > 0 {
		logCtx = append(logCtx, "origin", hexutil.Encode(origin))
	}
	logger := log.New(logCtx...)

	// The range prover says the range is correct, skip trie iteration
	if result.valid() {
		snapSuccessfulRangeProofMeter.Mark(1)
		logger.Trace("Proved state range", "last", hexutil.Encode(last))

		// The verification passed, process each state with the given
		// callback function. If this state represents a contract, the
		// corresponding storage check will be performed in the callback
		if err := result.forEach(func(key []byte, val []byte) error { return onState(key, val, false, false) }); err != nil {
			return false, nil, err
		}
		// Only abort the iteration when both database and trie are exhausted
		return !result.diskMore && !result.trieMore, last, nil
	}
	logger.Trace("Detected outdated state range", "last", hexutil.Encode(last), "err", result.proofErr)
	snapFailedRangeProofMeter.Mark(1)

	// Special case, the entire trie is missing. In the original trie scheme,
	// all the duplicated subtries are filtered out (only one copy of the data
	// is stored). In the snapshot model however, all the storage tries belonging
	// to different contracts are kept even if they are duplicated. Track this to
	// some extent to remove the noise from the statistics.
	if origin == nil && last == nil {
		meter := snapMissallAccountMeter
		if kind == snapStorage {
			meter = snapMissallStorageMeter
		}
		meter.Mark(1)
	}
	// We use the snap data to build up a cache which can be used by the
	// main account trie as a primary lookup when resolving hashes
	var snapNodeCache ethdb.KeyValueStore
	if len(result.keys) > 0 {
		snapNodeCache = memorydb.New()
		snapTrieDb := trie.NewDatabase(snapNodeCache)
		snapTrie := trie.NewEmpty(snapTrieDb)
		for i, key := range result.keys {
			snapTrie.Update(key, result.vals[i])
		}
		root, nodes, _ := snapTrie.Commit(false)
		if nodes != nil {
			snapTrieDb.Update(trie.NewWithNodeSet(nodes))
		}
		snapTrieDb.Commit(root, false, nil)
	}
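	// The cache built above is attached to the trie's node iterator below via
	// AddResolver, so nodes that can be reconstructed from the flat snapshot
	// data are served from memory instead of the disk database.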
	// Construct the trie for state iteration, reuse the trie
	// if it's already opened with some nodes resolved.
	tr := result.tr
	if tr == nil {
		tr, err = trie.New(trieId, dl.triedb)
		if err != nil {
			ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
			return false, nil, errMissingTrie
		}
	}
	var (
		trieMore       bool
		nodeIt         = tr.NodeIterator(origin)
		iter           = trie.NewIterator(nodeIt)
		kvkeys, kvvals = result.keys, result.vals

		// counters
		count     = 0 // number of states delivered by iterator
		created   = 0 // states created from the trie
		updated   = 0 // states updated from the trie
		deleted   = 0 // states not in trie, but were in snapshot
		untouched = 0 // states already correct

		// timers
		start    = time.Now()
		internal time.Duration
	)
	nodeIt.AddResolver(snapNodeCache)

	for iter.Next() {
		if last != nil && bytes.Compare(iter.Key, last) > 0 {
			trieMore = true
			break
		}
		count++
		write := true
		created++
		for len(kvkeys) > 0 {
			if cmp := bytes.Compare(kvkeys[0], iter.Key); cmp < 0 {
				// delete the key
				istart := time.Now()
				if err := onState(kvkeys[0], nil, false, true); err != nil {
					return false, nil, err
				}
				kvkeys = kvkeys[1:]
				kvvals = kvvals[1:]
				deleted++
				internal += time.Since(istart)
				continue
			} else if cmp == 0 {
				// the snapshot key can be overwritten
				created--
				if write = !bytes.Equal(kvvals[0], iter.Value); write {
					updated++
				} else {
					untouched++
				}
				kvkeys = kvkeys[1:]
				kvvals = kvvals[1:]
			}
			break
		}
		istart := time.Now()
		if err := onState(iter.Key, iter.Value, write, false); err != nil {
			return false, nil, err
		}
		internal += time.Since(istart)
	}
	if iter.Err != nil {
		return false, nil, iter.Err
	}
	// Delete all stale snapshot states remaining
	istart := time.Now()
	for _, key := range kvkeys {
		if err := onState(key, nil, false, true); err != nil {
			return false, nil, err
		}
		deleted += 1
	}
	internal += time.Since(istart)

	// Update metrics for counting trie iteration
	if kind == snapStorage {
		snapStorageTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
	} else {
		snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
	}
	logger.Debug("Regenerated state range", "root", trieId.Root, "last", hexutil.Encode(last),
		"count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)

	// If there are either more trie items, or there are more snap items
	// (in the next segment), then we need to keep working
	return !trieMore && !result.diskMore, last, nil
}
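
// Illustrative sketch (not part of the original source): callers drive
// generateRange in a resume loop, as generateStorages and generateAccounts
// do below:
//
//	for {
//		exhausted, last, err := dl.generateRange(ctx, id, prefix, kind, origin, max, onState, nil)
//		if err != nil || exhausted {
//			break
//		}
//		if origin = increaseKey(last); origin == nil {
//			break // last was 0xff..ff, nothing lies beyond it
//		}
//	}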

// checkAndFlush checks if an interruption signal has been received or the
// batch size has exceeded the allowance.
func (dl *diskLayer) checkAndFlush(ctx *generatorContext, current []byte) error {
	var abort chan *generatorStats
	select {
	case abort = <-dl.genAbort:
	default:
	}
	if ctx.batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
		if bytes.Compare(current, dl.genMarker) < 0 {
			log.Error("Snapshot generator went backwards", "current", fmt.Sprintf("%x", current), "genMarker", fmt.Sprintf("%x", dl.genMarker))
		}
		// Flush out the batch whether it's empty or not. It's possible that
		// all the states were recovered and the generation indeed made
		// progress.
		journalProgress(ctx.batch, current, ctx.stats)

		if err := ctx.batch.Write(); err != nil {
			return err
		}
		ctx.batch.Reset()

		dl.lock.Lock()
		dl.genMarker = current
		dl.lock.Unlock()

		if abort != nil {
			ctx.stats.Log("Aborting state snapshot generation", dl.root, current)
			return newAbortErr(abort) // bubble up an error for interruption
		}
		// Don't hold the iterators too long, release them to let the compactor work
		ctx.reopenIterator(snapAccount)
		ctx.reopenIterator(snapStorage)
	}
	if time.Since(ctx.logged) > 8*time.Second {
		ctx.stats.Log("Generating state snapshot", dl.root, current)
		ctx.logged = time.Now()
	}
	return nil
}

// generateStorages generates the missing storage slots of the specific contract.
// It's supposed to restart the generation from the given origin position.
func generateStorages(ctx *generatorContext, dl *diskLayer, stateRoot common.Hash, account common.Hash, storageRoot common.Hash, storeMarker []byte) error {
	onStorage := func(key []byte, val []byte, write bool, delete bool) error {
		defer func(start time.Time) {
			snapStorageWriteCounter.Inc(time.Since(start).Nanoseconds())
		}(time.Now())

		if delete {
			rawdb.DeleteStorageSnapshot(ctx.batch, account, common.BytesToHash(key))
			snapWipedStorageMeter.Mark(1)
			return nil
		}
		if write {
			rawdb.WriteStorageSnapshot(ctx.batch, account, common.BytesToHash(key), val)
			snapGeneratedStorageMeter.Mark(1)
		} else {
			snapRecoveredStorageMeter.Mark(1)
		}
		ctx.stats.storage += common.StorageSize(1 + 2*common.HashLength + len(val))
		ctx.stats.slots++

		// If we've exceeded our batch allowance or termination was requested, flush to disk
		if err := dl.checkAndFlush(ctx, append(account[:], key...)); err != nil {
			return err
		}
		return nil
	}
	// Loop for re-generating the missing storage slots.
	var origin = common.CopyBytes(storeMarker)
	for {
		id := trie.StorageTrieID(stateRoot, account, storageRoot)
		exhausted, last, err := dl.generateRange(ctx, id, append(rawdb.SnapshotStoragePrefix, account.Bytes()...), snapStorage, origin, storageCheckRange, onStorage, nil)
		if err != nil {
			return err // The procedure was aborted, either by an external signal or an internal error.
		}
		// Abort the procedure if the entire contract storage has been generated
		if exhausted {
			break
		}
		if origin = increaseKey(last); origin == nil {
			break // special case, the last is 0xffffffff...fff
		}
	}
	return nil
}
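
// Illustrative note: storage snapshot entries are keyed as
// rawdb.SnapshotStoragePrefix ++ accountHash ++ slotHash, which is why the
// flush marker passed to checkAndFlush above is accountHash ++ slotHash
// (64 bytes).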

// generateAccounts generates the missing snapshot accounts as well as their
// storage slots in the main trie. It's supposed to restart the generation
// from the given origin position.
func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) error {
	onAccount := func(key []byte, val []byte, write bool, delete bool) error {
		// Make sure to clear all dangling storages before this account
		account := common.BytesToHash(key)
		ctx.removeStorageBefore(account)

		start := time.Now()
		if delete {
			rawdb.DeleteAccountSnapshot(ctx.batch, account)
			snapWipedAccountMeter.Mark(1)
			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())

			ctx.removeStorageAt(account)
			return nil
		}
		// Retrieve the current account and flatten it into the internal format
		var acc struct {
			Nonce    uint64
			Balance  *big.Int
			Root     common.Hash
			CodeHash []byte
		}
		if err := rlp.DecodeBytes(val, &acc); err != nil {
			log.Crit("Invalid account encountered during snapshot creation", "err", err)
		}
		// If the account is not yet in-progress, write it out
		if accMarker == nil || !bytes.Equal(account[:], accMarker) {
			dataLen := len(val) // Approximate size, saves us a round of RLP-encoding
			if !write {
				if bytes.Equal(acc.CodeHash, emptyCode[:]) {
					dataLen -= 32
				}
				if acc.Root == emptyRoot {
					dataLen -= 32
				}
				snapRecoveredAccountMeter.Mark(1)
			} else {
				data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
				dataLen = len(data)
				rawdb.WriteAccountSnapshot(ctx.batch, account, data)
				snapGeneratedAccountMeter.Mark(1)
			}
			ctx.stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
			ctx.stats.accounts++
		}
		// If the snap generation arrives here after an interruption, genMarker may
		// go backwards when the last genMarker consists of accountHash and storageHash
		marker := account[:]
		if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
			marker = dl.genMarker[:]
		}
		// If we've exceeded our batch allowance or termination was requested, flush to disk
		if err := dl.checkAndFlush(ctx, marker); err != nil {
			return err
		}
		snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds()) // let's count flush time as well

		// If the iterated account is a contract, create a further loop to
		// verify or regenerate the contract storage.
		if acc.Root == emptyRoot {
			ctx.removeStorageAt(account)
		} else {
			var storeMarker []byte
			if accMarker != nil && bytes.Equal(account[:], accMarker) && len(dl.genMarker) > common.HashLength {
				storeMarker = dl.genMarker[common.HashLength:]
			}
			if err := generateStorages(ctx, dl, dl.root, account, acc.Root, storeMarker); err != nil {
				return err
			}
		}
		// Some account was processed, unset the marker
		accMarker = nil
		return nil
	}
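	// Illustrative note: the slim format produced by SlimAccountRLP omits the
	// empty storage root and the empty code hash (32 bytes each), which is
	// what the dataLen adjustments inside the callback above approximate for
	// already-correct accounts.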
	// Always reset the initial account range to 1 whenever recovering from an
	// interruption. TODO(rjl493456442) can we remove it?
	var accountRange = accountCheckRange
	if len(accMarker) > 0 {
		accountRange = 1
	}
	origin := common.CopyBytes(accMarker)
	for {
		id := trie.StateTrieID(dl.root)
		exhausted, last, err := dl.generateRange(ctx, id, rawdb.SnapshotAccountPrefix, snapAccount, origin, accountRange, onAccount, FullAccountRLP)
		if err != nil {
			return err // The procedure was aborted, either by an external signal or an internal error.
		}
		origin = increaseKey(last)

		// Last step, clean up the storages after the last account.
		// All the storages left should be treated as dangling.
		if origin == nil || exhausted {
			ctx.removeStorageLeft()
			break
		}
		accountRange = accountCheckRange
	}
	return nil
}

// generate is a background thread that iterates over the state and storage tries,
// constructing the state snapshot. All the arguments are purely for statistics
// gathering and logging, since the method surfs the blocks as they arrive, often
// being restarted.
func (dl *diskLayer) generate(stats *generatorStats) {
	var (
		accMarker []byte
		abort     chan *generatorStats
	)
	if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
		accMarker = dl.genMarker[:common.HashLength]
	}
	stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)

	// Initialize the global generator context. The snapshot iterators are
	// opened at the interrupted position because the assumption is held
	// that all the snapshot data generated before the marker is correct.
	// Even if the snapshot data is updated during the interruption (before
	// or at the marker), the assumption still holds.
	// The account or storage slot at the interruption point will be
	// processed twice by the generator (it was already processed in the
	// last run), but that's fine.
	ctx := newGeneratorContext(stats, dl.diskdb, accMarker, dl.genMarker)
	defer ctx.close()

	if err := generateAccounts(ctx, dl, accMarker); err != nil {
		// Extract the received interruption signal if it exists
		if aerr, ok := err.(*abortErr); ok {
			abort = aerr.abort
		}
		// Aborted by an internal error, wait for the signal
		if abort == nil {
			abort = <-dl.genAbort
		}
		abort <- stats
		return
	}
	// Snapshot fully generated, set the marker to nil.
	// Note that even if there is nothing to commit, the generator is
	// persisted anyway to mark the snapshot as complete.
	journalProgress(ctx.batch, nil, stats)
	if err := ctx.batch.Write(); err != nil {
		log.Error("Failed to flush batch", "err", err)

		abort = <-dl.genAbort
		abort <- stats
		return
	}
	ctx.batch.Reset()

	log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
		"storage", stats.storage, "dangling", stats.dangling, "elapsed", common.PrettyDuration(time.Since(stats.start)))

	dl.lock.Lock()
	dl.genMarker = nil
	close(dl.genPending)
	dl.lock.Unlock()

	// Someone will be looking for us, wait it out
	abort = <-dl.genAbort
	abort <- nil
}

// increaseKey increments the input key by one. Nil is returned if the
// addition overflows the entire key (i.e. the key is all 0xff bytes).
func increaseKey(key []byte) []byte {
	for i := len(key) - 1; i >= 0; i-- {
		key[i]++
		if key[i] != 0x0 {
			return key
		}
	}
	return nil
}

// abortErr wraps an interruption signal received to indicate that the
// generation was aborted by an external process.
type abortErr struct {
	abort chan *generatorStats
}

func newAbortErr(abort chan *generatorStats) error {
	return &abortErr{abort: abort}
}

func (err *abortErr) Error() string {
	return "aborted"
}
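
// Illustrative sketch (not part of the original source): external code stops
// a running generator by handing it a channel over genAbort and waiting for
// the final stats, matching the handshake in generate above:
//
//	ch := make(chan *generatorStats)
//	dl.genAbort <- ch
//	stats := <-ch // nil if generation had already completed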