github.com/tirogen/go-ethereum@v1.10.12-0.20221226051715-250cfede41b6/core/state/snapshot/generate.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"errors"
	"fmt"
	"math/big"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/tirogen/go-ethereum/common"
	"github.com/tirogen/go-ethereum/common/hexutil"
	"github.com/tirogen/go-ethereum/core/rawdb"
	"github.com/tirogen/go-ethereum/crypto"
	"github.com/tirogen/go-ethereum/ethdb"
	"github.com/tirogen/go-ethereum/log"
	"github.com/tirogen/go-ethereum/rlp"
	"github.com/tirogen/go-ethereum/trie"
)

var (
	// emptyRoot is the known root hash of an empty trie.
	emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

	// emptyCode is the known hash of the empty EVM bytecode.
	emptyCode = crypto.Keccak256Hash(nil)

	// accountCheckRange is the upper limit of the number of accounts involved in
	// each range check. This is a value estimated based on experience. If this
	// range is too large, the failure rate of range proofs will increase. Conversely,
	// if the range is too small, the efficiency of the state recovery will decrease.
	accountCheckRange = 128

	// storageCheckRange is the upper limit of the number of storage slots involved
	// in each range check. This is a value estimated based on experience. If this
	// range is too large, the failure rate of range proofs will increase. Conversely,
	// if the range is too small, the efficiency of the state recovery will decrease.
	storageCheckRange = 1024

	// errMissingTrie is returned if the target trie is missing while the generation
	// is running. In this case the generation is aborted and waits for a new signal.
	errMissingTrie = errors.New("missing trie")
)

// generateSnapshot regenerates a brand new snapshot based on an existing state
// database and head block asynchronously. The snapshot is returned immediately
// and generation is continued in the background until done.
func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *trie.Database, cache int, root common.Hash) *diskLayer {
	// Create a new disk layer with an initialized state marker at zero
	var (
		stats     = &generatorStats{start: time.Now()}
		batch     = diskdb.NewBatch()
		genMarker = []byte{} // Initialized but empty!
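		// A nil genMarker marks a fully generated snapshot (see journalProgress),
		// while this empty non-nil marker means generation has started but no
		// account has been processed yet.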
	)
	rawdb.WriteSnapshotRoot(batch, root)
	journalProgress(batch, genMarker, stats)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write initialized state marker", "err", err)
	}
	base := &diskLayer{
		diskdb:     diskdb,
		triedb:     triedb,
		root:       root,
		cache:      fastcache.New(cache * 1024 * 1024),
		genMarker:  genMarker,
		genPending: make(chan struct{}),
		genAbort:   make(chan chan *generatorStats),
	}
	go base.generate(stats)
	log.Debug("Start snapshot generation", "root", root)
	return base
}

// journalProgress persists the generator stats into the database to resume later.
func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorStats) {
	// Write out the generator marker. Note it's a standalone disk layer generator
	// which is not mixed with the journal. It's ok if the generator is persisted while
	// the journal is not.
	entry := journalGenerator{
		Done:   marker == nil,
		Marker: marker,
	}
	if stats != nil {
		entry.Accounts = stats.accounts
		entry.Slots = stats.slots
		entry.Storage = uint64(stats.storage)
	}
	blob, err := rlp.EncodeToBytes(entry)
	if err != nil {
		panic(err) // Cannot happen, here to catch dev errors
	}
	var logstr string
	switch {
	case marker == nil:
		logstr = "done"
	case bytes.Equal(marker, []byte{}):
		logstr = "empty"
	case len(marker) == common.HashLength:
		logstr = fmt.Sprintf("%#x", marker)
	default:
		logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:])
	}
	log.Debug("Journalled generator progress", "progress", logstr)
	rawdb.WriteSnapshotGenerator(db, blob)
}

// proofResult contains the output of range proving which can be used
// for further processing regardless of whether it is successful or not.
type proofResult struct {
	keys     [][]byte   // The key set of all elements being iterated, even if the proving failed
	vals     [][]byte   // The val set of all elements being iterated, even if the proving failed
	diskMore bool       // Set when the database has extra snapshot states since the last iteration
	trieMore bool       // Set when the trie has extra snapshot states (only meaningful for successful proving)
	proofErr error      // Indicator whether the given state range is valid or not
	tr       *trie.Trie // The trie, in case the trie was resolved by the prover (may be nil)
}

// valid returns whether the range proof was successful.
func (result *proofResult) valid() bool {
	return result.proofErr == nil
}

// last returns the last verified element key regardless of whether the range proof
// was successful or not. Nil is returned if nothing was involved in the proving.
func (result *proofResult) last() []byte {
	var last []byte
	if len(result.keys) > 0 {
		last = result.keys[len(result.keys)-1]
	}
	return last
}

// forEach iterates all the visited elements and applies the given callback on them.
// The iteration is aborted if the callback returns a non-nil error.
func (result *proofResult) forEach(callback func(key []byte, val []byte) error) error {
	for i := 0; i < len(result.keys); i++ {
		key, val := result.keys[i], result.vals[i]
		if err := callback(key, val); err != nil {
			return err
		}
	}
	return nil
}

// proveRange proves that the snapshot segment with a particular prefix is "valid".
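// "Valid" here means that the flat key/value pairs read from the snapshot under
// the given prefix match the merkle trie identified by trieId.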
// The iteration start point will be assigned if the iterator is restored from
// the last interruption. Max will be assigned in order to limit the maximum
// amount of data involved in each iteration.
//
// The proof result will be returned if the range proving is finished, otherwise
// the error will be returned to abort the entire procedure.
func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
	var (
		keys     [][]byte
		vals     [][]byte
		proof    = rawdb.NewMemoryDatabase()
		diskMore = false
		iter     = ctx.iterator(kind)
		start    = time.Now()
		min      = append(prefix, origin...)
	)
	for iter.Next() {
		// Ensure the iterated item is always equal or larger than the given origin.
		key := iter.Key()
		if bytes.Compare(key, min) < 0 {
			return nil, errors.New("invalid iteration position")
		}
		// Ensure the iterated item still falls in the specified prefix. If
		// not, the items in the specified area have all been visited.
		// Move the iterator a step back since we iterate one extra element
		// out.
		if !bytes.Equal(key[:len(prefix)], prefix) {
			iter.Hold()
			break
		}
		// Break if we've reached the max size, and signal that we're not
		// done yet. Move the iterator a step back since we iterate one
		// extra element out.
		if len(keys) == max {
			iter.Hold()
			diskMore = true
			break
		}
		keys = append(keys, common.CopyBytes(key[len(prefix):]))

		if valueConvertFn == nil {
			vals = append(vals, common.CopyBytes(iter.Value()))
		} else {
			val, err := valueConvertFn(iter.Value())
			if err != nil {
				// Special case, the state data is corrupted (invalid slim-format account),
				// don't abort the entire procedure directly. Instead, let the fallback
				// generation heal the invalid data.
				//
				// Here append the original value to ensure that the numbers of keys and
				// values are aligned.
				vals = append(vals, common.CopyBytes(iter.Value()))
				log.Error("Failed to convert account state data", "err", err)
			} else {
				vals = append(vals, val)
			}
		}
	}
	// Update metrics for database iteration and merkle proving
	if kind == snapStorage {
		snapStorageSnapReadCounter.Inc(time.Since(start).Nanoseconds())
	} else {
		snapAccountSnapReadCounter.Inc(time.Since(start).Nanoseconds())
	}
	defer func(start time.Time) {
		if kind == snapStorage {
			snapStorageProveCounter.Inc(time.Since(start).Nanoseconds())
		} else {
			snapAccountProveCounter.Inc(time.Since(start).Nanoseconds())
		}
	}(time.Now())

	// The snap state is exhausted, pass the entire key/val set for verification
	root := trieId.Root
	if origin == nil && !diskMore {
		stackTr := trie.NewStackTrie(nil)
		for i, key := range keys {
			stackTr.TryUpdate(key, vals[i])
		}
		if gotRoot := stackTr.Hash(); gotRoot != root {
			return &proofResult{
				keys:     keys,
				vals:     vals,
				proofErr: fmt.Errorf("wrong root: have %#x want %#x", gotRoot, root),
			}, nil
		}
		return &proofResult{keys: keys, vals: vals}, nil
	}
	// Snap state is chunked, generate edge proofs for verification.
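	// Since only a chunk of the flat state is available here, the root cannot be
	// recomputed directly. Instead, open the trie and produce merkle proofs for
	// the first and last keys so that VerifyRangeProof below can check the chunk
	// against the trie root.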
	tr, err := trie.New(trieId, dl.triedb)
	if err != nil {
		ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
		return nil, errMissingTrie
	}
	// First, find out the key of the last iterated element.
	var last []byte
	if len(keys) > 0 {
		last = keys[len(keys)-1]
	}
	// Generate the Merkle proofs for the first and last element
	if origin == nil {
		origin = common.Hash{}.Bytes()
	}
	if err := tr.Prove(origin, 0, proof); err != nil {
		log.Debug("Failed to prove range", "kind", kind, "origin", origin, "err", err)
		return &proofResult{
			keys:     keys,
			vals:     vals,
			diskMore: diskMore,
			proofErr: err,
			tr:       tr,
		}, nil
	}
	if last != nil {
		if err := tr.Prove(last, 0, proof); err != nil {
			log.Debug("Failed to prove range", "kind", kind, "last", last, "err", err)
			return &proofResult{
				keys:     keys,
				vals:     vals,
				diskMore: diskMore,
				proofErr: err,
				tr:       tr,
			}, nil
		}
	}
	// Verify the snapshot segment with the range prover, ensure that all flat states
	// in this range correspond to the merkle trie.
	cont, err := trie.VerifyRangeProof(root, origin, last, keys, vals, proof)
	return &proofResult{
			keys:     keys,
			vals:     vals,
			diskMore: diskMore,
			trieMore: cont,
			proofErr: err,
			tr:       tr},
		nil
}

// onStateCallback is a function that is called by generateRange when processing a range of
// accounts or storage slots. For each element, the callback is invoked.
//
// - If 'delete' is true, then this element (and potential slots) needs to be deleted from the snapshot.
// - If 'write' is true, then this element needs to be updated with the 'val'.
// - If 'write' is false, then this element is already correct, and needs no update.
// The 'val' is the canonical encoding of the value (not the slim format for accounts).
//
// However, for accounts, the storage trie of the account needs to be checked. Also,
// dangling storages (a storage exists but the corresponding account is missing) need to
// be cleaned up.
type onStateCallback func(key []byte, val []byte, write bool, delete bool) error

// generateRange generates the state segment with a particular prefix. Generation can
// either verify the correctness of existing state through a range proof and skip
// generation, or iterate the trie to regenerate the state on demand.
func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefix []byte, kind string, origin []byte, max int, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
	// Use the range prover to check the validity of the flat state in the range
	result, err := dl.proveRange(ctx, trieId, prefix, kind, origin, max, valueConvertFn)
	if err != nil {
		return false, nil, err
	}
	last := result.last()

	// Construct contextual logger
	logCtx := []interface{}{"kind", kind, "prefix", hexutil.Encode(prefix)}
	if len(origin) > 0 {
		logCtx = append(logCtx, "origin", hexutil.Encode(origin))
	}
	logger := log.New(logCtx...)

	// The range prover says the range is correct, skip trie iteration
	if result.valid() {
		snapSuccessfulRangeProofMeter.Mark(1)
		logger.Trace("Proved state range", "last", hexutil.Encode(last))

		// The verification is passed, process each state with the given
		// callback function. If this state represents a contract, the
		// corresponding storage check will be performed in the callback
		if err := result.forEach(func(key []byte, val []byte) error { return onState(key, val, false, false) }); err != nil {
			return false, nil, err
		}
		// Only abort the iteration when both database and trie are exhausted
		return !result.diskMore && !result.trieMore, last, nil
	}
	logger.Trace("Detected outdated state range", "last", hexutil.Encode(last), "err", result.proofErr)
	snapFailedRangeProofMeter.Mark(1)

	// Special case, the entire trie is missing. In the original trie scheme,
	// all the duplicated subtries will be filtered out (only one copy of the data
	// will be stored). While in the snapshot model, all the storage tries
	// belonging to different contracts will be kept even if they are duplicated.
	// Track it so that, to a certain extent, the noise can be removed from the
	// statistics.
	if origin == nil && last == nil {
		meter := snapMissallAccountMeter
		if kind == snapStorage {
			meter = snapMissallStorageMeter
		}
		meter.Mark(1)
	}
	// We use the snap data to build up a cache which can be used by the
	// main account trie as a primary lookup when resolving hashes
	var snapNodeCache ethdb.Database
	if len(result.keys) > 0 {
		snapNodeCache = rawdb.NewMemoryDatabase()
		snapTrieDb := trie.NewDatabase(snapNodeCache)
		snapTrie := trie.NewEmpty(snapTrieDb)
		for i, key := range result.keys {
			snapTrie.Update(key, result.vals[i])
		}
		root, nodes, _ := snapTrie.Commit(false)
		if nodes != nil {
			snapTrieDb.Update(trie.NewWithNodeSet(nodes))
		}
		snapTrieDb.Commit(root, false, nil)
	}
	// Construct the trie for state iteration, reuse the trie
	// if it's already opened with some nodes resolved.
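	// The in-memory snapNodeCache built above is attached to the node iterator
	// below as a resolver, so subtries that already match the snapshot data can
	// be resolved from memory instead of the disk database.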
	tr := result.tr
	if tr == nil {
		tr, err = trie.New(trieId, dl.triedb)
		if err != nil {
			ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
			return false, nil, errMissingTrie
		}
	}
	var (
		trieMore       bool
		nodeIt         = tr.NodeIterator(origin)
		iter           = trie.NewIterator(nodeIt)
		kvkeys, kvvals = result.keys, result.vals

		// counters
		count     = 0 // number of states delivered by iterator
		created   = 0 // states created from the trie
		updated   = 0 // states updated from the trie
		deleted   = 0 // states not in trie, but were in snapshot
		untouched = 0 // states already correct

		// timers
		start    = time.Now()
		internal time.Duration
	)
	nodeIt.AddResolver(snapNodeCache)

	for iter.Next() {
		if last != nil && bytes.Compare(iter.Key, last) > 0 {
			trieMore = true
			break
		}
		count++
		write := true
		created++
		for len(kvkeys) > 0 {
			if cmp := bytes.Compare(kvkeys[0], iter.Key); cmp < 0 {
				// delete the key
				istart := time.Now()
				if err := onState(kvkeys[0], nil, false, true); err != nil {
					return false, nil, err
				}
				kvkeys = kvkeys[1:]
				kvvals = kvvals[1:]
				deleted++
				internal += time.Since(istart)
				continue
			} else if cmp == 0 {
				// the snapshot key can be overwritten
				created--
				if write = !bytes.Equal(kvvals[0], iter.Value); write {
					updated++
				} else {
					untouched++
				}
				kvkeys = kvkeys[1:]
				kvvals = kvvals[1:]
			}
			break
		}
		istart := time.Now()
		if err := onState(iter.Key, iter.Value, write, false); err != nil {
			return false, nil, err
		}
		internal += time.Since(istart)
	}
	if iter.Err != nil {
		return false, nil, iter.Err
	}
	// Delete all stale snapshot states remaining
	istart := time.Now()
	for _, key := range kvkeys {
		if err := onState(key, nil, false, true); err != nil {
			return false, nil, err
		}
		deleted += 1
	}
	internal += time.Since(istart)

	// Update metrics for counting trie iteration
	if kind == snapStorage {
		snapStorageTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
	} else {
		snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
	}
	logger.Debug("Regenerated state range", "root", trieId.Root, "last", hexutil.Encode(last),
		"count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)

	// If there are either more trie items, or there are more snap items
	// (in the next segment), then we need to keep working
	return !trieMore && !result.diskMore, last, nil
}

// checkAndFlush checks if an interruption signal has been received or the
// batch size has exceeded the allowance.
func (dl *diskLayer) checkAndFlush(ctx *generatorContext, current []byte) error {
	var abort chan *generatorStats
	select {
	case abort = <-dl.genAbort:
	default:
	}
	if ctx.batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
		if bytes.Compare(current, dl.genMarker) < 0 {
			log.Error("Snapshot generator went backwards", "current", fmt.Sprintf("%x", current), "genMarker", fmt.Sprintf("%x", dl.genMarker))
		}
		// Flush out the batch anyway no matter whether it's empty or not.
		// It's possible that all the states are recovered and the
		// generation indeed makes progress.
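		// The progress marker is journalled into the same batch as the generated
		// states, so the persisted marker and the states it covers always hit
		// disk together.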
		journalProgress(ctx.batch, current, ctx.stats)

		if err := ctx.batch.Write(); err != nil {
			return err
		}
		ctx.batch.Reset()

		dl.lock.Lock()
		dl.genMarker = current
		dl.lock.Unlock()

		if abort != nil {
			ctx.stats.Log("Aborting state snapshot generation", dl.root, current)
			return newAbortErr(abort) // bubble up an error for interruption
		}
		// Don't hold the iterators too long, release them to let the compactor work
		ctx.reopenIterator(snapAccount)
		ctx.reopenIterator(snapStorage)
	}
	if time.Since(ctx.logged) > 8*time.Second {
		ctx.stats.Log("Generating state snapshot", dl.root, current)
		ctx.logged = time.Now()
	}
	return nil
}

// generateStorages generates the missing storage slots of the given contract.
// It's supposed to restart the generation from the given origin position.
func generateStorages(ctx *generatorContext, dl *diskLayer, stateRoot common.Hash, account common.Hash, storageRoot common.Hash, storeMarker []byte) error {
	onStorage := func(key []byte, val []byte, write bool, delete bool) error {
		defer func(start time.Time) {
			snapStorageWriteCounter.Inc(time.Since(start).Nanoseconds())
		}(time.Now())

		if delete {
			rawdb.DeleteStorageSnapshot(ctx.batch, account, common.BytesToHash(key))
			snapWipedStorageMeter.Mark(1)
			return nil
		}
		if write {
			rawdb.WriteStorageSnapshot(ctx.batch, account, common.BytesToHash(key), val)
			snapGeneratedStorageMeter.Mark(1)
		} else {
			snapRecoveredStorageMeter.Mark(1)
		}
		ctx.stats.storage += common.StorageSize(1 + 2*common.HashLength + len(val))
		ctx.stats.slots++

		// If we've exceeded our batch allowance or termination was requested, flush to disk
		if err := dl.checkAndFlush(ctx, append(account[:], key...)); err != nil {
			return err
		}
		return nil
	}
	// Loop for re-generating the missing storage slots.
	var origin = common.CopyBytes(storeMarker)
	for {
		id := trie.StorageTrieID(stateRoot, account, storageRoot)
		exhausted, last, err := dl.generateRange(ctx, id, append(rawdb.SnapshotStoragePrefix, account.Bytes()...), snapStorage, origin, storageCheckRange, onStorage, nil)
		if err != nil {
			return err // The procedure is aborted, either by an external signal or an internal error.
		}
		// Abort the procedure if the entire contract storage is generated
		if exhausted {
			break
		}
		if origin = increaseKey(last); origin == nil {
			break // special case, the last is 0xffffffff...fff
		}
	}
	return nil
}

// generateAccounts generates the missing snapshot accounts as well as their
// storage slots in the main trie. It's supposed to restart the generation
// from the given origin position.
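// A nil accMarker means generation starts from the very first account, otherwise
// accMarker is the hash of the account that was in progress when the previous
// run was interrupted.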
func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) error {
	onAccount := func(key []byte, val []byte, write bool, delete bool) error {
		// Make sure to clear all dangling storages before this account
		account := common.BytesToHash(key)
		ctx.removeStorageBefore(account)

		start := time.Now()
		if delete {
			rawdb.DeleteAccountSnapshot(ctx.batch, account)
			snapWipedAccountMeter.Mark(1)
			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())

			ctx.removeStorageAt(account)
			return nil
		}
		// Retrieve the current account and flatten it into the internal format
		var acc struct {
			Nonce    uint64
			Balance  *big.Int
			Root     common.Hash
			CodeHash []byte
		}
		if err := rlp.DecodeBytes(val, &acc); err != nil {
			log.Crit("Invalid account encountered during snapshot creation", "err", err)
		}
		// If the account is not yet in-progress, write it out
		if accMarker == nil || !bytes.Equal(account[:], accMarker) {
			dataLen := len(val) // Approximate size, saves us a round of RLP-encoding
			if !write {
				if bytes.Equal(acc.CodeHash, emptyCode[:]) {
					dataLen -= 32
				}
				if acc.Root == emptyRoot {
					dataLen -= 32
				}
				snapRecoveredAccountMeter.Mark(1)
			} else {
				data := SlimAccountRLP(acc.Nonce, acc.Balance, acc.Root, acc.CodeHash)
				dataLen = len(data)
				rawdb.WriteAccountSnapshot(ctx.batch, account, data)
				snapGeneratedAccountMeter.Mark(1)
			}
			ctx.stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
			ctx.stats.accounts++
		}
		// If the snap generation gets here after an interruption, genMarker may go backwards
		// when the last genMarker consists of both the accountHash and the storageHash
		marker := account[:]
		if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
			marker = dl.genMarker[:]
		}
		// If we've exceeded our batch allowance or termination was requested, flush to disk
		if err := dl.checkAndFlush(ctx, marker); err != nil {
			return err
		}
		snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds()) // let's count flush time as well

		// If the iterated account is a contract, create a further loop to
		// verify or regenerate the contract storage.
		if acc.Root == emptyRoot {
			ctx.removeStorageAt(account)
		} else {
			var storeMarker []byte
			if accMarker != nil && bytes.Equal(account[:], accMarker) && len(dl.genMarker) > common.HashLength {
				storeMarker = dl.genMarker[common.HashLength:]
			}
			if err := generateStorages(ctx, dl, dl.root, account, acc.Root, storeMarker); err != nil {
				return err
			}
		}
		// Some account processed, unmark the marker
		accMarker = nil
		return nil
	}
	// Always reset the initial account range to 1 whenever recovering from an
	// interruption. TODO(rjl493456442) can we remove it?
	var accountRange = accountCheckRange
	if len(accMarker) > 0 {
		accountRange = 1
	}
	origin := common.CopyBytes(accMarker)
	for {
		id := trie.StateTrieID(dl.root)
		exhausted, last, err := dl.generateRange(ctx, id, rawdb.SnapshotAccountPrefix, snapAccount, origin, accountRange, onAccount, FullAccountRLP)
		if err != nil {
			return err // The procedure is aborted, either by an external signal or an internal error.
		}
		origin = increaseKey(last)

		// Last step, clean up the storages after the last account.
		// All the storages left should be treated as dangling.
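		// A nil origin here means increaseKey overflowed, i.e. the last account
		// hash was 0xff...ff, so the account space is exhausted as well.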
		if origin == nil || exhausted {
			ctx.removeStorageLeft()
			break
		}
		accountRange = accountCheckRange
	}
	return nil
}

// generate is a background thread that iterates over the state and storage tries,
// constructing the state snapshot. All the arguments are purely for statistics
// gathering and logging, since the method surfs the blocks as they arrive, often
// being restarted.
func (dl *diskLayer) generate(stats *generatorStats) {
	var (
		accMarker []byte
		abort     chan *generatorStats
	)
	if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
		accMarker = dl.genMarker[:common.HashLength]
	}
	stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)

	// Initialize the global generator context. The snapshot iterators are
	// opened at the interrupted position because the assumption holds
	// that all the snapshot data before the marker has been generated correctly.
	// Even if the snapshot data is updated during the interruption (before
	// or at the marker), the assumption still holds.
	// The account or storage slot at the interruption point will be
	// processed twice by the generator (it was already processed in the
	// last run), but that's fine.
	ctx := newGeneratorContext(stats, dl.diskdb, accMarker, dl.genMarker)
	defer ctx.close()

	if err := generateAccounts(ctx, dl, accMarker); err != nil {
		// Extract the received interruption signal if it exists
		if aerr, ok := err.(*abortErr); ok {
			abort = aerr.abort
		}
		// Aborted by an internal error, wait for the signal
		if abort == nil {
			abort = <-dl.genAbort
		}
		abort <- stats
		return
	}
	// Snapshot fully generated, set the marker to nil.
	// Note even if there is nothing to commit, persist the
	// generator anyway to mark the snapshot as complete.
	journalProgress(ctx.batch, nil, stats)
	if err := ctx.batch.Write(); err != nil {
		log.Error("Failed to flush batch", "err", err)

		abort = <-dl.genAbort
		abort <- stats
		return
	}
	ctx.batch.Reset()

	log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
		"storage", stats.storage, "dangling", stats.dangling, "elapsed", common.PrettyDuration(time.Since(stats.start)))

	dl.lock.Lock()
	dl.genMarker = nil
	close(dl.genPending)
	dl.lock.Unlock()

	// Someone will be looking for us, wait it out
	abort = <-dl.genAbort
	abort <- nil
}

// increaseKey increments the input key by one, treating it as a big-endian
// integer. Nil is returned if the entire addition operation overflows.
func increaseKey(key []byte) []byte {
	for i := len(key) - 1; i >= 0; i-- {
		key[i]++
		if key[i] != 0x0 {
			return key
		}
	}
	return nil
}

// abortErr wraps an interruption signal received to represent that the
// generation was aborted by an external process.
type abortErr struct {
	abort chan *generatorStats
}

func newAbortErr(abort chan *generatorStats) error {
	return &abortErr{abort: abort}
}

func (err *abortErr) Error() string {
	return "aborted"
}
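// Usage sketch (not part of the original file; the surrounding setup and the
// names headRoot/stopGeneration are assumptions): the snapshot tree constructor
// falls back to generateSnapshot when no valid journal can be loaded, and the
// returned disk layer keeps generating in the background.
//
//	base := generateSnapshot(diskdb, triedb, 256, headRoot)
//
//	// To stop the background generator, hand it a response channel via
//	// genAbort and wait for the final statistics (nil once fully generated).
//	stopGeneration := func(dl *diskLayer) *generatorStats {
//		resp := make(chan *generatorStats)
//		dl.genAbort <- resp
//		return <-resp
//	}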