github.1485827954.workers.dev/ethereum/go-ethereum@v1.14.3/core/state/snapshot/generate.go

// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package snapshot

import (
	"bytes"
	"errors"
	"fmt"
	"time"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/ethereum/go-ethereum/trie/trienode"
	"github.com/ethereum/go-ethereum/triedb"
)

var (
	// accountCheckRange is the upper limit of the number of accounts involved in
	// each range check. This is a value estimated based on experience. If the
	// range is too large, the failure rate of the range proof will increase;
	// if it is too small, the efficiency of state recovery will decrease.
	accountCheckRange = 128

	// storageCheckRange is the upper limit of the number of storage slots involved
	// in each range check. This is a value estimated based on experience. If the
	// range is too large, the failure rate of the range proof will increase;
	// if it is too small, the efficiency of state recovery will decrease.
	storageCheckRange = 1024

	// errMissingTrie is returned if the target trie is missing while the generation
	// is running. In this case the generation is aborted and waits for a new signal.
	errMissingTrie = errors.New("missing trie")
)
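
// The sketch below is illustrative only and not part of the original file: it
// shows roughly how a caller could kick off background generation and wait for
// it to complete (diskdb, triedb and headRoot are assumed to exist already;
// the cache size is given in megabytes).
//
//	layer := generateSnapshot(diskdb, triedb, 256, headRoot)
//	<-layer.genPending // closed once the snapshot is fully generated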

// generateSnapshot regenerates a brand new snapshot based on an existing state
// database and head block asynchronously. The snapshot is returned immediately
// and generation is continued in the background until done.
func generateSnapshot(diskdb ethdb.KeyValueStore, triedb *triedb.Database, cache int, root common.Hash) *diskLayer {
	// Create a new disk layer with an initialized state marker at zero
	var (
		stats     = &generatorStats{start: time.Now()}
		batch     = diskdb.NewBatch()
		genMarker = []byte{} // Initialized but empty!
	)
	rawdb.WriteSnapshotRoot(batch, root)
	journalProgress(batch, genMarker, stats)
	if err := batch.Write(); err != nil {
		log.Crit("Failed to write initialized state marker", "err", err)
	}
	base := &diskLayer{
		diskdb:     diskdb,
		triedb:     triedb,
		root:       root,
		cache:      fastcache.New(cache * 1024 * 1024),
		genMarker:  genMarker,
		genPending: make(chan struct{}),
		genAbort:   make(chan chan *generatorStats),
	}
	go base.generate(stats)
	log.Debug("Start snapshot generation", "root", root)
	return base
}

// journalProgress persists the generator stats into the database to resume later.
func journalProgress(db ethdb.KeyValueWriter, marker []byte, stats *generatorStats) {
	// Write out the generator marker. Note that the generator is a standalone
	// disk-layer construct which is not mixed with the journal; it's fine if
	// the generator is persisted while the journal is not.
	entry := journalGenerator{
		Done:   marker == nil,
		Marker: marker,
	}
	if stats != nil {
		entry.Accounts = stats.accounts
		entry.Slots = stats.slots
		entry.Storage = uint64(stats.storage)
	}
	blob, err := rlp.EncodeToBytes(entry)
	if err != nil {
		panic(err) // Cannot happen, here to catch dev errors
	}
	var logstr string
	switch {
	case marker == nil:
		logstr = "done"
	case bytes.Equal(marker, []byte{}):
		logstr = "empty"
	case len(marker) == common.HashLength:
		logstr = fmt.Sprintf("%#x", marker)
	default:
		logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:])
	}
	log.Debug("Journalled generator progress", "progress", logstr)
	rawdb.WriteSnapshotGenerator(db, blob)
}
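
// The journalled marker encodes the generator position as the switch above
// suggests: nil means generation is done, an empty (non-nil) slice means it
// has not progressed past the start, 32 bytes hold the last account hash, and
// 64 bytes hold the last account hash followed by the last storage slot hash.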

// proofResult contains the output of range proving which can be used
// for further processing regardless of whether it is successful or not.
type proofResult struct {
	keys     [][]byte   // The key set of all elements being iterated, even if proving failed
	vals     [][]byte   // The val set of all elements being iterated, even if proving failed
	diskMore bool       // Set when the database has extra snapshot states since the last iteration
	trieMore bool       // Set when the trie has extra snapshot states (only meaningful for a successful proof)
	proofErr error      // Indicator whether the given state range is valid or not
	tr       *trie.Trie // The trie, in case the trie was resolved by the prover (may be nil)
}

// valid reports whether the range proof was successful.
func (result *proofResult) valid() bool {
	return result.proofErr == nil
}

// last returns the last verified element key regardless of whether the range proof
// was successful or not. Nil is returned if nothing was involved in the proving.
func (result *proofResult) last() []byte {
	var last []byte
	if len(result.keys) > 0 {
		last = result.keys[len(result.keys)-1]
	}
	return last
}

// forEach iterates over all the visited elements and applies the given callback
// to them. The iteration is aborted if the callback returns a non-nil error.
func (result *proofResult) forEach(callback func(key []byte, val []byte) error) error {
	for i := 0; i < len(result.keys); i++ {
		key, val := result.keys[i], result.vals[i]
		if err := callback(key, val); err != nil {
			return err
		}
	}
	return nil
}

// proveRange proves that the snapshot segment with a particular prefix is "valid".
// The iteration start point will be assigned if the iterator is restored from
// the last interruption. Max will be assigned to limit the maximum amount of
// data involved in each iteration.
//
// The proof result will be returned if the range proving is finished, otherwise
// the error will be returned to abort the entire procedure.
func (dl *diskLayer) proveRange(ctx *generatorContext, trieId *trie.ID, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
	var (
		keys     [][]byte
		vals     [][]byte
		proof    = rawdb.NewMemoryDatabase()
		diskMore = false
		iter     = ctx.iterator(kind)
		start    = time.Now()
		min      = append(prefix, origin...)
	)
	for iter.Next() {
		// Ensure the iterated item is always equal to or larger than the given origin.
		key := iter.Key()
		if bytes.Compare(key, min) < 0 {
			return nil, errors.New("invalid iteration position")
		}
		// Ensure the iterated item still falls within the specified prefix. If
		// not, all the items in the specified range have been visited. Move the
		// iterator a step back since we iterated one extra element out.
		if !bytes.Equal(key[:len(prefix)], prefix) {
			iter.Hold()
			break
		}
		// Break if we've reached the max size, and signal that we're not
		// done yet. Move the iterator a step back since we iterated one
		// extra element out.
		if len(keys) == max {
			iter.Hold()
			diskMore = true
			break
		}
		keys = append(keys, common.CopyBytes(key[len(prefix):]))

		if valueConvertFn == nil {
			vals = append(vals, common.CopyBytes(iter.Value()))
		} else {
			val, err := valueConvertFn(iter.Value())
			if err != nil {
				// Special case: the state data is corrupted (invalid slim-format account).
				// Don't abort the entire procedure directly; instead, let the fallback
				// generation heal the invalid data.
				//
				// Append the original value here so that the number of keys and
				// values stays aligned.
				vals = append(vals, common.CopyBytes(iter.Value()))
				log.Error("Failed to convert account state data", "err", err)
			} else {
				vals = append(vals, val)
			}
		}
	}
	// Update metrics for database iteration and merkle proving
	if kind == snapStorage {
		snapStorageSnapReadCounter.Inc(time.Since(start).Nanoseconds())
	} else {
		snapAccountSnapReadCounter.Inc(time.Since(start).Nanoseconds())
	}
	defer func(start time.Time) {
		if kind == snapStorage {
			snapStorageProveCounter.Inc(time.Since(start).Nanoseconds())
		} else {
			snapAccountProveCounter.Inc(time.Since(start).Nanoseconds())
		}
	}(time.Now())

	// The snap state is exhausted, pass the entire key/val set for verification
	root := trieId.Root
	if origin == nil && !diskMore {
		stackTr := trie.NewStackTrie(nil)
		for i, key := range keys {
			if err := stackTr.Update(key, vals[i]); err != nil {
				return nil, err
			}
		}
		if gotRoot := stackTr.Hash(); gotRoot != root {
			return &proofResult{
				keys:     keys,
				vals:     vals,
				proofErr: fmt.Errorf("wrong root: have %#x want %#x", gotRoot, root),
			}, nil
		}
		return &proofResult{keys: keys, vals: vals}, nil
	}
	// Snap state is chunked, generate edge proofs for verification.
	tr, err := trie.New(trieId, dl.triedb)
	if err != nil {
		ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
		return nil, errMissingTrie
	}
	// Generate the Merkle proofs for the first and last element
	if origin == nil {
		origin = common.Hash{}.Bytes()
	}
	if err := tr.Prove(origin, proof); err != nil {
		log.Debug("Failed to prove range", "kind", kind, "origin", origin, "err", err)
		return &proofResult{
			keys:     keys,
			vals:     vals,
			diskMore: diskMore,
			proofErr: err,
			tr:       tr,
		}, nil
	}
	if len(keys) > 0 {
		if err := tr.Prove(keys[len(keys)-1], proof); err != nil {
			log.Debug("Failed to prove range", "kind", kind, "last", keys[len(keys)-1], "err", err)
			return &proofResult{
				keys:     keys,
				vals:     vals,
				diskMore: diskMore,
				proofErr: err,
				tr:       tr,
			}, nil
		}
	}
	// Verify the snapshot segment with the range prover, ensuring that all flat
	// states in this range correspond to the merkle trie.
	cont, err := trie.VerifyRangeProof(root, origin, keys, vals, proof)
	return &proofResult{
		keys:     keys,
		vals:     vals,
		diskMore: diskMore,
		trieMore: cont,
		proofErr: err,
		tr:       tr,
	}, nil
}
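
// A note on the edge proofs above: proving both the origin and the last visited
// key lets trie.VerifyRangeProof check not only that every returned key/value
// pair exists in the trie, but also that no leaf lying between those two
// boundaries was omitted from the flat snapshot data.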

// onStateCallback is a function that is called by generateRange when processing
// a range of accounts or storage slots. The callback is invoked once for each element.
//
// - If 'delete' is true, then this element (and its potential slots) needs to be deleted from the snapshot.
// - If 'write' is true, then this element needs to be updated with the 'val'.
// - If 'write' is false, then this element is already correct, and needs no update.
// The 'val' is the canonical encoding of the value (not the slim format for accounts).
//
// For accounts, the storage trie of the account also needs to be checked, and
// dangling storages (storage exists but the corresponding account is missing)
// need to be cleaned up.
type onStateCallback func(key []byte, val []byte, write bool, delete bool) error
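
// An illustrative callback (not part of the original file) that merely tallies
// what the generator would do might look like the sketch below; the real
// callbacks live in generateAccounts and generateStorages further down.
//
//	var created, deleted, untouched int
//	cb := func(key, val []byte, write, delete bool) error {
//		switch {
//		case delete:
//			deleted++
//		case write:
//			created++
//		default:
//			untouched++
//		}
//		return nil
//	}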

// generateRange generates the state segment with a particular prefix. Generation
// can either verify the correctness of existing state through a range proof and
// skip generation, or iterate the trie to regenerate the state on demand.
func (dl *diskLayer) generateRange(ctx *generatorContext, trieId *trie.ID, prefix []byte, kind string, origin []byte, max int, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
	// Use the range prover to check the validity of the flat state in the range
	result, err := dl.proveRange(ctx, trieId, prefix, kind, origin, max, valueConvertFn)
	if err != nil {
		return false, nil, err
	}
	last := result.last()

	// Construct contextual logger
	logCtx := []interface{}{"kind", kind, "prefix", hexutil.Encode(prefix)}
	if len(origin) > 0 {
		logCtx = append(logCtx, "origin", hexutil.Encode(origin))
	}
	logger := log.New(logCtx...)

	// The range prover says the range is correct, skip trie iteration
	if result.valid() {
		snapSuccessfulRangeProofMeter.Mark(1)
		logger.Trace("Proved state range", "last", hexutil.Encode(last))

		// The verification passed; process each state with the given callback
		// function. If a state represents a contract, the corresponding storage
		// check is performed in the callback.
		if err := result.forEach(func(key []byte, val []byte) error { return onState(key, val, false, false) }); err != nil {
			return false, nil, err
		}
		// Only abort the iteration when both database and trie are exhausted
		return !result.diskMore && !result.trieMore, last, nil
	}
	logger.Trace("Detected outdated state range", "last", hexutil.Encode(last), "err", result.proofErr)
	snapFailedRangeProofMeter.Mark(1)

	// Special case: the entire trie is missing. In the original trie scheme,
	// duplicated subtries are filtered out (only one copy of the data is
	// stored), while in the snapshot model the storage tries belonging to
	// different contracts are all kept even if they are duplicates. Track this
	// to remove some of the noise from the statistics.
	if origin == nil && last == nil {
		meter := snapMissallAccountMeter
		if kind == snapStorage {
			meter = snapMissallStorageMeter
		}
		meter.Mark(1)
	}
	// We use the snap data to build up a cache which can be used by the
	// main account trie as a primary lookup when resolving hashes
	var resolver trie.NodeResolver
	if len(result.keys) > 0 {
		mdb := rawdb.NewMemoryDatabase()
		tdb := triedb.NewDatabase(mdb, triedb.HashDefaults)
		defer tdb.Close()
		snapTrie := trie.NewEmpty(tdb)
		for i, key := range result.keys {
			snapTrie.Update(key, result.vals[i])
		}
		root, nodes, err := snapTrie.Commit(false)
		if err != nil {
			return false, nil, err
		}
		if nodes != nil {
			tdb.Update(root, types.EmptyRootHash, 0, trienode.NewWithNodeSet(nodes), nil)
			tdb.Commit(root, false)
		}
		resolver = func(owner common.Hash, path []byte, hash common.Hash) []byte {
			return rawdb.ReadTrieNode(mdb, owner, path, hash, tdb.Scheme())
		}
	}
	// Construct the trie for state iteration, reusing the trie if it was
	// already opened with some nodes resolved.
	tr := result.tr
	if tr == nil {
		tr, err = trie.New(trieId, dl.triedb)
		if err != nil {
			ctx.stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
			return false, nil, errMissingTrie
		}
	}
	var (
		trieMore       bool
		kvkeys, kvvals = result.keys, result.vals

		// counters
		count     = 0 // number of states delivered by iterator
		created   = 0 // states created from the trie
		updated   = 0 // states updated from the trie
		deleted   = 0 // states not in trie, but were in snapshot
		untouched = 0 // states already correct

		// timers
		start    = time.Now()
		internal time.Duration
	)
	nodeIt, err := tr.NodeIterator(origin)
	if err != nil {
		return false, nil, err
	}
	nodeIt.AddResolver(resolver)
	iter := trie.NewIterator(nodeIt)
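
	// The loop below merges the trie iterator with the leftover flat keys in
	// lockstep: flat keys smaller than the current trie key are stale and get
	// deleted, an equal key is rewritten only if its value changed, and trie
	// keys with no flat counterpart are created from scratch.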
	for iter.Next() {
		if last != nil && bytes.Compare(iter.Key, last) > 0 {
			trieMore = true
			break
		}
		count++
		write := true
		created++
		for len(kvkeys) > 0 {
			if cmp := bytes.Compare(kvkeys[0], iter.Key); cmp < 0 {
				// delete the stale snapshot key
				istart := time.Now()
				if err := onState(kvkeys[0], nil, false, true); err != nil {
					return false, nil, err
				}
				kvkeys = kvkeys[1:]
				kvvals = kvvals[1:]
				deleted++
				internal += time.Since(istart)
				continue
			} else if cmp == 0 {
				// the snapshot key can be overwritten
				created--
				if write = !bytes.Equal(kvvals[0], iter.Value); write {
					updated++
				} else {
					untouched++
				}
				kvkeys = kvkeys[1:]
				kvvals = kvvals[1:]
			}
			break
		}
		istart := time.Now()
		if err := onState(iter.Key, iter.Value, write, false); err != nil {
			return false, nil, err
		}
		internal += time.Since(istart)
	}
	if iter.Err != nil {
		// Trie errors should never happen. Still, in case of a bug, expose the
		// error here, as the outer code will presume errors are interrupts, not
		// some deeper issue.
		log.Error("State snapshotter failed to iterate trie", "err", iter.Err)
		return false, nil, iter.Err
	}
	// Delete all remaining stale snapshot states
	istart := time.Now()
	for _, key := range kvkeys {
		if err := onState(key, nil, false, true); err != nil {
			return false, nil, err
		}
		deleted++
	}
	internal += time.Since(istart)

	// Update metrics for counting trie iteration
	if kind == snapStorage {
		snapStorageTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
	} else {
		snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
	}
	logger.Debug("Regenerated state range", "root", trieId.Root, "last", hexutil.Encode(last),
		"count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)

	// If there are either more trie items, or more snap items (in the next
	// segment), then we need to keep working
	return !trieMore && !result.diskMore, last, nil
}
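
// Callers drive generateRange chunk by chunk; the pattern is roughly the
// following (see generateStorages and generateAccounts below for the real
// loops):
//
//	for {
//		exhausted, last, err := dl.generateRange(ctx, id, prefix, kind, origin, max, onState, convert)
//		if err != nil {
//			return err
//		}
//		if exhausted {
//			break
//		}
//		if origin = increaseKey(last); origin == nil {
//			break // the last key was 0xff...ff
//		}
//	}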

// checkAndFlush checks whether an interruption signal has been received or the
// batch size has exceeded the allowance, and flushes the progress to disk if so.
func (dl *diskLayer) checkAndFlush(ctx *generatorContext, current []byte) error {
	var abort chan *generatorStats
	select {
	case abort = <-dl.genAbort:
	default:
	}
	if ctx.batch.ValueSize() > ethdb.IdealBatchSize || abort != nil {
		if bytes.Compare(current, dl.genMarker) < 0 {
			log.Error("Snapshot generator went backwards", "current", fmt.Sprintf("%x", current), "genMarker", fmt.Sprintf("%x", dl.genMarker))
		}
		// Flush out the batch regardless of whether it's empty or not. It's
		// possible that all the states were recovered and the generation
		// still made progress.
		journalProgress(ctx.batch, current, ctx.stats)

		if err := ctx.batch.Write(); err != nil {
			return err
		}
		ctx.batch.Reset()

		dl.lock.Lock()
		dl.genMarker = current
		dl.lock.Unlock()

		if abort != nil {
			ctx.stats.Log("Aborting state snapshot generation", dl.root, current)
			return newAbortErr(abort) // bubble up an error for interruption
		}
		// Don't hold the iterators for too long; release them so the compactor can work
		ctx.reopenIterator(snapAccount)
		ctx.reopenIterator(snapStorage)
	}
	if time.Since(ctx.logged) > 8*time.Second {
		ctx.stats.Log("Generating state snapshot", dl.root, current)
		ctx.logged = time.Now()
	}
	return nil
}
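
// When an abort is requested, the reply channel is carried inside the returned
// abortErr so that generate (at the bottom of this file) can send the final
// stats back on that same channel once the generation loop has unwound.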

// generateStorages generates the missing storage slots of the specific contract.
// It's supposed to restart the generation from the given origin position.
func generateStorages(ctx *generatorContext, dl *diskLayer, stateRoot common.Hash, account common.Hash, storageRoot common.Hash, storeMarker []byte) error {
	onStorage := func(key []byte, val []byte, write bool, delete bool) error {
		defer func(start time.Time) {
			snapStorageWriteCounter.Inc(time.Since(start).Nanoseconds())
		}(time.Now())

		if delete {
			rawdb.DeleteStorageSnapshot(ctx.batch, account, common.BytesToHash(key))
			snapWipedStorageMeter.Mark(1)
			return nil
		}
		if write {
			rawdb.WriteStorageSnapshot(ctx.batch, account, common.BytesToHash(key), val)
			snapGeneratedStorageMeter.Mark(1)
		} else {
			snapRecoveredStorageMeter.Mark(1)
		}
		ctx.stats.storage += common.StorageSize(1 + 2*common.HashLength + len(val))
		ctx.stats.slots++

		// If we've exceeded our batch allowance or termination was requested, flush to disk
		if err := dl.checkAndFlush(ctx, append(account[:], key...)); err != nil {
			return err
		}
		return nil
	}
	// Loop for regenerating the missing storage slots.
	var origin = common.CopyBytes(storeMarker)
	for {
		id := trie.StorageTrieID(stateRoot, account, storageRoot)
		exhausted, last, err := dl.generateRange(ctx, id, append(rawdb.SnapshotStoragePrefix, account.Bytes()...), snapStorage, origin, storageCheckRange, onStorage, nil)
		if err != nil {
			return err // The procedure was aborted, either by an external signal or an internal error.
		}
		// Abort the procedure if the entire contract storage is generated
		if exhausted {
			break
		}
		if origin = increaseKey(last); origin == nil {
			break // special case, the last key is 0xff...ff
		}
	}
	return nil
}

// generateAccounts generates the missing snapshot accounts as well as their
// storage slots in the main trie. It's supposed to restart the generation
// from the given origin position.
func generateAccounts(ctx *generatorContext, dl *diskLayer, accMarker []byte) error {
	onAccount := func(key []byte, val []byte, write bool, delete bool) error {
		// Make sure to clear all dangling storages before this account
		account := common.BytesToHash(key)
		ctx.removeStorageBefore(account)

		start := time.Now()
		if delete {
			rawdb.DeleteAccountSnapshot(ctx.batch, account)
			snapWipedAccountMeter.Mark(1)
			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())

			ctx.removeStorageAt(account)
			return nil
		}
		// Retrieve the current account and flatten it into the internal format
		var acc types.StateAccount
		if err := rlp.DecodeBytes(val, &acc); err != nil {
			log.Crit("Invalid account encountered during snapshot creation", "err", err)
		}
		// If the account is not yet in progress, write it out
		if accMarker == nil || !bytes.Equal(account[:], accMarker) {
			dataLen := len(val) // Approximate size, saves us a round of RLP-encoding
			if !write {
				if bytes.Equal(acc.CodeHash, types.EmptyCodeHash[:]) {
					dataLen -= 32
				}
				if acc.Root == types.EmptyRootHash {
					dataLen -= 32
				}
				snapRecoveredAccountMeter.Mark(1)
			} else {
				data := types.SlimAccountRLP(acc)
				dataLen = len(data)
				rawdb.WriteAccountSnapshot(ctx.batch, account, data)
				snapGeneratedAccountMeter.Mark(1)
			}
			ctx.stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
			ctx.stats.accounts++
		}
		// If the snapshot generation arrives here after an interruption, genMarker
		// may go backwards when the previous genMarker consisted of accountHash
		// and storageHash.
		marker := account[:]
		if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
			marker = dl.genMarker[:]
		}
		// If we've exceeded our batch allowance or termination was requested, flush to disk
		if err := dl.checkAndFlush(ctx, marker); err != nil {
			return err
		}
		snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds()) // let's count flush time as well

		// If the iterated account is a contract, run a further loop to verify
		// or regenerate the contract storage.
		if acc.Root == types.EmptyRootHash {
			ctx.removeStorageAt(account)
		} else {
			var storeMarker []byte
			if accMarker != nil && bytes.Equal(account[:], accMarker) && len(dl.genMarker) > common.HashLength {
				storeMarker = dl.genMarker[common.HashLength:]
			}
			if err := generateStorages(ctx, dl, dl.root, account, acc.Root, storeMarker); err != nil {
				return err
			}
		}
		// Some account was processed, clear the marker
		accMarker = nil
		return nil
	}
	// Always reset the initial account range to 1 whenever recovering from an
	// interruption. TODO(rjl493456442) can we remove it?
	var accountRange = accountCheckRange
	if len(accMarker) > 0 {
		accountRange = 1
	}
	origin := common.CopyBytes(accMarker)
	for {
		id := trie.StateTrieID(dl.root)
		exhausted, last, err := dl.generateRange(ctx, id, rawdb.SnapshotAccountPrefix, snapAccount, origin, accountRange, onAccount, types.FullAccountRLP)
		if err != nil {
			return err // The procedure was aborted, either by an external signal or an internal error.
		}
		origin = increaseKey(last)

		// Last step, clean up the storages after the last account. All the
		// remaining storages should be treated as dangling.
		if origin == nil || exhausted {
			ctx.removeStorageLeft()
			break
		}
		accountRange = accountCheckRange
	}
	return nil
}
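
// Taken together, removeStorageBefore, removeStorageAt and removeStorageLeft
// ensure that every storage snapshot entry ends up with a matching account:
// slots belonging to account hashes below the one currently being processed,
// slots of deleted or storage-less accounts, and slots trailing the last
// account are all wiped as dangling.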

// generate is a background thread that iterates over the state and storage tries,
// constructing the state snapshot. All the arguments are purely for statistics
// gathering and logging, since the method surfs the blocks as they arrive, often
// being restarted.
func (dl *diskLayer) generate(stats *generatorStats) {
	var (
		accMarker []byte
		abort     chan *generatorStats
	)
	if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
		accMarker = dl.genMarker[:common.HashLength]
	}
	stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)

	// Initialize the global generator context. The snapshot iterators are
	// opened at the interrupted position, under the assumption that all the
	// snapshot data before the marker was generated correctly. Even if the
	// snapshot data is updated during the interruption (before or at the
	// marker), the assumption still holds.
	// The account or storage slot at the interruption point will be processed
	// twice by the generator (it was already processed in the last run), but
	// that's fine.
	ctx := newGeneratorContext(stats, dl.diskdb, accMarker, dl.genMarker)
	defer ctx.close()

	if err := generateAccounts(ctx, dl, accMarker); err != nil {
		// Extract the received interruption signal if it exists
		if aerr, ok := err.(*abortErr); ok {
			abort = aerr.abort
		}
		// Aborted by an internal error, wait for the signal
		if abort == nil {
			abort = <-dl.genAbort
		}
		abort <- stats
		return
	}
	// Snapshot fully generated, set the marker to nil.
	// Note that even if there is nothing to commit, the generator is persisted
	// anyway to mark the snapshot as complete.
	journalProgress(ctx.batch, nil, stats)
	if err := ctx.batch.Write(); err != nil {
		log.Error("Failed to flush batch", "err", err)

		abort = <-dl.genAbort
		abort <- stats
		return
	}
	ctx.batch.Reset()

	log.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
		"storage", stats.storage, "dangling", stats.dangling, "elapsed", common.PrettyDuration(time.Since(stats.start)))

	dl.lock.Lock()
	dl.genMarker = nil
	close(dl.genPending)
	dl.lock.Unlock()

	// Someone will be looking for us, wait it out
	abort = <-dl.genAbort
	abort <- nil
}

// increaseKey increments the input key by one, treating it as a big-endian
// integer (e.g. 0x01ff becomes 0x0200); note that the key is modified in place.
// Nil is returned if the addition overflows, i.e. the key was all 0xff bytes.
func increaseKey(key []byte) []byte {
	for i := len(key) - 1; i >= 0; i-- {
		key[i]++
		if key[i] != 0x0 {
			return key
		}
	}
	return nil
}

// abortErr wraps the interruption signal received to indicate that the
// generation was aborted by an external process.
type abortErr struct {
	abort chan *generatorStats
}

func newAbortErr(abort chan *generatorStats) error {
	return &abortErr{abort: abort}
}

func (err *abortErr) Error() string {
	return "aborted"
}