github.com/klaytn/klaytn@v1.10.2/snapshot/generate.go

// Modifications Copyright 2021 The klaytn Authors
// Copyright 2019 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
//
// This file is derived from core/state/snapshot/generate.go (2021/10/21).
// Modified and improved for the klaytn development.

package snapshot

import (
	"bytes"
	"encoding/binary"
	"errors"
	"fmt"
	"math"
	"time"

	"github.com/klaytn/klaytn/blockchain/types/account"

	"github.com/VictoriaMetrics/fastcache"
	"github.com/klaytn/klaytn/common"
	"github.com/klaytn/klaytn/common/hexutil"
	"github.com/klaytn/klaytn/rlp"
	"github.com/klaytn/klaytn/storage/database"
	"github.com/klaytn/klaytn/storage/statedb"
	"github.com/rcrowley/go-metrics"
)

var (
	// accountCheckRange is the upper limit of the number of accounts involved in
	// each range check. This is a value estimated based on experience. If this
	// value is too large, the failure rate of range proving will increase. Conversely,
	// if the value is too small, the efficiency of the state recovery will decrease.
	accountCheckRange = 128

	// storageCheckRange is the upper limit of the number of storage slots involved
	// in each range check. This is a value estimated based on experience. If this
	// value is too large, the failure rate of range proving will increase. Conversely,
	// if the value is too small, the efficiency of the state recovery will decrease.
	storageCheckRange = 1024

	// errMissingTrie is returned if the target trie is missing while the generation
	// is running. In this case the generation is aborted and waits for a new signal.
	errMissingTrie = errors.New("missing trie")
)

var (
	snapGeneratedAccountMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/account/generated", nil)
	snapRecoveredAccountMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/account/recovered", nil)
	snapWipedAccountMeter         = metrics.NewRegisteredMeter("state/snapshot/generation/account/wiped", nil)
	snapMissallAccountMeter       = metrics.NewRegisteredMeter("state/snapshot/generation/account/missall", nil)
	snapGeneratedStorageMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/storage/generated", nil)
	snapRecoveredStorageMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/storage/recovered", nil)
	snapWipedStorageMeter         = metrics.NewRegisteredMeter("state/snapshot/generation/storage/wiped", nil)
	snapMissallStorageMeter       = metrics.NewRegisteredMeter("state/snapshot/generation/storage/missall", nil)
	snapSuccessfulRangeProofMeter = metrics.NewRegisteredMeter("state/snapshot/generation/proof/success", nil)
	snapFailedRangeProofMeter     = metrics.NewRegisteredMeter("state/snapshot/generation/proof/failure", nil)

	// snapAccountProveCounter measures time spent on the account proving
	snapAccountProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/prove", nil)
	// snapAccountTrieReadCounter measures time spent on the account trie iteration
	snapAccountTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/trieread", nil)
	// snapAccountSnapReadCounter measures time spent on the snapshot account iteration
	snapAccountSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/snapread", nil)
	// snapAccountWriteCounter measures time spent on writing/updating/deleting accounts
	snapAccountWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/account/write", nil)
	// snapStorageProveCounter measures time spent on storage proving
	snapStorageProveCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/prove", nil)
	// snapStorageTrieReadCounter measures time spent on the storage trie iteration
	snapStorageTrieReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/trieread", nil)
	// snapStorageSnapReadCounter measures time spent on the snapshot storage iteration
	snapStorageSnapReadCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/snapread", nil)
	// snapStorageWriteCounter measures time spent on writing/updating/deleting storages
	snapStorageWriteCounter = metrics.NewRegisteredCounter("state/snapshot/generation/duration/storage/write", nil)
)

// generatorStats is a collection of statistics gathered by the snapshot generator
// for logging purposes.
type generatorStats struct {
	origin   uint64             // Origin prefix where generation started
	start    time.Time          // Timestamp when generation started
	accounts uint64             // Number of accounts indexed (generated or recovered)
	slots    uint64             // Number of storage slots indexed (generated or recovered)
	storage  common.StorageSize // Total account and storage slot size (generation or recovery)
}

// Log creates a contextual log with the given message and the context pulled
// from the internally maintained statistics.
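// If a generation marker is present, Log also derives a rough ETA by treating the
// first 8 bytes of the marker as a big-endian position in the 2^64 hash space:
// speed = progress/elapsed, eta = remaining/speed.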
func (gs *generatorStats) Log(msg string, root common.Hash, marker []byte) {
	var ctx []interface{}
	if root != (common.Hash{}) {
		ctx = append(ctx, []interface{}{"root", root}...)
	}
	// Figure out whether we're after or within an account
	switch len(marker) {
	case common.HashLength:
		ctx = append(ctx, []interface{}{"at", common.BytesToHash(marker)}...)
	case 2 * common.HashLength:
		ctx = append(ctx, []interface{}{
			"in", common.BytesToHash(marker[:common.HashLength]),
			"at", common.BytesToHash(marker[common.HashLength:]),
		}...)
	}
	// Add the usual measurements
	ctx = append(ctx, []interface{}{
		"accounts", gs.accounts,
		"slots", gs.slots,
		"storage", gs.storage,
		"elapsed", common.PrettyDuration(time.Since(gs.start)),
	}...)
	// Calculate the estimated indexing time based on current stats
	if len(marker) > 0 {
		if done := binary.BigEndian.Uint64(marker[:8]) - gs.origin; done > 0 {
			left := math.MaxUint64 - binary.BigEndian.Uint64(marker[:8])

			speed := done/uint64(time.Since(gs.start)/time.Millisecond+1) + 1 // +1s to avoid division by zero
			ctx = append(ctx, []interface{}{
				"eta", common.PrettyDuration(time.Duration(left/speed) * time.Millisecond),
			}...)
		}
	}
	logger.Info(msg, ctx...)
}

// generateSnapshot regenerates a brand new snapshot based on an existing state
// database and head block asynchronously. The snapshot is returned immediately
// and generation is continued in the background until done.
func generateSnapshot(db database.DBManager, triedb *statedb.Database, cache int, root common.Hash) *diskLayer {
	// Create a new disk layer with an initialized state marker at zero
	var (
		stats     = &generatorStats{start: time.Now()}
		batch     = db.NewSnapshotDBBatch()
		genMarker = []byte{} // Initialized but empty!
	)

	batch.WriteSnapshotRoot(root)
	journalProgress(batch, genMarker, stats)
	if err := batch.Write(); err != nil {
		logger.Crit("Failed to write initialized state marker", "err", err)
	}
	base := &diskLayer{
		diskdb:     db,
		triedb:     triedb,
		root:       root,
		cache:      fastcache.New(cache * 1024 * 1024),
		genMarker:  genMarker,
		genPending: make(chan struct{}),
		genAbort:   make(chan chan *generatorStats),
	}
	go base.generate(stats)
	logger.Debug("Start snapshot generation", "root", root)
	return base
}

// journalProgress persists the generator stats into the database to resume later.
func journalProgress(db database.KeyValueWriter, marker []byte, stats *generatorStats) {
	// Write out the generator marker. Note it's a standalone disk layer generator
	// which is not mixed with journal. It's ok if the generator is persisted while
	// journal is not.
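	// The journalled marker encodes the generator position: nil means generation is
	// complete, an empty non-nil marker means nothing has been processed yet, a
	// 32-byte marker is the last processed account hash, and a 64-byte marker is the
	// last processed account hash followed by the last processed storage slot hash.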
	entry := journalGenerator{
		Done:   marker == nil,
		Marker: marker,
	}
	if stats != nil {
		entry.Accounts = stats.accounts
		entry.Slots = stats.slots
		entry.Storage = uint64(stats.storage)
	}
	blob, err := rlp.EncodeToBytes(entry)
	if err != nil {
		panic(err) // Cannot happen, here to catch dev errors
	}
	var logstr string
	switch {
	case marker == nil:
		logstr = "done"
	case bytes.Equal(marker, []byte{}):
		logstr = "empty"
	case len(marker) == common.HashLength:
		logstr = fmt.Sprintf("%#x", marker)
	default:
		logstr = fmt.Sprintf("%#x:%#x", marker[:common.HashLength], marker[common.HashLength:])
	}
	logger.Debug("Journalled generator progress", "progress", logstr)

	// TODO-Klaytn-Snapshot refactor the following db write
	if err := db.Put(database.SnapshotGeneratorKey, blob); err != nil {
		logger.Crit("Failed to store snapshot generator", "err", err)
	}
}

// proofResult contains the output of range proving which can be used
// for further processing regardless of whether it is successful or not.
type proofResult struct {
	keys     [][]byte      // The key set of all elements being iterated, even if proving failed
	vals     [][]byte      // The val set of all elements being iterated, even if proving failed
	diskMore bool          // Set when the database has extra snapshot states since last iteration
	trieMore bool          // Set when the trie has extra snapshot states (only meaningful for successful proving)
	proofErr error         // Indicator whether the given state range is valid or not
	tr       *statedb.Trie // The trie, in case the trie was resolved by the prover (may be nil)
}

// valid returns the indicator that the range proof is successful or not.
func (result *proofResult) valid() bool {
	return result.proofErr == nil
}

// last returns the last verified element key regardless of whether the range proof
// succeeded or not. Nil is returned if nothing was involved in the proving.
func (result *proofResult) last() []byte {
	var last []byte
	if len(result.keys) > 0 {
		last = result.keys[len(result.keys)-1]
	}
	return last
}

// forEach iterates all the visited elements and applies the given callback on them.
// The iteration is aborted if the callback returns a non-nil error.
func (result *proofResult) forEach(callback func(key []byte, val []byte) error) error {
	for i := 0; i < len(result.keys); i++ {
		key, val := result.keys[i], result.vals[i]
		if err := callback(key, val); err != nil {
			return err
		}
	}
	return nil
}

// proveRange proves that the snapshot segment with a particular prefix is "valid".
// The iteration start point will be assigned if the iterator is restored from
// the last interruption. Max will be assigned in order to limit the maximum
// amount of data involved in each iteration.
//
// The proof result will be returned if the range proving is finished, otherwise
// the error will be returned to abort the entire procedure.
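// The 'prefix' is the database key prefix selecting the flat snapshot entries to
// iterate (accounts, or the storage slots of one account), 'origin' is the hash to
// resume iteration from, and 'valueConvertFn', if non-nil, re-encodes each stored
// value before it takes part in the proof.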
func (dl *diskLayer) proveRange(stats *generatorStats, root common.Hash, prefix []byte, kind string, origin []byte, max int, valueConvertFn func([]byte) ([]byte, error)) (*proofResult, error) {
	var (
		keys     [][]byte
		vals     [][]byte
		proof    = database.NewMemoryDBManager()
		diskMore = false
	)

	iter := dl.diskdb.NewSnapshotDBIterator(prefix, origin)
	defer iter.Release()

	start := time.Now()
	for iter.Next() {
		key := iter.Key()
		if len(key) != len(prefix)+common.HashLength {
			continue
		}
		if len(keys) == max {
			// Break if we've reached the max size, and signal that we're not
			// done yet.
			diskMore = true
			break
		}
		keys = append(keys, common.CopyBytes(key[len(prefix):]))

		if valueConvertFn == nil {
			vals = append(vals, common.CopyBytes(iter.Value()))
		} else {
			val, err := valueConvertFn(iter.Value())
			if err != nil {
				// Special case, the state data is corrupted (invalid slim-format account),
				// don't abort the entire procedure directly. Instead, let the fallback
				// generation heal the invalid data.
				//
				// Here append the original value to ensure that the number of keys and
				// values are the same.
				vals = append(vals, common.CopyBytes(iter.Value()))
				logger.Error("Failed to convert account state data", "err", err)
			} else {
				vals = append(vals, val)
			}
		}
	}
	// Update metrics for database iteration and merkle proving
	if kind == "storage" {
		snapStorageSnapReadCounter.Inc(time.Since(start).Nanoseconds())
	} else {
		snapAccountSnapReadCounter.Inc(time.Since(start).Nanoseconds())
	}
	defer func(start time.Time) {
		if kind == "storage" {
			snapStorageProveCounter.Inc(time.Since(start).Nanoseconds())
		} else {
			snapAccountProveCounter.Inc(time.Since(start).Nanoseconds())
		}
	}(time.Now())

	// The snap state is exhausted, pass the entire key/val set for verification
	if origin == nil && !diskMore {
		var (
			dbm    = database.NewMemoryDBManager()
			triedb = statedb.NewDatabase(dbm)
		)
		tr, _ := statedb.NewTrie(common.Hash{}, triedb)
		for i, key := range keys {
			tr.TryUpdate(key, vals[i])
		}
		if gotRoot := tr.Hash(); gotRoot != root {
			return &proofResult{
				keys:     keys,
				vals:     vals,
				proofErr: fmt.Errorf("wrong root: have %#x want %#x", gotRoot, root),
			}, nil
		}
		return &proofResult{keys: keys, vals: vals}, nil
	}
	// Snap state is chunked, generate edge proofs for verification.
	tr, err := statedb.NewTrie(root, dl.triedb)
	if err != nil {
		stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
		return nil, errMissingTrie
	}
	// Firstly find out the key of the last iterated element.
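	// The last key bounds the right edge of the range proof below: the left edge is
	// always proven at 'origin', while the right edge is only proven when the range
	// is non-empty.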
	var last []byte
	if len(keys) > 0 {
		last = keys[len(keys)-1]
	}
	// Generate the Merkle proofs for the first and last element
	if origin == nil {
		origin = common.Hash{}.Bytes()
	}
	if err := tr.Prove(origin, 0, proof); err != nil {
		logger.Debug("Failed to prove range", "kind", kind, "origin", origin, "err", err)
		return &proofResult{
			keys:     keys,
			vals:     vals,
			diskMore: diskMore,
			proofErr: err,
			tr:       tr,
		}, nil
	}
	if last != nil {
		if err := tr.Prove(last, 0, proof); err != nil {
			logger.Debug("Failed to prove range", "kind", kind, "last", last, "err", err)
			return &proofResult{
				keys:     keys,
				vals:     vals,
				diskMore: diskMore,
				proofErr: err,
				tr:       tr,
			}, nil
		}
	}
	// Verify the snapshot segment with the range prover, ensuring that all flat states
	// in this range correspond to the merkle trie.
	cont, err := statedb.VerifyRangeProof(root, origin, last, keys, vals, proof)
	return &proofResult{
			keys:     keys,
			vals:     vals,
			diskMore: diskMore,
			trieMore: cont,
			proofErr: err,
			tr:       tr,
		},
		nil
}

// onStateCallback is a function that is called by generateRange, when processing a range of
// accounts or storage slots. For each element, the callback is invoked.
// If 'delete' is true, then this element (and potential slots) needs to be deleted from the snapshot.
// If 'write' is true, then this element needs to be updated with the 'val'.
// If 'write' is false, then this element is already correct, and needs no update. However,
// for accounts, the storage trie of the account needs to be checked.
// The 'val' is the canonical encoding of the value (not the slim format for accounts)
type onStateCallback func(key []byte, val []byte, write bool, delete bool) error

// generateRange generates the state segment with a particular prefix. Generation can
// either verify the correctness of existing state through a range proof and skip
// generation, or iterate the trie to regenerate the state on demand.
func (dl *diskLayer) generateRange(root common.Hash, prefix []byte, kind string, origin []byte, max int, stats *generatorStats, onState onStateCallback, valueConvertFn func([]byte) ([]byte, error)) (bool, []byte, error) {
	// Use the range prover to check the validity of the flat state in the range
	result, err := dl.proveRange(stats, root, prefix, kind, origin, max, valueConvertFn)
	if err != nil {
		return false, nil, err
	}
	last := result.last()

	// Construct contextual logger
	logCtx := []interface{}{"kind", kind, "prefix", hexutil.Encode(prefix)}
	if len(origin) > 0 {
		logCtx = append(logCtx, "origin", hexutil.Encode(origin))
	}
	localLogger := logger.NewWith(logCtx...)

	// The range prover says the range is correct, skip trie iteration
	if result.valid() {
		snapSuccessfulRangeProofMeter.Mark(1)
		localLogger.Trace("Proved state range", "last", hexutil.Encode(last))

		// The verification is passed, process each state with the given
		// callback function.
		// If this state represents a contract, the corresponding storage
		// check will be performed in the callback.
		if err := result.forEach(func(key []byte, val []byte) error { return onState(key, val, false, false) }); err != nil {
			return false, nil, err
		}
		// Only abort the iteration when both database and trie are exhausted
		return !result.diskMore && !result.trieMore, last, nil
	}
	localLogger.Trace("Detected outdated state range", "last", hexutil.Encode(last), "err", result.proofErr)
	snapFailedRangeProofMeter.Mark(1)

	// Special case, the entire trie is missing. In the original trie scheme,
	// all the duplicated subtries will be filtered out (only one copy of the data
	// will be stored). While in the snapshot model, all the storage tries
	// belonging to different contracts will be kept even if they are duplicated.
	// Track it to a certain extent to remove the noise data used for statistics.
	if origin == nil && last == nil {
		meter := snapMissallAccountMeter
		if kind == "storage" {
			meter = snapMissallStorageMeter
		}
		meter.Mark(1)
	}

	// We use the snap data to build up a cache which can be used by the
	// main account trie as a primary lookup when resolving hashes
	var snapNodeCache database.DBManager
	if len(result.keys) > 0 {
		snapNodeCache = database.NewMemoryDBManager()
		snapTrieDb := statedb.NewDatabase(snapNodeCache)
		snapTrie, _ := statedb.NewTrie(common.Hash{}, snapTrieDb)
		for i, key := range result.keys {
			snapTrie.Update(key, result.vals[i])
		}
		root, _ := snapTrie.Commit(nil)
		// TODO-Klaytn update proper block number
		snapTrieDb.Commit(root, false, 0)
	}
	tr := result.tr
	if tr == nil {
		tr, err = statedb.NewTrie(root, dl.triedb)
		if err != nil {
			stats.Log("Trie missing, state snapshotting paused", dl.root, dl.genMarker)
			return false, nil, errMissingTrie
		}
	}

	var (
		trieMore       bool
		nodeIt         = tr.NodeIterator(origin)
		iter           = statedb.NewIterator(nodeIt)
		kvkeys, kvvals = result.keys, result.vals

		// counters
		count     = 0 // number of states delivered by iterator
		created   = 0 // states created from the trie
		updated   = 0 // states updated from the trie
		deleted   = 0 // states not in trie, but were in snapshot
		untouched = 0 // states already correct

		// timers
		start    = time.Now()
		internal time.Duration
	)
	nodeIt.AddResolver(snapNodeCache)
	for iter.Next() {
		if last != nil && bytes.Compare(iter.Key, last) > 0 {
			trieMore = true
			break
		}
		count++
		write := true
		created++
		for len(kvkeys) > 0 {
			if cmp := bytes.Compare(kvkeys[0], iter.Key); cmp < 0 {
				// delete the key
				istart := time.Now()
				if err := onState(kvkeys[0], nil, false, true); err != nil {
					return false, nil, err
				}
				kvkeys = kvkeys[1:]
				kvvals = kvvals[1:]
				deleted++
				internal += time.Since(istart)
				continue
			} else if cmp == 0 {
				// the snapshot key can be overwritten
				created--
				if write = !bytes.Equal(kvvals[0], iter.Value); write {
					updated++
				} else {
					untouched++
				}
				kvkeys = kvkeys[1:]
				kvvals = kvvals[1:]
			}
			break
		}
		istart := time.Now()
		if err := onState(iter.Key, iter.Value, write, false); err != nil {
			return false, nil, err
		}
		internal += time.Since(istart)
	}
	if iter.Err != nil {
		return false, nil, iter.Err
	}
	// Delete all stale snapshot states remaining
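	// Any keys left in the flat snapshot at this point have no counterpart in the
	// trie range that was just iterated, so they are removed below.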
	istart := time.Now()
	for _, key := range kvkeys {
		if err := onState(key, nil, false, true); err != nil {
			return false, nil, err
		}
		deleted += 1
	}
	internal += time.Since(istart)

	// Update metrics for counting trie iteration
	if kind == "storage" {
		snapStorageTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
	} else {
		snapAccountTrieReadCounter.Inc((time.Since(start) - internal).Nanoseconds())
	}
	localLogger.Debug("Regenerated state range", "root", root, "last", hexutil.Encode(last),
		"count", count, "created", created, "updated", updated, "untouched", untouched, "deleted", deleted)

	// If there are either more trie items, or there are more snap items
	// (in the next segment), then we need to keep working
	return !trieMore && !result.diskMore, last, nil
}

// generate is a background thread that iterates over the state and storage tries,
// constructing the state snapshot. All the arguments are purely for statistics
// gathering and logging, since the method surfs the blocks as they arrive, often
// being restarted.
func (dl *diskLayer) generate(stats *generatorStats) {
	var (
		accMarker    []byte
		accountRange = accountCheckRange
	)
	if len(dl.genMarker) > 0 { // []byte{} is the start, use nil for that
		// Always reset the initial account range to 1
		// whenever recovering from an interruption.
		accMarker, accountRange = dl.genMarker[:common.HashLength], 1
	}

	var (
		batch     = dl.diskdb.NewSnapshotDBBatch()
		logged    = time.Now()
		accOrigin = common.CopyBytes(accMarker)
		abort     chan *generatorStats
	)
	stats.Log("Resuming state snapshot generation", dl.root, dl.genMarker)

	checkAndFlush := func(currentLocation []byte) error {
		select {
		case abort = <-dl.genAbort:
		default:
		}
		if batch.ValueSize() > database.IdealBatchSize || abort != nil {
			if bytes.Compare(currentLocation, dl.genMarker) < 0 {
				logger.Error("Snapshot generator went backwards",
					"currentLocation", fmt.Sprintf("%x", currentLocation),
					"genMarker", fmt.Sprintf("%x", dl.genMarker))
			}

			// Flush out the batch anyway no matter whether it's empty or not.
			// It's possible that all the states are recovered and the
			// generation indeed makes progress.
			journalProgress(batch, currentLocation, stats)

			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()

			dl.lock.Lock()
			dl.genMarker = currentLocation
			dl.lock.Unlock()

			if abort != nil {
				stats.Log("Aborting state snapshot generation", dl.root, currentLocation)
				return errors.New("aborted")
			}
		}
		if time.Since(logged) > 8*time.Second {
			stats.Log("Generating state snapshot", dl.root, currentLocation)
			logged = time.Now()
		}
		return nil
	}

	onAccount := func(key []byte, val []byte, write bool, delete bool) error {
		var (
			start       = time.Now()
			accountHash = common.BytesToHash(key)
		)
		if delete {
			batch.DeleteAccountSnapshot(accountHash)
			snapWipedAccountMeter.Mark(1)

			// Ensure that any previous snapshot storage values are cleared
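			// Storage snapshot keys are the storage prefix followed by the account hash
			// and the slot hash, hence the expected key length of prefix + 2*HashLength.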
			prefix := append(database.SnapshotStoragePrefix, accountHash.Bytes()...)
			keyLen := len(database.SnapshotStoragePrefix) + 2*common.HashLength
			if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, snapWipedStorageMeter, false); err != nil {
				return err
			}
			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
			return nil
		}
		serializer := account.NewAccountSerializer()
		if err := rlp.DecodeBytes(val, serializer); err != nil {
			logger.Crit("Invalid account encountered during snapshot creation", "err", err)
		}
		acc := serializer.GetAccount()
		// If the account is not yet in-progress, write it out
		if accMarker == nil || !bytes.Equal(accountHash[:], accMarker) {
			dataLen := len(val) // Approximate size, saves us a round of RLP-encoding
			if !write {
				snapRecoveredAccountMeter.Mark(1)
			} else {
				batch.WriteAccountSnapshot(accountHash, val)
				snapGeneratedAccountMeter.Mark(1)
			}
			stats.storage += common.StorageSize(1 + common.HashLength + dataLen)
			stats.accounts++
		}
		marker := accountHash[:]
		// If the snap generation reaches here after an interruption, genMarker may go backward
		// when the last genMarker is composed of both an accountHash and a storageHash
		if accMarker != nil && bytes.Equal(marker, accMarker) && len(dl.genMarker) > common.HashLength {
			marker = dl.genMarker[:]
		}
		// If we've exceeded our batch allowance or termination was requested, flush to disk
		if err := checkAndFlush(marker); err != nil {
			return err
		}
		// If the iterated account is a contract, create a further loop to
		// verify or regenerate the contract storage.
		contractAcc, ok := acc.(*account.SmartContractAccount)
		if !ok {
			// If the root is empty, we still need to ensure that any previous snapshot
			// storage values are cleared
			// TODO: investigate if this can be avoided, this will be very costly since it
			// affects every single EOA account
			// - Perhaps we can avoid it where codeHash is emptyCode
			prefix := append(database.SnapshotStoragePrefix, accountHash.Bytes()...)
			keyLen := len(database.SnapshotStoragePrefix) + 2*common.HashLength
			if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, snapWipedStorageMeter, false); err != nil {
				return err
			}
			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())

			accMarker = nil
			return nil
		}

		rootHash := contractAcc.GetStorageRoot()
		if rootHash == emptyRoot {
			prefix := append(database.SnapshotStoragePrefix, accountHash.Bytes()...)
			keyLen := len(database.SnapshotStoragePrefix) + 2*common.HashLength
			if err := wipeKeyRange(dl.diskdb, "storage", prefix, nil, nil, keyLen, snapWipedStorageMeter, false); err != nil {
				return err
			}
			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())
		} else {
			snapAccountWriteCounter.Inc(time.Since(start).Nanoseconds())

			var storeMarker []byte
			if accMarker != nil && bytes.Equal(accountHash[:], accMarker) && len(dl.genMarker) > common.HashLength {
				storeMarker = dl.genMarker[common.HashLength:]
			}
			onStorage := func(key []byte, val []byte, write bool, delete bool) error {
				defer func(start time.Time) {
					snapStorageWriteCounter.Inc(time.Since(start).Nanoseconds())
				}(time.Now())

				if delete {
					batch.DeleteStorageSnapshot(accountHash, common.BytesToHash(key))
					snapWipedStorageMeter.Mark(1)
					return nil
				}
				if write {
					batch.WriteStorageSnapshot(accountHash, common.BytesToHash(key), val)
					snapGeneratedStorageMeter.Mark(1)
				} else {
					snapRecoveredStorageMeter.Mark(1)
				}
				stats.storage += common.StorageSize(1 + 2*common.HashLength + len(val))
				stats.slots++

				// If we've exceeded our batch allowance or termination was requested, flush to disk
				if err := checkAndFlush(append(accountHash[:], key...)); err != nil {
					return err
				}
				return nil
			}
			storeOrigin := common.CopyBytes(storeMarker)
			for {
				exhausted, last, err := dl.generateRange(rootHash, append(database.SnapshotStoragePrefix, accountHash.Bytes()...), "storage", storeOrigin, storageCheckRange, stats, onStorage, nil)
				if err != nil {
					return err
				}
				if exhausted {
					break
				}
				if storeOrigin = increaseKey(last); storeOrigin == nil {
					break // special case, the last is 0xffffffff...fff
				}
			}
		}
		// Some account processed, unmark the marker
		accMarker = nil
		return nil
	}

	// Global loop for regenerating the entire state trie + all layered storage tries.
	for {
		exhausted, last, err := dl.generateRange(dl.root, database.SnapshotAccountPrefix, "account", accOrigin, accountRange, stats, onAccount, nil)
		// The procedure is aborted, either by external signal or internal error
		if err != nil {
			if abort == nil { // aborted by internal error, wait for the signal
				abort = <-dl.genAbort
			}
			abort <- stats
			return
		}
		// Abort the procedure if the entire snapshot is generated
		if exhausted {
			break
		}
		if accOrigin = increaseKey(last); accOrigin == nil {
			break // special case, the last is 0xffffffff...fff
		}
		accountRange = accountCheckRange
	}
	// Snapshot fully generated, set the marker to nil.
	// Note that even if there is nothing to commit, persist the
	// generator anyway to mark the snapshot as complete.
	journalProgress(batch, nil, stats)
	if err := batch.Write(); err != nil {
		logger.Error("Failed to flush batch", "err", err)

		abort = <-dl.genAbort
		abort <- stats
		return
	}
	batch.Reset()

	logger.Info("Generated state snapshot", "accounts", stats.accounts, "slots", stats.slots,
		"storage", stats.storage, "elapsed", common.PrettyDuration(time.Since(stats.start)))

	dl.lock.Lock()
	dl.genMarker = nil
	close(dl.genPending)
	dl.lock.Unlock()

	// Someone will be looking for us, wait it out
	abort = <-dl.genAbort
	abort <- nil
}

// increaseKey increases the input key by one. Nil is returned if the entire
// addition operation overflows.
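// For example, a key ending in 0x00ff becomes 0x0100, while a key consisting of
// all 0xff bytes yields nil.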
func increaseKey(key []byte) []byte {
	for i := len(key) - 1; i >= 0; i-- {
		key[i]++
		if key[i] != 0x0 {
			return key
		}
	}
	return nil
}