github.com/MetalBlockchain/subnet-evm@v0.4.9/core/rawdb/accessors_state_sync.go

// (c) 2022, Ava Labs, Inc. All rights reserved.
// See the file LICENSE for licensing terms.

package rawdb

import (
	"encoding/binary"

	"github.com/MetalBlockchain/metalgo/utils/wrappers"
	"github.com/MetalBlockchain/subnet-evm/ethdb"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/log"
)

// ReadSyncRoot reads the root corresponding to the main trie of an in-progress
// sync and returns common.Hash{} if no in-progress sync was found.
func ReadSyncRoot(db ethdb.KeyValueReader) (common.Hash, error) {
	has, err := db.Has(syncRootKey)
	if err != nil || !has {
		return common.Hash{}, err
	}
	root, err := db.Get(syncRootKey)
	if err != nil {
		return common.Hash{}, err
	}
	return common.BytesToHash(root), nil
}

// WriteSyncRoot writes root as the root of the main trie of the in-progress sync.
func WriteSyncRoot(db ethdb.KeyValueWriter, root common.Hash) error {
	return db.Put(syncRootKey, root[:])
}

// AddCodeToFetch adds a marker that we need to fetch the code for [hash].
func AddCodeToFetch(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Put(codeToFetchKey(hash), nil); err != nil {
		log.Crit("Failed to put code to fetch", "codeHash", hash, "err", err)
	}
}

// DeleteCodeToFetch removes the marker that the code corresponding to [hash] needs to be fetched.
func DeleteCodeToFetch(db ethdb.KeyValueWriter, hash common.Hash) {
	if err := db.Delete(codeToFetchKey(hash)); err != nil {
		log.Crit("Failed to delete code to fetch", "codeHash", hash, "err", err)
	}
}

// NewCodeToFetchIterator returns a KeyLength iterator over all code
// hashes that are pending syncing. It is the caller's responsibility to
// unpack the key and call Release on the returned iterator.
func NewCodeToFetchIterator(db ethdb.Iteratee) ethdb.Iterator {
	return NewKeyLengthIterator(
		db.NewIterator(CodeToFetchPrefix, nil),
		codeToFetchKeyLength,
	)
}

func codeToFetchKey(hash common.Hash) []byte {
	codeToFetchKey := make([]byte, codeToFetchKeyLength)
	copy(codeToFetchKey, CodeToFetchPrefix)
	copy(codeToFetchKey[len(CodeToFetchPrefix):], hash[:])
	return codeToFetchKey
}

// NewSyncSegmentsIterator returns a KeyLength iterator over all trie segments
// added for root. It is the caller's responsibility to unpack the key and call
// Release on the returned iterator.
func NewSyncSegmentsIterator(db ethdb.Iteratee, root common.Hash) ethdb.Iterator {
	segmentsPrefix := make([]byte, len(syncSegmentsPrefix)+common.HashLength)
	copy(segmentsPrefix, syncSegmentsPrefix)
	copy(segmentsPrefix[len(syncSegmentsPrefix):], root[:])

	return NewKeyLengthIterator(
		db.NewIterator(segmentsPrefix, nil),
		syncSegmentsKeyLength,
	)
}

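// fetchPendingCodeHashes is an illustrative sketch (not part of the original
// file) showing how a caller might collect all code hashes still pending
// syncing. It relies only on NewCodeToFetchIterator and the key layout used
// by codeToFetchKey: CodeToFetchPrefix followed by the 32-byte code hash.
// The function name is hypothetical.
func fetchPendingCodeHashes(db ethdb.Iteratee) ([]common.Hash, error) {
	it := NewCodeToFetchIterator(db)
	defer it.Release()

	hashes := make([]common.Hash, 0)
	for it.Next() {
		// Unpack the key by skipping the prefix, mirroring codeToFetchKey.
		hashes = append(hashes, common.BytesToHash(it.Key()[len(CodeToFetchPrefix):]))
	}
	return hashes, it.Error()
}
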
// WriteSyncSegment adds a trie segment for root at the given start position.
func WriteSyncSegment(db ethdb.KeyValueWriter, root common.Hash, start []byte) error {
	return db.Put(packSyncSegmentKey(root, start), []byte{0x01})
}

// ClearSyncSegments removes segment markers for root from db.
func ClearSyncSegments(db ethdb.KeyValueStore, root common.Hash) error {
	segmentsPrefix := make([]byte, len(syncSegmentsPrefix)+common.HashLength)
	copy(segmentsPrefix, syncSegmentsPrefix)
	copy(segmentsPrefix[len(syncSegmentsPrefix):], root[:])

	return ClearPrefix(db, segmentsPrefix)
}

// ClearAllSyncSegments removes all segment markers from db.
func ClearAllSyncSegments(db ethdb.KeyValueStore) error {
	return ClearPrefix(db, syncSegmentsPrefix)
}

// UnpackSyncSegmentKey returns the root and start position for a trie segment
// key returned from NewSyncSegmentsIterator.
func UnpackSyncSegmentKey(keyBytes []byte) (common.Hash, []byte) {
	keyBytes = keyBytes[len(syncSegmentsPrefix):] // skip prefix
	root := common.BytesToHash(keyBytes[:common.HashLength])
	start := keyBytes[common.HashLength:]
	return root, start
}

// packSyncSegmentKey packs root and start into a key for storage in db.
func packSyncSegmentKey(root common.Hash, start []byte) []byte {
	bytes := make([]byte, len(syncSegmentsPrefix)+common.HashLength+len(start))
	copy(bytes, syncSegmentsPrefix)
	copy(bytes[len(syncSegmentsPrefix):], root[:])
	copy(bytes[len(syncSegmentsPrefix)+common.HashLength:], start)
	return bytes
}

// NewSyncStorageTriesIterator returns a KeyLength iterator over all storage tries
// added for syncing (beginning at seek). It is the caller's responsibility to unpack
// the key and call Release on the returned iterator.
func NewSyncStorageTriesIterator(db ethdb.Iteratee, seek []byte) ethdb.Iterator {
	return NewKeyLengthIterator(db.NewIterator(syncStorageTriesPrefix, seek), syncStorageTriesKeyLength)
}

// WriteSyncStorageTrie adds a storage trie for account (with the given root) to be synced.
func WriteSyncStorageTrie(db ethdb.KeyValueWriter, root common.Hash, account common.Hash) error {
	return db.Put(packSyncStorageTrieKey(root, account), []byte{0x01})
}

// ClearSyncStorageTrie removes all storage trie accounts (with the given root) from db.
// Intended for use when the trie with root has completed syncing.
func ClearSyncStorageTrie(db ethdb.KeyValueStore, root common.Hash) error {
	accountsPrefix := make([]byte, len(syncStorageTriesPrefix)+common.HashLength)
	copy(accountsPrefix, syncStorageTriesPrefix)
	copy(accountsPrefix[len(syncStorageTriesPrefix):], root[:])
	return ClearPrefix(db, accountsPrefix)
}

// ClearAllSyncStorageTries removes all storage tries added for syncing from db.
func ClearAllSyncStorageTries(db ethdb.KeyValueStore) error {
	return ClearPrefix(db, syncStorageTriesPrefix)
}

// UnpackSyncStorageTrieKey returns the root and account for a storage trie
// key returned from NewSyncStorageTriesIterator.
func UnpackSyncStorageTrieKey(keyBytes []byte) (common.Hash, common.Hash) {
	keyBytes = keyBytes[len(syncStorageTriesPrefix):] // skip prefix
	root := common.BytesToHash(keyBytes[:common.HashLength])
	account := common.BytesToHash(keyBytes[common.HashLength:])
	return root, account
}

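// storageTriesToSync is an illustrative sketch (not part of the original file)
// showing how a caller might resume iteration of the storage tries pending
// sync from an arbitrary seek position and unpack each key into its
// (root, account) pair via UnpackSyncStorageTrieKey. The function name is
// hypothetical.
func storageTriesToSync(db ethdb.Iteratee, seek []byte) (map[common.Hash][]common.Hash, error) {
	it := NewSyncStorageTriesIterator(db, seek)
	defer it.Release()

	// Group the accounts that share a storage trie under that trie's root.
	tries := make(map[common.Hash][]common.Hash)
	for it.Next() {
		root, account := UnpackSyncStorageTrieKey(it.Key())
		tries[root] = append(tries[root], account)
	}
	return tries, it.Error()
}
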
// packSyncStorageTrieKey packs root and account into a key for storage in db.
func packSyncStorageTrieKey(root common.Hash, account common.Hash) []byte {
	bytes := make([]byte, 0, syncStorageTriesKeyLength)
	bytes = append(bytes, syncStorageTriesPrefix...)
	bytes = append(bytes, root[:]...)
	bytes = append(bytes, account[:]...)
	return bytes
}

// WriteSyncPerformed logs an entry in [db] indicating the VM state synced to [blockNumber].
func WriteSyncPerformed(db ethdb.KeyValueWriter, blockNumber uint64) error {
	syncPerformedPrefixLen := len(syncPerformedPrefix)
	bytes := make([]byte, syncPerformedPrefixLen+wrappers.LongLen)
	copy(bytes[:syncPerformedPrefixLen], syncPerformedPrefix)
	binary.BigEndian.PutUint64(bytes[syncPerformedPrefixLen:], blockNumber)
	return db.Put(bytes, []byte{0x01})
}

// NewSyncPerformedIterator returns an iterator over all block numbers the VM
// has state synced to.
func NewSyncPerformedIterator(db ethdb.Iteratee) ethdb.Iterator {
	return NewKeyLengthIterator(db.NewIterator(syncPerformedPrefix, nil), syncPerformedKeyLength)
}

// UnpackSyncPerformedKey returns the block number encoded in a key returned
// from NewSyncPerformedIterator.
func UnpackSyncPerformedKey(key []byte) uint64 {
	return binary.BigEndian.Uint64(key[len(syncPerformedPrefix):])
}

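// latestSyncPerformed is an illustrative sketch (not part of the original
// file) showing how a caller might recover the highest block number recorded
// by WriteSyncPerformed. Because the block number is big-endian encoded and
// the iterator walks keys in ascending byte order, the last entry visited
// holds the largest block number. The function name is hypothetical.
func latestSyncPerformed(db ethdb.Iteratee) (uint64, bool, error) {
	it := NewSyncPerformedIterator(db)
	defer it.Release()

	var (
		latest uint64
		found  bool
	)
	for it.Next() {
		latest = UnpackSyncPerformedKey(it.Key())
		found = true
	}
	return latest, found, it.Error()
}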