github.com/ethereum/go-ethereum@v1.16.1/triedb/pathdb/reader.go

// Copyright 2024 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package pathdb

import (
	"errors"
	"fmt"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/common/hexutil"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/ethereum/go-ethereum/triedb/database"
)

// The types of locations where the node is found.
const (
	locDirtyCache = "dirty" // dirty cache
	locCleanCache = "clean" // clean cache
	locDiskLayer  = "disk"  // persistent state
	locDiffLayer  = "diff"  // diff layers
)

// nodeLoc is a helper structure recording the location where a node was
// found, which is useful for debugging purposes.
type nodeLoc struct {
	loc   string
	depth int
}

// string returns the string representation of the node location.
func (loc *nodeLoc) string() string {
	return fmt.Sprintf("loc: %s, depth: %d", loc.loc, loc.depth)
}

// reader implements the database.NodeReader interface, providing the functionality
// to retrieve trie nodes by wrapping the internal state layer.
type reader struct {
	db          *Database
	state       common.Hash
	noHashCheck bool
	layer       layer
}

// Node implements the database.NodeReader interface, retrieving the node with the
// specified node info. Don't modify the returned byte slice, since it's not
// deep-copied and is still referenced by the database.
func (r *reader) Node(owner common.Hash, path []byte, hash common.Hash) ([]byte, error) {
	blob, got, loc, err := r.layer.node(owner, path, 0)
	if err != nil {
		return nil, err
	}
	// Error out if the local one is inconsistent with the target.
	if !r.noHashCheck && got != hash {
		// Location is always available even if the node
		// is not found.
		switch loc.loc {
		case locCleanCache:
			nodeCleanFalseMeter.Mark(1)
		case locDirtyCache:
			nodeDirtyFalseMeter.Mark(1)
		case locDiffLayer:
			nodeDiffFalseMeter.Mark(1)
		case locDiskLayer:
			nodeDiskFalseMeter.Mark(1)
		}
		blobHex := "nil"
		if len(blob) > 0 {
			blobHex = hexutil.Encode(blob)
		}
		log.Error("Unexpected trie node", "location", loc.loc, "owner", owner.Hex(), "path", path, "expect", hash.Hex(), "got", got.Hex(), "blob", blobHex)
		return nil, fmt.Errorf("unexpected node: (%x %v), %x!=%x, %s, blob: %s", owner, path, hash, got, loc.string(), blobHex)
	}
	return blob, nil
}

// AccountRLP directly retrieves the account associated with a particular hash.
// An error will be returned if the read operation exits abnormally, specifically
// if the layer is already stale.
//
// Note:
// - the returned account data is not a copy, please don't modify it
// - no error will be returned if the requested account is not found in the database
func (r *reader) AccountRLP(hash common.Hash) ([]byte, error) {
	l, err := r.db.tree.lookupAccount(hash, r.state)
	if err != nil {
		return nil, err
	}
	// If the located layer is stale, fall back to the slow path to retrieve
	// the account data. This is an edge case where the located layer is the
	// disk layer (e.g., the requested account was not changed in all the diff
	// layers), and it becomes stale within a very short time window.
	//
	// This fallback mechanism is essential: since the traversal starts from
	// the entry point layer and goes down, the staleness of the disk layer does
	// not affect the result unless the entry point layer is also stale.
	blob, err := l.account(hash, 0)
	if errors.Is(err, errSnapshotStale) {
		return r.layer.account(hash, 0)
	}
	return blob, err
}

// Account directly retrieves the account associated with a particular hash in
// the slim data format. An error will be returned if the read operation exits
// abnormally, specifically if the layer is already stale.
//
// Note:
// - the returned account object is safe to modify
// - no error will be returned if the requested account is not found in the database
func (r *reader) Account(hash common.Hash) (*types.SlimAccount, error) {
	blob, err := r.AccountRLP(hash)
	if err != nil {
		return nil, err
	}
	if len(blob) == 0 {
		return nil, nil
	}
	account := new(types.SlimAccount)
	if err := rlp.DecodeBytes(blob, account); err != nil {
		panic(err)
	}
	return account, nil
}

// Storage directly retrieves the storage data associated with a particular hash,
// within a particular account. An error will be returned if the read operation
// exits abnormally, specifically if the layer is already stale.
//
// Note:
// - the returned storage data is not a copy, please don't modify it
// - no error will be returned if the requested slot is not found in the database
func (r *reader) Storage(accountHash, storageHash common.Hash) ([]byte, error) {
	l, err := r.db.tree.lookupStorage(accountHash, storageHash, r.state)
	if err != nil {
		return nil, err
	}
	// If the located layer is stale, fall back to the slow path to retrieve
	// the storage data. This is an edge case where the located layer is the
	// disk layer (e.g., the requested account was not changed in all the diff
	// layers), and it becomes stale within a very short time window.
	//
	// This fallback mechanism is essential: since the traversal starts from
	// the entry point layer and goes down, the staleness of the disk layer does
	// not affect the result unless the entry point layer is also stale.
	blob, err := l.storage(accountHash, storageHash, 0)
	if errors.Is(err, errSnapshotStale) {
		return r.layer.storage(accountHash, storageHash, 0)
	}
	return blob, err
}

// NodeReader retrieves a node reader belonging to the given state root.
func (db *Database) NodeReader(root common.Hash) (database.NodeReader, error) {
	layer := db.tree.get(root)
	if layer == nil {
		return nil, fmt.Errorf("state %#x is not available", root)
	}
	return &reader{
		db:          db,
		state:       root,
		noHashCheck: db.isVerkle,
		layer:       layer,
	}, nil
}

// StateReader returns a reader that allows access to the state data associated
// with the specified state.
func (db *Database) StateReader(root common.Hash) (database.StateReader, error) {
	layer := db.tree.get(root)
	if layer == nil {
		return nil, fmt.Errorf("state %#x is not available", root)
	}
	return &reader{
		db:    db,
		state: root,
		layer: layer,
	}, nil
}

// HistoricalStateReader is a wrapper over the history reader, providing access
// to historical states.
type HistoricalStateReader struct {
	db     *Database
	reader *historyReader
	id     uint64
}

// HistoricReader constructs a reader for accessing the requested historic state.
func (db *Database) HistoricReader(root common.Hash) (*HistoricalStateReader, error) {
	// Bail out if the state history hasn't been fully indexed
	if db.indexer == nil || !db.indexer.inited() {
		return nil, errors.New("state histories haven't been fully indexed yet")
	}
	if db.freezer == nil {
		return nil, errors.New("state histories are not available")
	}
	// States at the current disk layer or above are directly accessible via
	// db.StateReader.
	//
	// States older than the current disk layer (including the disk layer
	// itself) are available through historic state access.
	//
	// Note: the requested state may refer to a stale historic state that has
	// already been pruned. This function does not validate availability, as
	// underlying states may be pruned dynamically. Validity is checked during
	// each actual state retrieval.
	id := rawdb.ReadStateID(db.diskdb, root)
	if id == nil {
		return nil, fmt.Errorf("state %#x is not available", root)
	}
	return &HistoricalStateReader{
		id:     *id,
		db:     db,
		reader: newHistoryReader(db.diskdb, db.freezer),
	}, nil
}
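
// The helper below is an illustrative sketch, not part of the upstream file:
// it shows how a caller might combine HistoricReader with the reader's Storage
// method to fetch a historical slot. The function name and error handling are
// assumptions made purely for demonstration.
func readHistoricStorage(db *Database, root common.Hash, addr common.Address, key common.Hash) ([]byte, error) {
	// Bind a reader to the (possibly already flushed) target state root.
	hr, err := db.HistoricReader(root)
	if err != nil {
		// Histories may not be fully indexed yet, or the root has no state ID.
		return nil, err
	}
	// Historical lookups take the plain address and slot key; hashing is
	// performed internally by the reader.
	return hr.Storage(addr, key)
}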

// AccountRLP directly retrieves the account RLP, in the slim data format,
// associated with a particular address. An error will be returned if the read
// operation exits abnormally, specifically if the layer is already stale.
//
// Note:
// - the returned account is not a copy, please don't modify it.
// - no error will be returned if the requested account is not found in the database.
func (r *HistoricalStateReader) AccountRLP(address common.Address) ([]byte, error) {
	defer func(start time.Time) {
		historicalAccountReadTimer.UpdateSince(start)
	}(time.Now())

	// TODO(rjl493456442): Theoretically, the obtained disk layer could become stale
	// within a very short time window.
	//
	// Reading the account data while holding `db.tree.lock` would resolve this
	// issue, but it would introduce heavy contention on the lock.
	//
	// Let's optimistically assume the situation is very unlikely to happen,
	// and try to define a low-granularity lock if the current approach doesn't
	// work out later.
	dl := r.db.tree.bottom()
	hash := crypto.Keccak256Hash(address.Bytes())
	latest, err := dl.account(hash, 0)
	if err != nil {
		return nil, err
	}
	return r.reader.read(newAccountIdentQuery(address, hash), r.id, dl.stateID(), latest)
}

// Account directly retrieves the account associated with a particular address in
// the slim data format. An error will be returned if the read operation exits
// abnormally, specifically if the layer is already stale.
//
// No error will be returned if the requested account is not found in the database.
func (r *HistoricalStateReader) Account(address common.Address) (*types.SlimAccount, error) {
	blob, err := r.AccountRLP(address)
	if err != nil {
		return nil, err
	}
	if len(blob) == 0 {
		return nil, nil
	}
	account := new(types.SlimAccount)
	if err := rlp.DecodeBytes(blob, account); err != nil {
		panic(err)
	}
	return account, nil
}

// Storage directly retrieves the storage data associated with a particular key,
// within a particular account. An error will be returned if the read operation
// exits abnormally, specifically if the layer is already stale.
//
// Note:
// - the returned storage data is not a copy, please don't modify it.
// - no error will be returned if the requested slot is not found in the database.
func (r *HistoricalStateReader) Storage(address common.Address, key common.Hash) ([]byte, error) {
	defer func(start time.Time) {
		historicalStorageReadTimer.UpdateSince(start)
	}(time.Now())

	// TODO(rjl493456442): Theoretically, the obtained disk layer could become stale
	// within a very short time window.
	//
	// Reading the storage data while holding `db.tree.lock` would resolve this
	// issue, but it would introduce heavy contention on the lock.
	//
	// Let's optimistically assume the situation is very unlikely to happen,
	// and try to define a low-granularity lock if the current approach doesn't
	// work out later.
	dl := r.db.tree.bottom()
	addrHash := crypto.Keccak256Hash(address.Bytes())
	keyHash := crypto.Keccak256Hash(key.Bytes())
	latest, err := dl.storage(addrHash, keyHash, 0)
	if err != nil {
		return nil, err
	}
	return r.reader.read(newStorageIdentQuery(address, addrHash, key, keyHash), r.id, dl.stateID(), latest)
}
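
// The helper below is an illustrative usage sketch, not part of the upstream
// file: it shows how the two reader flavours above could be combined by a
// caller, serving live states through StateReader and falling back to the
// indexed state histories via HistoricReader once the root has been flushed
// out of the layer tree. The function name and the fallback policy are
// assumptions made purely for demonstration.
func loadAccount(db *Database, root common.Hash, addr common.Address) (*types.SlimAccount, error) {
	if sr, err := db.StateReader(root); err == nil {
		// Live states are keyed by the Keccak256 hash of the account address.
		return sr.Account(crypto.Keccak256Hash(addr.Bytes()))
	}
	// The root is no longer present in the layer tree; try historic access.
	hr, err := db.HistoricReader(root)
	if err != nil {
		return nil, err
	}
	// Historical lookups take the plain address; hashing happens internally.
	return hr.Account(addr)
}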