github.com/ethereum/go-ethereum@v1.16.1/triedb/database.go

// Copyright 2022 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package triedb

import (
	"errors"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie/trienode"
	"github.com/ethereum/go-ethereum/triedb/database"
	"github.com/ethereum/go-ethereum/triedb/hashdb"
	"github.com/ethereum/go-ethereum/triedb/pathdb"
)

// Config defines all necessary options for the database.
type Config struct {
	Preimages bool           // Flag whether the preimage of node key is recorded
	IsVerkle  bool           // Flag whether the db is holding a verkle tree
	HashDB    *hashdb.Config // Configs for hash-based scheme
	PathDB    *pathdb.Config // Configs for experimental path-based scheme
}

// HashDefaults represents a config for using hash-based scheme with
// default settings.
var HashDefaults = &Config{
	Preimages: false,
	IsVerkle:  false,
	HashDB:    hashdb.Defaults,
}

// VerkleDefaults represents a config for holding verkle trie data
// using path-based scheme with default settings.
var VerkleDefaults = &Config{
	Preimages: false,
	IsVerkle:  true,
	PathDB:    pathdb.Defaults,
}

// backend defines the methods needed to access/update trie nodes in different
// state schemes.
type backend interface {
	// NodeReader returns a reader for accessing trie nodes within the specified state.
	// An error will be returned if the specified state is not available.
	NodeReader(root common.Hash) (database.NodeReader, error)

	// StateReader returns a reader for accessing flat states within the specified
	// state. An error will be returned if the specified state is not available.
	StateReader(root common.Hash) (database.StateReader, error)

	// Size returns the current storage size of the diff layers on top of the
	// disk layer and the storage size of the nodes cached in the disk layer.
	//
	// For hash scheme, there is no differentiation between diff layer nodes
	// and dirty disk layer nodes, so both are merged into the second return.
	Size() (common.StorageSize, common.StorageSize)

	// Commit writes all relevant trie nodes belonging to the specified state
	// to disk. Report specifies whether logs will be displayed at info level.
	Commit(root common.Hash, report bool) error

	// Close closes the trie database backend and releases all held resources.
	Close() error
}
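
// examplePathConfig is an illustrative sketch, not part of the original file:
// it shows how a caller might assemble a Config that selects the path-based
// scheme with preimage recording enabled. The function name is hypothetical;
// pathdb.Defaults is the same default value referenced by VerkleDefaults above.
func examplePathConfig() *Config {
	return &Config{
		Preimages: true,            // record trie-key preimages alongside nodes
		IsVerkle:  false,           // regular Merkle-Patricia tries, not verkle
		PathDB:    pathdb.Defaults, // path-based scheme with default settings
	}
}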

// Database is the wrapper of the underlying backend which is shared by different
// types of node backend as an entrypoint. It's responsible for all interactions
// relevant to trie nodes and node preimages.
type Database struct {
	disk      ethdb.Database
	config    *Config        // Configuration for trie database
	preimages *preimageStore // The store for caching preimages
	backend   backend        // The backend for managing trie nodes
}

// NewDatabase initializes the trie database with default settings. Note that
// the legacy hash-based scheme is used by default.
func NewDatabase(diskdb ethdb.Database, config *Config) *Database {
	// Sanitize the config and use the default one if it's not specified.
	if config == nil {
		config = HashDefaults
	}
	var preimages *preimageStore
	if config.Preimages {
		preimages = newPreimageStore(diskdb)
	}
	db := &Database{
		disk:      diskdb,
		config:    config,
		preimages: preimages,
	}
	if config.HashDB != nil && config.PathDB != nil {
		log.Crit("Both 'hash' and 'path' mode are configured")
	}
	if config.PathDB != nil {
		db.backend = pathdb.New(diskdb, config.PathDB, config.IsVerkle)
	} else {
		db.backend = hashdb.New(diskdb, config.HashDB)
	}
	return db
}

// NodeReader returns a reader for accessing trie nodes within the specified state.
// An error will be returned if the specified state is not available.
func (db *Database) NodeReader(blockRoot common.Hash) (database.NodeReader, error) {
	return db.backend.NodeReader(blockRoot)
}

// StateReader returns a reader that allows access to the state data associated
// with the specified state. An error will be returned if the specified state is
// not available.
func (db *Database) StateReader(blockRoot common.Hash) (database.StateReader, error) {
	return db.backend.StateReader(blockRoot)
}

// HistoricReader constructs a reader for accessing the requested historic state.
func (db *Database) HistoricReader(root common.Hash) (*pathdb.HistoricalStateReader, error) {
	pdb, ok := db.backend.(*pathdb.Database)
	if !ok {
		return nil, errors.New("not supported")
	}
	return pdb.HistoricReader(root)
}

// Update performs a state transition by committing dirty nodes contained in the
// given set in order to update state from the specified parent to the specified
// root. The held pre-images accumulated up to this point will be flushed in case
// the size exceeds the threshold.
//
// The passed in maps (nodes, states) will be retained to avoid copying everything.
// Therefore, these maps must not be changed afterwards.
func (db *Database) Update(root common.Hash, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *StateSet) error {
	if db.preimages != nil {
		db.preimages.commit(false)
	}
	switch b := db.backend.(type) {
	case *hashdb.Database:
		return b.Update(root, parent, block, nodes)
	case *pathdb.Database:
		return b.Update(root, parent, block, nodes, states.internal())
	}
	return errors.New("unknown backend")
}
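
// exampleUpdateAndCommit is an illustrative sketch, not part of the original
// file: it shows the typical call shape of applying a state transition with
// Update and then persisting the resulting state with Commit. The function
// name is hypothetical; the node and state sets are assumed to have been
// collected by the caller during trie commits.
func exampleUpdateAndCommit(db *Database, root, parent common.Hash, block uint64, nodes *trienode.MergedNodeSet, states *StateSet) error {
	// Transition the in-memory state from parent to root. The passed sets are
	// retained by the backend, so they must not be mutated afterwards.
	if err := db.Update(root, parent, block, nodes, states); err != nil {
		return err
	}
	// Flush everything reachable from root to disk, logging at info level.
	return db.Commit(root, true)
}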

// Commit iterates over all the children of a particular node and writes them
// out to disk. As a side effect, all pre-images accumulated up to this point
// are also written.
func (db *Database) Commit(root common.Hash, report bool) error {
	if db.preimages != nil {
		db.preimages.commit(true)
	}
	return db.backend.Commit(root, report)
}

// Size returns the storage size of diff layer nodes above the persistent disk
// layer, the dirty nodes buffered within the disk layer, and the size of cached
// preimages.
func (db *Database) Size() (common.StorageSize, common.StorageSize, common.StorageSize) {
	var (
		diffs, nodes common.StorageSize
		preimages    common.StorageSize
	)
	diffs, nodes = db.backend.Size()
	if db.preimages != nil {
		preimages = db.preimages.size()
	}
	return diffs, nodes, preimages
}

// Scheme returns the node scheme used in the database.
func (db *Database) Scheme() string {
	if db.config.PathDB != nil {
		return rawdb.PathScheme
	}
	return rawdb.HashScheme
}

// Close flushes the dangling preimages to disk and closes the trie database.
// It is meant to be called when closing the blockchain object, so that all
// resources held can be released correctly.
func (db *Database) Close() error {
	db.WritePreimages()
	return db.backend.Close()
}

// WritePreimages flushes all accumulated preimages to disk forcibly.
func (db *Database) WritePreimages() {
	if db.preimages != nil {
		db.preimages.commit(true)
	}
}

// Preimage retrieves a cached trie node pre-image from the preimage store.
func (db *Database) Preimage(hash common.Hash) []byte {
	if db.preimages == nil {
		return nil
	}
	return db.preimages.preimage(hash)
}

// InsertPreimage writes pre-images of trie nodes to the preimage store.
func (db *Database) InsertPreimage(preimages map[common.Hash][]byte) {
	if db.preimages == nil {
		return
	}
	db.preimages.insertPreimage(preimages)
}

// PreimageEnabled reports whether the pre-image store is enabled.
func (db *Database) PreimageEnabled() bool {
	return db.preimages != nil
}

// Cap iteratively flushes old but still referenced trie nodes until the total
// memory usage goes below the given threshold. The held pre-images accumulated
// up to this point will be flushed in case the size exceeds the threshold.
//
// It's only supported by hash-based database and will return an error for others.
func (db *Database) Cap(limit common.StorageSize) error {
	hdb, ok := db.backend.(*hashdb.Database)
	if !ok {
		return errors.New("not supported")
	}
	if db.preimages != nil {
		db.preimages.commit(false)
	}
	return hdb.Cap(limit)
}

// Reference adds a new reference from a parent node to a child node. This function
// is used to add a reference between an internal trie node and an external node
// (e.g. a storage trie root); all internal trie nodes are referenced together by
// the database itself.
//
// It's only supported by hash-based database and will return an error for others.
func (db *Database) Reference(root common.Hash, parent common.Hash) error {
	hdb, ok := db.backend.(*hashdb.Database)
	if !ok {
		return errors.New("not supported")
	}
	hdb.Reference(root, parent)
	return nil
}
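
// exampleHashdbMaintenance is an illustrative sketch, not part of the original
// file: it shows how a hash-scheme caller might pin an external storage trie
// root via Reference and then bound memory usage with Cap. The function name
// and the 256MB limit are arbitrary choices for the sketch.
func exampleHashdbMaintenance(db *Database, storageRoot, owner common.Hash) error {
	// Keep the storage trie alive by referencing its root from the owner node;
	// this returns an error on non hash-based backends.
	if err := db.Reference(storageRoot, owner); err != nil {
		return err
	}
	// Flush old but still referenced nodes until the cache drops below 256MB.
	return db.Cap(256 * 1024 * 1024)
}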

// Dereference removes an existing reference from a root node. It's only
// supported by hash-based database and will return an error for others.
func (db *Database) Dereference(root common.Hash) error {
	hdb, ok := db.backend.(*hashdb.Database)
	if !ok {
		return errors.New("not supported")
	}
	hdb.Dereference(root)
	return nil
}

// Recover rolls back the database to a specified historical point. The state is
// supported as the rollback destination only if it's a canonical state and the
// corresponding trie histories exist. It's only supported by path-based
// database and will return an error for others.
func (db *Database) Recover(target common.Hash) error {
	pdb, ok := db.backend.(*pathdb.Database)
	if !ok {
		return errors.New("not supported")
	}
	return pdb.Recover(target)
}

// Recoverable reports whether the specified state can be recovered. It's only
// supported by path-based database and will return an error for others.
func (db *Database) Recoverable(root common.Hash) (bool, error) {
	pdb, ok := db.backend.(*pathdb.Database)
	if !ok {
		return false, errors.New("not supported")
	}
	return pdb.Recoverable(root), nil
}

// Disable deactivates the database and invalidates all available state layers
// as stale to prevent access to the persistent state, which is in the syncing
// stage.
//
// It's only supported by path-based database and will return an error for others.
func (db *Database) Disable() error {
	pdb, ok := db.backend.(*pathdb.Database)
	if !ok {
		return errors.New("not supported")
	}
	return pdb.Disable()
}

// Enable activates the database and resets the state tree with the provided
// persistent state root once the state sync is finished.
func (db *Database) Enable(root common.Hash) error {
	pdb, ok := db.backend.(*pathdb.Database)
	if !ok {
		return errors.New("not supported")
	}
	return pdb.Enable(root)
}

// Journal commits an entire diff hierarchy to disk into a single journal entry.
// This is meant to be used during shutdown to persist the snapshot without
// flattening everything down (bad for reorgs). It's only supported by path-based
// database and will return an error for others.
func (db *Database) Journal(root common.Hash) error {
	pdb, ok := db.backend.(*pathdb.Database)
	if !ok {
		return errors.New("not supported")
	}
	return pdb.Journal(root)
}

// VerifyState traverses the flat states specified by the given state root and
// ensures they are matched with each other.
func (db *Database) VerifyState(root common.Hash) error {
	pdb, ok := db.backend.(*pathdb.Database)
	if !ok {
		return errors.New("not supported")
	}
	return pdb.VerifyState(root)
}

// AccountIterator creates a new account iterator for the specified root hash and
// seeks to a starting account hash.
func (db *Database) AccountIterator(root common.Hash, seek common.Hash) (pathdb.AccountIterator, error) {
	pdb, ok := db.backend.(*pathdb.Database)
	if !ok {
		return nil, errors.New("not supported")
	}
	return pdb.AccountIterator(root, seek)
}
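
// exampleCountAccounts is an illustrative sketch, not part of the original
// file: it shows one way the path-scheme account iterator could be consumed.
// The function name is hypothetical, and the iterator methods used here
// (Next, Error, Release) are assumed to mirror go-ethereum's snapshot
// iterator interface.
func exampleCountAccounts(db *Database, root common.Hash) (int, error) {
	it, err := db.AccountIterator(root, common.Hash{}) // seek from the very first account
	if err != nil {
		return 0, err
	}
	defer it.Release()

	count := 0
	for it.Next() {
		count++ // the current entry is exposed via it.Hash() and it.Account()
	}
	return count, it.Error()
}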

// StorageIterator creates a new storage iterator for the specified root hash and
// account. The iterator will be moved to the specific start position.
func (db *Database) StorageIterator(root common.Hash, account common.Hash, seek common.Hash) (pathdb.StorageIterator, error) {
	pdb, ok := db.backend.(*pathdb.Database)
	if !ok {
		return nil, errors.New("not supported")
	}
	return pdb.StorageIterator(root, account, seek)
}

// IndexProgress returns the indexing progress made so far. It provides the
// number of states that remain unindexed.
func (db *Database) IndexProgress() (uint64, error) {
	pdb, ok := db.backend.(*pathdb.Database)
	if !ok {
		return 0, errors.New("not supported")
	}
	return pdb.IndexProgress()
}

// IsVerkle reports whether the database is holding a verkle tree.
func (db *Database) IsVerkle() bool {
	return db.config.IsVerkle
}

// Disk returns the underlying disk database.
func (db *Database) Disk() ethdb.Database {
	return db.disk
}
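
// exampleOpenAndClose is an illustrative sketch, not part of the original
// file: it wires a Database over an in-memory ethdb using the already
// imported rawdb package, checks the active scheme, and releases resources
// on Close. The function name is hypothetical.
func exampleOpenAndClose() error {
	db := NewDatabase(rawdb.NewMemoryDatabase(), HashDefaults)
	if db.Scheme() != rawdb.HashScheme {
		return errors.New("unexpected state scheme")
	}
	return db.Close()
}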