github.com/ethereum/go-ethereum@v1.16.1/core/rawdb/database.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"errors"
	"fmt"
	"maps"
	"os"
	"path/filepath"
	"slices"
	"strings"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/olekukonko/tablewriter"
)

var ErrDeleteRangeInterrupted = errors.New("safe delete range operation interrupted")

// freezerdb is a database wrapper that enables ancient chain segment freezing.
type freezerdb struct {
	ethdb.KeyValueStore
	*chainFreezer

	readOnly    bool
	ancientRoot string
}

// AncientDatadir returns the path of root ancient directory.
func (frdb *freezerdb) AncientDatadir() (string, error) {
	return frdb.ancientRoot, nil
}

// Close implements io.Closer, closing both the fast key-value store as well as
// the slow ancient tables.
func (frdb *freezerdb) Close() error {
	var errs []error
	if err := frdb.chainFreezer.Close(); err != nil {
		errs = append(errs, err)
	}
	if err := frdb.KeyValueStore.Close(); err != nil {
		errs = append(errs, err)
	}
	if len(errs) != 0 {
		return fmt.Errorf("%v", errs)
	}
	return nil
}

// Freeze is a helper method used for external testing to trigger and block until
// a freeze cycle completes, without having to sleep for a minute to trigger the
// automatic background run.
func (frdb *freezerdb) Freeze() error {
	if frdb.readOnly {
		return errReadOnly
	}
	// Trigger a freeze cycle and block until it's done
	trigger := make(chan struct{}, 1)
	frdb.chainFreezer.trigger <- trigger
	<-trigger
	return nil
}

// nofreezedb is a database wrapper that disables freezer data retrievals.
type nofreezedb struct {
	ethdb.KeyValueStore
}

// Ancient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
	return nil, errNotSupported
}

// AncientRange returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
	return nil, errNotSupported
}

// Ancients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancients() (uint64, error) {
	return 0, errNotSupported
}

// Tail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Tail() (uint64, error) {
	return 0, errNotSupported
}

// AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
	return 0, errNotSupported
}

// ModifyAncients is not supported.
func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
	return 0, errNotSupported
}

// TruncateHead returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateHead(items uint64) (uint64, error) {
	return 0, errNotSupported
}

// TruncateTail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateTail(items uint64) (uint64, error) {
	return 0, errNotSupported
}

// SyncAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) SyncAncient() error {
	return errNotSupported
}

func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
	// Unlike other ancient-related methods, this method does not return
	// errNotSupported when invoked.
	// The reason for this is that the caller might want to do several things:
	// 1. Check if something is in the freezer,
	// 2. If not, check leveldb.
	//
	// This will work, since the ancient-checks inside 'fn' will return errors,
	// and the leveldb work will continue.
	//
	// If we instead were to return errNotSupported here, then the caller would
	// have to explicitly check for that, having an extra clause to do the
	// non-ancient operations.
	return fn(db)
}

// AncientDatadir returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientDatadir() (string, error) {
	return "", errNotSupported
}

// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
	return &nofreezedb{KeyValueStore: db}
}

// resolveChainFreezerDir is a helper function which resolves the absolute path
// of chain freezer by considering backward compatibility.
func resolveChainFreezerDir(ancient string) string {
	// Check if the chain freezer is already present in the specified
	// sub folder, if not then two possibilities:
	// - chain freezer is not initialized
	// - chain freezer exists in legacy location (root ancient folder)
	freezer := filepath.Join(ancient, ChainFreezerName)
	if !common.FileExist(freezer) {
		if !common.FileExist(ancient) {
			// The entire ancient store is not initialized, still use the sub
			// folder for initialization.
		} else {
			// Ancient root is already initialized, then we hold the assumption
			// that chain freezer is also initialized and located in root folder.
			// In this case fallback to legacy location.
			freezer = ancient
			log.Info("Found legacy ancient chain path", "location", ancient)
		}
	}
	return freezer
}
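
// To illustrate the resolution above (hypothetical paths; ChainFreezerName is
// assumed to be the "chain" sub-folder used by the current layout):
//
//	resolveChainFreezerDir("/data/ancient") == "/data/ancient/chain" // fresh store or modern layout
//	resolveChainFreezerDir("/data/ancient") == "/data/ancient"       // legacy layout: root exists, no "chain" sub-folder
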
// resolveChainEraDir is a helper function which resolves the absolute path of the era database.
func resolveChainEraDir(chainFreezerDir string, era string) string {
	switch {
	case era == "":
		return filepath.Join(chainFreezerDir, "era")
	case !filepath.IsAbs(era):
		return filepath.Join(chainFreezerDir, era)
	default:
		return era
	}
}

// NewDatabaseWithFreezer creates a high level database on top of a given key-value store.
// The passed ancient indicates the path of the root ancient directory where the chain
// freezer can be opened.
//
// Deprecated: use Open.
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, ancient string, namespace string, readonly bool) (ethdb.Database, error) {
	return Open(db, OpenOptions{
		Ancient:          ancient,
		MetricsNamespace: namespace,
		ReadOnly:         readonly,
	})
}

// OpenOptions specifies options for opening the database.
type OpenOptions struct {
	Ancient          string // ancients directory
	Era              string // era files directory
	MetricsNamespace string // prefix added to freezer metric names
	ReadOnly         bool
}

// Open creates a high-level database wrapper for the given key-value store.
func Open(db ethdb.KeyValueStore, opts OpenOptions) (ethdb.Database, error) {
	// Create the idle freezer instance. If the given ancient directory is empty,
	// an in-memory chain freezer is used (e.g. dev mode); otherwise the regular
	// file-based freezer is created.
	chainFreezerDir := opts.Ancient
	if chainFreezerDir != "" {
		chainFreezerDir = resolveChainFreezerDir(chainFreezerDir)
	}
	frdb, err := newChainFreezer(chainFreezerDir, opts.Era, opts.MetricsNamespace, opts.ReadOnly)
	if err != nil {
		printChainMetadata(db)
		return nil, err
	}
	// Since the freezer can be stored separately from the user's key-value database,
	// there's a fairly high probability that the user requests invalid combinations
	// of the freezer and database. Ensure that we don't shoot ourselves in the foot
	// by serving up conflicting data, leading to both datastores getting corrupted.
	//
	//   - If both the freezer and key-value store are empty (no genesis), we just
	//     initialized a new empty freezer, so everything's fine.
	//   - If the key-value store is empty, but the freezer is not, we need to make
	//     sure the user's genesis matches the freezer. That will be checked in the
	//     blockchain, since we don't have the genesis block here (nor should we at
	//     this point care, the key-value/freezer combo is valid).
	//   - If neither the key-value store nor the freezer is empty, cross validate
	//     the genesis hashes to make sure they are compatible. If they are, also
	//     ensure that there's no gap between the freezer and the subsequent leveldb.
	//   - If the key-value store is not empty, but the freezer is, we might just be
	//     upgrading to the freezer release, or we might have had a small chain and
	//     not frozen anything yet. Ensure that no blocks are missing yet from the
	//     key-value store, since that would mean we already had an old freezer.
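
	// As a concrete illustration of the gap case (hypothetical numbers): if the
	// freezer holds blocks [0, 999] (frozen == 1000) while the key-value store's
	// oldest surviving header is #1200 and its head is newer than #999, then
	// blocks 1000-1199 exist nowhere, and startup is rejected below with a
	// "gap in the chain" error instead of serving a broken chain.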

	// If the genesis hash is empty, we have a new key-value store, so nothing to
	// validate in this method. If, however, the genesis hash is not nil, compare
	// it to the freezer content.
	if kvgenesis, _ := db.Get(headerHashKey(0)); len(kvgenesis) > 0 {
		if frozen, _ := frdb.Ancients(); frozen > 0 {
			// If the freezer already contains something, ensure that the genesis blocks
			// match, otherwise we might mix up freezers across chains and destroy both
			// the freezer and the key-value store.
			frgenesis, err := frdb.Ancient(ChainFreezerHashTable, 0)
			if err != nil {
				printChainMetadata(db)
				return nil, fmt.Errorf("failed to retrieve genesis from ancient %v", err)
			} else if !bytes.Equal(kvgenesis, frgenesis) {
				printChainMetadata(db)
				return nil, fmt.Errorf("genesis mismatch: %#x (leveldb) != %#x (ancients)", kvgenesis, frgenesis)
			}
			// Key-value store and freezer belong to the same network. Ensure that they
			// are contiguous, otherwise we might end up with a non-functional freezer.
			if kvhash, _ := db.Get(headerHashKey(frozen)); len(kvhash) == 0 {
				// Subsequent header after the freezer limit is missing from the database.
				// Reject startup if the database has a more recent head.
				if head := *ReadHeaderNumber(db, ReadHeadHeaderHash(db)); head > frozen-1 {
					// Find the smallest block stored in the key-value store
					// in the range [frozen, head]
					var number uint64
					for number = frozen; number <= head; number++ {
						if present, _ := db.Has(headerHashKey(number)); present {
							break
						}
					}
					// We are about to exit on error. Print database metadata before exiting.
					printChainMetadata(db)
					return nil, fmt.Errorf("gap in the chain between ancients [0 - #%d] and leveldb [#%d - #%d] ",
						frozen-1, number, head)
				}
				// Database contains only older data than the freezer; this happens if the
				// state was wiped and reinited from an existing freezer.
			}
			// Otherwise, the key-value store continues where the freezer left off, all is fine.
			// We might have duplicate blocks (crash after freezer write but before key-value
			// store deletion), but that's fine.
		} else {
			// If the freezer is empty, ensure nothing was moved yet from the key-value
			// store, otherwise we'll end up missing data. We check block #1 to decide
			// if we froze anything previously or not, but do take care of databases with
			// only the genesis block.
			if ReadHeadHeaderHash(db) != common.BytesToHash(kvgenesis) {
				// Key-value store contains more data than the genesis block, make sure we
				// didn't freeze anything yet.
				if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 {
					printChainMetadata(db)
					return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
				}
				// Block #1 is still in the database, we're allowed to init a new freezer.
			}
			// Otherwise, the head header is still the genesis, we're allowed to init a new
			// freezer.
		}
	}
	// Freezer is consistent with the key-value database, permit combining the two.
	if !opts.ReadOnly {
		frdb.wg.Add(1)
		go func() {
			frdb.freeze(db)
			frdb.wg.Done()
		}()
	}
	return &freezerdb{
		ancientRoot:   opts.Ancient,
		KeyValueStore: db,
		chainFreezer:  frdb,
	}, nil
}
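
// A minimal usage sketch for Open (the ancient path and namespace are hypothetical;
// any ethdb.KeyValueStore implementation can be passed, an in-memory one is used
// here for brevity):
//
//	db, err := rawdb.Open(memorydb.New(), rawdb.OpenOptions{
//		Ancient:          "/data/geth/chaindata/ancient",
//		MetricsNamespace: "eth/db/chaindata/",
//	})
//	if err != nil {
//		// handle the error, e.g. genesis mismatch or chain gap
//	}
//	defer db.Close()
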
// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
// freezer moving immutable chain segments into cold storage.
func NewMemoryDatabase() ethdb.Database {
	return NewDatabase(memorydb.New())
}

const (
	DBPebble  = "pebble"
	DBLeveldb = "leveldb"
)

// PreexistingDatabase checks the given data directory whether a database is already
// instantiated at that location, and if so, returns the type of database (or the
// empty string).
func PreexistingDatabase(path string) string {
	if _, err := os.Stat(filepath.Join(path, "CURRENT")); err != nil {
		return "" // No pre-existing db
	}
	if matches, err := filepath.Glob(filepath.Join(path, "OPTIONS*")); len(matches) > 0 || err != nil {
		if err != nil {
			panic(err) // only possible if the pattern is malformed
		}
		return DBPebble
	}
	return DBLeveldb
}

type counter uint64

func (c counter) String() string {
	return fmt.Sprintf("%d", c)
}

func (c counter) Percentage(current uint64) string {
	return fmt.Sprintf("%d", current*100/uint64(c))
}

// stat stores sizes and count for a parameter
type stat struct {
	size  common.StorageSize
	count counter
}

// Add size to the stat and increase the counter by 1
func (s *stat) Add(size common.StorageSize) {
	s.size += size
	s.count++
}

func (s *stat) Size() string {
	return s.size.String()
}

func (s *stat) Count() string {
	return s.count.String()
}

// InspectDatabase traverses the entire database and checks the size
// of all different categories of data.
func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
	it := db.NewIterator(keyPrefix, keyStart)
	defer it.Release()

	var (
		count  int64
		start  = time.Now()
		logged = time.Now()

		// Key-value store statistics
		headers            stat
		bodies             stat
		receipts           stat
		tds                stat
		numHashPairings    stat
		hashNumPairings    stat
		legacyTries        stat
		stateLookups       stat
		accountTries       stat
		storageTries       stat
		codes              stat
		txLookups          stat
		accountSnaps       stat
		storageSnaps       stat
		preimages          stat
		beaconHeaders      stat
		cliqueSnaps        stat
		bloomBits          stat
		filterMapRows      stat
		filterMapLastBlock stat
		filterMapBlockLV   stat

		// Path-mode archive data
		stateIndex stat

		// Verkle statistics
		verkleTries        stat
		verkleStateLookups stat

		// Meta- and unaccounted data
		metadata    stat
		unaccounted stat

		// Totals
		total common.StorageSize

		// This map tracks example keys for unaccounted data.
		// For each unique two-byte prefix, the first unaccounted key encountered
		// by the iterator will be stored.
		unaccountedKeys = make(map[[2]byte][]byte)
	)
	// Inspect key-value database first.
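	// Each key is attributed to a category below by matching its prefix and its
	// expected length. For example, a canonical header entry is keyed as
	// headerPrefix ++ 8-byte big-endian block number ++ 32-byte hash, which is
	// exactly the len(headerPrefix)+8+common.HashLength check in the first case.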
	for it.Next() {
		var (
			key  = it.Key()
			size = common.StorageSize(len(key) + len(it.Value()))
		)
		total += size
		switch {
		case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
			headers.Add(size)
		case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
			bodies.Add(size)
		case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
			receipts.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
			tds.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
			numHashPairings.Add(size)
		case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
			hashNumPairings.Add(size)
		case IsLegacyTrieNode(key, it.Value()):
			legacyTries.Add(size)
		case bytes.HasPrefix(key, stateIDPrefix) && len(key) == len(stateIDPrefix)+common.HashLength:
			stateLookups.Add(size)
		case IsAccountTrieNode(key):
			accountTries.Add(size)
		case IsStorageTrieNode(key):
			storageTries.Add(size)
		case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
			codes.Add(size)
		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
			txLookups.Add(size)
		case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
			accountSnaps.Add(size)
		case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
			storageSnaps.Add(size)
		case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
			preimages.Add(size)
		case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
			beaconHeaders.Add(size)
		case bytes.HasPrefix(key, CliqueSnapshotPrefix) && len(key) == 7+common.HashLength:
			cliqueSnaps.Add(size)

		// new log index
		case bytes.HasPrefix(key, filterMapRowPrefix) && len(key) <= len(filterMapRowPrefix)+9:
			filterMapRows.Add(size)
		case bytes.HasPrefix(key, filterMapLastBlockPrefix) && len(key) == len(filterMapLastBlockPrefix)+4:
			filterMapLastBlock.Add(size)
		case bytes.HasPrefix(key, filterMapBlockLVPrefix) && len(key) == len(filterMapBlockLVPrefix)+8:
			filterMapBlockLV.Add(size)

		// old log index (deprecated)
		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, bloomBitsMetaPrefix) && len(key) < len(bloomBitsMetaPrefix)+8:
			bloomBits.Add(size)

		// Path-based historic state indexes
		case bytes.HasPrefix(key, StateHistoryIndexPrefix) && len(key) >= len(StateHistoryIndexPrefix)+common.HashLength:
			stateIndex.Add(size)

		// Verkle trie data is detected, determine the sub-category
		case bytes.HasPrefix(key, VerklePrefix):
			remain := key[len(VerklePrefix):]
			switch {
			case IsAccountTrieNode(remain):
				verkleTries.Add(size)
			case bytes.HasPrefix(remain, stateIDPrefix) && len(remain) == len(stateIDPrefix)+common.HashLength:
				verkleStateLookups.Add(size)
			case bytes.Equal(remain, persistentStateIDKey):
				metadata.Add(size)
			case bytes.Equal(remain, trieJournalKey):
				metadata.Add(size)
			case bytes.Equal(remain, snapSyncStatusFlagKey):
				metadata.Add(size)
			default:
				unaccounted.Add(size)
			}

		// Metadata keys
		case slices.ContainsFunc(knownMetadataKeys, func(x []byte) bool { return bytes.Equal(x, key) }):
			metadata.Add(size)

		default:
			unaccounted.Add(size)
			if len(key) >= 2 {
				prefix := [2]byte(key[:2])
				if _, ok := unaccountedKeys[prefix]; !ok {
					unaccountedKeys[prefix] = bytes.Clone(key)
				}
			}
		}
		count++
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
	}
	// Display the database statistics of the key-value store.
	stats := [][]string{
		{"Key-Value store", "Headers", headers.Size(), headers.Count()},
		{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
		{"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
		{"Key-Value store", "Difficulties (deprecated)", tds.Size(), tds.Count()},
		{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
		{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
		{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
		{"Key-Value store", "Log index filter-map rows", filterMapRows.Size(), filterMapRows.Count()},
		{"Key-Value store", "Log index last-block-of-map", filterMapLastBlock.Size(), filterMapLastBlock.Count()},
		{"Key-Value store", "Log index block-lv", filterMapBlockLV.Size(), filterMapBlockLV.Count()},
		{"Key-Value store", "Log bloombits (deprecated)", bloomBits.Size(), bloomBits.Count()},
		{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
		{"Key-Value store", "Hash trie nodes", legacyTries.Size(), legacyTries.Count()},
		{"Key-Value store", "Path trie state lookups", stateLookups.Size(), stateLookups.Count()},
		{"Key-Value store", "Path trie account nodes", accountTries.Size(), accountTries.Count()},
		{"Key-Value store", "Path trie storage nodes", storageTries.Size(), storageTries.Count()},
		{"Key-Value store", "Path state history indexes", stateIndex.Size(), stateIndex.Count()},
		{"Key-Value store", "Verkle trie nodes", verkleTries.Size(), verkleTries.Count()},
		{"Key-Value store", "Verkle trie state lookups", verkleStateLookups.Size(), verkleStateLookups.Count()},
		{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
		{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
		{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
		{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
		{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
		{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
	}
	// Then inspect all registered append-only file stores (freezers).
	ancients, err := inspectFreezers(db)
	if err != nil {
		return err
	}
	for _, ancient := range ancients {
		for _, table := range ancient.sizes {
			stats = append(stats, []string{
				fmt.Sprintf("Ancient store (%s)", strings.Title(ancient.name)),
				strings.Title(table.name),
				table.size.String(),
				fmt.Sprintf("%d", ancient.count()),
			})
		}
		total += ancient.size()
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Database", "Category", "Size", "Items"})
	table.SetFooter([]string{"", "Total", total.String(), " "})
	table.AppendBulk(stats)
	table.Render()

	if unaccounted.size > 0 {
		log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count)
		for _, e := range slices.SortedFunc(maps.Values(unaccountedKeys), bytes.Compare) {
			log.Error(fmt.Sprintf(" example key: %x", e))
		}
	}
	return nil
}

// This is the list of known 'metadata' keys stored in the database.
var knownMetadataKeys = [][]byte{
	databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, headFinalizedBlockKey,
	lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
	snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
	uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
	persistentStateIDKey, trieJournalKey, snapshotSyncStatusKey, snapSyncStatusFlagKey,
	filterMapsRangeKey, headStateHistoryIndexKey,
}

// printChainMetadata prints out chain metadata to stderr.
func printChainMetadata(db ethdb.KeyValueStore) {
	fmt.Fprintf(os.Stderr, "Chain metadata\n")
	for _, v := range ReadChainMetadata(db) {
		fmt.Fprintf(os.Stderr, " %s\n", strings.Join(v, ": "))
	}
	fmt.Fprintf(os.Stderr, "\n\n")
}

// ReadChainMetadata returns a set of key/value pairs that contains information
// about the database chain status. This can be used for diagnostic purposes
// when investigating the state of the node.
func ReadChainMetadata(db ethdb.KeyValueStore) [][]string {
	pp := func(val *uint64) string {
		if val == nil {
			return "<nil>"
		}
		return fmt.Sprintf("%d (%#x)", *val, *val)
	}

	data := [][]string{
		{"databaseVersion", pp(ReadDatabaseVersion(db))},
		{"headBlockHash", fmt.Sprintf("%v", ReadHeadBlockHash(db))},
		{"headFastBlockHash", fmt.Sprintf("%v", ReadHeadFastBlockHash(db))},
		{"headHeaderHash", fmt.Sprintf("%v", ReadHeadHeaderHash(db))},
		{"lastPivotNumber", pp(ReadLastPivotNumber(db))},
		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(ReadSnapshotSyncStatus(db)))},
		{"snapshotDisabled", fmt.Sprintf("%v", ReadSnapshotDisabled(db))},
		{"snapshotJournal", fmt.Sprintf("%d bytes", len(ReadSnapshotJournal(db)))},
		{"snapshotRecoveryNumber", pp(ReadSnapshotRecoveryNumber(db))},
		{"snapshotRoot", fmt.Sprintf("%v", ReadSnapshotRoot(db))},
		{"txIndexTail", pp(ReadTxIndexTail(db))},
	}
	if b := ReadSkeletonSyncStatus(db); b != nil {
		data = append(data, []string{"SkeletonSyncStatus", string(b)})
	}
	if fmr, ok, _ := ReadFilterMapsRange(db); ok {
		data = append(data, []string{"filterMapsRange", fmt.Sprintf("%+v", fmr)})
	}
	return data
}
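
// A minimal usage sketch for the diagnostics helpers above (db is assumed to be
// any ethdb.KeyValueStore opened by the caller):
//
//	for _, kv := range rawdb.ReadChainMetadata(db) {
//		fmt.Println(strings.Join(kv, ": "))
//	}
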
// SafeDeleteRange deletes all of the keys (and values) in the range
// [start,end) (inclusive on start, exclusive on end).
// If hashScheme is true then it always uses an iterator and skips hashdb trie
// node entries. If it is false and the backing db is pebble db then it uses
// the fast native range delete.
// In case of fallback mode (hashdb or leveldb) the range deletion might be
// very slow depending on the number of entries. In this case stopCallback is
// periodically called and, if it returns true, SafeDeleteRange stops and
// returns ErrDeleteRangeInterrupted. The callback is not called if the native
// range delete is used or if only a small number of keys is affected. The bool
// argument passed to the callback is true if entries have actually been
// deleted already.
func SafeDeleteRange(db ethdb.KeyValueStore, start, end []byte, hashScheme bool, stopCallback func(bool) bool) error {
	if !hashScheme {
		// delete entire range; use fast native range delete on pebble db
		for {
			switch err := db.DeleteRange(start, end); {
			case err == nil:
				return nil
			case errors.Is(err, ethdb.ErrTooManyKeys):
				if stopCallback(true) {
					return ErrDeleteRangeInterrupted
				}
			default:
				return err
			}
		}
	}

	var (
		count, deleted, skipped int
		startTime               = time.Now()
	)

	batch := db.NewBatch()
	it := db.NewIterator(nil, start)
	defer func() {
		it.Release() // it might be replaced during the process
		log.Debug("SafeDeleteRange finished", "deleted", deleted, "skipped", skipped, "elapsed", common.PrettyDuration(time.Since(startTime)))
	}()

	for it.Next() && bytes.Compare(end, it.Key()) > 0 {
		// Prevent deletion for trie nodes in hash mode
		if len(it.Key()) != 32 || crypto.Keccak256Hash(it.Value()) != common.BytesToHash(it.Key()) {
			if err := batch.Delete(it.Key()); err != nil {
				return err
			}
			deleted++
		} else {
			skipped++
		}
		count++
		if count > 10000 { // should not block for more than a second
			if err := batch.Write(); err != nil {
				return err
			}
			if stopCallback(deleted != 0) {
				return ErrDeleteRangeInterrupted
			}
			start = append(bytes.Clone(it.Key()), 0) // appending a zero gives us the next possible key
			it.Release()
			batch = db.NewBatch()
			it = db.NewIterator(nil, start)
			count = 0
		}
	}
	return batch.Write()
}
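
// A minimal usage sketch for SafeDeleteRange (the key range and the time budget
// are hypothetical). Returning true from the callback aborts the slow
// iterator-based fallback; its bool argument reports whether entries have
// already been deleted:
//
//	deadline := time.Now().Add(2 * time.Second)
//	err := rawdb.SafeDeleteRange(db, startKey, endKey, true, func(deleted bool) bool {
//		return time.Now().After(deadline)
//	})
//	if errors.Is(err, rawdb.ErrDeleteRangeInterrupted) {
//		// Interrupted mid-way: some keys in the range may already be gone.
//	}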