github.com/Cleverse/go-ethereum@v0.0.0-20220927095127-45113064e7f2/core/rawdb/database.go

// Copyright 2018 The go-ethereum Authors
// This file is part of the go-ethereum library.
//
// The go-ethereum library is free software: you can redistribute it and/or modify
// it under the terms of the GNU Lesser General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// The go-ethereum library is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU Lesser General Public License for more details.
//
// You should have received a copy of the GNU Lesser General Public License
// along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.

package rawdb

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/ethdb/leveldb"
	"github.com/ethereum/go-ethereum/ethdb/memorydb"
	"github.com/ethereum/go-ethereum/log"
	"github.com/olekukonko/tablewriter"
)

// freezerdb is a database wrapper that enables freezer data retrievals.
type freezerdb struct {
	ethdb.KeyValueStore
	ethdb.AncientStore
}

// Close implements io.Closer, closing both the fast key-value store as well as
// the slow ancient tables.
func (frdb *freezerdb) Close() error {
	var errs []error
	if err := frdb.AncientStore.Close(); err != nil {
		errs = append(errs, err)
	}
	if err := frdb.KeyValueStore.Close(); err != nil {
		errs = append(errs, err)
	}
	if len(errs) != 0 {
		return fmt.Errorf("%v", errs)
	}
	return nil
}

// Freeze is a helper method used for external testing to trigger and block until
// a freeze cycle completes, without having to sleep for a minute to trigger the
// automatic background run.
func (frdb *freezerdb) Freeze(threshold uint64) error {
	if frdb.AncientStore.(*chainFreezer).readonly {
		return errReadOnly
	}
	// Set the freezer threshold to a temporary value
	defer func(old uint64) {
		atomic.StoreUint64(&frdb.AncientStore.(*chainFreezer).threshold, old)
	}(atomic.LoadUint64(&frdb.AncientStore.(*chainFreezer).threshold))
	atomic.StoreUint64(&frdb.AncientStore.(*chainFreezer).threshold, threshold)

	// Trigger a freeze cycle and block until it's done
	trigger := make(chan struct{}, 1)
	frdb.AncientStore.(*chainFreezer).trigger <- trigger
	<-trigger
	return nil
}

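// As a rough usage sketch (hypothetical test code, not part of this package):
// a caller holding the ethdb.Database returned by NewDatabaseWithFreezer can
// assert for the Freeze helper and force a cycle that leaves only the most
// recent 16 blocks (an arbitrary example threshold) in the key-value store.
//
//	if f, ok := db.(interface{ Freeze(uint64) error }); ok {
//		if err := f.Freeze(16); err != nil {
//			t.Fatal(err)
//		}
//	}
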
// nofreezedb is a database wrapper that disables freezer data retrievals.
type nofreezedb struct {
	ethdb.KeyValueStore
}

// HasAncient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) HasAncient(kind string, number uint64) (bool, error) {
	return false, errNotSupported
}

// Ancient returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancient(kind string, number uint64) ([]byte, error) {
	return nil, errNotSupported
}

// AncientRange returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientRange(kind string, start, max, maxByteSize uint64) ([][]byte, error) {
	return nil, errNotSupported
}

// Ancients returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Ancients() (uint64, error) {
	return 0, errNotSupported
}

// Tail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Tail() (uint64, error) {
	return 0, errNotSupported
}

// AncientSize returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientSize(kind string) (uint64, error) {
	return 0, errNotSupported
}

// ModifyAncients is not supported.
func (db *nofreezedb) ModifyAncients(func(ethdb.AncientWriteOp) error) (int64, error) {
	return 0, errNotSupported
}

// TruncateHead returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateHead(items uint64) error {
	return errNotSupported
}

// TruncateTail returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) TruncateTail(items uint64) error {
	return errNotSupported
}

// Sync returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) Sync() error {
	return errNotSupported
}

func (db *nofreezedb) ReadAncients(fn func(reader ethdb.AncientReaderOp) error) (err error) {
	// Unlike other ancient-related methods, this method does not return
	// errNotSupported when invoked.
	// The reason for this is that the caller might want to do several things:
	// 1. Check if something is in freezer,
	// 2. If not, check leveldb.
	//
	// This will work, since the ancient-checks inside 'fn' will return errors,
	// and the leveldb work will continue.
	//
	// If we instead were to return errNotSupported here, then the caller would
	// have to explicitly check for that, having an extra clause to do the
	// non-ancient operations.
	return fn(db)
}

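// To illustrate the caller pattern described in ReadAncients above (hypothetical
// code, not part of this package; number and hash are assumed to be defined by
// the caller): the ancient lookup is attempted first and simply misses on a
// nofreezedb, after which the key-value fallback still runs.
//
//	_ = db.ReadAncients(func(reader ethdb.AncientReaderOp) error {
//		if data, _ := reader.Ancient(freezerHashTable, number); len(data) > 0 {
//			hash = common.BytesToHash(data)
//			return nil
//		}
//		// Freezer miss (or errNotSupported): fall back to the key-value store.
//		if data, _ := db.Get(headerHashKey(number)); len(data) > 0 {
//			hash = common.BytesToHash(data)
//		}
//		return nil
//	})
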
// MigrateTable processes the entries in a given table in sequence,
// converting them to a new format if they're of an old format.
func (db *nofreezedb) MigrateTable(kind string, convert convertLegacyFn) error {
	return errNotSupported
}

// AncientDatadir returns an error as we don't have a backing chain freezer.
func (db *nofreezedb) AncientDatadir() (string, error) {
	return "", errNotSupported
}

// NewDatabase creates a high level database on top of a given key-value data
// store without a freezer moving immutable chain segments into cold storage.
func NewDatabase(db ethdb.KeyValueStore) ethdb.Database {
	return &nofreezedb{KeyValueStore: db}
}

// NewDatabaseWithFreezer creates a high level database on top of a given key-
// value data store with a freezer moving immutable chain segments into cold
// storage.
func NewDatabaseWithFreezer(db ethdb.KeyValueStore, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
	// Create the idle freezer instance
	frdb, err := newChainFreezer(freezer, namespace, readonly, freezerTableSize, FreezerNoSnappy)
	if err != nil {
		return nil, err
	}
	// Since the freezer can be stored separately from the user's key-value database,
	// there's a fairly high probability that the user requests invalid combinations
	// of the freezer and database. Ensure that we don't shoot ourselves in the foot
	// by serving up conflicting data, leading to both datastores getting corrupted.
	//
	//   - If both the freezer and key-value store are empty (no genesis), we just
	//     initialized a new empty freezer, so everything's fine.
	//   - If the key-value store is empty, but the freezer is not, we need to make
	//     sure the user's genesis matches the freezer. That will be checked in the
	//     blockchain, since we don't have the genesis block here (nor should we at
	//     this point care, the key-value/freezer combo is valid).
	//   - If neither the key-value store nor the freezer is empty, cross validate
	//     the genesis hashes to make sure they are compatible. If they are, also
	//     ensure that there's no gap between the freezer and the subsequent leveldb.
	//   - If the key-value store is not empty, but the freezer is, we might just be
	//     upgrading to the freezer release, or we might have had a small chain and
	//     not frozen anything yet. Ensure that no blocks are missing from the
	//     key-value store, since that would mean we already had an old freezer.

	// If the genesis hash is empty, we have a new key-value store, so nothing to
	// validate in this method. If, however, the genesis hash is not nil, compare
	// it to the freezer content.
	if kvgenesis, _ := db.Get(headerHashKey(0)); len(kvgenesis) > 0 {
		if frozen, _ := frdb.Ancients(); frozen > 0 {
			// If the freezer already contains something, ensure that the genesis blocks
			// match, otherwise we might mix up freezers across chains and destroy both
			// the freezer and the key-value store.
			frgenesis, err := frdb.Ancient(freezerHashTable, 0)
			if err != nil {
				return nil, fmt.Errorf("failed to retrieve genesis from ancient %v", err)
			} else if !bytes.Equal(kvgenesis, frgenesis) {
				return nil, fmt.Errorf("genesis mismatch: %#x (leveldb) != %#x (ancients)", kvgenesis, frgenesis)
			}
			// Key-value store and freezer belong to the same network. Ensure that they
			// are contiguous, otherwise we might end up with a non-functional freezer.
			if kvhash, _ := db.Get(headerHashKey(frozen)); len(kvhash) == 0 {
				// Subsequent header after the freezer limit is missing from the database.
				// Reject startup if the database has a more recent head.
				if *ReadHeaderNumber(db, ReadHeadHeaderHash(db)) > frozen-1 {
					return nil, fmt.Errorf("gap (#%d) in the chain between ancients and leveldb", frozen)
				}
				// Database contains only older data than the freezer, this happens if the
				// state was wiped and reinited from an existing freezer.
			}
			// Otherwise, key-value store continues where the freezer left off, all is fine.
			// We might have duplicate blocks (crash after freezer write but before key-value
			// store deletion), but that's fine.
		} else {
			// If the freezer is empty, ensure nothing was moved yet from the key-value
			// store, otherwise we'll end up missing data. We check block #1 to decide
			// if we froze anything previously or not, but do take care of databases with
			// only the genesis block.
			if ReadHeadHeaderHash(db) != common.BytesToHash(kvgenesis) {
				// Key-value store contains more data than the genesis block, make sure we
				// didn't freeze anything yet.
				if kvblob, _ := db.Get(headerHashKey(1)); len(kvblob) == 0 {
					return nil, errors.New("ancient chain segments already extracted, please set --datadir.ancient to the correct path")
				}
				// Block #1 is still in the database, we're allowed to init a new freezer
			}
			// Otherwise, the head header is still the genesis, we're allowed to init a new
			// freezer.
		}
	}
	// Freezer is consistent with the key-value database, permit combining the two
	if !frdb.readonly {
		frdb.wg.Add(1)
		go func() {
			frdb.freeze(db)
			frdb.wg.Done()
		}()
	}
	return &freezerdb{
		KeyValueStore: db,
		AncientStore:  frdb,
	}, nil
}

// NewMemoryDatabase creates an ephemeral in-memory key-value database without a
// freezer moving immutable chain segments into cold storage.
func NewMemoryDatabase() ethdb.Database {
	return NewDatabase(memorydb.New())
}

// NewMemoryDatabaseWithCap creates an ephemeral in-memory key-value database
// with an initial starting capacity, but without a freezer moving immutable
// chain segments into cold storage.
func NewMemoryDatabaseWithCap(size int) ethdb.Database {
	return NewDatabase(memorydb.NewWithCap(size))
}

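// For example (hypothetical test code): these constructors are convenient
// wherever a throwaway ethdb.Database is needed, such as in unit tests.
//
//	db := NewMemoryDatabase()               // empty in-memory database
//	sized := NewMemoryDatabaseWithCap(1024) // preallocates room for ~1024 entries
//	defer db.Close()
//	defer sized.Close()
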
// NewLevelDBDatabase creates a persistent key-value database without a freezer
// moving immutable chain segments into cold storage.
func NewLevelDBDatabase(file string, cache int, handles int, namespace string, readonly bool) (ethdb.Database, error) {
	db, err := leveldb.New(file, cache, handles, namespace, readonly)
	if err != nil {
		return nil, err
	}
	return NewDatabase(db), nil
}

// NewLevelDBDatabaseWithFreezer creates a persistent key-value database with a
// freezer moving immutable chain segments into cold storage.
func NewLevelDBDatabaseWithFreezer(file string, cache int, handles int, freezer string, namespace string, readonly bool) (ethdb.Database, error) {
	kvdb, err := leveldb.New(file, cache, handles, namespace, readonly)
	if err != nil {
		return nil, err
	}
	frdb, err := NewDatabaseWithFreezer(kvdb, freezer, namespace, readonly)
	if err != nil {
		kvdb.Close()
		return nil, err
	}
	return frdb, nil
}

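// A minimal wiring sketch (hypothetical paths and sizes; loosely mirrors how
// node code opens its chain database):
//
//	db, err := NewLevelDBDatabaseWithFreezer(
//		"/data/geth/chaindata",         // leveldb directory (example path)
//		512,                            // leveldb cache size, in megabytes
//		1024,                           // number of file handles
//		"/data/geth/chaindata/ancient", // freezer directory (example path)
//		"eth/db/chaindata/",            // metrics namespace
//		false,                          // open read-write
//	)
//	if err != nil {
//		log.Crit("Failed to open chain database", "err", err)
//	}
//	defer db.Close()
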
type counter uint64

func (c counter) String() string {
	return fmt.Sprintf("%d", c)
}

func (c counter) Percentage(current uint64) string {
	return fmt.Sprintf("%d", current*100/uint64(c))
}

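// Note that Percentage uses integer division, so the result is truncated: for
// instance, counter(200).Percentage(50) yields "25", while counter(3).Percentage(1)
// yields "33" rather than "33.3" (and a zero counter would panic with a division
// by zero).
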
// stat stores the aggregated size and item count for one category of data.
type stat struct {
	size  common.StorageSize
	count counter
}

// Add adds the given size to the stat and increments the item counter by one.
func (s *stat) Add(size common.StorageSize) {
	s.size += size
	s.count++
}

func (s *stat) Size() string {
	return s.size.String()
}

func (s *stat) Count() string {
	return s.count.String()
}

// InspectDatabase traverses the entire database and checks the size
// of all different categories of data.
func InspectDatabase(db ethdb.Database, keyPrefix, keyStart []byte) error {
	it := db.NewIterator(keyPrefix, keyStart)
	defer it.Release()

	var (
		count  int64
		start  = time.Now()
		logged = time.Now()

		// Key-value store statistics
		headers         stat
		bodies          stat
		receipts        stat
		tds             stat
		numHashPairings stat
		hashNumPairings stat
		tries           stat
		codes           stat
		txLookups       stat
		accountSnaps    stat
		storageSnaps    stat
		preimages       stat
		bloomBits       stat
		beaconHeaders   stat
		cliqueSnaps     stat

		// Ancient store statistics
		ancientHeadersSize  common.StorageSize
		ancientBodiesSize   common.StorageSize
		ancientReceiptsSize common.StorageSize
		ancientTdsSize      common.StorageSize
		ancientHashesSize   common.StorageSize

		// Light client (LES) statistics
		chtTrieNodes   stat
		bloomTrieNodes stat

		// Meta- and unaccounted data
		metadata    stat
		unaccounted stat

		// Totals
		total common.StorageSize
	)
	// Inspect key-value database first.
	for it.Next() {
		var (
			key  = it.Key()
			size = common.StorageSize(len(key) + len(it.Value()))
		)
		total += size
		switch {
		case bytes.HasPrefix(key, headerPrefix) && len(key) == (len(headerPrefix)+8+common.HashLength):
			headers.Add(size)
		case bytes.HasPrefix(key, blockBodyPrefix) && len(key) == (len(blockBodyPrefix)+8+common.HashLength):
			bodies.Add(size)
		case bytes.HasPrefix(key, blockReceiptsPrefix) && len(key) == (len(blockReceiptsPrefix)+8+common.HashLength):
			receipts.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerTDSuffix):
			tds.Add(size)
		case bytes.HasPrefix(key, headerPrefix) && bytes.HasSuffix(key, headerHashSuffix):
			numHashPairings.Add(size)
		case bytes.HasPrefix(key, headerNumberPrefix) && len(key) == (len(headerNumberPrefix)+common.HashLength):
			hashNumPairings.Add(size)
		case len(key) == common.HashLength:
			tries.Add(size)
		case bytes.HasPrefix(key, CodePrefix) && len(key) == len(CodePrefix)+common.HashLength:
			codes.Add(size)
		case bytes.HasPrefix(key, txLookupPrefix) && len(key) == (len(txLookupPrefix)+common.HashLength):
			txLookups.Add(size)
		case bytes.HasPrefix(key, SnapshotAccountPrefix) && len(key) == (len(SnapshotAccountPrefix)+common.HashLength):
			accountSnaps.Add(size)
		case bytes.HasPrefix(key, SnapshotStoragePrefix) && len(key) == (len(SnapshotStoragePrefix)+2*common.HashLength):
			storageSnaps.Add(size)
		case bytes.HasPrefix(key, PreimagePrefix) && len(key) == (len(PreimagePrefix)+common.HashLength):
			preimages.Add(size)
		case bytes.HasPrefix(key, configPrefix) && len(key) == (len(configPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, genesisPrefix) && len(key) == (len(genesisPrefix)+common.HashLength):
			metadata.Add(size)
		case bytes.HasPrefix(key, bloomBitsPrefix) && len(key) == (len(bloomBitsPrefix)+10+common.HashLength):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, BloomBitsIndexPrefix):
			bloomBits.Add(size)
		case bytes.HasPrefix(key, skeletonHeaderPrefix) && len(key) == (len(skeletonHeaderPrefix)+8):
			beaconHeaders.Add(size)
		case bytes.HasPrefix(key, []byte("clique-")) && len(key) == 7+common.HashLength:
			cliqueSnaps.Add(size)
		case bytes.HasPrefix(key, []byte("cht-")) ||
			bytes.HasPrefix(key, []byte("chtIndexV2-")) ||
			bytes.HasPrefix(key, []byte("chtRootV2-")): // Canonical hash trie
			chtTrieNodes.Add(size)
		case bytes.HasPrefix(key, []byte("blt-")) ||
			bytes.HasPrefix(key, []byte("bltIndex-")) ||
			bytes.HasPrefix(key, []byte("bltRoot-")): // Bloomtrie sub
			bloomTrieNodes.Add(size)
		default:
			var accounted bool
			for _, meta := range [][]byte{
				databaseVersionKey, headHeaderKey, headBlockKey, headFastBlockKey, headFinalizedBlockKey,
				lastPivotKey, fastTrieProgressKey, snapshotDisabledKey, SnapshotRootKey, snapshotJournalKey,
				snapshotGeneratorKey, snapshotRecoveryKey, txIndexTailKey, fastTxLookupLimitKey,
				uncleanShutdownKey, badBlockKey, transitionStatusKey, skeletonSyncStatusKey,
			} {
				if bytes.Equal(key, meta) {
					metadata.Add(size)
					accounted = true
					break
				}
			}
			if !accounted {
				unaccounted.Add(size)
			}
		}
		count++
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Inspecting database", "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
	}
	// Then inspect the append-only (ancient) file store.
	ancientSizes := []*common.StorageSize{&ancientHeadersSize, &ancientBodiesSize, &ancientReceiptsSize, &ancientHashesSize, &ancientTdsSize}
	for i, category := range []string{freezerHeaderTable, freezerBodiesTable, freezerReceiptTable, freezerHashTable, freezerDifficultyTable} {
		if size, err := db.AncientSize(category); err == nil {
			*ancientSizes[i] += common.StorageSize(size)
			total += common.StorageSize(size)
		}
	}
	// Get the number of ancient rows inside the freezer.
	ancients := counter(0)
	if count, err := db.Ancients(); err == nil {
		ancients = counter(count)
	}
	// Display the database statistics.
	stats := [][]string{
		{"Key-Value store", "Headers", headers.Size(), headers.Count()},
		{"Key-Value store", "Bodies", bodies.Size(), bodies.Count()},
		{"Key-Value store", "Receipt lists", receipts.Size(), receipts.Count()},
		{"Key-Value store", "Difficulties", tds.Size(), tds.Count()},
		{"Key-Value store", "Block number->hash", numHashPairings.Size(), numHashPairings.Count()},
		{"Key-Value store", "Block hash->number", hashNumPairings.Size(), hashNumPairings.Count()},
		{"Key-Value store", "Transaction index", txLookups.Size(), txLookups.Count()},
		{"Key-Value store", "Bloombit index", bloomBits.Size(), bloomBits.Count()},
		{"Key-Value store", "Contract codes", codes.Size(), codes.Count()},
		{"Key-Value store", "Trie nodes", tries.Size(), tries.Count()},
		{"Key-Value store", "Trie preimages", preimages.Size(), preimages.Count()},
		{"Key-Value store", "Account snapshot", accountSnaps.Size(), accountSnaps.Count()},
		{"Key-Value store", "Storage snapshot", storageSnaps.Size(), storageSnaps.Count()},
		{"Key-Value store", "Beacon sync headers", beaconHeaders.Size(), beaconHeaders.Count()},
		{"Key-Value store", "Clique snapshots", cliqueSnaps.Size(), cliqueSnaps.Count()},
		{"Key-Value store", "Singleton metadata", metadata.Size(), metadata.Count()},
		{"Ancient store", "Headers", ancientHeadersSize.String(), ancients.String()},
		{"Ancient store", "Bodies", ancientBodiesSize.String(), ancients.String()},
		{"Ancient store", "Receipt lists", ancientReceiptsSize.String(), ancients.String()},
		{"Ancient store", "Difficulties", ancientTdsSize.String(), ancients.String()},
		{"Ancient store", "Block number->hash", ancientHashesSize.String(), ancients.String()},
		{"Light client", "CHT trie nodes", chtTrieNodes.Size(), chtTrieNodes.Count()},
		{"Light client", "Bloom trie nodes", bloomTrieNodes.Size(), bloomTrieNodes.Count()},
	}
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Database", "Category", "Size", "Items"})
	table.SetFooter([]string{"", "Total", total.String(), " "})
	table.AppendBulk(stats)
	table.Render()

	if unaccounted.size > 0 {
		log.Error("Database contains unaccounted data", "size", unaccounted.size, "count", unaccounted.count)
	}

	return nil
}
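
// A rough usage sketch (hypothetical caller code): passing nil for both the
// prefix and start markers scans the whole key space, while non-nil values
// narrow the iteration to a key range.
//
//	// assuming db is an ethdb.Database opened via one of the constructors above
//	if err := InspectDatabase(db, nil, nil); err != nil {
//		log.Error("Database inspection failed", "err", err)
//	}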