github.com/ethxdao/go-ethereum@v0.0.0-20221218102228-5ae34a9cc189/cmd/geth/dbcmd.go (about)

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
import (
	"bytes"
	"fmt"
	"os"
	"os/signal"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/ethxdao/go-ethereum/cmd/utils"
	"github.com/ethxdao/go-ethereum/common"
	"github.com/ethxdao/go-ethereum/common/hexutil"
	"github.com/ethxdao/go-ethereum/console/prompt"
	"github.com/ethxdao/go-ethereum/core/rawdb"
	"github.com/ethxdao/go-ethereum/core/state/snapshot"
	"github.com/ethxdao/go-ethereum/core/types"
	"github.com/ethxdao/go-ethereum/crypto"
	"github.com/ethxdao/go-ethereum/ethdb"
	"github.com/ethxdao/go-ethereum/internal/flags"
	"github.com/ethxdao/go-ethereum/log"
	"github.com/ethxdao/go-ethereum/trie"
	"github.com/olekukonko/tablewriter"
	"github.com/urfave/cli/v2"
)
    44  
var (
	// removedbCommand wipes the blockchain, ancient and light-client
	// databases after asking for interactive confirmation on each one.
	removedbCommand = &cli.Command{
		Action:    removeDB,
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: "",
		Flags:     utils.DatabasePathFlags,
		Description: `
Remove blockchain and state databases`,
	}
	// dbCommand groups all low-level database operations under "geth db".
	dbCommand = &cli.Command{
		Name:      "db",
		Usage:     "Low level database operations",
		ArgsUsage: "",
		Subcommands: []*cli.Command{
			dbInspectCmd,
			dbStatCmd,
			dbCompactCmd,
			dbGetCmd,
			dbDeleteCmd,
			dbPutCmd,
			dbGetSlotsCmd,
			dbDumpFreezerIndex,
			dbImportCmd,
			dbExportCmd,
			dbMetadataCmd,
			dbMigrateFreezerCmd,
			dbCheckStateContentCmd,
		},
	}
	// dbInspectCmd iterates the whole database and reports the storage
	// footprint of each data category.
	dbInspectCmd = &cli.Command{
		Action:    inspect,
		Name:      "inspect",
		ArgsUsage: "<prefix> <start>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Usage:       "Inspect the storage size for each type of data in the database",
		Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
	}
	// dbCheckStateContentCmd verifies that every 32-byte key maps to a value
	// whose keccak256 hash equals the key (trie-node integrity check).
	dbCheckStateContentCmd = &cli.Command{
		Action:    checkStateContent,
		Name:      "check-state-content",
		ArgsUsage: "<start (optional)>",
		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
		Usage:     "Verify that state data is cryptographically correct",
		Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes.
For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
a data corruption.`,
	}
	// dbStatCmd prints the backing leveldb's internal statistics.
	dbStatCmd = &cli.Command{
		Action: dbStats,
		Name:   "stats",
		Usage:  "Print leveldb statistics",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
	}
	// dbCompactCmd triggers a full leveldb compaction (potentially slow).
	dbCompactCmd = &cli.Command{
		Action: dbCompact,
		Name:   "compact",
		Usage:  "Compact leveldb database. WARNING: May take a very long time",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
			utils.CacheFlag,
			utils.CacheDatabaseFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: `This command performs a database compaction. 
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution'!`,
	}
	// dbGetCmd looks up and prints a single key's value.
	dbGetCmd = &cli.Command{
		Action:    dbGet,
		Name:      "get",
		Usage:     "Show the value of a database key",
		ArgsUsage: "<hex-encoded key>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "This command looks up the specified database key from the database.",
	}
	// dbDeleteCmd removes a single key (low-level, potentially destructive).
	dbDeleteCmd = &cli.Command{
		Action:    dbDelete,
		Name:      "delete",
		Usage:     "Delete a database key (WARNING: may corrupt your database)",
		ArgsUsage: "<hex-encoded key>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: `This command deletes the specified database key from the database. 
WARNING: This is a low-level operation which may cause database corruption!`,
	}
	// dbPutCmd writes a single key/value pair (low-level, potentially destructive).
	dbPutCmd = &cli.Command{
		Action:    dbPut,
		Name:      "put",
		Usage:     "Set the value of a database key (WARNING: may corrupt your database)",
		ArgsUsage: "<hex-encoded key> <hex-encoded value>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: `This command sets a given database key to the given value. 
WARNING: This is a low-level operation which may cause database corruption!`,
	}
	// dbGetSlotsCmd dumps the key/value slots of one storage trie.
	dbGetSlotsCmd = &cli.Command{
		Action:    dbDumpTrie,
		Name:      "dumptrie",
		Usage:     "Show the storage key/values of a given storage trie",
		ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "This command looks up the specified database key from the database.",
	}
	// dbDumpFreezerIndex dumps the index entries of one freezer table.
	dbDumpFreezerIndex = &cli.Command{
		Action:    freezerInspect,
		Name:      "freezer-index",
		Usage:     "Dump out the index of a specific freezer table",
		ArgsUsage: "<freezer-type> <table-type> <start (int)> <end (int)>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "This command displays information about the freezer index.",
	}
	// dbImportCmd re-imports data previously produced by the export command.
	dbImportCmd = &cli.Command{
		Action:    importLDBdata,
		Name:      "import",
		Usage:     "Imports leveldb-data from an exported RLP dump.",
		ArgsUsage: "<dumpfile> <start (optional)",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "The import command imports the specific chain data from an RLP encoded stream.",
	}
	// dbExportCmd exports a supported data category as an RLP dump.
	dbExportCmd = &cli.Command{
		Action:    exportChaindata,
		Name:      "export",
		Usage:     "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
		ArgsUsage: "<type> <dumpfile>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
	}
	// dbMetadataCmd prints a summary table of chain status markers.
	dbMetadataCmd = &cli.Command{
		Action: showMetaData,
		Name:   "metadata",
		Usage:  "Shows metadata about the chain status.",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "Shows metadata about the chain status.",
	}
	// dbMigrateFreezerCmd upgrades legacy-format freezer receipts in place.
	dbMigrateFreezerCmd = &cli.Command{
		Action:    freezerMigrate,
		Name:      "freezer-migrate",
		Usage:     "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
		ArgsUsage: "",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
WARNING: please back-up the receipt files in your ancients before running this command.`,
	}
)
   209  
   210  func removeDB(ctx *cli.Context) error {
   211  	stack, config := makeConfigNode(ctx)
   212  
   213  	// Remove the full node state database
   214  	path := stack.ResolvePath("chaindata")
   215  	if common.FileExist(path) {
   216  		confirmAndRemoveDB(path, "full node state database")
   217  	} else {
   218  		log.Info("Full node state database missing", "path", path)
   219  	}
   220  	// Remove the full node ancient database
   221  	path = config.Eth.DatabaseFreezer
   222  	switch {
   223  	case path == "":
   224  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   225  	case !filepath.IsAbs(path):
   226  		path = config.Node.ResolvePath(path)
   227  	}
   228  	if common.FileExist(path) {
   229  		confirmAndRemoveDB(path, "full node ancient database")
   230  	} else {
   231  		log.Info("Full node ancient database missing", "path", path)
   232  	}
   233  	// Remove the light node database
   234  	path = stack.ResolvePath("lightchaindata")
   235  	if common.FileExist(path) {
   236  		confirmAndRemoveDB(path, "light node database")
   237  	} else {
   238  		log.Info("Light node database missing", "path", path)
   239  	}
   240  	return nil
   241  }
   242  
   243  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   244  // folder if accepted.
   245  func confirmAndRemoveDB(database string, kind string) {
   246  	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   247  	switch {
   248  	case err != nil:
   249  		utils.Fatalf("%v", err)
   250  	case !confirm:
   251  		log.Info("Database deletion skipped", "path", database)
   252  	default:
   253  		start := time.Now()
   254  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   255  			// If we're at the top level folder, recurse into
   256  			if path == database {
   257  				return nil
   258  			}
   259  			// Delete all the files, but not subfolders
   260  			if !info.IsDir() {
   261  				os.Remove(path)
   262  				return nil
   263  			}
   264  			return filepath.SkipDir
   265  		})
   266  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   267  	}
   268  }
   269  
   270  func inspect(ctx *cli.Context) error {
   271  	var (
   272  		prefix []byte
   273  		start  []byte
   274  	)
   275  	if ctx.NArg() > 2 {
   276  		return fmt.Errorf("max 2 arguments: %v", ctx.Command.ArgsUsage)
   277  	}
   278  	if ctx.NArg() >= 1 {
   279  		if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
   280  			return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
   281  		} else {
   282  			prefix = d
   283  		}
   284  	}
   285  	if ctx.NArg() >= 2 {
   286  		if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
   287  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   288  		} else {
   289  			start = d
   290  		}
   291  	}
   292  	stack, _ := makeConfigNode(ctx)
   293  	defer stack.Close()
   294  
   295  	db := utils.MakeChainDatabase(ctx, stack, true)
   296  	defer db.Close()
   297  
   298  	return rawdb.InspectDatabase(db, prefix, start)
   299  }
   300  
   301  func checkStateContent(ctx *cli.Context) error {
   302  	var (
   303  		prefix []byte
   304  		start  []byte
   305  	)
   306  	if ctx.NArg() > 1 {
   307  		return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
   308  	}
   309  	if ctx.NArg() > 0 {
   310  		if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
   311  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   312  		} else {
   313  			start = d
   314  		}
   315  	}
   316  	stack, _ := makeConfigNode(ctx)
   317  	defer stack.Close()
   318  
   319  	db := utils.MakeChainDatabase(ctx, stack, true)
   320  	defer db.Close()
   321  	var (
   322  		it        = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
   323  		hasher    = crypto.NewKeccakState()
   324  		got       = make([]byte, 32)
   325  		errs      int
   326  		count     int
   327  		startTime = time.Now()
   328  		lastLog   = time.Now()
   329  	)
   330  	for it.Next() {
   331  		count++
   332  		k := it.Key()
   333  		v := it.Value()
   334  		hasher.Reset()
   335  		hasher.Write(v)
   336  		hasher.Read(got)
   337  		if !bytes.Equal(k, got) {
   338  			errs++
   339  			fmt.Printf("Error at %#x\n", k)
   340  			fmt.Printf("  Hash:  %#x\n", got)
   341  			fmt.Printf("  Data:  %#x\n", v)
   342  		}
   343  		if time.Since(lastLog) > 8*time.Second {
   344  			log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime)))
   345  			lastLog = time.Now()
   346  		}
   347  	}
   348  	if err := it.Error(); err != nil {
   349  		return err
   350  	}
   351  	log.Info("Iterated the state content", "errors", errs, "items", count)
   352  	return nil
   353  }
   354  
   355  func showLeveldbStats(db ethdb.KeyValueStater) {
   356  	if stats, err := db.Stat("leveldb.stats"); err != nil {
   357  		log.Warn("Failed to read database stats", "error", err)
   358  	} else {
   359  		fmt.Println(stats)
   360  	}
   361  	if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
   362  		log.Warn("Failed to read database iostats", "error", err)
   363  	} else {
   364  		fmt.Println(ioStats)
   365  	}
   366  }
   367  
   368  func dbStats(ctx *cli.Context) error {
   369  	stack, _ := makeConfigNode(ctx)
   370  	defer stack.Close()
   371  
   372  	db := utils.MakeChainDatabase(ctx, stack, true)
   373  	defer db.Close()
   374  
   375  	showLeveldbStats(db)
   376  	return nil
   377  }
   378  
   379  func dbCompact(ctx *cli.Context) error {
   380  	stack, _ := makeConfigNode(ctx)
   381  	defer stack.Close()
   382  
   383  	db := utils.MakeChainDatabase(ctx, stack, false)
   384  	defer db.Close()
   385  
   386  	log.Info("Stats before compaction")
   387  	showLeveldbStats(db)
   388  
   389  	log.Info("Triggering compaction")
   390  	if err := db.Compact(nil, nil); err != nil {
   391  		log.Info("Compact err", "error", err)
   392  		return err
   393  	}
   394  	log.Info("Stats after compaction")
   395  	showLeveldbStats(db)
   396  	return nil
   397  }
   398  
   399  // dbGet shows the value of a given database key
   400  func dbGet(ctx *cli.Context) error {
   401  	if ctx.NArg() != 1 {
   402  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   403  	}
   404  	stack, _ := makeConfigNode(ctx)
   405  	defer stack.Close()
   406  
   407  	db := utils.MakeChainDatabase(ctx, stack, true)
   408  	defer db.Close()
   409  
   410  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   411  	if err != nil {
   412  		log.Info("Could not decode the key", "error", err)
   413  		return err
   414  	}
   415  
   416  	data, err := db.Get(key)
   417  	if err != nil {
   418  		log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
   419  		return err
   420  	}
   421  	fmt.Printf("key %#x: %#x\n", key, data)
   422  	return nil
   423  }
   424  
   425  // dbDelete deletes a key from the database
   426  func dbDelete(ctx *cli.Context) error {
   427  	if ctx.NArg() != 1 {
   428  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   429  	}
   430  	stack, _ := makeConfigNode(ctx)
   431  	defer stack.Close()
   432  
   433  	db := utils.MakeChainDatabase(ctx, stack, false)
   434  	defer db.Close()
   435  
   436  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   437  	if err != nil {
   438  		log.Info("Could not decode the key", "error", err)
   439  		return err
   440  	}
   441  	data, err := db.Get(key)
   442  	if err == nil {
   443  		fmt.Printf("Previous value: %#x\n", data)
   444  	}
   445  	if err = db.Delete(key); err != nil {
   446  		log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
   447  		return err
   448  	}
   449  	return nil
   450  }
   451  
   452  // dbPut overwrite a value in the database
   453  func dbPut(ctx *cli.Context) error {
   454  	if ctx.NArg() != 2 {
   455  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   456  	}
   457  	stack, _ := makeConfigNode(ctx)
   458  	defer stack.Close()
   459  
   460  	db := utils.MakeChainDatabase(ctx, stack, false)
   461  	defer db.Close()
   462  
   463  	var (
   464  		key   []byte
   465  		value []byte
   466  		data  []byte
   467  		err   error
   468  	)
   469  	key, err = common.ParseHexOrString(ctx.Args().Get(0))
   470  	if err != nil {
   471  		log.Info("Could not decode the key", "error", err)
   472  		return err
   473  	}
   474  	value, err = hexutil.Decode(ctx.Args().Get(1))
   475  	if err != nil {
   476  		log.Info("Could not decode the value", "error", err)
   477  		return err
   478  	}
   479  	data, err = db.Get(key)
   480  	if err == nil {
   481  		fmt.Printf("Previous value: %#x\n", data)
   482  	}
   483  	return db.Put(key, value)
   484  }
   485  
// dbDumpTrie shows the key-value slots of a given storage trie
//
// Arguments: <storage trie root> [start key (hex)] [max elements (int)].
// Iteration begins at 'start' and stops after 'max' values when a positive
// limit is given.
func dbDumpTrie(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()
	var (
		root  []byte
		start []byte
		max   = int64(-1) // -1 (and 0) mean no element limit
		err   error
	)
	if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
		log.Info("Could not decode the root", "error", err)
		return err
	}
	stRoot := common.BytesToHash(root)
	// Optional second argument: raw trie key to seek to before iterating.
	if ctx.NArg() >= 2 {
		if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
			log.Info("Could not decode the seek position", "error", err)
			return err
		}
	}
	// Optional third argument: cap on the number of printed slots.
	if ctx.NArg() >= 3 {
		if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
			log.Info("Could not decode the max count", "error", err)
			return err
		}
	}
	// Open the trie standalone with an empty owner hash.
	theTrie, err := trie.New(common.Hash{}, stRoot, trie.NewDatabase(db))
	if err != nil {
		return err
	}
	var count int64
	it := trie.NewIterator(theTrie.NodeIterator(start))
	for it.Next() {
		if max > 0 && count == max {
			fmt.Printf("Exiting after %d values\n", count)
			break
		}
		fmt.Printf("  %d. key %#x: %#x\n", count, it.Key, it.Value)
		count++
	}
	return it.Err
}
   535  
   536  func freezerInspect(ctx *cli.Context) error {
   537  	if ctx.NArg() < 4 {
   538  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   539  	}
   540  	var (
   541  		freezer = ctx.Args().Get(0)
   542  		table   = ctx.Args().Get(1)
   543  	)
   544  	start, err := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   545  	if err != nil {
   546  		log.Info("Could not read start-param", "err", err)
   547  		return err
   548  	}
   549  	end, err := strconv.ParseInt(ctx.Args().Get(3), 10, 64)
   550  	if err != nil {
   551  		log.Info("Could not read count param", "err", err)
   552  		return err
   553  	}
   554  	stack, _ := makeConfigNode(ctx)
   555  	defer stack.Close()
   556  
   557  	db := utils.MakeChainDatabase(ctx, stack, true)
   558  	defer db.Close()
   559  
   560  	ancient, err := db.AncientDatadir()
   561  	if err != nil {
   562  		log.Info("Failed to retrieve ancient root", "err", err)
   563  		return err
   564  	}
   565  	return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
   566  }
   567  
// importLDBdata imports an RLP-encoded key/value dump (as produced by the
// "db export" command) into the chain database. An optional second argument
// gives the batch index to resume from.
func importLDBdata(ctx *cli.Context) error {
	start := 0
	switch ctx.NArg() {
	case 1:
		break
	case 2:
		s, err := strconv.Atoi(ctx.Args().Get(1))
		if err != nil {
			return fmt.Errorf("second arg must be an integer: %v", err)
		}
		start = s
	default:
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	var (
		fName     = ctx.Args().Get(0)
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	// Translate SIGINT/SIGTERM into a close of the stop channel so the
	// importer winds down at the next batch boundary. The deferred
	// close(interrupt) above also unblocks this goroutine on normal exit
	// (the receive then yields ok == false, skipping the log line).
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during ldb import, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, false)
	return utils.ImportLDBData(db, fName, int64(start), stop)
}
   601  
// preimageIterator wraps a raw database iterator and yields only well-formed
// preimage entries (PreimagePrefix followed by a 32-byte hash) as batch-add
// operations for the export stream.
type preimageIterator struct {
	iter ethdb.Iterator
}

// Next returns the next preimage entry as (op, key, value, ok). Keys that do
// not match the expected preimage layout are skipped; ok is false once the
// underlying iterator is exhausted.
func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
	for iter.iter.Next() {
		key := iter.iter.Key()
		if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
			return utils.OpBatchAdd, key, iter.iter.Value(), true
		}
	}
	return 0, nil, nil, false
}

// Release frees the resources held by the underlying iterator.
func (iter *preimageIterator) Release() {
	iter.iter.Release()
}
   619  
// snapshotIterator streams the snapshot data for export: first a delete
// operation for the snapshot root key (so the import starts from a clean
// marker), then all well-formed account entries, then all well-formed
// storage entries.
type snapshotIterator struct {
	init    bool // set after the root-key delete op has been emitted
	account ethdb.Iterator
	storage ethdb.Iterator
}

// Next returns the next snapshot operation as (op, key, value, ok). Keys
// that do not match the expected account/storage layouts are skipped; ok is
// false once both underlying iterators are exhausted.
func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
	if !iter.init {
		iter.init = true
		return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
	}
	// Account entries: prefix + 32-byte account hash.
	for iter.account.Next() {
		key := iter.account.Key()
		if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
			return utils.OpBatchAdd, key, iter.account.Value(), true
		}
	}
	// Storage entries: prefix + account hash + storage slot hash.
	for iter.storage.Next() {
		key := iter.storage.Key()
		if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
			return utils.OpBatchAdd, key, iter.storage.Value(), true
		}
	}
	return 0, nil, nil, false
}

// Release frees the resources held by both underlying iterators.
func (iter *snapshotIterator) Release() {
	iter.account.Release()
	iter.storage.Release()
}
   650  
// chainExporters defines the export scheme for all exportable chain data.
// Each entry maps a user-facing type name to a constructor producing the
// iterator that streams that category out of the database.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
	// Trie-key preimages.
	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
		return &preimageIterator{iter: iter}
	},
	// State snapshot account and storage entries.
	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
		return &snapshotIterator{account: account, storage: storage}
	},
}
   663  
   664  func exportChaindata(ctx *cli.Context) error {
   665  	if ctx.NArg() < 2 {
   666  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   667  	}
   668  	// Parse the required chain data type, make sure it's supported.
   669  	kind := ctx.Args().Get(0)
   670  	kind = strings.ToLower(strings.Trim(kind, " "))
   671  	exporter, ok := chainExporters[kind]
   672  	if !ok {
   673  		var kinds []string
   674  		for kind := range chainExporters {
   675  			kinds = append(kinds, kind)
   676  		}
   677  		return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
   678  	}
   679  	var (
   680  		stack, _  = makeConfigNode(ctx)
   681  		interrupt = make(chan os.Signal, 1)
   682  		stop      = make(chan struct{})
   683  	)
   684  	defer stack.Close()
   685  	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
   686  	defer signal.Stop(interrupt)
   687  	defer close(interrupt)
   688  	go func() {
   689  		if _, ok := <-interrupt; ok {
   690  			log.Info("Interrupted during db export, stopping at next batch")
   691  		}
   692  		close(stop)
   693  	}()
   694  	db := utils.MakeChainDatabase(ctx, stack, true)
   695  	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
   696  }
   697  
   698  func showMetaData(ctx *cli.Context) error {
   699  	stack, _ := makeConfigNode(ctx)
   700  	defer stack.Close()
   701  	db := utils.MakeChainDatabase(ctx, stack, true)
   702  	ancients, err := db.Ancients()
   703  	if err != nil {
   704  		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
   705  	}
   706  	pp := func(val *uint64) string {
   707  		if val == nil {
   708  			return "<nil>"
   709  		}
   710  		return fmt.Sprintf("%d (%#x)", *val, *val)
   711  	}
   712  	data := [][]string{
   713  		{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
   714  		{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
   715  		{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
   716  		{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
   717  	if b := rawdb.ReadHeadBlock(db); b != nil {
   718  		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
   719  		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
   720  		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (%#x)", b.Number(), b.Number())})
   721  	}
   722  	if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
   723  		data = append(data, []string{"SkeletonSyncStatus", string(b)})
   724  	}
   725  	if h := rawdb.ReadHeadHeader(db); h != nil {
   726  		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
   727  		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
   728  		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)})
   729  	}
   730  	data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
   731  		{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
   732  		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
   733  		{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
   734  		{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
   735  		{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
   736  		{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
   737  		{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
   738  		{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
   739  		{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
   740  	}...)
   741  	table := tablewriter.NewWriter(os.Stdout)
   742  	table.SetHeader([]string{"Field", "Value"})
   743  	table.AppendBulk(data)
   744  	table.Render()
   745  	return nil
   746  }
   747  
// freezerMigrate converts any legacy-format receipts stored in the freezer
// to the current storage format. It is a no-op when the freezer is empty or
// no legacy receipts are found.
func freezerMigrate(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	// Check first block for legacy receipt format
	numAncients, err := db.Ancients()
	if err != nil {
		return err
	}
	if numAncients < 1 {
		log.Info("No receipts in freezer to migrate")
		return nil
	}

	isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
	if err != nil {
		return err
	}
	if !isFirstLegacy {
		log.Info("No legacy receipts to migrate")
		return nil
	}

	log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
	start := time.Now()
	if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
		return err
	}
	// Close explicitly so any close error is surfaced to the caller.
	// NOTE(review): the deferred Close above then runs on an already-closed
	// database — confirm double-close is harmless for this backend.
	if err := db.Close(); err != nil {
		return err
	}
	log.Info("Migration finished", "duration", time.Since(start))

	return nil
}
   786  
// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
// non-empty receipt and checks its format. The index of this first non-empty element is
// the second return parameter.
func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
	// Check first block for legacy receipt format
	numAncients, err := db.Ancients()
	if err != nil {
		return false, 0, err
	}
	if numAncients < 1 {
		return false, 0, nil
	}
	if firstIdx >= numAncients {
		return false, firstIdx, nil
	}
	var (
		legacy       bool
		blob         []byte
		emptyRLPList = []byte{192} // RLP encoding of an empty list ("no receipts")
	)
	// Find first block with non-empty receipt, only if
	// the index is not already provided.
	if firstIdx == 0 {
		for i := uint64(0); i < numAncients; i++ {
			blob, err = db.Ancient("receipts", i)
			if err != nil {
				return false, 0, err
			}
			// Skip entirely missing entries.
			if len(blob) == 0 {
				continue
			}
			// Skip empty-list entries; stop at the first real receipt blob.
			if !bytes.Equal(blob, emptyRLPList) {
				firstIdx = i
				break
			}
		}
	}
	// Is first non-empty receipt legacy?
	first, err := db.Ancient("receipts", firstIdx)
	if err != nil {
		return false, 0, err
	}
	legacy, err = types.IsLegacyStoredReceipts(first)
	return legacy, firstIdx, err
}