github.com/hardtosaygoodbye/go-ethereum@v1.10.16-0.20220122011429-97003b9e6c15/cmd/geth/dbcmd.go

// Copyright 2020 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"bytes"
	"errors"
	"fmt"
	"os"
	"os/signal"
	"path/filepath"
	"sort"
	"strconv"
	"strings"
	"syscall"
	"time"

	"github.com/hardtosaygoodbye/go-ethereum/cmd/utils"
	"github.com/hardtosaygoodbye/go-ethereum/common"
	"github.com/hardtosaygoodbye/go-ethereum/common/hexutil"
	"github.com/hardtosaygoodbye/go-ethereum/console/prompt"
	"github.com/hardtosaygoodbye/go-ethereum/core/rawdb"
	"github.com/hardtosaygoodbye/go-ethereum/core/state/snapshot"
	"github.com/hardtosaygoodbye/go-ethereum/ethdb"
	"github.com/hardtosaygoodbye/go-ethereum/log"
	"github.com/hardtosaygoodbye/go-ethereum/trie"
	"github.com/olekukonko/tablewriter"
	"gopkg.in/urfave/cli.v1"
)

var (
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "DATABASE COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dbCommand = cli.Command{
		Name:      "db",
		Usage:     "Low level database operations",
		ArgsUsage: "",
		Category:  "DATABASE COMMANDS",
		Subcommands: []cli.Command{
			dbInspectCmd,
			dbStatCmd,
			dbCompactCmd,
			dbGetCmd,
			dbDeleteCmd,
			dbPutCmd,
			dbGetSlotsCmd,
			dbDumpFreezerIndex,
			dbImportCmd,
			dbExportCmd,
			dbMetadataCmd,
		},
	}
	dbInspectCmd = cli.Command{
		Action:    utils.MigrateFlags(inspect),
		Name:      "inspect",
		ArgsUsage: "<prefix> <start>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.AncientFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Usage:       "Inspect the storage size for each type of data in the database",
		Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
	}
	dbStatCmd = cli.Command{
		Action: utils.MigrateFlags(dbStats),
		Name:   "stats",
		Usage:  "Print leveldb statistics",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
	}
	dbCompactCmd = cli.Command{
		Action: utils.MigrateFlags(dbCompact),
		Name:   "compact",
		Usage:  "Compact leveldb database. WARNING: May take a very long time",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.CacheFlag,
			utils.CacheDatabaseFlag,
		},
		Description: `This command performs a database compaction.
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution!`,
	}
	dbGetCmd = cli.Command{
		Action:    utils.MigrateFlags(dbGet),
		Name:      "get",
		Usage:     "Show the value of a database key",
		ArgsUsage: "<hex-encoded key>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Description: "This command looks up the specified database key from the database.",
	}
	dbDeleteCmd = cli.Command{
		Action:    utils.MigrateFlags(dbDelete),
		Name:      "delete",
		Usage:     "Delete a database key (WARNING: may corrupt your database)",
		ArgsUsage: "<hex-encoded key>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Description: `This command deletes the specified database key from the database.
WARNING: This is a low-level operation which may cause database corruption!`,
	}
	dbPutCmd = cli.Command{
		Action:    utils.MigrateFlags(dbPut),
		Name:      "put",
		Usage:     "Set the value of a database key (WARNING: may corrupt your database)",
		ArgsUsage: "<hex-encoded key> <hex-encoded value>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Description: `This command sets a given database key to the given value.
WARNING: This is a low-level operation which may cause database corruption!`,
	}
	dbGetSlotsCmd = cli.Command{
		Action:    utils.MigrateFlags(dbDumpTrie),
		Name:      "dumptrie",
		Usage:     "Show the storage key/values of a given storage trie",
		ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Description: "This command dumps out the key-value slots of the given storage trie.",
	}
	dbDumpFreezerIndex = cli.Command{
		Action:    utils.MigrateFlags(freezerInspect),
		Name:      "freezer-index",
		Usage:     "Dump out the index of a given freezer type",
		ArgsUsage: "<type> <start (int)> <end (int)>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Description: "This command displays information about the freezer index.",
	}
	dbImportCmd = cli.Command{
		Action:    utils.MigrateFlags(importLDBdata),
		Name:      "import",
		Usage:     "Imports leveldb-data from an exported RLP dump.",
		ArgsUsage: "<dumpfile> <start (optional)>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Description: "The import command imports the specified chain data from an RLP encoded stream.",
	}
	dbExportCmd = cli.Command{
		Action:    utils.MigrateFlags(exportChaindata),
		Name:      "export",
		Usage:     "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
		ArgsUsage: "<type> <dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
	}
	dbMetadataCmd = cli.Command{
		Action: utils.MigrateFlags(showMetaData),
		Name:   "metadata",
		Usage:  "Shows metadata about the chain status.",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.SyncModeFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Description: "Shows metadata about the chain status.",
	}
)

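// removeDB removes the full node state, ancient and light node databases,
// prompting the user for confirmation before deleting each of them.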
func removeDB(ctx *cli.Context) error {
	stack, config := makeConfigNode(ctx)

	// Remove the full node state database
	path := stack.ResolvePath("chaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node state database")
	} else {
		log.Info("Full node state database missing", "path", path)
	}
	// Remove the full node ancient database
	path = config.Eth.DatabaseFreezer
	switch {
	case path == "":
		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
	case !filepath.IsAbs(path):
		path = config.Node.ResolvePath(path)
	}
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node ancient database")
	} else {
		log.Info("Full node ancient database missing", "path", path)
	}
	// Remove the light node database
	path = stack.ResolvePath("lightchaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "light node database")
	} else {
		log.Info("Light node database missing", "path", path)
	}
	return nil
}

// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
	switch {
	case err != nil:
		utils.Fatalf("%v", err)
	case !confirm:
		log.Info("Database deletion skipped", "path", database)
	default:
		start := time.Now()
		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
			// If we're at the top level folder, recurse into it
			if path == database {
				return nil
			}
			// Delete all the files, but not subfolders
			if !info.IsDir() {
				os.Remove(path)
				return nil
			}
			return filepath.SkipDir
		})
		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
	}
}

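// inspect prints the storage size for each type of data in the database. The
// optional 'prefix' and 'start' arguments limit the iteration to a subset of keys.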
func inspect(ctx *cli.Context) error {
	var (
		prefix []byte
		start  []byte
	)
	if ctx.NArg() > 2 {
		return fmt.Errorf("max 2 arguments: %v", ctx.Command.ArgsUsage)
	}
	if ctx.NArg() >= 1 {
		if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
			return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
		} else {
			prefix = d
		}
	}
	if ctx.NArg() >= 2 {
		if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
			return fmt.Errorf("failed to hex-decode 'start': %v", err)
		} else {
			start = d
		}
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()

	return rawdb.InspectDatabase(db, prefix, start)
}

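// showLeveldbStats prints the leveldb statistics and I/O statistics of the given
// database to stdout.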
func showLeveldbStats(db ethdb.Stater) {
	if stats, err := db.Stat("leveldb.stats"); err != nil {
		log.Warn("Failed to read database stats", "error", err)
	} else {
		fmt.Println(stats)
	}
	if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
		log.Warn("Failed to read database iostats", "error", err)
	} else {
		fmt.Println(ioStats)
	}
}

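// dbStats prints the leveldb statistics of the chain database.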
func dbStats(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()

	showLeveldbStats(db)
	return nil
}

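// dbCompact compacts the entire chain database, printing the leveldb statistics
// before and after the operation.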
func dbCompact(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	log.Info("Stats before compaction")
	showLeveldbStats(db)

	log.Info("Triggering compaction")
	if err := db.Compact(nil, nil); err != nil {
		log.Info("Compact err", "error", err)
		return err
	}
	log.Info("Stats after compaction")
	showLeveldbStats(db)
	return nil
}

// dbGet shows the value of a given database key
func dbGet(ctx *cli.Context) error {
	if ctx.NArg() != 1 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()

	key, err := parseHexOrString(ctx.Args().Get(0))
	if err != nil {
		log.Info("Could not decode the key", "error", err)
		return err
	}

	data, err := db.Get(key)
	if err != nil {
		log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
		return err
	}
	fmt.Printf("key %#x: %#x\n", key, data)
	return nil
}

// dbDelete deletes a key from the database
func dbDelete(ctx *cli.Context) error {
	if ctx.NArg() != 1 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	key, err := parseHexOrString(ctx.Args().Get(0))
	if err != nil {
		log.Info("Could not decode the key", "error", err)
		return err
	}
	data, err := db.Get(key)
	if err == nil {
		fmt.Printf("Previous value: %#x\n", data)
	}
	if err = db.Delete(key); err != nil {
		log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
		return err
	}
	return nil
}

// dbPut overwrites a value in the database
func dbPut(ctx *cli.Context) error {
	if ctx.NArg() != 2 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	var (
		key   []byte
		value []byte
		data  []byte
		err   error
	)
	key, err = parseHexOrString(ctx.Args().Get(0))
	if err != nil {
		log.Info("Could not decode the key", "error", err)
		return err
	}
	value, err = hexutil.Decode(ctx.Args().Get(1))
	if err != nil {
		log.Info("Could not decode the value", "error", err)
		return err
	}
	data, err = db.Get(key)
	if err == nil {
		fmt.Printf("Previous value: %#x\n", data)
	}
	return db.Put(key, value)
}

// dbDumpTrie shows the key-value slots of a given storage trie
func dbDumpTrie(ctx *cli.Context) error {
	if ctx.NArg() < 1 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()
	var (
		root  []byte
		start []byte
		max   = int64(-1)
		err   error
	)
	if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
		log.Info("Could not decode the root", "error", err)
		return err
	}
	stRoot := common.BytesToHash(root)
	if ctx.NArg() >= 2 {
		if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
			log.Info("Could not decode the seek position", "error", err)
			return err
		}
	}
	if ctx.NArg() >= 3 {
		if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
			log.Info("Could not decode the max count", "error", err)
			return err
		}
	}
	theTrie, err := trie.New(stRoot, trie.NewDatabase(db))
	if err != nil {
		return err
	}
	var count int64
	it := trie.NewIterator(theTrie.NodeIterator(start))
	for it.Next() {
		if max > 0 && count == max {
			fmt.Printf("Exiting after %d values\n", count)
			break
		}
		fmt.Printf("  %d. key %#x: %#x\n", count, it.Key, it.Value)
		count++
	}
	return it.Err
}

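// freezerInspect dumps the index entries of the given freezer table between the
// start and end positions supplied on the command line.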
func freezerInspect(ctx *cli.Context) error {
	var (
		start, end    int64
		disableSnappy bool
		err           error
	)
	if ctx.NArg() < 3 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	kind := ctx.Args().Get(0)
	if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok {
		var options []string
		for opt := range rawdb.FreezerNoSnappy {
			options = append(options, opt)
		}
		sort.Strings(options)
		return fmt.Errorf("could not read freezer-type '%v'. Available options: %v", kind, options)
	} else {
		disableSnappy = noSnap
	}
	if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil {
		log.Info("Could not read start-param", "error", err)
		return err
	}
	if end, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
		log.Info("Could not read end-param", "error", err)
		return err
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
	log.Info("Opening freezer", "location", path, "name", kind)
	if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
		return err
	} else {
		f.DumpIndex(start, end)
	}
	return nil
}

// parseHexOrString tries to hex-decode str, but if the 0x prefix is missing, it
// instead just returns the raw bytes
func parseHexOrString(str string) ([]byte, error) {
	b, err := hexutil.Decode(str)
	if errors.Is(err, hexutil.ErrMissingPrefix) {
		return []byte(str), nil
	}
	return b, err
}

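// importLDBdata imports key-value pairs from an exported RLP dump into the chain
// database, optionally resuming at the given start offset. The import stops at
// the next batch if an interrupt signal is received.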
func importLDBdata(ctx *cli.Context) error {
	start := 0
	switch ctx.NArg() {
	case 1:
		break
	case 2:
		s, err := strconv.Atoi(ctx.Args().Get(1))
		if err != nil {
			return fmt.Errorf("second arg must be an integer: %v", err)
		}
		start = s
	default:
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	var (
		fName     = ctx.Args().Get(0)
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during ldb import, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, false)
	return utils.ImportLDBData(db, fName, int64(start), stop)
}

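// preimageIterator wraps a database iterator and yields only well-formed trie
// preimage entries for export.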
type preimageIterator struct {
	iter ethdb.Iterator
}

func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
	for iter.iter.Next() {
		key := iter.iter.Key()
		if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
			return utils.OpBatchAdd, key, iter.iter.Value(), true
		}
	}
	return 0, nil, nil, false
}

func (iter *preimageIterator) Release() {
	iter.iter.Release()
}

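// snapshotIterator iterates the account and storage entries of the state snapshot
// for export. The first element it yields is a deletion of the snapshot root key.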
type snapshotIterator struct {
	init    bool
	account ethdb.Iterator
	storage ethdb.Iterator
}

func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
	if !iter.init {
		iter.init = true
		return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
	}
	for iter.account.Next() {
		key := iter.account.Key()
		if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
			return utils.OpBatchAdd, key, iter.account.Value(), true
		}
	}
	for iter.storage.Next() {
		key := iter.storage.Key()
		if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
			return utils.OpBatchAdd, key, iter.storage.Value(), true
		}
	}
	return 0, nil, nil, false
}

func (iter *snapshotIterator) Release() {
	iter.account.Release()
	iter.storage.Release()
}

// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
		return &preimageIterator{iter: iter}
	},
	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
		return &snapshotIterator{account: account, storage: storage}
	},
}

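// exportChaindata exports the chosen type of chain data (see chainExporters) to
// an RLP dump file, stopping at the next batch if an interrupt signal is received.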
func exportChaindata(ctx *cli.Context) error {
	if ctx.NArg() < 2 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	// Parse the required chain data type, make sure it's supported.
	kind := ctx.Args().Get(0)
	kind = strings.ToLower(strings.Trim(kind, " "))
	exporter, ok := chainExporters[kind]
	if !ok {
		var kinds []string
		for kind := range chainExporters {
			kinds = append(kinds, kind)
		}
		return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
	}
	var (
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during db export, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, true)
	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}

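// showMetaData prints a table of chain metadata (database version, head block,
// freezer and snapshot status, etc.) read from the database.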
func showMetaData(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	ancients, err := db.Ancients()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v\n", err)
	}
	pp := func(val *uint64) string {
		if val == nil {
			return "<nil>"
		}
		return fmt.Sprintf("%d (0x%x)", *val, *val)
	}
	data := [][]string{
		{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
		{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
		{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
		{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
	if b := rawdb.ReadHeadBlock(db); b != nil {
		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
	}
	if h := rawdb.ReadHeadHeader(db); h != nil {
		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
	}
	data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
		{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
		{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
		{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
		{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
		{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
		{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
		{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
		{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
	}...)
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Field", "Value"})
	table.AppendBulk(data)
	table.Render()
	return nil
}