github.com/tirogen/go-ethereum@v1.10.12-0.20221226051715-250cfede41b6/cmd/geth/dbcmd.go (about)

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"bytes"
    21  	"fmt"
    22  	"os"
    23  	"os/signal"
    24  	"path/filepath"
    25  	"strconv"
    26  	"strings"
    27  	"syscall"
    28  	"time"
    29  
    30  	"github.com/olekukonko/tablewriter"
    31  	"github.com/tirogen/go-ethereum/cmd/utils"
    32  	"github.com/tirogen/go-ethereum/common"
    33  	"github.com/tirogen/go-ethereum/common/hexutil"
    34  	"github.com/tirogen/go-ethereum/console/prompt"
    35  	"github.com/tirogen/go-ethereum/core/rawdb"
    36  	"github.com/tirogen/go-ethereum/core/state/snapshot"
    37  	"github.com/tirogen/go-ethereum/crypto"
    38  	"github.com/tirogen/go-ethereum/ethdb"
    39  	"github.com/tirogen/go-ethereum/internal/flags"
    40  	"github.com/tirogen/go-ethereum/log"
    41  	"github.com/tirogen/go-ethereum/trie"
    42  	"github.com/urfave/cli/v2"
    43  )
    44  
    45  var (
    46  	removedbCommand = &cli.Command{
    47  		Action:    removeDB,
    48  		Name:      "removedb",
    49  		Usage:     "Remove blockchain and state databases",
    50  		ArgsUsage: "",
    51  		Flags:     utils.DatabasePathFlags,
    52  		Description: `
    53  Remove blockchain and state databases`,
    54  	}
    55  	dbCommand = &cli.Command{
    56  		Name:      "db",
    57  		Usage:     "Low level database operations",
    58  		ArgsUsage: "",
    59  		Subcommands: []*cli.Command{
    60  			dbInspectCmd,
    61  			dbStatCmd,
    62  			dbCompactCmd,
    63  			dbGetCmd,
    64  			dbDeleteCmd,
    65  			dbPutCmd,
    66  			dbGetSlotsCmd,
    67  			dbDumpFreezerIndex,
    68  			dbImportCmd,
    69  			dbExportCmd,
    70  			dbMetadataCmd,
    71  			dbCheckStateContentCmd,
    72  		},
    73  	}
    74  	dbInspectCmd = &cli.Command{
    75  		Action:    inspect,
    76  		Name:      "inspect",
    77  		ArgsUsage: "<prefix> <start>",
    78  		Flags: flags.Merge([]cli.Flag{
    79  			utils.SyncModeFlag,
    80  		}, utils.NetworkFlags, utils.DatabasePathFlags),
    81  		Usage:       "Inspect the storage size for each type of data in the database",
    82  		Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
    83  	}
    84  	dbCheckStateContentCmd = &cli.Command{
    85  		Action:    checkStateContent,
    86  		Name:      "check-state-content",
    87  		ArgsUsage: "<start (optional)>",
    88  		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
    89  		Usage:     "Verify that state data is cryptographically correct",
    90  		Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes.
    91  For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
    92  a data corruption.`,
    93  	}
    94  	dbStatCmd = &cli.Command{
    95  		Action: dbStats,
    96  		Name:   "stats",
    97  		Usage:  "Print leveldb statistics",
    98  		Flags: flags.Merge([]cli.Flag{
    99  			utils.SyncModeFlag,
   100  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   101  	}
   102  	dbCompactCmd = &cli.Command{
   103  		Action: dbCompact,
   104  		Name:   "compact",
   105  		Usage:  "Compact leveldb database. WARNING: May take a very long time",
   106  		Flags: flags.Merge([]cli.Flag{
   107  			utils.SyncModeFlag,
   108  			utils.CacheFlag,
   109  			utils.CacheDatabaseFlag,
   110  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   111  		Description: `This command performs a database compaction. 
   112  WARNING: This operation may take a very long time to finish, and may cause database
   113  corruption if it is aborted during execution'!`,
   114  	}
   115  	dbGetCmd = &cli.Command{
   116  		Action:    dbGet,
   117  		Name:      "get",
   118  		Usage:     "Show the value of a database key",
   119  		ArgsUsage: "<hex-encoded key>",
   120  		Flags: flags.Merge([]cli.Flag{
   121  			utils.SyncModeFlag,
   122  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   123  		Description: "This command looks up the specified database key from the database.",
   124  	}
   125  	dbDeleteCmd = &cli.Command{
   126  		Action:    dbDelete,
   127  		Name:      "delete",
   128  		Usage:     "Delete a database key (WARNING: may corrupt your database)",
   129  		ArgsUsage: "<hex-encoded key>",
   130  		Flags: flags.Merge([]cli.Flag{
   131  			utils.SyncModeFlag,
   132  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   133  		Description: `This command deletes the specified database key from the database. 
   134  WARNING: This is a low-level operation which may cause database corruption!`,
   135  	}
   136  	dbPutCmd = &cli.Command{
   137  		Action:    dbPut,
   138  		Name:      "put",
   139  		Usage:     "Set the value of a database key (WARNING: may corrupt your database)",
   140  		ArgsUsage: "<hex-encoded key> <hex-encoded value>",
   141  		Flags: flags.Merge([]cli.Flag{
   142  			utils.SyncModeFlag,
   143  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   144  		Description: `This command sets a given database key to the given value. 
   145  WARNING: This is a low-level operation which may cause database corruption!`,
   146  	}
   147  	dbGetSlotsCmd = &cli.Command{
   148  		Action:    dbDumpTrie,
   149  		Name:      "dumptrie",
   150  		Usage:     "Show the storage key/values of a given storage trie",
   151  		ArgsUsage: "<hex-encoded state root> <hex-encoded account hash> <hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
   152  		Flags: flags.Merge([]cli.Flag{
   153  			utils.SyncModeFlag,
   154  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   155  		Description: "This command looks up the specified database key from the database.",
   156  	}
   157  	dbDumpFreezerIndex = &cli.Command{
   158  		Action:    freezerInspect,
   159  		Name:      "freezer-index",
   160  		Usage:     "Dump out the index of a specific freezer table",
   161  		ArgsUsage: "<freezer-type> <table-type> <start (int)> <end (int)>",
   162  		Flags: flags.Merge([]cli.Flag{
   163  			utils.SyncModeFlag,
   164  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   165  		Description: "This command displays information about the freezer index.",
   166  	}
   167  	dbImportCmd = &cli.Command{
   168  		Action:    importLDBdata,
   169  		Name:      "import",
   170  		Usage:     "Imports leveldb-data from an exported RLP dump.",
   171  		ArgsUsage: "<dumpfile> <start (optional)",
   172  		Flags: flags.Merge([]cli.Flag{
   173  			utils.SyncModeFlag,
   174  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   175  		Description: "The import command imports the specific chain data from an RLP encoded stream.",
   176  	}
   177  	dbExportCmd = &cli.Command{
   178  		Action:    exportChaindata,
   179  		Name:      "export",
   180  		Usage:     "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
   181  		ArgsUsage: "<type> <dumpfile>",
   182  		Flags: flags.Merge([]cli.Flag{
   183  			utils.SyncModeFlag,
   184  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   185  		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
   186  	}
   187  	dbMetadataCmd = &cli.Command{
   188  		Action: showMetaData,
   189  		Name:   "metadata",
   190  		Usage:  "Shows metadata about the chain status.",
   191  		Flags: flags.Merge([]cli.Flag{
   192  			utils.SyncModeFlag,
   193  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   194  		Description: "Shows metadata about the chain status.",
   195  	}
   196  )
   197  
   198  func removeDB(ctx *cli.Context) error {
   199  	stack, config := makeConfigNode(ctx)
   200  
   201  	// Remove the full node state database
   202  	path := stack.ResolvePath("chaindata")
   203  	if common.FileExist(path) {
   204  		confirmAndRemoveDB(path, "full node state database")
   205  	} else {
   206  		log.Info("Full node state database missing", "path", path)
   207  	}
   208  	// Remove the full node ancient database
   209  	path = config.Eth.DatabaseFreezer
   210  	switch {
   211  	case path == "":
   212  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   213  	case !filepath.IsAbs(path):
   214  		path = config.Node.ResolvePath(path)
   215  	}
   216  	if common.FileExist(path) {
   217  		confirmAndRemoveDB(path, "full node ancient database")
   218  	} else {
   219  		log.Info("Full node ancient database missing", "path", path)
   220  	}
   221  	// Remove the light node database
   222  	path = stack.ResolvePath("lightchaindata")
   223  	if common.FileExist(path) {
   224  		confirmAndRemoveDB(path, "light node database")
   225  	} else {
   226  		log.Info("Light node database missing", "path", path)
   227  	}
   228  	return nil
   229  }
   230  
   231  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   232  // folder if accepted.
   233  func confirmAndRemoveDB(database string, kind string) {
   234  	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   235  	switch {
   236  	case err != nil:
   237  		utils.Fatalf("%v", err)
   238  	case !confirm:
   239  		log.Info("Database deletion skipped", "path", database)
   240  	default:
   241  		start := time.Now()
   242  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   243  			// If we're at the top level folder, recurse into
   244  			if path == database {
   245  				return nil
   246  			}
   247  			// Delete all the files, but not subfolders
   248  			if !info.IsDir() {
   249  				os.Remove(path)
   250  				return nil
   251  			}
   252  			return filepath.SkipDir
   253  		})
   254  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   255  	}
   256  }
   257  
   258  func inspect(ctx *cli.Context) error {
   259  	var (
   260  		prefix []byte
   261  		start  []byte
   262  	)
   263  	if ctx.NArg() > 2 {
   264  		return fmt.Errorf("max 2 arguments: %v", ctx.Command.ArgsUsage)
   265  	}
   266  	if ctx.NArg() >= 1 {
   267  		if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
   268  			return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
   269  		} else {
   270  			prefix = d
   271  		}
   272  	}
   273  	if ctx.NArg() >= 2 {
   274  		if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
   275  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   276  		} else {
   277  			start = d
   278  		}
   279  	}
   280  	stack, _ := makeConfigNode(ctx)
   281  	defer stack.Close()
   282  
   283  	db := utils.MakeChainDatabase(ctx, stack, true)
   284  	defer db.Close()
   285  
   286  	return rawdb.InspectDatabase(db, prefix, start)
   287  }
   288  
   289  func checkStateContent(ctx *cli.Context) error {
   290  	var (
   291  		prefix []byte
   292  		start  []byte
   293  	)
   294  	if ctx.NArg() > 1 {
   295  		return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
   296  	}
   297  	if ctx.NArg() > 0 {
   298  		if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
   299  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   300  		} else {
   301  			start = d
   302  		}
   303  	}
   304  	stack, _ := makeConfigNode(ctx)
   305  	defer stack.Close()
   306  
   307  	db := utils.MakeChainDatabase(ctx, stack, true)
   308  	defer db.Close()
   309  	var (
   310  		it        = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
   311  		hasher    = crypto.NewKeccakState()
   312  		got       = make([]byte, 32)
   313  		errs      int
   314  		count     int
   315  		startTime = time.Now()
   316  		lastLog   = time.Now()
   317  	)
   318  	for it.Next() {
   319  		count++
   320  		k := it.Key()
   321  		v := it.Value()
   322  		hasher.Reset()
   323  		hasher.Write(v)
   324  		hasher.Read(got)
   325  		if !bytes.Equal(k, got) {
   326  			errs++
   327  			fmt.Printf("Error at %#x\n", k)
   328  			fmt.Printf("  Hash:  %#x\n", got)
   329  			fmt.Printf("  Data:  %#x\n", v)
   330  		}
   331  		if time.Since(lastLog) > 8*time.Second {
   332  			log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime)))
   333  			lastLog = time.Now()
   334  		}
   335  	}
   336  	if err := it.Error(); err != nil {
   337  		return err
   338  	}
   339  	log.Info("Iterated the state content", "errors", errs, "items", count)
   340  	return nil
   341  }
   342  
   343  func showLeveldbStats(db ethdb.KeyValueStater) {
   344  	if stats, err := db.Stat("leveldb.stats"); err != nil {
   345  		log.Warn("Failed to read database stats", "error", err)
   346  	} else {
   347  		fmt.Println(stats)
   348  	}
   349  	if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
   350  		log.Warn("Failed to read database iostats", "error", err)
   351  	} else {
   352  		fmt.Println(ioStats)
   353  	}
   354  }
   355  
   356  func dbStats(ctx *cli.Context) error {
   357  	stack, _ := makeConfigNode(ctx)
   358  	defer stack.Close()
   359  
   360  	db := utils.MakeChainDatabase(ctx, stack, true)
   361  	defer db.Close()
   362  
   363  	showLeveldbStats(db)
   364  	return nil
   365  }
   366  
   367  func dbCompact(ctx *cli.Context) error {
   368  	stack, _ := makeConfigNode(ctx)
   369  	defer stack.Close()
   370  
   371  	db := utils.MakeChainDatabase(ctx, stack, false)
   372  	defer db.Close()
   373  
   374  	log.Info("Stats before compaction")
   375  	showLeveldbStats(db)
   376  
   377  	log.Info("Triggering compaction")
   378  	if err := db.Compact(nil, nil); err != nil {
   379  		log.Info("Compact err", "error", err)
   380  		return err
   381  	}
   382  	log.Info("Stats after compaction")
   383  	showLeveldbStats(db)
   384  	return nil
   385  }
   386  
   387  // dbGet shows the value of a given database key
   388  func dbGet(ctx *cli.Context) error {
   389  	if ctx.NArg() != 1 {
   390  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   391  	}
   392  	stack, _ := makeConfigNode(ctx)
   393  	defer stack.Close()
   394  
   395  	db := utils.MakeChainDatabase(ctx, stack, true)
   396  	defer db.Close()
   397  
   398  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   399  	if err != nil {
   400  		log.Info("Could not decode the key", "error", err)
   401  		return err
   402  	}
   403  
   404  	data, err := db.Get(key)
   405  	if err != nil {
   406  		log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
   407  		return err
   408  	}
   409  	fmt.Printf("key %#x: %#x\n", key, data)
   410  	return nil
   411  }
   412  
   413  // dbDelete deletes a key from the database
   414  func dbDelete(ctx *cli.Context) error {
   415  	if ctx.NArg() != 1 {
   416  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   417  	}
   418  	stack, _ := makeConfigNode(ctx)
   419  	defer stack.Close()
   420  
   421  	db := utils.MakeChainDatabase(ctx, stack, false)
   422  	defer db.Close()
   423  
   424  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   425  	if err != nil {
   426  		log.Info("Could not decode the key", "error", err)
   427  		return err
   428  	}
   429  	data, err := db.Get(key)
   430  	if err == nil {
   431  		fmt.Printf("Previous value: %#x\n", data)
   432  	}
   433  	if err = db.Delete(key); err != nil {
   434  		log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
   435  		return err
   436  	}
   437  	return nil
   438  }
   439  
   440  // dbPut overwrite a value in the database
   441  func dbPut(ctx *cli.Context) error {
   442  	if ctx.NArg() != 2 {
   443  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   444  	}
   445  	stack, _ := makeConfigNode(ctx)
   446  	defer stack.Close()
   447  
   448  	db := utils.MakeChainDatabase(ctx, stack, false)
   449  	defer db.Close()
   450  
   451  	var (
   452  		key   []byte
   453  		value []byte
   454  		data  []byte
   455  		err   error
   456  	)
   457  	key, err = common.ParseHexOrString(ctx.Args().Get(0))
   458  	if err != nil {
   459  		log.Info("Could not decode the key", "error", err)
   460  		return err
   461  	}
   462  	value, err = hexutil.Decode(ctx.Args().Get(1))
   463  	if err != nil {
   464  		log.Info("Could not decode the value", "error", err)
   465  		return err
   466  	}
   467  	data, err = db.Get(key)
   468  	if err == nil {
   469  		fmt.Printf("Previous value: %#x\n", data)
   470  	}
   471  	return db.Put(key, value)
   472  }
   473  
// dbDumpTrie shows the key-value slots of a given storage trie.
//
// Required arguments (all hex-encoded 32-byte hashes): the state root, the
// account hash and the storage trie root. Optional arguments: a hex-encoded
// seek position to start iterating from, and a decimal maximum number of
// elements to print (non-positive means unlimited, which is the default).
func dbDumpTrie(ctx *cli.Context) error {
	if ctx.NArg() < 3 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	defer db.Close()

	var (
		state   []byte // state root hash
		storage []byte // storage trie root hash
		account []byte // account hash owning the storage trie
		start   []byte // optional iterator seek position
		max     = int64(-1) // max elements to print; <= 0 means unlimited
		err     error
	)
	if state, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
		log.Info("Could not decode the state root", "error", err)
		return err
	}
	if account, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
		log.Info("Could not decode the account hash", "error", err)
		return err
	}
	if storage, err = hexutil.Decode(ctx.Args().Get(2)); err != nil {
		log.Info("Could not decode the storage trie root", "error", err)
		return err
	}
	if ctx.NArg() > 3 {
		if start, err = hexutil.Decode(ctx.Args().Get(3)); err != nil {
			log.Info("Could not decode the seek position", "error", err)
			return err
		}
	}
	if ctx.NArg() > 4 {
		if max, err = strconv.ParseInt(ctx.Args().Get(4), 10, 64); err != nil {
			log.Info("Could not decode the max count", "error", err)
			return err
		}
	}
	// Open the storage trie identified by (state root, account, storage root)
	// and walk its leaves from the optional start position.
	id := trie.StorageTrieID(common.BytesToHash(state), common.BytesToHash(account), common.BytesToHash(storage))
	theTrie, err := trie.New(id, trie.NewDatabase(db))
	if err != nil {
		return err
	}
	var count int64
	it := trie.NewIterator(theTrie.NodeIterator(start))
	for it.Next() {
		if max > 0 && count == max {
			fmt.Printf("Exiting after %d values\n", count)
			break
		}
		fmt.Printf("  %d. key %#x: %#x\n", count, it.Key, it.Value)
		count++
	}
	// Surface any iteration error (nil on clean completion).
	return it.Err
}
   534  
   535  func freezerInspect(ctx *cli.Context) error {
   536  	if ctx.NArg() < 4 {
   537  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   538  	}
   539  	var (
   540  		freezer = ctx.Args().Get(0)
   541  		table   = ctx.Args().Get(1)
   542  	)
   543  	start, err := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   544  	if err != nil {
   545  		log.Info("Could not read start-param", "err", err)
   546  		return err
   547  	}
   548  	end, err := strconv.ParseInt(ctx.Args().Get(3), 10, 64)
   549  	if err != nil {
   550  		log.Info("Could not read count param", "err", err)
   551  		return err
   552  	}
   553  	stack, _ := makeConfigNode(ctx)
   554  	defer stack.Close()
   555  
   556  	db := utils.MakeChainDatabase(ctx, stack, true)
   557  	defer db.Close()
   558  
   559  	ancient, err := db.AncientDatadir()
   560  	if err != nil {
   561  		log.Info("Failed to retrieve ancient root", "err", err)
   562  		return err
   563  	}
   564  	return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
   565  }
   566  
// importLDBdata implements the "db import" command. It imports key/value
// data from the RLP dump file given as the first argument; the optional
// second argument is an integer offset to start importing from.
func importLDBdata(ctx *cli.Context) error {
	start := 0
	switch ctx.NArg() {
	case 1:
		break
	case 2:
		s, err := strconv.Atoi(ctx.Args().Get(1))
		if err != nil {
			return fmt.Errorf("second arg must be an integer: %v", err)
		}
		start = s
	default:
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	var (
		fName     = ctx.Args().Get(0)
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	// Closing 'stop' asks ImportLDBData to abort at the next batch boundary.
	// The goroutine fires either on a delivered signal (ok == true, logged)
	// or when the deferred close(interrupt) runs at function exit (ok ==
	// false), which guarantees the goroutine always terminates.
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during ldb import, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, false)
	return utils.ImportLDBData(db, fName, int64(start), stop)
}
   600  
// preimageIterator wraps a database iterator, yielding only well-formed
// preimage entries as batch-add operations for the export stream.
type preimageIterator struct {
	iter ethdb.Iterator // underlying iterator over the preimage keyspace
}
   604  
   605  func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
   606  	for iter.iter.Next() {
   607  		key := iter.iter.Key()
   608  		if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
   609  			return utils.OpBatchAdd, key, iter.iter.Value(), true
   610  		}
   611  	}
   612  	return 0, nil, nil, false
   613  }
   614  
// Release frees the resources held by the underlying database iterator.
func (iter *preimageIterator) Release() {
	iter.iter.Release()
}
   618  
// snapshotIterator merges the snapshot account and storage keyspaces into a
// single export stream, prefixed by a deletion of the snapshot root key.
type snapshotIterator struct {
	init    bool           // whether the initial root-key deletion was emitted
	account ethdb.Iterator // iterator over the snapshot account entries
	storage ethdb.Iterator // iterator over the snapshot storage entries
}
   624  
   625  func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
   626  	if !iter.init {
   627  		iter.init = true
   628  		return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
   629  	}
   630  	for iter.account.Next() {
   631  		key := iter.account.Key()
   632  		if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
   633  			return utils.OpBatchAdd, key, iter.account.Value(), true
   634  		}
   635  	}
   636  	for iter.storage.Next() {
   637  		key := iter.storage.Key()
   638  		if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
   639  			return utils.OpBatchAdd, key, iter.storage.Value(), true
   640  		}
   641  	}
   642  	return 0, nil, nil, false
   643  }
   644  
// Release frees the resources held by both underlying database iterators.
func (iter *snapshotIterator) Release() {
	iter.account.Release()
	iter.storage.Release()
}
   649  
// chainExporters defines the export scheme for all exportable chain data.
// Each entry maps a data-type name (as given on the command line) to a
// constructor producing an iterator over that data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
	// "preimage" exports all hash-to-preimage mappings.
	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
		return &preimageIterator{iter: iter}
	},
	// "snapshot" exports the account and storage snapshot entries.
	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
		return &snapshotIterator{account: account, storage: storage}
	},
}
   662  
// exportChaindata implements the "db export" command. The first argument
// selects the data type (a key of chainExporters), the second is the output
// dump file path.
func exportChaindata(ctx *cli.Context) error {
	if ctx.NArg() < 2 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	// Parse the required chain data type, make sure it's supported.
	kind := ctx.Args().Get(0)
	kind = strings.ToLower(strings.Trim(kind, " "))
	exporter, ok := chainExporters[kind]
	if !ok {
		var kinds []string
		for kind := range chainExporters {
			kinds = append(kinds, kind)
		}
		return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
	}
	var (
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	// Closing 'stop' asks ExportChaindata to abort at the next batch
	// boundary. The goroutine fires either on a delivered signal (ok ==
	// true, logged) or when the deferred close(interrupt) runs at function
	// exit (ok == false), so the goroutine always terminates.
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during db export, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, true)
	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
   696  
// showMetaData implements the "db metadata" command: it collects various
// chain-status markers from the database and renders them as a two-column
// (field, value) table on stdout.
func showMetaData(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	db := utils.MakeChainDatabase(ctx, stack, true)
	// Ancient-store errors are reported but do not abort: the remaining
	// metadata is still printed (ancients will show its zero value).
	ancients, err := db.Ancients()
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
	}
	// pp renders an optional counter as "<nil>" or "decimal (hex)".
	pp := func(val *uint64) string {
		if val == nil {
			return "<nil>"
		}
		return fmt.Sprintf("%d (%#x)", *val, *val)
	}
	data := [][]string{
		{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
		{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
		{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
		{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
	// Optional rows: only present when the corresponding record exists.
	if b := rawdb.ReadHeadBlock(db); b != nil {
		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (%#x)", b.Number(), b.Number())})
	}
	if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
		data = append(data, []string{"SkeletonSyncStatus", string(b)})
	}
	if h := rawdb.ReadHeadHeader(db); h != nil {
		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)})
	}
	data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
		{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
		{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
		{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
		{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
		{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
		{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
		{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
		{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
	}...)
	table := tablewriter.NewWriter(os.Stdout)
	table.SetHeader([]string{"Field", "Value"})
	table.AppendBulk(data)
	table.Render()
	return nil
}