github.com/calmw/ethereum@v0.1.1/cmd/geth/dbcmd.go

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"bytes"
    21  	"fmt"
    22  	"os"
    23  	"os/signal"
    24  	"path/filepath"
    25  	"strconv"
    26  	"strings"
    27  	"syscall"
    28  	"time"
    29  
    30  	"github.com/calmw/ethereum/cmd/utils"
    31  	"github.com/calmw/ethereum/common"
    32  	"github.com/calmw/ethereum/common/hexutil"
    33  	"github.com/calmw/ethereum/console/prompt"
    34  	"github.com/calmw/ethereum/core/rawdb"
    35  	"github.com/calmw/ethereum/core/state/snapshot"
    36  	"github.com/calmw/ethereum/crypto"
    37  	"github.com/calmw/ethereum/ethdb"
    38  	"github.com/calmw/ethereum/internal/flags"
    39  	"github.com/calmw/ethereum/log"
    40  	"github.com/calmw/ethereum/trie"
    41  	"github.com/olekukonko/tablewriter"
    42  	"github.com/urfave/cli/v2"
    43  )
    44  
    45  var (
    46  	removedbCommand = &cli.Command{
    47  		Action:    removeDB,
    48  		Name:      "removedb",
    49  		Usage:     "Remove blockchain and state databases",
    50  		ArgsUsage: "",
    51  		Flags:     utils.DatabasePathFlags,
    52  		Description: `
    53  Remove blockchain and state databases`,
    54  	}
    55  	dbCommand = &cli.Command{
    56  		Name:      "db",
    57  		Usage:     "Low level database operations",
    58  		ArgsUsage: "",
    59  		Subcommands: []*cli.Command{
    60  			dbInspectCmd,
    61  			dbStatCmd,
    62  			dbCompactCmd,
    63  			dbGetCmd,
    64  			dbDeleteCmd,
    65  			dbPutCmd,
    66  			dbGetSlotsCmd,
    67  			dbDumpFreezerIndex,
    68  			dbImportCmd,
    69  			dbExportCmd,
    70  			dbMetadataCmd,
    71  			dbCheckStateContentCmd,
    72  		},
    73  	}
    74  	dbInspectCmd = &cli.Command{
    75  		Action:    inspect,
    76  		Name:      "inspect",
    77  		ArgsUsage: "<prefix> <start>",
    78  		Flags: flags.Merge([]cli.Flag{
    79  			utils.SyncModeFlag,
    80  		}, utils.NetworkFlags, utils.DatabasePathFlags),
    81  		Usage:       "Inspect the storage size for each type of data in the database",
    82  		Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
    83  	}
    84  	dbCheckStateContentCmd = &cli.Command{
    85  		Action:    checkStateContent,
    86  		Name:      "check-state-content",
    87  		ArgsUsage: "<start (optional)>",
    88  		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
    89  		Usage:     "Verify that state data is cryptographically correct",
    90  		Description: `This command iterates the entire database for 32-byte keys, looking for RLP-encoded trie nodes.
    91  For each trie node encountered, it checks that the key corresponds to keccak256(value). If this is not true, it indicates
    92  data corruption.`,
    93  	}
    94  	dbStatCmd = &cli.Command{
    95  		Action: dbStats,
    96  		Name:   "stats",
    97  		Usage:  "Print leveldb statistics",
    98  		Flags: flags.Merge([]cli.Flag{
    99  			utils.SyncModeFlag,
   100  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   101  	}
   102  	dbCompactCmd = &cli.Command{
   103  		Action: dbCompact,
   104  		Name:   "compact",
   105  		Usage:  "Compact leveldb database. WARNING: May take a very long time",
   106  		Flags: flags.Merge([]cli.Flag{
   107  			utils.SyncModeFlag,
   108  			utils.CacheFlag,
   109  			utils.CacheDatabaseFlag,
   110  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   111  		Description: `This command performs a database compaction.
   112  WARNING: This operation may take a very long time to finish, and may cause database
   113  corruption if it is aborted during execution!`,
   114  	}
   115  	dbGetCmd = &cli.Command{
   116  		Action:    dbGet,
   117  		Name:      "get",
   118  		Usage:     "Show the value of a database key",
   119  		ArgsUsage: "<hex-encoded key>",
   120  		Flags: flags.Merge([]cli.Flag{
   121  			utils.SyncModeFlag,
   122  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   123  		Description: "This command looks up the specified database key from the database.",
   124  	}
   125  	dbDeleteCmd = &cli.Command{
   126  		Action:    dbDelete,
   127  		Name:      "delete",
   128  		Usage:     "Delete a database key (WARNING: may corrupt your database)",
   129  		ArgsUsage: "<hex-encoded key>",
   130  		Flags: flags.Merge([]cli.Flag{
   131  			utils.SyncModeFlag,
   132  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   133  		Description: `This command deletes the specified database key from the database. 
   134  WARNING: This is a low-level operation which may cause database corruption!`,
   135  	}
   136  	dbPutCmd = &cli.Command{
   137  		Action:    dbPut,
   138  		Name:      "put",
   139  		Usage:     "Set the value of a database key (WARNING: may corrupt your database)",
   140  		ArgsUsage: "<hex-encoded key> <hex-encoded value>",
   141  		Flags: flags.Merge([]cli.Flag{
   142  			utils.SyncModeFlag,
   143  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   144  		Description: `This command sets a given database key to the given value. 
   145  WARNING: This is a low-level operation which may cause database corruption!`,
   146  	}
   147  	dbGetSlotsCmd = &cli.Command{
   148  		Action:    dbDumpTrie,
   149  		Name:      "dumptrie",
   150  		Usage:     "Show the storage key/values of a given storage trie",
   151  		ArgsUsage: "<hex-encoded state root> <hex-encoded account hash> <hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
   152  		Flags: flags.Merge([]cli.Flag{
   153  			utils.SyncModeFlag,
   154  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   155  		Description: "This command dumps out the key/value slots of the given storage trie.",
   156  	}
   157  	dbDumpFreezerIndex = &cli.Command{
   158  		Action:    freezerInspect,
   159  		Name:      "freezer-index",
   160  		Usage:     "Dump out the index of a specific freezer table",
   161  		ArgsUsage: "<freezer-type> <table-type> <start (int)> <end (int)>",
   162  		Flags: flags.Merge([]cli.Flag{
   163  			utils.SyncModeFlag,
   164  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   165  		Description: "This command displays information about the freezer index.",
   166  	}
   167  	dbImportCmd = &cli.Command{
   168  		Action:    importLDBdata,
   169  		Name:      "import",
   170  		Usage:     "Imports leveldb-data from an exported RLP dump.",
   171  		ArgsUsage: "<dumpfile> <start (optional)>",
   172  		Flags: flags.Merge([]cli.Flag{
   173  			utils.SyncModeFlag,
   174  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   175  		Description: "The import command imports the specified chain data from an RLP-encoded stream.",
   176  	}
   177  	dbExportCmd = &cli.Command{
   178  		Action:    exportChaindata,
   179  		Name:      "export",
   180  		Usage:     "Exports the chain data into an RLP dump. If the <dumpfile> has a .gz suffix, gzip compression will be used.",
   181  		ArgsUsage: "<type> <dumpfile>",
   182  		Flags: flags.Merge([]cli.Flag{
   183  			utils.SyncModeFlag,
   184  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   185  		Description: "Exports the specified chain data to an RLP-encoded stream, optionally gzip-compressed.",
   186  	}
   187  	dbMetadataCmd = &cli.Command{
   188  		Action: showMetaData,
   189  		Name:   "metadata",
   190  		Usage:  "Shows metadata about the chain status.",
   191  		Flags: flags.Merge([]cli.Flag{
   192  			utils.SyncModeFlag,
   193  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   194  		Description: "Shows metadata about the chain status.",
   195  	}
   196  )
   197  
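        // removeDB removes the full node state database, the ancient (freezer) database and the
        // light node database, prompting the user for confirmation before deleting each one.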
   198  func removeDB(ctx *cli.Context) error {
   199  	stack, config := makeConfigNode(ctx)
   200  
   201  	// Remove the full node state database
   202  	path := stack.ResolvePath("chaindata")
   203  	if common.FileExist(path) {
   204  		confirmAndRemoveDB(path, "full node state database")
   205  	} else {
   206  		log.Info("Full node state database missing", "path", path)
   207  	}
   208  	// Remove the full node ancient database
   209  	path = config.Eth.DatabaseFreezer
   210  	switch {
   211  	case path == "":
   212  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   213  	case !filepath.IsAbs(path):
   214  		path = config.Node.ResolvePath(path)
   215  	}
   216  	if common.FileExist(path) {
   217  		confirmAndRemoveDB(path, "full node ancient database")
   218  	} else {
   219  		log.Info("Full node ancient database missing", "path", path)
   220  	}
   221  	// Remove the light node database
   222  	path = stack.ResolvePath("lightchaindata")
   223  	if common.FileExist(path) {
   224  		confirmAndRemoveDB(path, "light node database")
   225  	} else {
   226  		log.Info("Light node database missing", "path", path)
   227  	}
   228  	return nil
   229  }
   230  
   231  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   232  // folder if accepted.
   233  func confirmAndRemoveDB(database string, kind string) {
   234  	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   235  	switch {
   236  	case err != nil:
   237  		utils.Fatalf("%v", err)
   238  	case !confirm:
   239  		log.Info("Database deletion skipped", "path", database)
   240  	default:
   241  		start := time.Now()
   242  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   243  			// If we're at the top level folder, recurse into it
   244  			if path == database {
   245  				return nil
   246  			}
   247  			// Delete all the files, but not subfolders
   248  			if !info.IsDir() {
   249  				os.Remove(path)
   250  				return nil
   251  			}
   252  			return filepath.SkipDir
   253  		})
   254  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   255  	}
   256  }
   257  
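        // inspect iterates the entire key-value database, optionally restricted to a key prefix
        // and start position, and prints per-category storage statistics.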
   258  func inspect(ctx *cli.Context) error {
   259  	var (
   260  		prefix []byte
   261  		start  []byte
   262  	)
   263  	if ctx.NArg() > 2 {
   264  		return fmt.Errorf("max 2 arguments: %v", ctx.Command.ArgsUsage)
   265  	}
   266  	if ctx.NArg() >= 1 {
   267  		if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
   268  			return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
   269  		} else {
   270  			prefix = d
   271  		}
   272  	}
   273  	if ctx.NArg() >= 2 {
   274  		if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
   275  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   276  		} else {
   277  			start = d
   278  		}
   279  	}
   280  	stack, _ := makeConfigNode(ctx)
   281  	defer stack.Close()
   282  
   283  	db := utils.MakeChainDatabase(ctx, stack, true)
   284  	defer db.Close()
   285  
   286  	return rawdb.InspectDatabase(db, prefix, start)
   287  }
   288  
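        // checkStateContent iterates all 32-byte keys in the database and verifies that each value
        // hashes back to its key, reporting any mismatch as corruption.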
   289  func checkStateContent(ctx *cli.Context) error {
   290  	var (
   291  		prefix []byte
   292  		start  []byte
   293  	)
   294  	if ctx.NArg() > 1 {
   295  		return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
   296  	}
   297  	if ctx.NArg() > 0 {
   298  		if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
   299  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   300  		} else {
   301  			start = d
   302  		}
   303  	}
   304  	stack, _ := makeConfigNode(ctx)
   305  	defer stack.Close()
   306  
   307  	db := utils.MakeChainDatabase(ctx, stack, true)
   308  	defer db.Close()
   309  	var (
   310  		it        = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
   311  		hasher    = crypto.NewKeccakState()
   312  		got       = make([]byte, 32)
   313  		errs      int
   314  		count     int
   315  		startTime = time.Now()
   316  		lastLog   = time.Now()
   317  	)
   318  	for it.Next() {
   319  		count++
   320  		k := it.Key()
   321  		v := it.Value()
   322  		hasher.Reset()
   323  		hasher.Write(v)
   324  		hasher.Read(got)
   325  		if !bytes.Equal(k, got) {
   326  			errs++
   327  			fmt.Printf("Error at %#x\n", k)
   328  			fmt.Printf("  Hash:  %#x\n", got)
   329  			fmt.Printf("  Data:  %#x\n", v)
   330  		}
   331  		if time.Since(lastLog) > 8*time.Second {
   332  			log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime)))
   333  			lastLog = time.Now()
   334  		}
   335  	}
   336  	if err := it.Error(); err != nil {
   337  		return err
   338  	}
   339  	log.Info("Iterated the state content", "errors", errs, "items", count)
   340  	return nil
   341  }
   342  
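        // showLeveldbStats prints the leveldb statistics and I/O statistics of the given database.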
   343  func showLeveldbStats(db ethdb.KeyValueStater) {
   344  	if stats, err := db.Stat("leveldb.stats"); err != nil {
   345  		log.Warn("Failed to read database stats", "error", err)
   346  	} else {
   347  		fmt.Println(stats)
   348  	}
   349  	if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
   350  		log.Warn("Failed to read database iostats", "error", err)
   351  	} else {
   352  		fmt.Println(ioStats)
   353  	}
   354  }
   355  
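        // dbStats prints the statistics of the chain database.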
   356  func dbStats(ctx *cli.Context) error {
   357  	stack, _ := makeConfigNode(ctx)
   358  	defer stack.Close()
   359  
   360  	db := utils.MakeChainDatabase(ctx, stack, true)
   361  	defer db.Close()
   362  
   363  	showLeveldbStats(db)
   364  	return nil
   365  }
   366  
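        // dbCompact runs a full compaction of the chain database, printing the database
        // statistics before and after.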
   367  func dbCompact(ctx *cli.Context) error {
   368  	stack, _ := makeConfigNode(ctx)
   369  	defer stack.Close()
   370  
   371  	db := utils.MakeChainDatabase(ctx, stack, false)
   372  	defer db.Close()
   373  
   374  	log.Info("Stats before compaction")
   375  	showLeveldbStats(db)
   376  
   377  	log.Info("Triggering compaction")
   378  	if err := db.Compact(nil, nil); err != nil {
   379  		log.Info("Compact err", "error", err)
   380  		return err
   381  	}
   382  	log.Info("Stats after compaction")
   383  	showLeveldbStats(db)
   384  	return nil
   385  }
   386  
   387  // dbGet shows the value of a given database key
   388  func dbGet(ctx *cli.Context) error {
   389  	if ctx.NArg() != 1 {
   390  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   391  	}
   392  	stack, _ := makeConfigNode(ctx)
   393  	defer stack.Close()
   394  
   395  	db := utils.MakeChainDatabase(ctx, stack, true)
   396  	defer db.Close()
   397  
   398  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   399  	if err != nil {
   400  		log.Info("Could not decode the key", "error", err)
   401  		return err
   402  	}
   403  
   404  	data, err := db.Get(key)
   405  	if err != nil {
   406  		log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
   407  		return err
   408  	}
   409  	fmt.Printf("key %#x: %#x\n", key, data)
   410  	return nil
   411  }
   412  
   413  // dbDelete deletes a key from the database
   414  func dbDelete(ctx *cli.Context) error {
   415  	if ctx.NArg() != 1 {
   416  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   417  	}
   418  	stack, _ := makeConfigNode(ctx)
   419  	defer stack.Close()
   420  
   421  	db := utils.MakeChainDatabase(ctx, stack, false)
   422  	defer db.Close()
   423  
   424  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   425  	if err != nil {
   426  		log.Info("Could not decode the key", "error", err)
   427  		return err
   428  	}
   429  	data, err := db.Get(key)
   430  	if err == nil {
   431  		fmt.Printf("Previous value: %#x\n", data)
   432  	}
   433  	if err = db.Delete(key); err != nil {
   434  		log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
   435  		return err
   436  	}
   437  	return nil
   438  }
   439  
   440  // dbPut overwrites a value in the database
   441  func dbPut(ctx *cli.Context) error {
   442  	if ctx.NArg() != 2 {
   443  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   444  	}
   445  	stack, _ := makeConfigNode(ctx)
   446  	defer stack.Close()
   447  
   448  	db := utils.MakeChainDatabase(ctx, stack, false)
   449  	defer db.Close()
   450  
   451  	var (
   452  		key   []byte
   453  		value []byte
   454  		data  []byte
   455  		err   error
   456  	)
   457  	key, err = common.ParseHexOrString(ctx.Args().Get(0))
   458  	if err != nil {
   459  		log.Info("Could not decode the key", "error", err)
   460  		return err
   461  	}
   462  	value, err = hexutil.Decode(ctx.Args().Get(1))
   463  	if err != nil {
   464  		log.Info("Could not decode the value", "error", err)
   465  		return err
   466  	}
   467  	data, err = db.Get(key)
   468  	if err == nil {
   469  		fmt.Printf("Previous value: %#x\n", data)
   470  	}
   471  	return db.Put(key, value)
   472  }
   473  
   474  // dbDumpTrie shows the key-value slots of a given storage trie
   475  func dbDumpTrie(ctx *cli.Context) error {
   476  	if ctx.NArg() < 3 {
   477  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   478  	}
   479  	stack, _ := makeConfigNode(ctx)
   480  	defer stack.Close()
   481  
   482  	db := utils.MakeChainDatabase(ctx, stack, true)
   483  	defer db.Close()
   484  
   485  	var (
   486  		state   []byte
   487  		storage []byte
   488  		account []byte
   489  		start   []byte
   490  		max     = int64(-1)
   491  		err     error
   492  	)
   493  	if state, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
   494  		log.Info("Could not decode the state root", "error", err)
   495  		return err
   496  	}
   497  	if account, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
   498  		log.Info("Could not decode the account hash", "error", err)
   499  		return err
   500  	}
   501  	if storage, err = hexutil.Decode(ctx.Args().Get(2)); err != nil {
   502  		log.Info("Could not decode the storage trie root", "error", err)
   503  		return err
   504  	}
   505  	if ctx.NArg() > 3 {
   506  		if start, err = hexutil.Decode(ctx.Args().Get(3)); err != nil {
   507  			log.Info("Could not decode the seek position", "error", err)
   508  			return err
   509  		}
   510  	}
   511  	if ctx.NArg() > 4 {
   512  		if max, err = strconv.ParseInt(ctx.Args().Get(4), 10, 64); err != nil {
   513  			log.Info("Could not decode the max count", "error", err)
   514  			return err
   515  		}
   516  	}
   517  	id := trie.StorageTrieID(common.BytesToHash(state), common.BytesToHash(account), common.BytesToHash(storage))
   518  	theTrie, err := trie.New(id, trie.NewDatabase(db))
   519  	if err != nil {
   520  		return err
   521  	}
   522  	var count int64
   523  	it := trie.NewIterator(theTrie.NodeIterator(start))
   524  	for it.Next() {
   525  		if max > 0 && count == max {
   526  			fmt.Printf("Exiting after %d values\n", count)
   527  			break
   528  		}
   529  		fmt.Printf("  %d. key %#x: %#x\n", count, it.Key, it.Value)
   530  		count++
   531  	}
   532  	return it.Err
   533  }
   534  
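        // freezerInspect dumps the index entries of the given freezer table between the start and
        // end positions.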
   535  func freezerInspect(ctx *cli.Context) error {
   536  	if ctx.NArg() < 4 {
   537  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   538  	}
   539  	var (
   540  		freezer = ctx.Args().Get(0)
   541  		table   = ctx.Args().Get(1)
   542  	)
   543  	start, err := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   544  	if err != nil {
   545  		log.Info("Could not read start-param", "err", err)
   546  		return err
   547  	}
   548  	end, err := strconv.ParseInt(ctx.Args().Get(3), 10, 64)
   549  	if err != nil {
   550  		log.Info("Could not read end-param", "err", err)
   551  		return err
   552  	}
   553  	stack, _ := makeConfigNode(ctx)
   554  	ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
   555  	stack.Close()
   556  	return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
   557  }
   558  
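        // importLDBdata imports key/value data from an exported RLP dump into the chain database,
        // optionally resuming from the given start position, and stops at the next batch on
        // SIGINT/SIGTERM.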
   559  func importLDBdata(ctx *cli.Context) error {
   560  	start := 0
   561  	switch ctx.NArg() {
   562  	case 1:
   563  		break
   564  	case 2:
   565  		s, err := strconv.Atoi(ctx.Args().Get(1))
   566  		if err != nil {
   567  			return fmt.Errorf("second arg must be an integer: %v", err)
   568  		}
   569  		start = s
   570  	default:
   571  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   572  	}
   573  	var (
   574  		fName     = ctx.Args().Get(0)
   575  		stack, _  = makeConfigNode(ctx)
   576  		interrupt = make(chan os.Signal, 1)
   577  		stop      = make(chan struct{})
   578  	)
   579  	defer stack.Close()
   580  	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
   581  	defer signal.Stop(interrupt)
   582  	defer close(interrupt)
   583  	go func() {
   584  		if _, ok := <-interrupt; ok {
   585  			log.Info("Interrupted during ldb import, stopping at next batch")
   586  		}
   587  		close(stop)
   588  	}()
   589  	db := utils.MakeChainDatabase(ctx, stack, false)
   590  	return utils.ImportLDBData(db, fName, int64(start), stop)
   591  }
   592  
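        // preimageIterator wraps a database iterator and yields only well-formed preimage entries
        // for export.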
   593  type preimageIterator struct {
   594  	iter ethdb.Iterator
   595  }
   596  
   597  func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
   598  	for iter.iter.Next() {
   599  		key := iter.iter.Key()
   600  		if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
   601  			return utils.OpBatchAdd, key, iter.iter.Value(), true
   602  		}
   603  	}
   604  	return 0, nil, nil, false
   605  }
   606  
   607  func (iter *preimageIterator) Release() {
   608  	iter.iter.Release()
   609  }
   610  
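        // snapshotIterator iterates the account and storage snapshot entries for export, first
        // emitting a deletion of the snapshot root key.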
   611  type snapshotIterator struct {
   612  	init    bool
   613  	account ethdb.Iterator
   614  	storage ethdb.Iterator
   615  }
   616  
   617  func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
   618  	if !iter.init {
   619  		iter.init = true
   620  		return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
   621  	}
   622  	for iter.account.Next() {
   623  		key := iter.account.Key()
   624  		if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
   625  			return utils.OpBatchAdd, key, iter.account.Value(), true
   626  		}
   627  	}
   628  	for iter.storage.Next() {
   629  		key := iter.storage.Key()
   630  		if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
   631  			return utils.OpBatchAdd, key, iter.storage.Value(), true
   632  		}
   633  	}
   634  	return 0, nil, nil, false
   635  }
   636  
   637  func (iter *snapshotIterator) Release() {
   638  	iter.account.Release()
   639  	iter.storage.Release()
   640  }
   641  
   642  // chainExporters defines the export scheme for all exportable chain data.
   643  var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
   644  	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
   645  		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
   646  		return &preimageIterator{iter: iter}
   647  	},
   648  	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
   649  		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
   650  		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
   651  		return &snapshotIterator{account: account, storage: storage}
   652  	},
   653  }
   654  
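        // exportChaindata exports the chosen kind of chain data (see chainExporters) to an RLP
        // dump file, stopping at the next batch on SIGINT/SIGTERM.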
   655  func exportChaindata(ctx *cli.Context) error {
   656  	if ctx.NArg() < 2 {
   657  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   658  	}
   659  	// Parse the required chain data type, make sure it's supported.
   660  	kind := ctx.Args().Get(0)
   661  	kind = strings.ToLower(strings.Trim(kind, " "))
   662  	exporter, ok := chainExporters[kind]
   663  	if !ok {
   664  		var kinds []string
   665  		for kind := range chainExporters {
   666  			kinds = append(kinds, kind)
   667  		}
   668  		return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
   669  	}
   670  	var (
   671  		stack, _  = makeConfigNode(ctx)
   672  		interrupt = make(chan os.Signal, 1)
   673  		stop      = make(chan struct{})
   674  	)
   675  	defer stack.Close()
   676  	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
   677  	defer signal.Stop(interrupt)
   678  	defer close(interrupt)
   679  	go func() {
   680  		if _, ok := <-interrupt; ok {
   681  			log.Info("Interrupted during db export, stopping at next batch")
   682  		}
   683  		close(stop)
   684  	}()
   685  	db := utils.MakeChainDatabase(ctx, stack, true)
   686  	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
   687  }
   688  
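        // showMetaData prints a table of chain metadata: the number of frozen items, the snapshot
        // generator status, and the current head block and head header.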
   689  func showMetaData(ctx *cli.Context) error {
   690  	stack, _ := makeConfigNode(ctx)
   691  	defer stack.Close()
   692  	db := utils.MakeChainDatabase(ctx, stack, true)
   693  	ancients, err := db.Ancients()
   694  	if err != nil {
   695  		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v\n", err)
   696  	}
   697  	data := rawdb.ReadChainMetadata(db)
   698  	data = append(data, []string{"frozen", fmt.Sprintf("%d items", ancients)})
   699  	data = append(data, []string{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))})
   700  	if b := rawdb.ReadHeadBlock(db); b != nil {
   701  		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
   702  		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
   703  		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (%#x)", b.Number(), b.Number())})
   704  	}
   705  	if h := rawdb.ReadHeadHeader(db); h != nil {
   706  		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
   707  		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
   708  		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)})
   709  	}
   710  	table := tablewriter.NewWriter(os.Stdout)
   711  	table.SetHeader([]string{"Field", "Value"})
   712  	table.AppendBulk(data)
   713  	table.Render()
   714  	return nil
   715  }