github.com/theQRL/go-zond@v0.1.1/cmd/gzond/dbcmd.go

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"bytes"
    21  	"fmt"
    22  	"os"
    23  	"os/signal"
    24  	"path/filepath"
    25  	"strconv"
    26  	"strings"
    27  	"syscall"
    28  	"time"
    29  
    30  	"github.com/olekukonko/tablewriter"
    31  	"github.com/theQRL/go-zond/cmd/utils"
    32  	"github.com/theQRL/go-zond/common"
    33  	"github.com/theQRL/go-zond/common/hexutil"
    34  	"github.com/theQRL/go-zond/console/prompt"
    35  	"github.com/theQRL/go-zond/core/rawdb"
    36  	"github.com/theQRL/go-zond/core/state/snapshot"
    37  	"github.com/theQRL/go-zond/crypto"
    38  	"github.com/theQRL/go-zond/internal/flags"
    39  	"github.com/theQRL/go-zond/log"
    40  	"github.com/theQRL/go-zond/trie"
    41  	"github.com/theQRL/go-zond/zonddb"
    42  	"github.com/urfave/cli/v2"
    43  )
    44  
    45  var (
    46  	removedbCommand = &cli.Command{
    47  		Action:    removeDB,
    48  		Name:      "removedb",
    49  		Usage:     "Remove blockchain and state databases",
    50  		ArgsUsage: "",
    51  		Flags:     utils.DatabasePathFlags,
    52  		Description: `
    53  Remove blockchain and state databases`,
    54  	}
    55  	dbCommand = &cli.Command{
    56  		Name:      "db",
    57  		Usage:     "Low level database operations",
    58  		ArgsUsage: "",
    59  		Subcommands: []*cli.Command{
    60  			dbInspectCmd,
    61  			dbStatCmd,
    62  			dbCompactCmd,
    63  			dbGetCmd,
    64  			dbDeleteCmd,
    65  			dbPutCmd,
    66  			dbGetSlotsCmd,
    67  			dbDumpFreezerIndex,
    68  			dbImportCmd,
    69  			dbExportCmd,
    70  			dbMetadataCmd,
    71  			dbCheckStateContentCmd,
    72  		},
    73  	}
    74  	dbInspectCmd = &cli.Command{
    75  		Action:    inspect,
    76  		Name:      "inspect",
    77  		ArgsUsage: "<prefix> <start>",
    78  		Flags: flags.Merge([]cli.Flag{
    79  			utils.SyncModeFlag,
    80  		}, utils.NetworkFlags, utils.DatabasePathFlags),
    81  		Usage:       "Inspect the storage size for each type of data in the database",
    82  		Description: `This command iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
    83  	}
    84  	dbCheckStateContentCmd = &cli.Command{
    85  		Action:    checkStateContent,
    86  		Name:      "check-state-content",
    87  		ArgsUsage: "<start (optional)>",
    88  		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
    89  		Usage:     "Verify that state data is cryptographically correct",
    90  		Description: `This command iterates the entire database for 32-byte keys, looking for RLP-encoded trie nodes.
    91  For each trie node encountered, it checks that the key corresponds to keccak256(value). If this is not the case,
    92  it indicates data corruption.`,
    93  	}
    94  	dbStatCmd = &cli.Command{
    95  		Action: dbStats,
    96  		Name:   "stats",
    97  		Usage:  "Print leveldb statistics",
    98  		Flags: flags.Merge([]cli.Flag{
    99  			utils.SyncModeFlag,
   100  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   101  	}
   102  	dbCompactCmd = &cli.Command{
   103  		Action: dbCompact,
   104  		Name:   "compact",
   105  		Usage:  "Compact leveldb database. WARNING: May take a very long time",
   106  		Flags: flags.Merge([]cli.Flag{
   107  			utils.SyncModeFlag,
   108  			utils.CacheFlag,
   109  			utils.CacheDatabaseFlag,
   110  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   111  		Description: `This command performs a database compaction.
   112  WARNING: This operation may take a very long time to finish, and may cause database
   113  corruption if it is aborted during execution!`,
   114  	}
   115  	dbGetCmd = &cli.Command{
   116  		Action:    dbGet,
   117  		Name:      "get",
   118  		Usage:     "Show the value of a database key",
   119  		ArgsUsage: "<hex-encoded key>",
   120  		Flags: flags.Merge([]cli.Flag{
   121  			utils.SyncModeFlag,
   122  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   123  		Description: "This command looks up the specified database key from the database.",
   124  	}
   125  	dbDeleteCmd = &cli.Command{
   126  		Action:    dbDelete,
   127  		Name:      "delete",
   128  		Usage:     "Delete a database key (WARNING: may corrupt your database)",
   129  		ArgsUsage: "<hex-encoded key>",
   130  		Flags: flags.Merge([]cli.Flag{
   131  			utils.SyncModeFlag,
   132  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   133  		Description: `This command deletes the specified database key from the database. 
   134  WARNING: This is a low-level operation which may cause database corruption!`,
   135  	}
   136  	dbPutCmd = &cli.Command{
   137  		Action:    dbPut,
   138  		Name:      "put",
   139  		Usage:     "Set the value of a database key (WARNING: may corrupt your database)",
   140  		ArgsUsage: "<hex-encoded key> <hex-encoded value>",
   141  		Flags: flags.Merge([]cli.Flag{
   142  			utils.SyncModeFlag,
   143  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   144  		Description: `This command sets a given database key to the given value. 
   145  WARNING: This is a low-level operation which may cause database corruption!`,
   146  	}
   147  	dbGetSlotsCmd = &cli.Command{
   148  		Action:    dbDumpTrie,
   149  		Name:      "dumptrie",
   150  		Usage:     "Show the storage key/values of a given storage trie",
   151  		ArgsUsage: "<hex-encoded state root> <hex-encoded account hash> <hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
   152  		Flags: flags.Merge([]cli.Flag{
   153  			utils.SyncModeFlag,
   154  			utils.StateSchemeFlag,
   155  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   156  		Description: "This command dumps out the key/value slots of the given storage trie, optionally starting at a given seek position and limited to a maximum number of elements.",
   157  	}
   158  	dbDumpFreezerIndex = &cli.Command{
   159  		Action:    freezerInspect,
   160  		Name:      "freezer-index",
   161  		Usage:     "Dump out the index of a specific freezer table",
   162  		ArgsUsage: "<freezer-type> <table-type> <start (int)> <end (int)>",
   163  		Flags: flags.Merge([]cli.Flag{
   164  			utils.SyncModeFlag,
   165  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   166  		Description: "This command displays information about the freezer index.",
   167  	}
   168  	dbImportCmd = &cli.Command{
   169  		Action:    importLDBdata,
   170  		Name:      "import",
   171  		Usage:     "Imports leveldb-data from an exported RLP dump.",
   172  		ArgsUsage: "<dumpfile> <start (optional)>",
   173  		Flags: flags.Merge([]cli.Flag{
   174  			utils.SyncModeFlag,
   175  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   176  		Description: "The import command imports leveldb data from an RLP-encoded dump file.",
   177  	}
   178  	dbExportCmd = &cli.Command{
   179  		Action:    exportChaindata,
   180  		Name:      "export",
   181  		Usage:     "Exports the chain data into an RLP dump. If the <dumpfile> has a .gz suffix, gzip compression will be used.",
   182  		ArgsUsage: "<type> <dumpfile>",
   183  		Flags: flags.Merge([]cli.Flag{
   184  			utils.SyncModeFlag,
   185  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   186  		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
   187  	}
   188  	dbMetadataCmd = &cli.Command{
   189  		Action: showMetaData,
   190  		Name:   "metadata",
   191  		Usage:  "Shows metadata about the chain status.",
   192  		Flags: flags.Merge([]cli.Flag{
   193  			utils.SyncModeFlag,
   194  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   195  		Description: "Shows metadata about the chain status.",
   196  	}
   197  )
   198  
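        // removeDB removes the full node state database, the full node ancient (freezer)
        // database and the light node database, prompting the user for confirmation
        // before each removal.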
   199  func removeDB(ctx *cli.Context) error {
   200  	stack, config := makeConfigNode(ctx)
   201  
   202  	// Remove the full node state database
   203  	path := stack.ResolvePath("chaindata")
   204  	if common.FileExist(path) {
   205  		confirmAndRemoveDB(path, "full node state database")
   206  	} else {
   207  		log.Info("Full node state database missing", "path", path)
   208  	}
   209  	// Remove the full node ancient database
   210  	path = config.Eth.DatabaseFreezer
   211  	switch {
   212  	case path == "":
   213  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   214  	case !filepath.IsAbs(path):
   215  		path = config.Node.ResolvePath(path)
   216  	}
   217  	if common.FileExist(path) {
   218  		confirmAndRemoveDB(path, "full node ancient database")
   219  	} else {
   220  		log.Info("Full node ancient database missing", "path", path)
   221  	}
   222  	// Remove the light node database
   223  	path = stack.ResolvePath("lightchaindata")
   224  	if common.FileExist(path) {
   225  		confirmAndRemoveDB(path, "light node database")
   226  	} else {
   227  		log.Info("Light node database missing", "path", path)
   228  	}
   229  	return nil
   230  }
   231  
   232  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   233  // folder if accepted.
   234  func confirmAndRemoveDB(database string, kind string) {
   235  	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   236  	switch {
   237  	case err != nil:
   238  		utils.Fatalf("%v", err)
   239  	case !confirm:
   240  		log.Info("Database deletion skipped", "path", database)
   241  	default:
   242  		start := time.Now()
   243  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   244  			// If we're at the top level folder, recurse into it
   245  			if path == database {
   246  				return nil
   247  			}
   248  			// Delete all the files, but not subfolders
   249  			if !info.IsDir() {
   250  				os.Remove(path)
   251  				return nil
   252  			}
   253  			return filepath.SkipDir
   254  		})
   255  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   256  	}
   257  }
   258  
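        // inspect iterates the entire key-value database and prints a per-category
        // breakdown of the storage size. The optional 'prefix' and 'start' arguments
        // restrict the iteration to a subset of the keys.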
   259  func inspect(ctx *cli.Context) error {
   260  	var (
   261  		prefix []byte
   262  		start  []byte
   263  	)
   264  	if ctx.NArg() > 2 {
   265  		return fmt.Errorf("max 2 arguments: %v", ctx.Command.ArgsUsage)
   266  	}
   267  	if ctx.NArg() >= 1 {
   268  		if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
   269  			return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
   270  		} else {
   271  			prefix = d
   272  		}
   273  	}
   274  	if ctx.NArg() >= 2 {
   275  		if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
   276  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   277  		} else {
   278  			start = d
   279  		}
   280  	}
   281  	stack, _ := makeConfigNode(ctx)
   282  	defer stack.Close()
   283  
   284  	db := utils.MakeChainDatabase(ctx, stack, true)
   285  	defer db.Close()
   286  
   287  	return rawdb.InspectDatabase(db, prefix, start)
   288  }
   289  
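        // checkStateContent iterates over all 32-byte keys in the database and verifies
        // that each stored value hashes back to its key (key == keccak256(value)),
        // reporting any mismatch as likely state corruption.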
   290  func checkStateContent(ctx *cli.Context) error {
   291  	var (
   292  		prefix []byte
   293  		start  []byte
   294  	)
   295  	if ctx.NArg() > 1 {
   296  		return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
   297  	}
   298  	if ctx.NArg() > 0 {
   299  		if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
   300  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   301  		} else {
   302  			start = d
   303  		}
   304  	}
   305  	stack, _ := makeConfigNode(ctx)
   306  	defer stack.Close()
   307  
   308  	db := utils.MakeChainDatabase(ctx, stack, true)
   309  	defer db.Close()
   310  	var (
   311  		it        = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
   312  		hasher    = crypto.NewKeccakState()
   313  		got       = make([]byte, 32)
   314  		errs      int
   315  		count     int
   316  		startTime = time.Now()
   317  		lastLog   = time.Now()
   318  	)
   319  	for it.Next() {
   320  		count++
   321  		k := it.Key()
   322  		v := it.Value()
   323  		hasher.Reset()
   324  		hasher.Write(v)
   325  		hasher.Read(got)
   326  		if !bytes.Equal(k, got) {
   327  			errs++
   328  			fmt.Printf("Error at %#x\n", k)
   329  			fmt.Printf("  Hash:  %#x\n", got)
   330  			fmt.Printf("  Data:  %#x\n", v)
   331  		}
   332  		if time.Since(lastLog) > 8*time.Second {
   333  			log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime)))
   334  			lastLog = time.Now()
   335  		}
   336  	}
   337  	if err := it.Error(); err != nil {
   338  		return err
   339  	}
   340  	log.Info("Iterated the state content", "errors", errs, "items", count)
   341  	return nil
   342  }
   343  
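        // showLeveldbStats prints the 'leveldb.stats' and 'leveldb.iostats' properties
        // of the given database.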
   344  func showLeveldbStats(db zonddb.KeyValueStater) {
   345  	if stats, err := db.Stat("leveldb.stats"); err != nil {
   346  		log.Warn("Failed to read database stats", "error", err)
   347  	} else {
   348  		fmt.Println(stats)
   349  	}
   350  	if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
   351  		log.Warn("Failed to read database iostats", "error", err)
   352  	} else {
   353  		fmt.Println(ioStats)
   354  	}
   355  }
   356  
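        // dbStats prints the leveldb statistics of the chain database.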
   357  func dbStats(ctx *cli.Context) error {
   358  	stack, _ := makeConfigNode(ctx)
   359  	defer stack.Close()
   360  
   361  	db := utils.MakeChainDatabase(ctx, stack, true)
   362  	defer db.Close()
   363  
   364  	showLeveldbStats(db)
   365  	return nil
   366  }
   367  
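        // dbCompact performs a full compaction of the chain database, printing the
        // database statistics before and after the operation.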
   368  func dbCompact(ctx *cli.Context) error {
   369  	stack, _ := makeConfigNode(ctx)
   370  	defer stack.Close()
   371  
   372  	db := utils.MakeChainDatabase(ctx, stack, false)
   373  	defer db.Close()
   374  
   375  	log.Info("Stats before compaction")
   376  	showLeveldbStats(db)
   377  
   378  	log.Info("Triggering compaction")
   379  	if err := db.Compact(nil, nil); err != nil {
   380  		log.Info("Compact err", "error", err)
   381  		return err
   382  	}
   383  	log.Info("Stats after compaction")
   384  	showLeveldbStats(db)
   385  	return nil
   386  }
   387  
   388  // dbGet shows the value of a given database key
   389  func dbGet(ctx *cli.Context) error {
   390  	if ctx.NArg() != 1 {
   391  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   392  	}
   393  	stack, _ := makeConfigNode(ctx)
   394  	defer stack.Close()
   395  
   396  	db := utils.MakeChainDatabase(ctx, stack, true)
   397  	defer db.Close()
   398  
   399  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   400  	if err != nil {
   401  		log.Info("Could not decode the key", "error", err)
   402  		return err
   403  	}
   404  
   405  	data, err := db.Get(key)
   406  	if err != nil {
   407  		log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
   408  		return err
   409  	}
   410  	fmt.Printf("key %#x: %#x\n", key, data)
   411  	return nil
   412  }
   413  
   414  // dbDelete deletes a key from the database
   415  func dbDelete(ctx *cli.Context) error {
   416  	if ctx.NArg() != 1 {
   417  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   418  	}
   419  	stack, _ := makeConfigNode(ctx)
   420  	defer stack.Close()
   421  
   422  	db := utils.MakeChainDatabase(ctx, stack, false)
   423  	defer db.Close()
   424  
   425  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   426  	if err != nil {
   427  		log.Info("Could not decode the key", "error", err)
   428  		return err
   429  	}
   430  	data, err := db.Get(key)
   431  	if err == nil {
   432  		fmt.Printf("Previous value: %#x\n", data)
   433  	}
   434  	if err = db.Delete(key); err != nil {
   435  		log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
   436  		return err
   437  	}
   438  	return nil
   439  }
   440  
   441  // dbPut overwrites a value in the database
   442  func dbPut(ctx *cli.Context) error {
   443  	if ctx.NArg() != 2 {
   444  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   445  	}
   446  	stack, _ := makeConfigNode(ctx)
   447  	defer stack.Close()
   448  
   449  	db := utils.MakeChainDatabase(ctx, stack, false)
   450  	defer db.Close()
   451  
   452  	var (
   453  		key   []byte
   454  		value []byte
   455  		data  []byte
   456  		err   error
   457  	)
   458  	key, err = common.ParseHexOrString(ctx.Args().Get(0))
   459  	if err != nil {
   460  		log.Info("Could not decode the key", "error", err)
   461  		return err
   462  	}
   463  	value, err = hexutil.Decode(ctx.Args().Get(1))
   464  	if err != nil {
   465  		log.Info("Could not decode the value", "error", err)
   466  		return err
   467  	}
   468  	data, err = db.Get(key)
   469  	if err == nil {
   470  		fmt.Printf("Previous value: %#x\n", data)
   471  	}
   472  	return db.Put(key, value)
   473  }
   474  
   475  // dbDumpTrie shows the key-value slots of a given storage trie
   476  func dbDumpTrie(ctx *cli.Context) error {
   477  	if ctx.NArg() < 3 {
   478  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   479  	}
   480  	stack, _ := makeConfigNode(ctx)
   481  	defer stack.Close()
   482  
   483  	db := utils.MakeChainDatabase(ctx, stack, true)
   484  	defer db.Close()
   485  
   486  	triedb := utils.MakeTrieDatabase(ctx, db, false, true)
   487  	defer triedb.Close()
   488  
   489  	var (
   490  		state   []byte
   491  		storage []byte
   492  		account []byte
   493  		start   []byte
   494  		max     = int64(-1)
   495  		err     error
   496  	)
   497  	if state, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
   498  		log.Info("Could not decode the state root", "error", err)
   499  		return err
   500  	}
   501  	if account, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
   502  		log.Info("Could not decode the account hash", "error", err)
   503  		return err
   504  	}
   505  	if storage, err = hexutil.Decode(ctx.Args().Get(2)); err != nil {
   506  		log.Info("Could not decode the storage trie root", "error", err)
   507  		return err
   508  	}
   509  	if ctx.NArg() > 3 {
   510  		if start, err = hexutil.Decode(ctx.Args().Get(3)); err != nil {
   511  			log.Info("Could not decode the seek position", "error", err)
   512  			return err
   513  		}
   514  	}
   515  	if ctx.NArg() > 4 {
   516  		if max, err = strconv.ParseInt(ctx.Args().Get(4), 10, 64); err != nil {
   517  			log.Info("Could not decode the max count", "error", err)
   518  			return err
   519  		}
   520  	}
   521  	id := trie.StorageTrieID(common.BytesToHash(state), common.BytesToHash(account), common.BytesToHash(storage))
   522  	theTrie, err := trie.New(id, triedb)
   523  	if err != nil {
   524  		return err
   525  	}
   526  	trieIt, err := theTrie.NodeIterator(start)
   527  	if err != nil {
   528  		return err
   529  	}
   530  	var count int64
   531  	it := trie.NewIterator(trieIt)
   532  	for it.Next() {
   533  		if max > 0 && count == max {
   534  			fmt.Printf("Exiting after %d values\n", count)
   535  			break
   536  		}
   537  		fmt.Printf("  %d. key %#x: %#x\n", count, it.Key, it.Value)
   538  		count++
   539  	}
   540  	return it.Err
   541  }
   542  
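        // freezerInspect dumps the index entries of the given freezer table between the
        // provided start and end positions.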
   543  func freezerInspect(ctx *cli.Context) error {
   544  	if ctx.NArg() < 4 {
   545  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   546  	}
   547  	var (
   548  		freezer = ctx.Args().Get(0)
   549  		table   = ctx.Args().Get(1)
   550  	)
   551  	start, err := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   552  	if err != nil {
   553  		log.Info("Could not read start-param", "err", err)
   554  		return err
   555  	}
   556  	end, err := strconv.ParseInt(ctx.Args().Get(3), 10, 64)
   557  	if err != nil {
   558  		log.Info("Could not read end-param", "err", err)
   559  		return err
   560  	}
   561  	stack, _ := makeConfigNode(ctx)
   562  	ancient := stack.ResolveAncient("chaindata", ctx.String(utils.AncientFlag.Name))
   563  	stack.Close()
   564  	return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
   565  }
   566  
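        // importLDBdata imports key-value data from an exported RLP dump file into the
        // chain database, optionally resuming from the given start position. SIGINT or
        // SIGTERM stops the import at the next batch boundary.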
   567  func importLDBdata(ctx *cli.Context) error {
   568  	start := 0
   569  	switch ctx.NArg() {
   570  	case 1:
   571  		break
   572  	case 2:
   573  		s, err := strconv.Atoi(ctx.Args().Get(1))
   574  		if err != nil {
   575  			return fmt.Errorf("second arg must be an integer: %v", err)
   576  		}
   577  		start = s
   578  	default:
   579  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   580  	}
   581  	var (
   582  		fName     = ctx.Args().Get(0)
   583  		stack, _  = makeConfigNode(ctx)
   584  		interrupt = make(chan os.Signal, 1)
   585  		stop      = make(chan struct{})
   586  	)
   587  	defer stack.Close()
   588  	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
   589  	defer signal.Stop(interrupt)
   590  	defer close(interrupt)
   591  	go func() {
   592  		if _, ok := <-interrupt; ok {
   593  			log.Info("Interrupted during ldb import, stopping at next batch")
   594  		}
   595  		close(stop)
   596  	}()
   597  	db := utils.MakeChainDatabase(ctx, stack, false)
   598  	return utils.ImportLDBData(db, fName, int64(start), stop)
   599  }
   600  
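        // preimageIterator wraps a raw database iterator and yields every well-formed
        // trie preimage entry (preimage prefix + 32-byte hash key) as a batch-add
        // operation.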
   601  type preimageIterator struct {
   602  	iter zonddb.Iterator
   603  }
   604  
   605  func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
   606  	for iter.iter.Next() {
   607  		key := iter.iter.Key()
   608  		if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
   609  			return utils.OpBatchAdd, key, iter.iter.Value(), true
   610  		}
   611  	}
   612  	return 0, nil, nil, false
   613  }
   614  
   615  func (iter *preimageIterator) Release() {
   616  	iter.iter.Release()
   617  }
   618  
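        // snapshotIterator iterates the snapshot data: it first emits a deletion of the
        // snapshot root key, then yields all account and storage snapshot entries as
        // batch-add operations.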
   619  type snapshotIterator struct {
   620  	init    bool
   621  	account zonddb.Iterator
   622  	storage zonddb.Iterator
   623  }
   624  
   625  func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
   626  	if !iter.init {
   627  		iter.init = true
   628  		return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
   629  	}
   630  	for iter.account.Next() {
   631  		key := iter.account.Key()
   632  		if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
   633  			return utils.OpBatchAdd, key, iter.account.Value(), true
   634  		}
   635  	}
   636  	for iter.storage.Next() {
   637  		key := iter.storage.Key()
   638  		if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
   639  			return utils.OpBatchAdd, key, iter.storage.Value(), true
   640  		}
   641  	}
   642  	return 0, nil, nil, false
   643  }
   644  
   645  func (iter *snapshotIterator) Release() {
   646  	iter.account.Release()
   647  	iter.storage.Release()
   648  }
   649  
   650  // chainExporters defines the export scheme for all exportable chain data.
   651  var chainExporters = map[string]func(db zonddb.Database) utils.ChainDataIterator{
   652  	"preimage": func(db zonddb.Database) utils.ChainDataIterator {
   653  		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
   654  		return &preimageIterator{iter: iter}
   655  	},
   656  	"snapshot": func(db zonddb.Database) utils.ChainDataIterator {
   657  		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
   658  		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
   659  		return &snapshotIterator{account: account, storage: storage}
   660  	},
   661  }
   662  
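        // exportChaindata exports the selected category of chain data (see chainExporters)
        // into an RLP dump file. SIGINT or SIGTERM stops the export at the next batch
        // boundary.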
   663  func exportChaindata(ctx *cli.Context) error {
   664  	if ctx.NArg() < 2 {
   665  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   666  	}
   667  	// Parse the required chain data type, make sure it's supported.
   668  	kind := ctx.Args().Get(0)
   669  	kind = strings.ToLower(strings.Trim(kind, " "))
   670  	exporter, ok := chainExporters[kind]
   671  	if !ok {
   672  		var kinds []string
   673  		for kind := range chainExporters {
   674  			kinds = append(kinds, kind)
   675  		}
   676  		return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
   677  	}
   678  	var (
   679  		stack, _  = makeConfigNode(ctx)
   680  		interrupt = make(chan os.Signal, 1)
   681  		stop      = make(chan struct{})
   682  	)
   683  	defer stack.Close()
   684  	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
   685  	defer signal.Stop(interrupt)
   686  	defer close(interrupt)
   687  	go func() {
   688  		if _, ok := <-interrupt; ok {
   689  			log.Info("Interrupted during db export, stopping at next batch")
   690  		}
   691  		close(stop)
   692  	}()
   693  	db := utils.MakeChainDatabase(ctx, stack, true)
   694  	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
   695  }
   696  
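        // showMetaData prints a table of metadata about the chain: the number of frozen
        // (ancient) items, the snapshot generator status and the current head block and
        // head header.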
   697  func showMetaData(ctx *cli.Context) error {
   698  	stack, _ := makeConfigNode(ctx)
   699  	defer stack.Close()
   700  	db := utils.MakeChainDatabase(ctx, stack, true)
   701  	ancients, err := db.Ancients()
   702  	if err != nil {
   703  		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v\n", err)
   704  	}
   705  	data := rawdb.ReadChainMetadata(db)
   706  	data = append(data, []string{"frozen", fmt.Sprintf("%d items", ancients)})
   707  	data = append(data, []string{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))})
   708  	if b := rawdb.ReadHeadBlock(db); b != nil {
   709  		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
   710  		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
   711  		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (%#x)", b.Number(), b.Number())})
   712  	}
   713  	if h := rawdb.ReadHeadHeader(db); h != nil {
   714  		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
   715  		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
   716  		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)})
   717  	}
   718  	table := tablewriter.NewWriter(os.Stdout)
   719  	table.SetHeader([]string{"Field", "Value"})
   720  	table.AppendBulk(data)
   721  	table.Render()
   722  	return nil
   723  }