github.com/bearnetworkchain/go-bearnetwork@v1.10.19-0.20220604150648-d63890c2e42b/cmd/geth/dbcmd.go (about)

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"bytes"
    21  	"fmt"
    22  	"os"
    23  	"os/signal"
    24  	"path/filepath"
    25  	"sort"
    26  	"strconv"
    27  	"strings"
    28  	"syscall"
    29  	"time"
    30  
    31  	"github.com/bearnetworkchain/go-bearnetwork/cmd/utils"
    32  	"github.com/bearnetworkchain/go-bearnetwork/common"
    33  	"github.com/bearnetworkchain/go-bearnetwork/common/hexutil"
    34  	"github.com/bearnetworkchain/go-bearnetwork/console/prompt"
    35  	"github.com/bearnetworkchain/go-bearnetwork/core/rawdb"
    36  	"github.com/bearnetworkchain/go-bearnetwork/core/state/snapshot"
    37  	"github.com/bearnetworkchain/go-bearnetwork/core/types"
    38  	"github.com/bearnetworkchain/go-bearnetwork/crypto"
    39  	"github.com/bearnetworkchain/go-bearnetwork/ethdb"
    40  	"github.com/bearnetworkchain/go-bearnetwork/log"
    41  	"github.com/bearnetworkchain/go-bearnetwork/trie"
    42  	"github.com/olekukonko/tablewriter"
    43  	"gopkg.in/urfave/cli.v1"
    44  )
    45  
    46  var (
    47  	removedbCommand = cli.Command{
    48  		Action:    utils.MigrateFlags(removeDB),
    49  		Name:      "removedb",
    50  		Usage:     "Remove blockchain and state databases",
    51  		ArgsUsage: "",
    52  		Flags:     utils.DatabasePathFlags,
    53  		Category:  "DATABASE COMMANDS",
    54  		Description: `
    55  Remove blockchain and state databases`,
    56  	}
    57  	dbCommand = cli.Command{
    58  		Name:      "db",
    59  		Usage:     "Low level database operations",
    60  		ArgsUsage: "",
    61  		Category:  "DATABASE COMMANDS",
    62  		Subcommands: []cli.Command{
    63  			dbInspectCmd,
    64  			dbStatCmd,
    65  			dbCompactCmd,
    66  			dbGetCmd,
    67  			dbDeleteCmd,
    68  			dbPutCmd,
    69  			dbGetSlotsCmd,
    70  			dbDumpFreezerIndex,
    71  			dbImportCmd,
    72  			dbExportCmd,
    73  			dbMetadataCmd,
    74  			dbMigrateFreezerCmd,
    75  			dbCheckStateContentCmd,
    76  		},
    77  	}
    78  	dbInspectCmd = cli.Command{
    79  		Action:    utils.MigrateFlags(inspect),
    80  		Name:      "inspect",
    81  		ArgsUsage: "<prefix> <start>",
    82  		Flags: utils.GroupFlags([]cli.Flag{
    83  			utils.SyncModeFlag,
    84  		}, utils.NetworkFlags, utils.DatabasePathFlags),
    85  		Usage:       "Inspect the storage size for each type of data in the database",
    86  		Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
    87  	}
    88  	dbCheckStateContentCmd = cli.Command{
    89  		Action:    utils.MigrateFlags(checkStateContent),
    90  		Name:      "check-state-content",
    91  		ArgsUsage: "<start (optional)>",
    92  		Flags:     utils.GroupFlags(utils.NetworkFlags, utils.DatabasePathFlags),
    93  		Usage:     "Verify that state data is cryptographically correct",
    94  		Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes.
    95  For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
    96  a data corruption.`,
    97  	}
    98  	dbStatCmd = cli.Command{
    99  		Action: utils.MigrateFlags(dbStats),
   100  		Name:   "stats",
   101  		Usage:  "Print leveldb statistics",
   102  		Flags: utils.GroupFlags([]cli.Flag{
   103  			utils.SyncModeFlag,
   104  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   105  	}
   106  	dbCompactCmd = cli.Command{
   107  		Action: utils.MigrateFlags(dbCompact),
   108  		Name:   "compact",
   109  		Usage:  "Compact leveldb database. WARNING: May take a very long time",
   110  		Flags: utils.GroupFlags([]cli.Flag{
   111  			utils.SyncModeFlag,
   112  			utils.CacheFlag,
   113  			utils.CacheDatabaseFlag,
   114  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   115  		Description: `This command performs a database compaction. 
   116  WARNING: This operation may take a very long time to finish, and may cause database
   117  corruption if it is aborted during execution'!`,
   118  	}
   119  	dbGetCmd = cli.Command{
   120  		Action:    utils.MigrateFlags(dbGet),
   121  		Name:      "get",
   122  		Usage:     "Show the value of a database key",
   123  		ArgsUsage: "<hex-encoded key>",
   124  		Flags: utils.GroupFlags([]cli.Flag{
   125  			utils.SyncModeFlag,
   126  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   127  		Description: "This command looks up the specified database key from the database.",
   128  	}
   129  	dbDeleteCmd = cli.Command{
   130  		Action:    utils.MigrateFlags(dbDelete),
   131  		Name:      "delete",
   132  		Usage:     "Delete a database key (WARNING: may corrupt your database)",
   133  		ArgsUsage: "<hex-encoded key>",
   134  		Flags: utils.GroupFlags([]cli.Flag{
   135  			utils.SyncModeFlag,
   136  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   137  		Description: `This command deletes the specified database key from the database. 
   138  WARNING: This is a low-level operation which may cause database corruption!`,
   139  	}
   140  	dbPutCmd = cli.Command{
   141  		Action:    utils.MigrateFlags(dbPut),
   142  		Name:      "put",
   143  		Usage:     "Set the value of a database key (WARNING: may corrupt your database)",
   144  		ArgsUsage: "<hex-encoded key> <hex-encoded value>",
   145  		Flags: utils.GroupFlags([]cli.Flag{
   146  			utils.SyncModeFlag,
   147  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   148  		Description: `This command sets a given database key to the given value. 
   149  WARNING: This is a low-level operation which may cause database corruption!`,
   150  	}
   151  	dbGetSlotsCmd = cli.Command{
   152  		Action:    utils.MigrateFlags(dbDumpTrie),
   153  		Name:      "dumptrie",
   154  		Usage:     "Show the storage key/values of a given storage trie",
   155  		ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
   156  		Flags: utils.GroupFlags([]cli.Flag{
   157  			utils.SyncModeFlag,
   158  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   159  		Description: "This command looks up the specified database key from the database.",
   160  	}
   161  	dbDumpFreezerIndex = cli.Command{
   162  		Action:    utils.MigrateFlags(freezerInspect),
   163  		Name:      "freezer-index",
   164  		Usage:     "Dump out the index of a given freezer type",
   165  		ArgsUsage: "<type> <start (int)> <end (int)>",
   166  		Flags: utils.GroupFlags([]cli.Flag{
   167  			utils.SyncModeFlag,
   168  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   169  		Description: "This command displays information about the freezer index.",
   170  	}
   171  	dbImportCmd = cli.Command{
   172  		Action:    utils.MigrateFlags(importLDBdata),
   173  		Name:      "import",
   174  		Usage:     "Imports leveldb-data from an exported RLP dump.",
   175  		ArgsUsage: "<dumpfile> <start (optional)",
   176  		Flags: utils.GroupFlags([]cli.Flag{
   177  			utils.SyncModeFlag,
   178  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   179  		Description: "The import command imports the specific chain data from an RLP encoded stream.",
   180  	}
   181  	dbExportCmd = cli.Command{
   182  		Action:    utils.MigrateFlags(exportChaindata),
   183  		Name:      "export",
   184  		Usage:     "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
   185  		ArgsUsage: "<type> <dumpfile>",
   186  		Flags: utils.GroupFlags([]cli.Flag{
   187  			utils.SyncModeFlag,
   188  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   189  		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
   190  	}
   191  	dbMetadataCmd = cli.Command{
   192  		Action: utils.MigrateFlags(showMetaData),
   193  		Name:   "metadata",
   194  		Usage:  "Shows metadata about the chain status.",
   195  		Flags: utils.GroupFlags([]cli.Flag{
   196  			utils.SyncModeFlag,
   197  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   198  		Description: "Shows metadata about the chain status.",
   199  	}
   200  	dbMigrateFreezerCmd = cli.Command{
   201  		Action:    utils.MigrateFlags(freezerMigrate),
   202  		Name:      "freezer-migrate",
   203  		Usage:     "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
   204  		ArgsUsage: "",
   205  		Flags: utils.GroupFlags([]cli.Flag{
   206  			utils.SyncModeFlag,
   207  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   208  		Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
   209  WARNING: please back-up the receipt files in your ancients before running this command.`,
   210  	}
   211  )
   212  
   213  func removeDB(ctx *cli.Context) error {
   214  	stack, config := makeConfigNode(ctx)
   215  
   216  	// Remove the full node state database
   217  	path := stack.ResolvePath("chaindata")
   218  	if common.FileExist(path) {
   219  		confirmAndRemoveDB(path, "full node state database")
   220  	} else {
   221  		log.Info("Full node state database missing", "path", path)
   222  	}
   223  	// Remove the full node ancient database
   224  	path = config.Eth.DatabaseFreezer
   225  	switch {
   226  	case path == "":
   227  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   228  	case !filepath.IsAbs(path):
   229  		path = config.Node.ResolvePath(path)
   230  	}
   231  	if common.FileExist(path) {
   232  		confirmAndRemoveDB(path, "full node ancient database")
   233  	} else {
   234  		log.Info("Full node ancient database missing", "path", path)
   235  	}
   236  	// Remove the light node database
   237  	path = stack.ResolvePath("lightchaindata")
   238  	if common.FileExist(path) {
   239  		confirmAndRemoveDB(path, "light node database")
   240  	} else {
   241  		log.Info("Light node database missing", "path", path)
   242  	}
   243  	return nil
   244  }
   245  
   246  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   247  // folder if accepted.
   248  func confirmAndRemoveDB(database string, kind string) {
   249  	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   250  	switch {
   251  	case err != nil:
   252  		utils.Fatalf("%v", err)
   253  	case !confirm:
   254  		log.Info("Database deletion skipped", "path", database)
   255  	default:
   256  		start := time.Now()
   257  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   258  			// If we're at the top level folder, recurse into
   259  			if path == database {
   260  				return nil
   261  			}
   262  			// Delete all the files, but not subfolders
   263  			if !info.IsDir() {
   264  				os.Remove(path)
   265  				return nil
   266  			}
   267  			return filepath.SkipDir
   268  		})
   269  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   270  	}
   271  }
   272  
   273  func inspect(ctx *cli.Context) error {
   274  	var (
   275  		prefix []byte
   276  		start  []byte
   277  	)
   278  	if ctx.NArg() > 2 {
   279  		return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage)
   280  	}
   281  	if ctx.NArg() >= 1 {
   282  		if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
   283  			return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
   284  		} else {
   285  			prefix = d
   286  		}
   287  	}
   288  	if ctx.NArg() >= 2 {
   289  		if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
   290  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   291  		} else {
   292  			start = d
   293  		}
   294  	}
   295  	stack, _ := makeConfigNode(ctx)
   296  	defer stack.Close()
   297  
   298  	db := utils.MakeChainDatabase(ctx, stack, true)
   299  	defer db.Close()
   300  
   301  	return rawdb.InspectDatabase(db, prefix, start)
   302  }
   303  
   304  func checkStateContent(ctx *cli.Context) error {
   305  	var (
   306  		prefix []byte
   307  		start  []byte
   308  	)
   309  	if ctx.NArg() > 1 {
   310  		return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
   311  	}
   312  	if ctx.NArg() > 0 {
   313  		if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
   314  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   315  		} else {
   316  			start = d
   317  		}
   318  	}
   319  	stack, _ := makeConfigNode(ctx)
   320  	defer stack.Close()
   321  
   322  	db := utils.MakeChainDatabase(ctx, stack, true)
   323  	defer db.Close()
   324  	var (
   325  		it        = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
   326  		hasher    = crypto.NewKeccakState()
   327  		got       = make([]byte, 32)
   328  		errs      int
   329  		count     int
   330  		startTime = time.Now()
   331  		lastLog   = time.Now()
   332  	)
   333  	for it.Next() {
   334  		count++
   335  		k := it.Key()
   336  		v := it.Value()
   337  		hasher.Reset()
   338  		hasher.Write(v)
   339  		hasher.Read(got)
   340  		if !bytes.Equal(k, got) {
   341  			errs++
   342  			fmt.Printf("Error at 0x%x\n", k)
   343  			fmt.Printf("  Hash:  0x%x\n", got)
   344  			fmt.Printf("  Data:  0x%x\n", v)
   345  		}
   346  		if time.Since(lastLog) > 8*time.Second {
   347  			log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime)))
   348  			lastLog = time.Now()
   349  		}
   350  	}
   351  	if err := it.Error(); err != nil {
   352  		return err
   353  	}
   354  	log.Info("Iterated the state content", "errors", errs, "items", count)
   355  	return nil
   356  }
   357  
   358  func showLeveldbStats(db ethdb.KeyValueStater) {
   359  	if stats, err := db.Stat("leveldb.stats"); err != nil {
   360  		log.Warn("Failed to read database stats", "error", err)
   361  	} else {
   362  		fmt.Println(stats)
   363  	}
   364  	if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
   365  		log.Warn("Failed to read database iostats", "error", err)
   366  	} else {
   367  		fmt.Println(ioStats)
   368  	}
   369  }
   370  
   371  func dbStats(ctx *cli.Context) error {
   372  	stack, _ := makeConfigNode(ctx)
   373  	defer stack.Close()
   374  
   375  	db := utils.MakeChainDatabase(ctx, stack, true)
   376  	defer db.Close()
   377  
   378  	showLeveldbStats(db)
   379  	return nil
   380  }
   381  
   382  func dbCompact(ctx *cli.Context) error {
   383  	stack, _ := makeConfigNode(ctx)
   384  	defer stack.Close()
   385  
   386  	db := utils.MakeChainDatabase(ctx, stack, false)
   387  	defer db.Close()
   388  
   389  	log.Info("Stats before compaction")
   390  	showLeveldbStats(db)
   391  
   392  	log.Info("Triggering compaction")
   393  	if err := db.Compact(nil, nil); err != nil {
   394  		log.Info("Compact err", "error", err)
   395  		return err
   396  	}
   397  	log.Info("Stats after compaction")
   398  	showLeveldbStats(db)
   399  	return nil
   400  }
   401  
   402  // dbGet shows the value of a given database key
   403  func dbGet(ctx *cli.Context) error {
   404  	if ctx.NArg() != 1 {
   405  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   406  	}
   407  	stack, _ := makeConfigNode(ctx)
   408  	defer stack.Close()
   409  
   410  	db := utils.MakeChainDatabase(ctx, stack, true)
   411  	defer db.Close()
   412  
   413  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   414  	if err != nil {
   415  		log.Info("Could not decode the key", "error", err)
   416  		return err
   417  	}
   418  
   419  	data, err := db.Get(key)
   420  	if err != nil {
   421  		log.Info("Get operation failed", "key", fmt.Sprintf("0x%#x", key), "error", err)
   422  		return err
   423  	}
   424  	fmt.Printf("key %#x: %#x\n", key, data)
   425  	return nil
   426  }
   427  
   428  // dbDelete deletes a key from the database
   429  func dbDelete(ctx *cli.Context) error {
   430  	if ctx.NArg() != 1 {
   431  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   432  	}
   433  	stack, _ := makeConfigNode(ctx)
   434  	defer stack.Close()
   435  
   436  	db := utils.MakeChainDatabase(ctx, stack, false)
   437  	defer db.Close()
   438  
   439  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   440  	if err != nil {
   441  		log.Info("Could not decode the key", "error", err)
   442  		return err
   443  	}
   444  	data, err := db.Get(key)
   445  	if err == nil {
   446  		fmt.Printf("Previous value: %#x\n", data)
   447  	}
   448  	if err = db.Delete(key); err != nil {
   449  		log.Info("Delete operation returned an error", "key", fmt.Sprintf("0x%#x", key), "error", err)
   450  		return err
   451  	}
   452  	return nil
   453  }
   454  
   455  // dbPut overwrite a value in the database
   456  func dbPut(ctx *cli.Context) error {
   457  	if ctx.NArg() != 2 {
   458  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   459  	}
   460  	stack, _ := makeConfigNode(ctx)
   461  	defer stack.Close()
   462  
   463  	db := utils.MakeChainDatabase(ctx, stack, false)
   464  	defer db.Close()
   465  
   466  	var (
   467  		key   []byte
   468  		value []byte
   469  		data  []byte
   470  		err   error
   471  	)
   472  	key, err = common.ParseHexOrString(ctx.Args().Get(0))
   473  	if err != nil {
   474  		log.Info("Could not decode the key", "error", err)
   475  		return err
   476  	}
   477  	value, err = hexutil.Decode(ctx.Args().Get(1))
   478  	if err != nil {
   479  		log.Info("Could not decode the value", "error", err)
   480  		return err
   481  	}
   482  	data, err = db.Get(key)
   483  	if err == nil {
   484  		fmt.Printf("Previous value: %#x\n", data)
   485  	}
   486  	return db.Put(key, value)
   487  }
   488  
   489  // dbDumpTrie shows the key-value slots of a given storage trie
   490  func dbDumpTrie(ctx *cli.Context) error {
   491  	if ctx.NArg() < 1 {
   492  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   493  	}
   494  	stack, _ := makeConfigNode(ctx)
   495  	defer stack.Close()
   496  
   497  	db := utils.MakeChainDatabase(ctx, stack, true)
   498  	defer db.Close()
   499  	var (
   500  		root  []byte
   501  		start []byte
   502  		max   = int64(-1)
   503  		err   error
   504  	)
   505  	if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
   506  		log.Info("Could not decode the root", "error", err)
   507  		return err
   508  	}
   509  	stRoot := common.BytesToHash(root)
   510  	if ctx.NArg() >= 2 {
   511  		if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
   512  			log.Info("Could not decode the seek position", "error", err)
   513  			return err
   514  		}
   515  	}
   516  	if ctx.NArg() >= 3 {
   517  		if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
   518  			log.Info("Could not decode the max count", "error", err)
   519  			return err
   520  		}
   521  	}
   522  	theTrie, err := trie.New(stRoot, trie.NewDatabase(db))
   523  	if err != nil {
   524  		return err
   525  	}
   526  	var count int64
   527  	it := trie.NewIterator(theTrie.NodeIterator(start))
   528  	for it.Next() {
   529  		if max > 0 && count == max {
   530  			fmt.Printf("Exiting after %d values\n", count)
   531  			break
   532  		}
   533  		fmt.Printf("  %d. key %#x: %#x\n", count, it.Key, it.Value)
   534  		count++
   535  	}
   536  	return it.Err
   537  }
   538  
   539  func freezerInspect(ctx *cli.Context) error {
   540  	var (
   541  		start, end    int64
   542  		disableSnappy bool
   543  		err           error
   544  	)
   545  	if ctx.NArg() < 3 {
   546  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   547  	}
   548  	kind := ctx.Args().Get(0)
   549  	if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok {
   550  		var options []string
   551  		for opt := range rawdb.FreezerNoSnappy {
   552  			options = append(options, opt)
   553  		}
   554  		sort.Strings(options)
   555  		return fmt.Errorf("Could read freezer-type '%v'. Available options: %v", kind, options)
   556  	} else {
   557  		disableSnappy = noSnap
   558  	}
   559  	if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil {
   560  		log.Info("Could read start-param", "error", err)
   561  		return err
   562  	}
   563  	if end, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
   564  		log.Info("Could read count param", "error", err)
   565  		return err
   566  	}
   567  	stack, _ := makeConfigNode(ctx)
   568  	defer stack.Close()
   569  	path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   570  	log.Info("Opening freezer", "location", path, "name", kind)
   571  	if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
   572  		return err
   573  	} else {
   574  		f.DumpIndex(start, end)
   575  	}
   576  	return nil
   577  }
   578  
   579  func importLDBdata(ctx *cli.Context) error {
   580  	start := 0
   581  	switch ctx.NArg() {
   582  	case 1:
   583  		break
   584  	case 2:
   585  		s, err := strconv.Atoi(ctx.Args().Get(1))
   586  		if err != nil {
   587  			return fmt.Errorf("second arg must be an integer: %v", err)
   588  		}
   589  		start = s
   590  	default:
   591  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   592  	}
   593  	var (
   594  		fName     = ctx.Args().Get(0)
   595  		stack, _  = makeConfigNode(ctx)
   596  		interrupt = make(chan os.Signal, 1)
   597  		stop      = make(chan struct{})
   598  	)
   599  	defer stack.Close()
   600  	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
   601  	defer signal.Stop(interrupt)
   602  	defer close(interrupt)
   603  	go func() {
   604  		if _, ok := <-interrupt; ok {
   605  			log.Info("Interrupted during ldb import, stopping at next batch")
   606  		}
   607  		close(stop)
   608  	}()
   609  	db := utils.MakeChainDatabase(ctx, stack, false)
   610  	return utils.ImportLDBData(db, fName, int64(start), stop)
   611  }
   612  
// preimageIterator wraps a raw database iterator and yields only well-formed
// preimage entries (preimage prefix followed by a 32-byte hash) for export.
type preimageIterator struct {
	iter ethdb.Iterator // underlying iterator over the preimage table
}
   616  
   617  func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
   618  	for iter.iter.Next() {
   619  		key := iter.iter.Key()
   620  		if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
   621  			return utils.OpBatchAdd, key, iter.iter.Value(), true
   622  		}
   623  	}
   624  	return 0, nil, nil, false
   625  }
   626  
// Release frees the resources held by the underlying database iterator.
func (iter *preimageIterator) Release() {
	iter.iter.Release()
}
   630  
// snapshotIterator streams snapshot data for export: first a delete-op for
// the snapshot root key, then account entries, then storage entries.
type snapshotIterator struct {
	init    bool           // whether the initial root-key delete has been emitted
	account ethdb.Iterator // iterator over the account snapshot table
	storage ethdb.Iterator // iterator over the storage snapshot table
}
   636  
   637  func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
   638  	if !iter.init {
   639  		iter.init = true
   640  		return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
   641  	}
   642  	for iter.account.Next() {
   643  		key := iter.account.Key()
   644  		if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
   645  			return utils.OpBatchAdd, key, iter.account.Value(), true
   646  		}
   647  	}
   648  	for iter.storage.Next() {
   649  		key := iter.storage.Key()
   650  		if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
   651  			return utils.OpBatchAdd, key, iter.storage.Value(), true
   652  		}
   653  	}
   654  	return 0, nil, nil, false
   655  }
   656  
// Release frees the resources held by both underlying database iterators.
func (iter *snapshotIterator) Release() {
	iter.account.Release()
	iter.storage.Release()
}
   661  
// chainExporters defines the export scheme for all exportable chain data.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
	// Hash-to-preimage mappings, iterated under the preimage table prefix.
	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
		return &preimageIterator{iter: iter}
	},
	// Flat snapshot data: account entries followed by storage-slot entries.
	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
		return &snapshotIterator{account: account, storage: storage}
	},
}
   674  
   675  func exportChaindata(ctx *cli.Context) error {
   676  	if ctx.NArg() < 2 {
   677  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   678  	}
   679  	// Parse the required chain data type, make sure it's supported.
   680  	kind := ctx.Args().Get(0)
   681  	kind = strings.ToLower(strings.Trim(kind, " "))
   682  	exporter, ok := chainExporters[kind]
   683  	if !ok {
   684  		var kinds []string
   685  		for kind := range chainExporters {
   686  			kinds = append(kinds, kind)
   687  		}
   688  		return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
   689  	}
   690  	var (
   691  		stack, _  = makeConfigNode(ctx)
   692  		interrupt = make(chan os.Signal, 1)
   693  		stop      = make(chan struct{})
   694  	)
   695  	defer stack.Close()
   696  	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
   697  	defer signal.Stop(interrupt)
   698  	defer close(interrupt)
   699  	go func() {
   700  		if _, ok := <-interrupt; ok {
   701  			log.Info("Interrupted during db export, stopping at next batch")
   702  		}
   703  		close(stop)
   704  	}()
   705  	db := utils.MakeChainDatabase(ctx, stack, true)
   706  	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
   707  }
   708  
   709  func showMetaData(ctx *cli.Context) error {
   710  	stack, _ := makeConfigNode(ctx)
   711  	defer stack.Close()
   712  	db := utils.MakeChainDatabase(ctx, stack, true)
   713  	ancients, err := db.Ancients()
   714  	if err != nil {
   715  		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
   716  	}
   717  	pp := func(val *uint64) string {
   718  		if val == nil {
   719  			return "<nil>"
   720  		}
   721  		return fmt.Sprintf("%d (0x%x)", *val, *val)
   722  	}
   723  	data := [][]string{
   724  		{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
   725  		{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
   726  		{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
   727  		{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
   728  	if b := rawdb.ReadHeadBlock(db); b != nil {
   729  		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
   730  		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
   731  		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (0x%x)", b.Number(), b.Number())})
   732  	}
   733  	if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
   734  		data = append(data, []string{"SkeletonSyncStatus", string(b)})
   735  	}
   736  	if h := rawdb.ReadHeadHeader(db); h != nil {
   737  		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
   738  		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
   739  		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (0x%x)", h.Number, h.Number)})
   740  	}
   741  	data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
   742  		{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
   743  		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
   744  		{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
   745  		{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
   746  		{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
   747  		{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
   748  		{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
   749  		{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
   750  		{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
   751  	}...)
   752  	table := tablewriter.NewWriter(os.Stdout)
   753  	table.SetHeader([]string{"Field", "Value"})
   754  	table.AppendBulk(data)
   755  	table.Render()
   756  	return nil
   757  }
   758  
// freezerMigrate checks the freezer for receipts in the legacy storage
// format and, if any are found, rewrites the entire receipts table into the
// current format via MigrateTable.
func freezerMigrate(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	// Check first block for legacy receipt format
	numAncients, err := db.Ancients()
	if err != nil {
		return err
	}
	if numAncients < 1 {
		log.Info("No receipts in freezer to migrate")
		return nil
	}

	// Probe the first non-empty receipt entry; if it is not legacy-encoded,
	// there is nothing to do.
	isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
	if err != nil {
		return err
	}
	if !isFirstLegacy {
		log.Info("No legacy receipts to migrate")
		return nil
	}

	log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
	start := time.Now()
	if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
		return err
	}
	// Close explicitly so any close/flush failure is surfaced before success
	// is logged. NOTE(review): the deferred Close above then runs against an
	// already-closed database — presumably harmless, but worth confirming.
	if err := db.Close(); err != nil {
		return err
	}
	log.Info("Migration finished", "duration", time.Since(start))

	return nil
}
   797  
   798  // dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
   799  // non-empty receipt and checks its format. The index of this first non-empty element is
   800  // the second return parameter.
   801  func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
   802  	// Check first block for legacy receipt format
   803  	numAncients, err := db.Ancients()
   804  	if err != nil {
   805  		return false, 0, err
   806  	}
   807  	if numAncients < 1 {
   808  		return false, 0, nil
   809  	}
   810  	if firstIdx >= numAncients {
   811  		return false, firstIdx, nil
   812  	}
   813  	var (
   814  		legacy       bool
   815  		blob         []byte
   816  		emptyRLPList = []byte{192}
   817  	)
   818  	// Find first block with non-empty receipt, only if
   819  	// the index is not already provided.
   820  	if firstIdx == 0 {
   821  		for i := uint64(0); i < numAncients; i++ {
   822  			blob, err = db.Ancient("receipts", i)
   823  			if err != nil {
   824  				return false, 0, err
   825  			}
   826  			if len(blob) == 0 {
   827  				continue
   828  			}
   829  			if !bytes.Equal(blob, emptyRLPList) {
   830  				firstIdx = i
   831  				break
   832  			}
   833  		}
   834  	}
   835  	// Is first non-empty receipt legacy?
   836  	first, err := db.Ancient("receipts", firstIdx)
   837  	if err != nil {
   838  		return false, 0, err
   839  	}
   840  	legacy, err = types.IsLegacyStoredReceipts(first)
   841  	return legacy, firstIdx, err
   842  }