github.com/sexdefi/go-ethereum@v0.0.0-20230807164010-b4cd42fe399f/cmd/geth/dbcmd.go (about)

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"bytes"
    21  	"fmt"
    22  	"os"
    23  	"os/signal"
    24  	"path/filepath"
    25  	"strconv"
    26  	"strings"
    27  	"syscall"
    28  	"time"
    29  
    30  	"github.com/sexdefi/go-ethereum/cmd/utils"
    31  	"github.com/sexdefi/go-ethereum/common"
    32  	"github.com/sexdefi/go-ethereum/common/hexutil"
    33  	"github.com/sexdefi/go-ethereum/console/prompt"
    34  	"github.com/sexdefi/go-ethereum/core/rawdb"
    35  	"github.com/sexdefi/go-ethereum/core/state/snapshot"
    36  	"github.com/sexdefi/go-ethereum/core/types"
    37  	"github.com/sexdefi/go-ethereum/crypto"
    38  	"github.com/sexdefi/go-ethereum/ethdb"
    39  	"github.com/sexdefi/go-ethereum/internal/flags"
    40  	"github.com/sexdefi/go-ethereum/log"
    41  	"github.com/sexdefi/go-ethereum/trie"
    42  	"github.com/olekukonko/tablewriter"
    43  	"github.com/urfave/cli/v2"
    44  )
    45  
// Command definitions for the `geth removedb` command and the `geth db`
// low-level database tooling family. Each sub-command's Action maps to a
// handler function defined later in this file.
var (
	// removedbCommand interactively deletes the chain/state databases.
	removedbCommand = &cli.Command{
		Action:    removeDB,
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: "",
		Flags:     utils.DatabasePathFlags,
		Description: `
Remove blockchain and state databases`,
	}
	// dbCommand groups all low-level database sub-commands.
	dbCommand = &cli.Command{
		Name:      "db",
		Usage:     "Low level database operations",
		ArgsUsage: "",
		Subcommands: []*cli.Command{
			dbInspectCmd,
			dbStatCmd,
			dbCompactCmd,
			dbGetCmd,
			dbDeleteCmd,
			dbPutCmd,
			dbGetSlotsCmd,
			dbDumpFreezerIndex,
			dbImportCmd,
			dbExportCmd,
			dbMetadataCmd,
			dbMigrateFreezerCmd,
			dbCheckStateContentCmd,
		},
	}
	// dbInspectCmd iterates the database and reports per-category sizes.
	dbInspectCmd = &cli.Command{
		Action:    inspect,
		Name:      "inspect",
		ArgsUsage: "<prefix> <start>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Usage:       "Inspect the storage size for each type of data in the database",
		Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
	}
	// dbCheckStateContentCmd verifies that 32-byte keys equal keccak256(value).
	dbCheckStateContentCmd = &cli.Command{
		Action:    checkStateContent,
		Name:      "check-state-content",
		ArgsUsage: "<start (optional)>",
		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
		Usage:     "Verify that state data is cryptographically correct",
		Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes.
For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
a data corruption.`,
	}
	// dbStatCmd prints the backing leveldb's internal statistics.
	dbStatCmd = &cli.Command{
		Action: dbStats,
		Name:   "stats",
		Usage:  "Print leveldb statistics",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
	}
	// dbCompactCmd triggers a full leveldb compaction.
	dbCompactCmd = &cli.Command{
		Action: dbCompact,
		Name:   "compact",
		Usage:  "Compact leveldb database. WARNING: May take a very long time",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
			utils.CacheFlag,
			utils.CacheDatabaseFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: `This command performs a database compaction. 
WARNING: This operation may take a very long time to finish, and may cause database
corruption if it is aborted during execution'!`,
	}
	// dbGetCmd reads and prints a single key's value.
	dbGetCmd = &cli.Command{
		Action:    dbGet,
		Name:      "get",
		Usage:     "Show the value of a database key",
		ArgsUsage: "<hex-encoded key>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "This command looks up the specified database key from the database.",
	}
	// dbDeleteCmd removes a single key (dangerous, low-level operation).
	dbDeleteCmd = &cli.Command{
		Action:    dbDelete,
		Name:      "delete",
		Usage:     "Delete a database key (WARNING: may corrupt your database)",
		ArgsUsage: "<hex-encoded key>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: `This command deletes the specified database key from the database. 
WARNING: This is a low-level operation which may cause database corruption!`,
	}
	// dbPutCmd writes a single key/value pair (dangerous, low-level operation).
	dbPutCmd = &cli.Command{
		Action:    dbPut,
		Name:      "put",
		Usage:     "Set the value of a database key (WARNING: may corrupt your database)",
		ArgsUsage: "<hex-encoded key> <hex-encoded value>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: `This command sets a given database key to the given value. 
WARNING: This is a low-level operation which may cause database corruption!`,
	}
	// dbGetSlotsCmd dumps the key/value slots of a storage trie.
	dbGetSlotsCmd = &cli.Command{
		Action:    dbDumpTrie,
		Name:      "dumptrie",
		Usage:     "Show the storage key/values of a given storage trie",
		ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "This command looks up the specified database key from the database.",
	}
	// dbDumpFreezerIndex dumps the index entries of one freezer table.
	dbDumpFreezerIndex = &cli.Command{
		Action:    freezerInspect,
		Name:      "freezer-index",
		Usage:     "Dump out the index of a specific freezer table",
		ArgsUsage: "<freezer-type> <table-type> <start (int)> <end (int)>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "This command displays information about the freezer index.",
	}
	// dbImportCmd re-imports an RLP dump produced by `geth db export`.
	dbImportCmd = &cli.Command{
		Action:    importLDBdata,
		Name:      "import",
		Usage:     "Imports leveldb-data from an exported RLP dump.",
		ArgsUsage: "<dumpfile> <start (optional)",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "The import command imports the specific chain data from an RLP encoded stream.",
	}
	// dbExportCmd exports a data category (see chainExporters) as an RLP dump.
	dbExportCmd = &cli.Command{
		Action:    exportChaindata,
		Name:      "export",
		Usage:     "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
		ArgsUsage: "<type> <dumpfile>",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
	}
	// dbMetadataCmd prints chain status markers in a table.
	dbMetadataCmd = &cli.Command{
		Action: showMetaData,
		Name:   "metadata",
		Usage:  "Shows metadata about the chain status.",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: "Shows metadata about the chain status.",
	}
	// dbMigrateFreezerCmd rewrites legacy-format receipts in the freezer.
	dbMigrateFreezerCmd = &cli.Command{
		Action:    freezerMigrate,
		Name:      "freezer-migrate",
		Usage:     "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
		ArgsUsage: "",
		Flags: flags.Merge([]cli.Flag{
			utils.SyncModeFlag,
		}, utils.NetworkFlags, utils.DatabasePathFlags),
		Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
WARNING: please back-up the receipt files in your ancients before running this command.`,
	}
)
   210  
   211  func removeDB(ctx *cli.Context) error {
   212  	stack, config := makeConfigNode(ctx)
   213  
   214  	// Remove the full node state database
   215  	path := stack.ResolvePath("chaindata")
   216  	if common.FileExist(path) {
   217  		confirmAndRemoveDB(path, "full node state database")
   218  	} else {
   219  		log.Info("Full node state database missing", "path", path)
   220  	}
   221  	// Remove the full node ancient database
   222  	path = config.Eth.DatabaseFreezer
   223  	switch {
   224  	case path == "":
   225  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   226  	case !filepath.IsAbs(path):
   227  		path = config.Node.ResolvePath(path)
   228  	}
   229  	if common.FileExist(path) {
   230  		confirmAndRemoveDB(path, "full node ancient database")
   231  	} else {
   232  		log.Info("Full node ancient database missing", "path", path)
   233  	}
   234  	// Remove the light node database
   235  	path = stack.ResolvePath("lightchaindata")
   236  	if common.FileExist(path) {
   237  		confirmAndRemoveDB(path, "light node database")
   238  	} else {
   239  		log.Info("Light node database missing", "path", path)
   240  	}
   241  	return nil
   242  }
   243  
   244  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   245  // folder if accepted.
   246  func confirmAndRemoveDB(database string, kind string) {
   247  	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   248  	switch {
   249  	case err != nil:
   250  		utils.Fatalf("%v", err)
   251  	case !confirm:
   252  		log.Info("Database deletion skipped", "path", database)
   253  	default:
   254  		start := time.Now()
   255  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   256  			// If we're at the top level folder, recurse into
   257  			if path == database {
   258  				return nil
   259  			}
   260  			// Delete all the files, but not subfolders
   261  			if !info.IsDir() {
   262  				os.Remove(path)
   263  				return nil
   264  			}
   265  			return filepath.SkipDir
   266  		})
   267  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   268  	}
   269  }
   270  
   271  func inspect(ctx *cli.Context) error {
   272  	var (
   273  		prefix []byte
   274  		start  []byte
   275  	)
   276  	if ctx.NArg() > 2 {
   277  		return fmt.Errorf("max 2 arguments: %v", ctx.Command.ArgsUsage)
   278  	}
   279  	if ctx.NArg() >= 1 {
   280  		if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
   281  			return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
   282  		} else {
   283  			prefix = d
   284  		}
   285  	}
   286  	if ctx.NArg() >= 2 {
   287  		if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
   288  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   289  		} else {
   290  			start = d
   291  		}
   292  	}
   293  	stack, _ := makeConfigNode(ctx)
   294  	defer stack.Close()
   295  
   296  	db := utils.MakeChainDatabase(ctx, stack, true)
   297  	defer db.Close()
   298  
   299  	return rawdb.InspectDatabase(db, prefix, start)
   300  }
   301  
   302  func checkStateContent(ctx *cli.Context) error {
   303  	var (
   304  		prefix []byte
   305  		start  []byte
   306  	)
   307  	if ctx.NArg() > 1 {
   308  		return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
   309  	}
   310  	if ctx.NArg() > 0 {
   311  		if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
   312  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   313  		} else {
   314  			start = d
   315  		}
   316  	}
   317  	stack, _ := makeConfigNode(ctx)
   318  	defer stack.Close()
   319  
   320  	db := utils.MakeChainDatabase(ctx, stack, true)
   321  	defer db.Close()
   322  	var (
   323  		it        = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
   324  		hasher    = crypto.NewKeccakState()
   325  		got       = make([]byte, 32)
   326  		errs      int
   327  		count     int
   328  		startTime = time.Now()
   329  		lastLog   = time.Now()
   330  	)
   331  	for it.Next() {
   332  		count++
   333  		k := it.Key()
   334  		v := it.Value()
   335  		hasher.Reset()
   336  		hasher.Write(v)
   337  		hasher.Read(got)
   338  		if !bytes.Equal(k, got) {
   339  			errs++
   340  			fmt.Printf("Error at %#x\n", k)
   341  			fmt.Printf("  Hash:  %#x\n", got)
   342  			fmt.Printf("  Data:  %#x\n", v)
   343  		}
   344  		if time.Since(lastLog) > 8*time.Second {
   345  			log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime)))
   346  			lastLog = time.Now()
   347  		}
   348  	}
   349  	if err := it.Error(); err != nil {
   350  		return err
   351  	}
   352  	log.Info("Iterated the state content", "errors", errs, "items", count)
   353  	return nil
   354  }
   355  
   356  func showLeveldbStats(db ethdb.KeyValueStater) {
   357  	if stats, err := db.Stat("leveldb.stats"); err != nil {
   358  		log.Warn("Failed to read database stats", "error", err)
   359  	} else {
   360  		fmt.Println(stats)
   361  	}
   362  	if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
   363  		log.Warn("Failed to read database iostats", "error", err)
   364  	} else {
   365  		fmt.Println(ioStats)
   366  	}
   367  }
   368  
   369  func dbStats(ctx *cli.Context) error {
   370  	stack, _ := makeConfigNode(ctx)
   371  	defer stack.Close()
   372  
   373  	db := utils.MakeChainDatabase(ctx, stack, true)
   374  	defer db.Close()
   375  
   376  	showLeveldbStats(db)
   377  	return nil
   378  }
   379  
   380  func dbCompact(ctx *cli.Context) error {
   381  	stack, _ := makeConfigNode(ctx)
   382  	defer stack.Close()
   383  
   384  	db := utils.MakeChainDatabase(ctx, stack, false)
   385  	defer db.Close()
   386  
   387  	log.Info("Stats before compaction")
   388  	showLeveldbStats(db)
   389  
   390  	log.Info("Triggering compaction")
   391  	if err := db.Compact(nil, nil); err != nil {
   392  		log.Info("Compact err", "error", err)
   393  		return err
   394  	}
   395  	log.Info("Stats after compaction")
   396  	showLeveldbStats(db)
   397  	return nil
   398  }
   399  
   400  // dbGet shows the value of a given database key
   401  func dbGet(ctx *cli.Context) error {
   402  	if ctx.NArg() != 1 {
   403  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   404  	}
   405  	stack, _ := makeConfigNode(ctx)
   406  	defer stack.Close()
   407  
   408  	db := utils.MakeChainDatabase(ctx, stack, true)
   409  	defer db.Close()
   410  
   411  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   412  	if err != nil {
   413  		log.Info("Could not decode the key", "error", err)
   414  		return err
   415  	}
   416  
   417  	data, err := db.Get(key)
   418  	if err != nil {
   419  		log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
   420  		return err
   421  	}
   422  	fmt.Printf("key %#x: %#x\n", key, data)
   423  	return nil
   424  }
   425  
   426  // dbDelete deletes a key from the database
   427  func dbDelete(ctx *cli.Context) error {
   428  	if ctx.NArg() != 1 {
   429  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   430  	}
   431  	stack, _ := makeConfigNode(ctx)
   432  	defer stack.Close()
   433  
   434  	db := utils.MakeChainDatabase(ctx, stack, false)
   435  	defer db.Close()
   436  
   437  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   438  	if err != nil {
   439  		log.Info("Could not decode the key", "error", err)
   440  		return err
   441  	}
   442  	data, err := db.Get(key)
   443  	if err == nil {
   444  		fmt.Printf("Previous value: %#x\n", data)
   445  	}
   446  	if err = db.Delete(key); err != nil {
   447  		log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
   448  		return err
   449  	}
   450  	return nil
   451  }
   452  
   453  // dbPut overwrite a value in the database
   454  func dbPut(ctx *cli.Context) error {
   455  	if ctx.NArg() != 2 {
   456  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   457  	}
   458  	stack, _ := makeConfigNode(ctx)
   459  	defer stack.Close()
   460  
   461  	db := utils.MakeChainDatabase(ctx, stack, false)
   462  	defer db.Close()
   463  
   464  	var (
   465  		key   []byte
   466  		value []byte
   467  		data  []byte
   468  		err   error
   469  	)
   470  	key, err = common.ParseHexOrString(ctx.Args().Get(0))
   471  	if err != nil {
   472  		log.Info("Could not decode the key", "error", err)
   473  		return err
   474  	}
   475  	value, err = hexutil.Decode(ctx.Args().Get(1))
   476  	if err != nil {
   477  		log.Info("Could not decode the value", "error", err)
   478  		return err
   479  	}
   480  	data, err = db.Get(key)
   481  	if err == nil {
   482  		fmt.Printf("Previous value: %#x\n", data)
   483  	}
   484  	return db.Put(key, value)
   485  }
   486  
   487  // dbDumpTrie shows the key-value slots of a given storage trie
   488  func dbDumpTrie(ctx *cli.Context) error {
   489  	if ctx.NArg() < 1 {
   490  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   491  	}
   492  	stack, _ := makeConfigNode(ctx)
   493  	defer stack.Close()
   494  
   495  	db := utils.MakeChainDatabase(ctx, stack, true)
   496  	defer db.Close()
   497  	var (
   498  		root  []byte
   499  		start []byte
   500  		max   = int64(-1)
   501  		err   error
   502  	)
   503  	if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
   504  		log.Info("Could not decode the root", "error", err)
   505  		return err
   506  	}
   507  	stRoot := common.BytesToHash(root)
   508  	if ctx.NArg() >= 2 {
   509  		if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
   510  			log.Info("Could not decode the seek position", "error", err)
   511  			return err
   512  		}
   513  	}
   514  	if ctx.NArg() >= 3 {
   515  		if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
   516  			log.Info("Could not decode the max count", "error", err)
   517  			return err
   518  		}
   519  	}
   520  	theTrie, err := trie.New(common.Hash{}, stRoot, trie.NewDatabase(db))
   521  	if err != nil {
   522  		return err
   523  	}
   524  	var count int64
   525  	it := trie.NewIterator(theTrie.NodeIterator(start))
   526  	for it.Next() {
   527  		if max > 0 && count == max {
   528  			fmt.Printf("Exiting after %d values\n", count)
   529  			break
   530  		}
   531  		fmt.Printf("  %d. key %#x: %#x\n", count, it.Key, it.Value)
   532  		count++
   533  	}
   534  	return it.Err
   535  }
   536  
   537  func freezerInspect(ctx *cli.Context) error {
   538  	if ctx.NArg() < 4 {
   539  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   540  	}
   541  	var (
   542  		freezer = ctx.Args().Get(0)
   543  		table   = ctx.Args().Get(1)
   544  	)
   545  	start, err := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   546  	if err != nil {
   547  		log.Info("Could not read start-param", "err", err)
   548  		return err
   549  	}
   550  	end, err := strconv.ParseInt(ctx.Args().Get(3), 10, 64)
   551  	if err != nil {
   552  		log.Info("Could not read count param", "err", err)
   553  		return err
   554  	}
   555  	stack, _ := makeConfigNode(ctx)
   556  	defer stack.Close()
   557  
   558  	db := utils.MakeChainDatabase(ctx, stack, true)
   559  	defer db.Close()
   560  
   561  	ancient, err := db.AncientDatadir()
   562  	if err != nil {
   563  		log.Info("Failed to retrieve ancient root", "err", err)
   564  		return err
   565  	}
   566  	return rawdb.InspectFreezerTable(ancient, freezer, table, start, end)
   567  }
   568  
// importLDBdata imports key/value pairs from an RLP dump (as produced by
// `geth db export`) into the chain database. The optional second argument
// selects the batch number to resume from. SIGINT/SIGTERM stop the import
// gracefully at the next batch boundary.
func importLDBdata(ctx *cli.Context) error {
	start := 0
	switch ctx.NArg() {
	case 1:
		break
	case 2:
		// Optional resume point (batch number).
		s, err := strconv.Atoi(ctx.Args().Get(1))
		if err != nil {
			return fmt.Errorf("second arg must be an integer: %v", err)
		}
		start = s
	default:
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	var (
		fName     = ctx.Args().Get(0)
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		// ok == true: an actual signal arrived — log and request a stop.
		// ok == false: the deferred close(interrupt) ran at function exit,
		// which merely unblocks this goroutine so it can terminate.
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during ldb import, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, false)
	return utils.ImportLDBData(db, fName, int64(start), stop)
}
   602  
// preimageIterator wraps a raw database iterator and yields only well-formed
// preimage entries, in the operation format consumed by utils.ExportChaindata.
type preimageIterator struct {
	iter ethdb.Iterator // underlying raw key/value iterator
}
   606  
   607  func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
   608  	for iter.iter.Next() {
   609  		key := iter.iter.Key()
   610  		if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
   611  			return utils.OpBatchAdd, key, iter.iter.Value(), true
   612  		}
   613  	}
   614  	return 0, nil, nil, false
   615  }
   616  
// Release tears down the underlying database iterator.
func (iter *preimageIterator) Release() {
	iter.iter.Release()
}
   620  
// snapshotIterator yields the flattened snapshot entries (accounts first,
// then storage slots) in the operation format consumed by
// utils.ExportChaindata. The very first call emits a deletion of the
// snapshot root key.
type snapshotIterator struct {
	init    bool           // whether the initial root-key deletion was emitted
	account ethdb.Iterator // iterator over account snapshot entries
	storage ethdb.Iterator // iterator over storage snapshot entries
}
   626  
   627  func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
   628  	if !iter.init {
   629  		iter.init = true
   630  		return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
   631  	}
   632  	for iter.account.Next() {
   633  		key := iter.account.Key()
   634  		if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
   635  			return utils.OpBatchAdd, key, iter.account.Value(), true
   636  		}
   637  	}
   638  	for iter.storage.Next() {
   639  		key := iter.storage.Key()
   640  		if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
   641  			return utils.OpBatchAdd, key, iter.storage.Value(), true
   642  		}
   643  	}
   644  	return 0, nil, nil, false
   645  }
   646  
// Release tears down both underlying database iterators.
func (iter *snapshotIterator) Release() {
	iter.account.Release()
	iter.storage.Release()
}
   651  
// chainExporters defines the export scheme for all exportable chain data.
// The map key is the <type> argument accepted by `geth db export`.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
	// "preimage" exports the trie hash -> preimage mappings.
	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
		return &preimageIterator{iter: iter}
	},
	// "snapshot" exports the flattened account and storage snapshot entries.
	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
		return &snapshotIterator{account: account, storage: storage}
	},
}
   664  
// exportChaindata exports the selected data category (see chainExporters)
// into an RLP dump file, gzip-compressed if the filename ends in .gz.
// SIGINT/SIGTERM stop the export gracefully at the next batch boundary.
func exportChaindata(ctx *cli.Context) error {
	if ctx.NArg() < 2 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	// Parse the required chain data type, make sure it's supported.
	kind := ctx.Args().Get(0)
	kind = strings.ToLower(strings.Trim(kind, " "))
	exporter, ok := chainExporters[kind]
	if !ok {
		// Unknown type: list the supported ones in the error.
		var kinds []string
		for kind := range chainExporters {
			kinds = append(kinds, kind)
		}
		return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
	}
	var (
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		// ok == true: a signal arrived — log and request a stop.
		// ok == false: the deferred close(interrupt) ran at function exit,
		// which merely unblocks this goroutine so it can terminate.
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during db export, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, true)
	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
   698  
   699  func showMetaData(ctx *cli.Context) error {
   700  	stack, _ := makeConfigNode(ctx)
   701  	defer stack.Close()
   702  	db := utils.MakeChainDatabase(ctx, stack, true)
   703  	ancients, err := db.Ancients()
   704  	if err != nil {
   705  		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
   706  	}
   707  	pp := func(val *uint64) string {
   708  		if val == nil {
   709  			return "<nil>"
   710  		}
   711  		return fmt.Sprintf("%d (%#x)", *val, *val)
   712  	}
   713  	data := [][]string{
   714  		{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
   715  		{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
   716  		{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
   717  		{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
   718  	if b := rawdb.ReadHeadBlock(db); b != nil {
   719  		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
   720  		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
   721  		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (%#x)", b.Number(), b.Number())})
   722  	}
   723  	if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
   724  		data = append(data, []string{"SkeletonSyncStatus", string(b)})
   725  	}
   726  	if h := rawdb.ReadHeadHeader(db); h != nil {
   727  		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
   728  		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
   729  		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)})
   730  	}
   731  	data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
   732  		{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
   733  		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
   734  		{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
   735  		{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
   736  		{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
   737  		{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
   738  		{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
   739  		{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
   740  		{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
   741  	}...)
   742  	table := tablewriter.NewWriter(os.Stdout)
   743  	table.SetHeader([]string{"Field", "Value"})
   744  	table.AppendBulk(data)
   745  	table.Render()
   746  	return nil
   747  }
   748  
// freezerMigrate rewrites legacy-format receipts stored in the freezer to
// the current storage format. It is a no-op when the freezer is empty or
// the first non-empty receipt is already in the new format.
func freezerMigrate(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	// Check first block for legacy receipt format
	numAncients, err := db.Ancients()
	if err != nil {
		return err
	}
	if numAncients < 1 {
		log.Info("No receipts in freezer to migrate")
		return nil
	}

	isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
	if err != nil {
		return err
	}
	if !isFirstLegacy {
		log.Info("No legacy receipts to migrate")
		return nil
	}

	log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
	start := time.Now()
	if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
		return err
	}
	// NOTE(review): db.Close is called explicitly here and will run again
	// via the defer above — presumably the second close is harmless, but
	// worth confirming against the ethdb implementation.
	if err := db.Close(); err != nil {
		return err
	}
	log.Info("Migration finished", "duration", time.Since(start))

	return nil
}
   787  
   788  // dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
   789  // non-empty receipt and checks its format. The index of this first non-empty element is
   790  // the second return parameter.
   791  func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
   792  	// Check first block for legacy receipt format
   793  	numAncients, err := db.Ancients()
   794  	if err != nil {
   795  		return false, 0, err
   796  	}
   797  	if numAncients < 1 {
   798  		return false, 0, nil
   799  	}
   800  	if firstIdx >= numAncients {
   801  		return false, firstIdx, nil
   802  	}
   803  	var (
   804  		legacy       bool
   805  		blob         []byte
   806  		emptyRLPList = []byte{192}
   807  	)
   808  	// Find first block with non-empty receipt, only if
   809  	// the index is not already provided.
   810  	if firstIdx == 0 {
   811  		for i := uint64(0); i < numAncients; i++ {
   812  			blob, err = db.Ancient("receipts", i)
   813  			if err != nil {
   814  				return false, 0, err
   815  			}
   816  			if len(blob) == 0 {
   817  				continue
   818  			}
   819  			if !bytes.Equal(blob, emptyRLPList) {
   820  				firstIdx = i
   821  				break
   822  			}
   823  		}
   824  	}
   825  	// Is first non-empty receipt legacy?
   826  	first, err := db.Ancient("receipts", firstIdx)
   827  	if err != nil {
   828  		return false, 0, err
   829  	}
   830  	legacy, err = types.IsLegacyStoredReceipts(first)
   831  	return legacy, firstIdx, err
   832  }