github.com/daethereum/go-dae@v2.2.3+incompatible/cmd/geth/dbcmd.go (about)

     1  // Copyright 2021 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"bytes"
    21  	"fmt"
    22  	"os"
    23  	"os/signal"
    24  	"path/filepath"
    25  	"sort"
    26  	"strconv"
    27  	"strings"
    28  	"syscall"
    29  	"time"
    30  
    31  	"github.com/daethereum/go-dae/cmd/utils"
    32  	"github.com/daethereum/go-dae/common"
    33  	"github.com/daethereum/go-dae/common/hexutil"
    34  	"github.com/daethereum/go-dae/console/prompt"
    35  	"github.com/daethereum/go-dae/core/rawdb"
    36  	"github.com/daethereum/go-dae/core/state/snapshot"
    37  	"github.com/daethereum/go-dae/core/types"
    38  	"github.com/daethereum/go-dae/crypto"
    39  	"github.com/daethereum/go-dae/ethdb"
    40  	"github.com/daethereum/go-dae/internal/flags"
    41  	"github.com/daethereum/go-dae/log"
    42  	"github.com/daethereum/go-dae/trie"
    43  	"github.com/olekukonko/tablewriter"
    44  	"github.com/urfave/cli/v2"
    45  )
    46  
    47  var (
    48  	removedbCommand = &cli.Command{
    49  		Action:    removeDB,
    50  		Name:      "removedb",
    51  		Usage:     "Remove blockchain and state databases",
    52  		ArgsUsage: "",
    53  		Flags:     utils.DatabasePathFlags,
    54  		Description: `
    55  Remove blockchain and state databases`,
    56  	}
    57  	dbCommand = &cli.Command{
    58  		Name:      "db",
    59  		Usage:     "Low level database operations",
    60  		ArgsUsage: "",
    61  		Subcommands: []*cli.Command{
    62  			dbInspectCmd,
    63  			dbStatCmd,
    64  			dbCompactCmd,
    65  			dbGetCmd,
    66  			dbDeleteCmd,
    67  			dbPutCmd,
    68  			dbGetSlotsCmd,
    69  			dbDumpFreezerIndex,
    70  			dbImportCmd,
    71  			dbExportCmd,
    72  			dbMetadataCmd,
    73  			dbMigrateFreezerCmd,
    74  			dbCheckStateContentCmd,
    75  		},
    76  	}
    77  	dbInspectCmd = &cli.Command{
    78  		Action:    inspect,
    79  		Name:      "inspect",
    80  		ArgsUsage: "<prefix> <start>",
    81  		Flags: flags.Merge([]cli.Flag{
    82  			utils.SyncModeFlag,
    83  		}, utils.NetworkFlags, utils.DatabasePathFlags),
    84  		Usage:       "Inspect the storage size for each type of data in the database",
    85  		Description: `This commands iterates the entire database. If the optional 'prefix' and 'start' arguments are provided, then the iteration is limited to the given subset of data.`,
    86  	}
    87  	dbCheckStateContentCmd = &cli.Command{
    88  		Action:    checkStateContent,
    89  		Name:      "check-state-content",
    90  		ArgsUsage: "<start (optional)>",
    91  		Flags:     flags.Merge(utils.NetworkFlags, utils.DatabasePathFlags),
    92  		Usage:     "Verify that state data is cryptographically correct",
    93  		Description: `This command iterates the entire database for 32-byte keys, looking for rlp-encoded trie nodes.
    94  For each trie node encountered, it checks that the key corresponds to the keccak256(value). If this is not true, this indicates
    95  a data corruption.`,
    96  	}
    97  	dbStatCmd = &cli.Command{
    98  		Action: dbStats,
    99  		Name:   "stats",
   100  		Usage:  "Print leveldb statistics",
   101  		Flags: flags.Merge([]cli.Flag{
   102  			utils.SyncModeFlag,
   103  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   104  	}
   105  	dbCompactCmd = &cli.Command{
   106  		Action: dbCompact,
   107  		Name:   "compact",
   108  		Usage:  "Compact leveldb database. WARNING: May take a very long time",
   109  		Flags: flags.Merge([]cli.Flag{
   110  			utils.SyncModeFlag,
   111  			utils.CacheFlag,
   112  			utils.CacheDatabaseFlag,
   113  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   114  		Description: `This command performs a database compaction. 
   115  WARNING: This operation may take a very long time to finish, and may cause database
   116  corruption if it is aborted during execution'!`,
   117  	}
   118  	dbGetCmd = &cli.Command{
   119  		Action:    dbGet,
   120  		Name:      "get",
   121  		Usage:     "Show the value of a database key",
   122  		ArgsUsage: "<hex-encoded key>",
   123  		Flags: flags.Merge([]cli.Flag{
   124  			utils.SyncModeFlag,
   125  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   126  		Description: "This command looks up the specified database key from the database.",
   127  	}
   128  	dbDeleteCmd = &cli.Command{
   129  		Action:    dbDelete,
   130  		Name:      "delete",
   131  		Usage:     "Delete a database key (WARNING: may corrupt your database)",
   132  		ArgsUsage: "<hex-encoded key>",
   133  		Flags: flags.Merge([]cli.Flag{
   134  			utils.SyncModeFlag,
   135  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   136  		Description: `This command deletes the specified database key from the database. 
   137  WARNING: This is a low-level operation which may cause database corruption!`,
   138  	}
   139  	dbPutCmd = &cli.Command{
   140  		Action:    dbPut,
   141  		Name:      "put",
   142  		Usage:     "Set the value of a database key (WARNING: may corrupt your database)",
   143  		ArgsUsage: "<hex-encoded key> <hex-encoded value>",
   144  		Flags: flags.Merge([]cli.Flag{
   145  			utils.SyncModeFlag,
   146  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   147  		Description: `This command sets a given database key to the given value. 
   148  WARNING: This is a low-level operation which may cause database corruption!`,
   149  	}
   150  	dbGetSlotsCmd = &cli.Command{
   151  		Action:    dbDumpTrie,
   152  		Name:      "dumptrie",
   153  		Usage:     "Show the storage key/values of a given storage trie",
   154  		ArgsUsage: "<hex-encoded storage trie root> <hex-encoded start (optional)> <int max elements (optional)>",
   155  		Flags: flags.Merge([]cli.Flag{
   156  			utils.SyncModeFlag,
   157  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   158  		Description: "This command looks up the specified database key from the database.",
   159  	}
   160  	dbDumpFreezerIndex = &cli.Command{
   161  		Action:    freezerInspect,
   162  		Name:      "freezer-index",
   163  		Usage:     "Dump out the index of a given freezer type",
   164  		ArgsUsage: "<type> <start (int)> <end (int)>",
   165  		Flags: flags.Merge([]cli.Flag{
   166  			utils.SyncModeFlag,
   167  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   168  		Description: "This command displays information about the freezer index.",
   169  	}
   170  	dbImportCmd = &cli.Command{
   171  		Action:    importLDBdata,
   172  		Name:      "import",
   173  		Usage:     "Imports leveldb-data from an exported RLP dump.",
   174  		ArgsUsage: "<dumpfile> <start (optional)",
   175  		Flags: flags.Merge([]cli.Flag{
   176  			utils.SyncModeFlag,
   177  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   178  		Description: "The import command imports the specific chain data from an RLP encoded stream.",
   179  	}
   180  	dbExportCmd = &cli.Command{
   181  		Action:    exportChaindata,
   182  		Name:      "export",
   183  		Usage:     "Exports the chain data into an RLP dump. If the <dumpfile> has .gz suffix, gzip compression will be used.",
   184  		ArgsUsage: "<type> <dumpfile>",
   185  		Flags: flags.Merge([]cli.Flag{
   186  			utils.SyncModeFlag,
   187  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   188  		Description: "Exports the specified chain data to an RLP encoded stream, optionally gzip-compressed.",
   189  	}
   190  	dbMetadataCmd = &cli.Command{
   191  		Action: showMetaData,
   192  		Name:   "metadata",
   193  		Usage:  "Shows metadata about the chain status.",
   194  		Flags: flags.Merge([]cli.Flag{
   195  			utils.SyncModeFlag,
   196  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   197  		Description: "Shows metadata about the chain status.",
   198  	}
   199  	dbMigrateFreezerCmd = &cli.Command{
   200  		Action:    freezerMigrate,
   201  		Name:      "freezer-migrate",
   202  		Usage:     "Migrate legacy parts of the freezer. (WARNING: may take a long time)",
   203  		ArgsUsage: "",
   204  		Flags: flags.Merge([]cli.Flag{
   205  			utils.SyncModeFlag,
   206  		}, utils.NetworkFlags, utils.DatabasePathFlags),
   207  		Description: `The freezer-migrate command checks your database for receipts in a legacy format and updates those.
   208  WARNING: please back-up the receipt files in your ancients before running this command.`,
   209  	}
   210  )
   211  
   212  func removeDB(ctx *cli.Context) error {
   213  	stack, config := makeConfigNode(ctx)
   214  
   215  	// Remove the full node state database
   216  	path := stack.ResolvePath("chaindata")
   217  	if common.FileExist(path) {
   218  		confirmAndRemoveDB(path, "full node state database")
   219  	} else {
   220  		log.Info("Full node state database missing", "path", path)
   221  	}
   222  	// Remove the full node ancient database
   223  	path = config.Eth.DatabaseFreezer
   224  	switch {
   225  	case path == "":
   226  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   227  	case !filepath.IsAbs(path):
   228  		path = config.Node.ResolvePath(path)
   229  	}
   230  	if common.FileExist(path) {
   231  		confirmAndRemoveDB(path, "full node ancient database")
   232  	} else {
   233  		log.Info("Full node ancient database missing", "path", path)
   234  	}
   235  	// Remove the light node database
   236  	path = stack.ResolvePath("lightchaindata")
   237  	if common.FileExist(path) {
   238  		confirmAndRemoveDB(path, "light node database")
   239  	} else {
   240  		log.Info("Light node database missing", "path", path)
   241  	}
   242  	return nil
   243  }
   244  
   245  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   246  // folder if accepted.
   247  func confirmAndRemoveDB(database string, kind string) {
   248  	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   249  	switch {
   250  	case err != nil:
   251  		utils.Fatalf("%v", err)
   252  	case !confirm:
   253  		log.Info("Database deletion skipped", "path", database)
   254  	default:
   255  		start := time.Now()
   256  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   257  			// If we're at the top level folder, recurse into
   258  			if path == database {
   259  				return nil
   260  			}
   261  			// Delete all the files, but not subfolders
   262  			if !info.IsDir() {
   263  				os.Remove(path)
   264  				return nil
   265  			}
   266  			return filepath.SkipDir
   267  		})
   268  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   269  	}
   270  }
   271  
   272  func inspect(ctx *cli.Context) error {
   273  	var (
   274  		prefix []byte
   275  		start  []byte
   276  	)
   277  	if ctx.NArg() > 2 {
   278  		return fmt.Errorf("Max 2 arguments: %v", ctx.Command.ArgsUsage)
   279  	}
   280  	if ctx.NArg() >= 1 {
   281  		if d, err := hexutil.Decode(ctx.Args().Get(0)); err != nil {
   282  			return fmt.Errorf("failed to hex-decode 'prefix': %v", err)
   283  		} else {
   284  			prefix = d
   285  		}
   286  	}
   287  	if ctx.NArg() >= 2 {
   288  		if d, err := hexutil.Decode(ctx.Args().Get(1)); err != nil {
   289  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   290  		} else {
   291  			start = d
   292  		}
   293  	}
   294  	stack, _ := makeConfigNode(ctx)
   295  	defer stack.Close()
   296  
   297  	db := utils.MakeChainDatabase(ctx, stack, true)
   298  	defer db.Close()
   299  
   300  	return rawdb.InspectDatabase(db, prefix, start)
   301  }
   302  
   303  func checkStateContent(ctx *cli.Context) error {
   304  	var (
   305  		prefix []byte
   306  		start  []byte
   307  	)
   308  	if ctx.NArg() > 1 {
   309  		return fmt.Errorf("max 1 argument: %v", ctx.Command.ArgsUsage)
   310  	}
   311  	if ctx.NArg() > 0 {
   312  		if d, err := hexutil.Decode(ctx.Args().First()); err != nil {
   313  			return fmt.Errorf("failed to hex-decode 'start': %v", err)
   314  		} else {
   315  			start = d
   316  		}
   317  	}
   318  	stack, _ := makeConfigNode(ctx)
   319  	defer stack.Close()
   320  
   321  	db := utils.MakeChainDatabase(ctx, stack, true)
   322  	defer db.Close()
   323  	var (
   324  		it        = rawdb.NewKeyLengthIterator(db.NewIterator(prefix, start), 32)
   325  		hasher    = crypto.NewKeccakState()
   326  		got       = make([]byte, 32)
   327  		errs      int
   328  		count     int
   329  		startTime = time.Now()
   330  		lastLog   = time.Now()
   331  	)
   332  	for it.Next() {
   333  		count++
   334  		k := it.Key()
   335  		v := it.Value()
   336  		hasher.Reset()
   337  		hasher.Write(v)
   338  		hasher.Read(got)
   339  		if !bytes.Equal(k, got) {
   340  			errs++
   341  			fmt.Printf("Error at %#x\n", k)
   342  			fmt.Printf("  Hash:  %#x\n", got)
   343  			fmt.Printf("  Data:  %#x\n", v)
   344  		}
   345  		if time.Since(lastLog) > 8*time.Second {
   346  			log.Info("Iterating the database", "at", fmt.Sprintf("%#x", k), "elapsed", common.PrettyDuration(time.Since(startTime)))
   347  			lastLog = time.Now()
   348  		}
   349  	}
   350  	if err := it.Error(); err != nil {
   351  		return err
   352  	}
   353  	log.Info("Iterated the state content", "errors", errs, "items", count)
   354  	return nil
   355  }
   356  
   357  func showLeveldbStats(db ethdb.KeyValueStater) {
   358  	if stats, err := db.Stat("leveldb.stats"); err != nil {
   359  		log.Warn("Failed to read database stats", "error", err)
   360  	} else {
   361  		fmt.Println(stats)
   362  	}
   363  	if ioStats, err := db.Stat("leveldb.iostats"); err != nil {
   364  		log.Warn("Failed to read database iostats", "error", err)
   365  	} else {
   366  		fmt.Println(ioStats)
   367  	}
   368  }
   369  
   370  func dbStats(ctx *cli.Context) error {
   371  	stack, _ := makeConfigNode(ctx)
   372  	defer stack.Close()
   373  
   374  	db := utils.MakeChainDatabase(ctx, stack, true)
   375  	defer db.Close()
   376  
   377  	showLeveldbStats(db)
   378  	return nil
   379  }
   380  
   381  func dbCompact(ctx *cli.Context) error {
   382  	stack, _ := makeConfigNode(ctx)
   383  	defer stack.Close()
   384  
   385  	db := utils.MakeChainDatabase(ctx, stack, false)
   386  	defer db.Close()
   387  
   388  	log.Info("Stats before compaction")
   389  	showLeveldbStats(db)
   390  
   391  	log.Info("Triggering compaction")
   392  	if err := db.Compact(nil, nil); err != nil {
   393  		log.Info("Compact err", "error", err)
   394  		return err
   395  	}
   396  	log.Info("Stats after compaction")
   397  	showLeveldbStats(db)
   398  	return nil
   399  }
   400  
   401  // dbGet shows the value of a given database key
   402  func dbGet(ctx *cli.Context) error {
   403  	if ctx.NArg() != 1 {
   404  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   405  	}
   406  	stack, _ := makeConfigNode(ctx)
   407  	defer stack.Close()
   408  
   409  	db := utils.MakeChainDatabase(ctx, stack, true)
   410  	defer db.Close()
   411  
   412  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   413  	if err != nil {
   414  		log.Info("Could not decode the key", "error", err)
   415  		return err
   416  	}
   417  
   418  	data, err := db.Get(key)
   419  	if err != nil {
   420  		log.Info("Get operation failed", "key", fmt.Sprintf("%#x", key), "error", err)
   421  		return err
   422  	}
   423  	fmt.Printf("key %#x: %#x\n", key, data)
   424  	return nil
   425  }
   426  
   427  // dbDelete deletes a key from the database
   428  func dbDelete(ctx *cli.Context) error {
   429  	if ctx.NArg() != 1 {
   430  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   431  	}
   432  	stack, _ := makeConfigNode(ctx)
   433  	defer stack.Close()
   434  
   435  	db := utils.MakeChainDatabase(ctx, stack, false)
   436  	defer db.Close()
   437  
   438  	key, err := common.ParseHexOrString(ctx.Args().Get(0))
   439  	if err != nil {
   440  		log.Info("Could not decode the key", "error", err)
   441  		return err
   442  	}
   443  	data, err := db.Get(key)
   444  	if err == nil {
   445  		fmt.Printf("Previous value: %#x\n", data)
   446  	}
   447  	if err = db.Delete(key); err != nil {
   448  		log.Info("Delete operation returned an error", "key", fmt.Sprintf("%#x", key), "error", err)
   449  		return err
   450  	}
   451  	return nil
   452  }
   453  
   454  // dbPut overwrite a value in the database
   455  func dbPut(ctx *cli.Context) error {
   456  	if ctx.NArg() != 2 {
   457  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   458  	}
   459  	stack, _ := makeConfigNode(ctx)
   460  	defer stack.Close()
   461  
   462  	db := utils.MakeChainDatabase(ctx, stack, false)
   463  	defer db.Close()
   464  
   465  	var (
   466  		key   []byte
   467  		value []byte
   468  		data  []byte
   469  		err   error
   470  	)
   471  	key, err = common.ParseHexOrString(ctx.Args().Get(0))
   472  	if err != nil {
   473  		log.Info("Could not decode the key", "error", err)
   474  		return err
   475  	}
   476  	value, err = hexutil.Decode(ctx.Args().Get(1))
   477  	if err != nil {
   478  		log.Info("Could not decode the value", "error", err)
   479  		return err
   480  	}
   481  	data, err = db.Get(key)
   482  	if err == nil {
   483  		fmt.Printf("Previous value: %#x\n", data)
   484  	}
   485  	return db.Put(key, value)
   486  }
   487  
   488  // dbDumpTrie shows the key-value slots of a given storage trie
   489  func dbDumpTrie(ctx *cli.Context) error {
   490  	if ctx.NArg() < 1 {
   491  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   492  	}
   493  	stack, _ := makeConfigNode(ctx)
   494  	defer stack.Close()
   495  
   496  	db := utils.MakeChainDatabase(ctx, stack, true)
   497  	defer db.Close()
   498  	var (
   499  		root  []byte
   500  		start []byte
   501  		max   = int64(-1)
   502  		err   error
   503  	)
   504  	if root, err = hexutil.Decode(ctx.Args().Get(0)); err != nil {
   505  		log.Info("Could not decode the root", "error", err)
   506  		return err
   507  	}
   508  	stRoot := common.BytesToHash(root)
   509  	if ctx.NArg() >= 2 {
   510  		if start, err = hexutil.Decode(ctx.Args().Get(1)); err != nil {
   511  			log.Info("Could not decode the seek position", "error", err)
   512  			return err
   513  		}
   514  	}
   515  	if ctx.NArg() >= 3 {
   516  		if max, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
   517  			log.Info("Could not decode the max count", "error", err)
   518  			return err
   519  		}
   520  	}
   521  	theTrie, err := trie.New(common.Hash{}, stRoot, trie.NewDatabase(db))
   522  	if err != nil {
   523  		return err
   524  	}
   525  	var count int64
   526  	it := trie.NewIterator(theTrie.NodeIterator(start))
   527  	for it.Next() {
   528  		if max > 0 && count == max {
   529  			fmt.Printf("Exiting after %d values\n", count)
   530  			break
   531  		}
   532  		fmt.Printf("  %d. key %#x: %#x\n", count, it.Key, it.Value)
   533  		count++
   534  	}
   535  	return it.Err
   536  }
   537  
   538  func freezerInspect(ctx *cli.Context) error {
   539  	var (
   540  		start, end    int64
   541  		disableSnappy bool
   542  		err           error
   543  	)
   544  	if ctx.NArg() < 3 {
   545  		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
   546  	}
   547  	kind := ctx.Args().Get(0)
   548  	if noSnap, ok := rawdb.FreezerNoSnappy[kind]; !ok {
   549  		var options []string
   550  		for opt := range rawdb.FreezerNoSnappy {
   551  			options = append(options, opt)
   552  		}
   553  		sort.Strings(options)
   554  		return fmt.Errorf("Could read freezer-type '%v'. Available options: %v", kind, options)
   555  	} else {
   556  		disableSnappy = noSnap
   557  	}
   558  	if start, err = strconv.ParseInt(ctx.Args().Get(1), 10, 64); err != nil {
   559  		log.Info("Could read start-param", "error", err)
   560  		return err
   561  	}
   562  	if end, err = strconv.ParseInt(ctx.Args().Get(2), 10, 64); err != nil {
   563  		log.Info("Could read count param", "error", err)
   564  		return err
   565  	}
   566  	stack, _ := makeConfigNode(ctx)
   567  	defer stack.Close()
   568  	path := filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   569  	log.Info("Opening freezer", "location", path, "name", kind)
   570  	if f, err := rawdb.NewFreezerTable(path, kind, disableSnappy, true); err != nil {
   571  		return err
   572  	} else {
   573  		f.DumpIndex(start, end)
   574  	}
   575  	return nil
   576  }
   577  
// importLDBdata imports key/value pairs from an RLP-encoded dump file (as
// produced by `geth db export`) into the chain database. An optional second
// integer argument selects the batch index to resume from. SIGINT/SIGTERM
// stop the import gracefully at the next batch boundary.
func importLDBdata(ctx *cli.Context) error {
	start := 0
	switch ctx.NArg() {
	case 1:
		break
	case 2:
		s, err := strconv.Atoi(ctx.Args().Get(1))
		if err != nil {
			return fmt.Errorf("second arg must be an integer: %v", err)
		}
		start = s
	default:
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	var (
		fName     = ctx.Args().Get(0)
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	// On function exit the channel is closed (after Stop, so no send can
	// race the close), which unblocks the watcher goroutine below.
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		// ok == true means a real signal arrived; ok == false is just the
		// deferred close during teardown, which must not log.
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during ldb import, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, false)
	return utils.ImportLDBData(db, fName, int64(start), stop)
}
   611  
// preimageIterator wraps a database iterator and yields only well-formed
// preimage entries (preimage prefix + 32-byte hash key) as export operations.
type preimageIterator struct {
	iter ethdb.Iterator // underlying iterator over the preimage key range
}
   615  
   616  func (iter *preimageIterator) Next() (byte, []byte, []byte, bool) {
   617  	for iter.iter.Next() {
   618  		key := iter.iter.Key()
   619  		if bytes.HasPrefix(key, rawdb.PreimagePrefix) && len(key) == (len(rawdb.PreimagePrefix)+common.HashLength) {
   620  			return utils.OpBatchAdd, key, iter.iter.Value(), true
   621  		}
   622  	}
   623  	return 0, nil, nil, false
   624  }
   625  
// Release frees the underlying database iterator.
func (iter *preimageIterator) Release() {
	iter.iter.Release()
}
   629  
// snapshotIterator streams the snapshot tables for export: first a deletion
// of the snapshot root marker, then all account entries, then all storage
// entries.
type snapshotIterator struct {
	init    bool           // set after the root-key deletion has been emitted
	account ethdb.Iterator // iterator over the account snapshot range
	storage ethdb.Iterator // iterator over the storage snapshot range
}
   635  
   636  func (iter *snapshotIterator) Next() (byte, []byte, []byte, bool) {
   637  	if !iter.init {
   638  		iter.init = true
   639  		return utils.OpBatchDel, rawdb.SnapshotRootKey, nil, true
   640  	}
   641  	for iter.account.Next() {
   642  		key := iter.account.Key()
   643  		if bytes.HasPrefix(key, rawdb.SnapshotAccountPrefix) && len(key) == (len(rawdb.SnapshotAccountPrefix)+common.HashLength) {
   644  			return utils.OpBatchAdd, key, iter.account.Value(), true
   645  		}
   646  	}
   647  	for iter.storage.Next() {
   648  		key := iter.storage.Key()
   649  		if bytes.HasPrefix(key, rawdb.SnapshotStoragePrefix) && len(key) == (len(rawdb.SnapshotStoragePrefix)+2*common.HashLength) {
   650  			return utils.OpBatchAdd, key, iter.storage.Value(), true
   651  		}
   652  	}
   653  	return 0, nil, nil, false
   654  }
   655  
// Release frees both underlying database iterators.
func (iter *snapshotIterator) Release() {
	iter.account.Release()
	iter.storage.Release()
}
   660  
// chainExporters defines the export scheme for all exportable chain data.
// Each entry maps a user-visible type name to a factory producing an iterator
// over that data category.
var chainExporters = map[string]func(db ethdb.Database) utils.ChainDataIterator{
	// Hash -> preimage table entries.
	"preimage": func(db ethdb.Database) utils.ChainDataIterator {
		iter := db.NewIterator(rawdb.PreimagePrefix, nil)
		return &preimageIterator{iter: iter}
	},
	// Account and storage snapshot tables.
	"snapshot": func(db ethdb.Database) utils.ChainDataIterator {
		account := db.NewIterator(rawdb.SnapshotAccountPrefix, nil)
		storage := db.NewIterator(rawdb.SnapshotStoragePrefix, nil)
		return &snapshotIterator{account: account, storage: storage}
	},
}
   673  
// exportChaindata exports one category of chain data (see chainExporters) to
// an RLP dump file; a .gz suffix on the target enables gzip compression.
// SIGINT/SIGTERM stop the export gracefully at the next batch boundary.
func exportChaindata(ctx *cli.Context) error {
	if ctx.NArg() < 2 {
		return fmt.Errorf("required arguments: %v", ctx.Command.ArgsUsage)
	}
	// Parse the required chain data type, make sure it's supported.
	kind := ctx.Args().Get(0)
	kind = strings.ToLower(strings.Trim(kind, " "))
	exporter, ok := chainExporters[kind]
	if !ok {
		var kinds []string
		for kind := range chainExporters {
			kinds = append(kinds, kind)
		}
		return fmt.Errorf("invalid data type %s, supported types: %s", kind, strings.Join(kinds, ", "))
	}
	var (
		stack, _  = makeConfigNode(ctx)
		interrupt = make(chan os.Signal, 1)
		stop      = make(chan struct{})
	)
	defer stack.Close()
	// On function exit the channel is closed (after Stop, so no send can
	// race the close), which unblocks the watcher goroutine below.
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		// ok == true means a real signal arrived; ok == false is just the
		// deferred close during teardown, which must not log.
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during db export, stopping at next batch")
		}
		close(stop)
	}()
	db := utils.MakeChainDatabase(ctx, stack, true)
	return utils.ExportChaindata(ctx.Args().Get(1), kind, exporter(db), stop)
}
   707  
   708  func showMetaData(ctx *cli.Context) error {
   709  	stack, _ := makeConfigNode(ctx)
   710  	defer stack.Close()
   711  	db := utils.MakeChainDatabase(ctx, stack, true)
   712  	ancients, err := db.Ancients()
   713  	if err != nil {
   714  		fmt.Fprintf(os.Stderr, "Error accessing ancients: %v", err)
   715  	}
   716  	pp := func(val *uint64) string {
   717  		if val == nil {
   718  			return "<nil>"
   719  		}
   720  		return fmt.Sprintf("%d (%#x)", *val, *val)
   721  	}
   722  	data := [][]string{
   723  		{"databaseVersion", pp(rawdb.ReadDatabaseVersion(db))},
   724  		{"headBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadBlockHash(db))},
   725  		{"headFastBlockHash", fmt.Sprintf("%v", rawdb.ReadHeadFastBlockHash(db))},
   726  		{"headHeaderHash", fmt.Sprintf("%v", rawdb.ReadHeadHeaderHash(db))}}
   727  	if b := rawdb.ReadHeadBlock(db); b != nil {
   728  		data = append(data, []string{"headBlock.Hash", fmt.Sprintf("%v", b.Hash())})
   729  		data = append(data, []string{"headBlock.Root", fmt.Sprintf("%v", b.Root())})
   730  		data = append(data, []string{"headBlock.Number", fmt.Sprintf("%d (%#x)", b.Number(), b.Number())})
   731  	}
   732  	if b := rawdb.ReadSkeletonSyncStatus(db); b != nil {
   733  		data = append(data, []string{"SkeletonSyncStatus", string(b)})
   734  	}
   735  	if h := rawdb.ReadHeadHeader(db); h != nil {
   736  		data = append(data, []string{"headHeader.Hash", fmt.Sprintf("%v", h.Hash())})
   737  		data = append(data, []string{"headHeader.Root", fmt.Sprintf("%v", h.Root)})
   738  		data = append(data, []string{"headHeader.Number", fmt.Sprintf("%d (%#x)", h.Number, h.Number)})
   739  	}
   740  	data = append(data, [][]string{{"frozen", fmt.Sprintf("%d items", ancients)},
   741  		{"lastPivotNumber", pp(rawdb.ReadLastPivotNumber(db))},
   742  		{"len(snapshotSyncStatus)", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotSyncStatus(db)))},
   743  		{"snapshotGenerator", snapshot.ParseGeneratorStatus(rawdb.ReadSnapshotGenerator(db))},
   744  		{"snapshotDisabled", fmt.Sprintf("%v", rawdb.ReadSnapshotDisabled(db))},
   745  		{"snapshotJournal", fmt.Sprintf("%d bytes", len(rawdb.ReadSnapshotJournal(db)))},
   746  		{"snapshotRecoveryNumber", pp(rawdb.ReadSnapshotRecoveryNumber(db))},
   747  		{"snapshotRoot", fmt.Sprintf("%v", rawdb.ReadSnapshotRoot(db))},
   748  		{"txIndexTail", pp(rawdb.ReadTxIndexTail(db))},
   749  		{"fastTxLookupLimit", pp(rawdb.ReadFastTxLookupLimit(db))},
   750  	}...)
   751  	table := tablewriter.NewWriter(os.Stdout)
   752  	table.SetHeader([]string{"Field", "Value"})
   753  	table.AppendBulk(data)
   754  	table.Render()
   755  	return nil
   756  }
   757  
// freezerMigrate rewrites the freezer's receipts table when legacy-format
// receipts are detected, converting them to the current storage format.
func freezerMigrate(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	defer db.Close()

	// Check first block for legacy receipt format
	numAncients, err := db.Ancients()
	if err != nil {
		return err
	}
	if numAncients < 1 {
		log.Info("No receipts in freezer to migrate")
		return nil
	}

	isFirstLegacy, firstIdx, err := dbHasLegacyReceipts(db, 0)
	if err != nil {
		return err
	}
	if !isFirstLegacy {
		log.Info("No legacy receipts to migrate")
		return nil
	}

	log.Info("Starting migration", "ancients", numAncients, "firstLegacy", firstIdx)
	start := time.Now()
	if err := db.MigrateTable("receipts", types.ConvertLegacyStoredReceipts); err != nil {
		return err
	}
	// Explicit close before reporting success; the deferred Close above will
	// run a second time afterwards — NOTE(review): presumably the backing
	// database tolerates double-close, confirm.
	if err := db.Close(); err != nil {
		return err
	}
	log.Info("Migration finished", "duration", time.Since(start))

	return nil
}
   796  
// dbHasLegacyReceipts checks freezer entries for legacy receipts. It stops at the first
// non-empty receipt and checks its format. The index of this first non-empty element is
// the second return parameter.
func dbHasLegacyReceipts(db ethdb.Database, firstIdx uint64) (bool, uint64, error) {
	// Check first block for legacy receipt format
	numAncients, err := db.Ancients()
	if err != nil {
		return false, 0, err
	}
	if numAncients < 1 {
		return false, 0, nil
	}
	if firstIdx >= numAncients {
		return false, firstIdx, nil
	}
	var (
		legacy       bool
		blob         []byte
		emptyRLPList = []byte{192} // 0xC0, RLP encoding of an empty list
	)
	// Find first block with non-empty receipt, only if
	// the index is not already provided. If every entry is empty or an empty
	// list, firstIdx stays 0 and the check below runs on index 0.
	if firstIdx == 0 {
		for i := uint64(0); i < numAncients; i++ {
			blob, err = db.Ancient("receipts", i)
			if err != nil {
				return false, 0, err
			}
			if len(blob) == 0 {
				continue
			}
			if !bytes.Equal(blob, emptyRLPList) {
				firstIdx = i
				break
			}
		}
	}
	// Is first non-empty receipt legacy?
	first, err := db.Ancient("receipts", firstIdx)
	if err != nil {
		return false, 0, err
	}
	legacy, err = types.IsLegacyStoredReceipts(first)
	return legacy, firstIdx, err
}