github.com/cryptogateway/go-paymex@v0.0.0-20210204174735-96277fb1e602/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"fmt"
    22  	"os"
    23  	"path/filepath"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/cryptogateway/go-paymex/cmd/utils"
    30  	"github.com/cryptogateway/go-paymex/common"
    31  	"github.com/cryptogateway/go-paymex/console/prompt"
    32  	"github.com/cryptogateway/go-paymex/core"
    33  	"github.com/cryptogateway/go-paymex/core/rawdb"
    34  	"github.com/cryptogateway/go-paymex/core/state"
    35  	"github.com/cryptogateway/go-paymex/core/types"
    36  	"github.com/cryptogateway/go-paymex/eth/downloader"
    37  	"github.com/cryptogateway/go-paymex/event"
    38  	"github.com/cryptogateway/go-paymex/log"
    39  	"github.com/cryptogateway/go-paymex/metrics"
    40  	"github.com/cryptogateway/go-paymex/trie"
    41  	"gopkg.in/urfave/cli.v1"
    42  )
    43  
var (
	// initCommand writes a user-supplied genesis specification into both the
	// full and light node databases, defining the network the node joins.
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the genesis spec of the selected network to stdout.
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand imports one or more RLP-encoded block files into the chain.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.TxLookupLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes (a range of) the chain to an RLP file, optionally gzipped.
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads hash preimages from an RLP stream into the database.
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
	The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand dumps the preimage database to an RLP stream.
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// copydbCommand syncs a fresh local chain from an existing chaindata folder.
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.TxLookupLimitFlag,
			utils.GoerliFlag,
			utils.YoloV3Flag,
			utils.LegacyTestnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	// removedbCommand interactively deletes the chain and state databases.
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	// dumpCommand prints the state of one or more blocks (by hash or number).
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
	// inspectCommand reports per-category storage usage of the chain database.
	inspectCommand = cli.Command{
		Action:    utils.MigrateFlags(inspect),
		Name:      "inspect",
		Usage:     "Inspect the storage size for each type of data in the database",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.AncientFlag,
			utils.CacheFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.YoloV3Flag,
			utils.LegacyTestnetFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
	}
)
   225  
   226  // initGenesis will initialise the given JSON format genesis file and writes it as
   227  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   228  func initGenesis(ctx *cli.Context) error {
   229  	// Make sure we have a valid genesis JSON
   230  	genesisPath := ctx.Args().First()
   231  	if len(genesisPath) == 0 {
   232  		utils.Fatalf("Must supply path to genesis JSON file")
   233  	}
   234  	file, err := os.Open(genesisPath)
   235  	if err != nil {
   236  		utils.Fatalf("Failed to read genesis file: %v", err)
   237  	}
   238  	defer file.Close()
   239  
   240  	genesis := new(core.Genesis)
   241  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   242  		utils.Fatalf("invalid genesis file: %v", err)
   243  	}
   244  	// Open and initialise both full and light databases
   245  	stack, _ := makeConfigNode(ctx)
   246  	defer stack.Close()
   247  
   248  	for _, name := range []string{"chaindata", "lightchaindata"} {
   249  		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
   250  		if err != nil {
   251  			utils.Fatalf("Failed to open database: %v", err)
   252  		}
   253  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   254  		if err != nil {
   255  			utils.Fatalf("Failed to write genesis block: %v", err)
   256  		}
   257  		chaindb.Close()
   258  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   259  	}
   260  	return nil
   261  }
   262  
   263  func dumpGenesis(ctx *cli.Context) error {
   264  	genesis := utils.MakeGenesis(ctx)
   265  	if genesis == nil {
   266  		genesis = core.DefaultGenesisBlock()
   267  	}
   268  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   269  		utils.Fatalf("could not encode genesis")
   270  	}
   271  	return nil
   272  }
   273  
   274  func importChain(ctx *cli.Context) error {
   275  	if len(ctx.Args()) < 1 {
   276  		utils.Fatalf("This command requires an argument.")
   277  	}
   278  	// Start metrics export if enabled
   279  	utils.SetupMetrics(ctx)
   280  	// Start system runtime metrics collection
   281  	go metrics.CollectProcessMetrics(3 * time.Second)
   282  
   283  	stack, _ := makeConfigNode(ctx)
   284  	defer stack.Close()
   285  
   286  	chain, db := utils.MakeChain(ctx, stack, false)
   287  	defer db.Close()
   288  
   289  	// Start periodically gathering memory profiles
   290  	var peakMemAlloc, peakMemSys uint64
   291  	go func() {
   292  		stats := new(runtime.MemStats)
   293  		for {
   294  			runtime.ReadMemStats(stats)
   295  			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
   296  				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
   297  			}
   298  			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
   299  				atomic.StoreUint64(&peakMemSys, stats.Sys)
   300  			}
   301  			time.Sleep(5 * time.Second)
   302  		}
   303  	}()
   304  	// Import the chain
   305  	start := time.Now()
   306  
   307  	var importErr error
   308  
   309  	if len(ctx.Args()) == 1 {
   310  		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
   311  			importErr = err
   312  			log.Error("Import error", "err", err)
   313  		}
   314  	} else {
   315  		for _, arg := range ctx.Args() {
   316  			if err := utils.ImportChain(chain, arg); err != nil {
   317  				importErr = err
   318  				log.Error("Import error", "file", arg, "err", err)
   319  			}
   320  		}
   321  	}
   322  	chain.Stop()
   323  	fmt.Printf("Import done in %v.\n\n", time.Since(start))
   324  
   325  	// Output pre-compaction stats mostly to see the import trashing
   326  	stats, err := db.Stat("leveldb.stats")
   327  	if err != nil {
   328  		utils.Fatalf("Failed to read database stats: %v", err)
   329  	}
   330  	fmt.Println(stats)
   331  
   332  	ioStats, err := db.Stat("leveldb.iostats")
   333  	if err != nil {
   334  		utils.Fatalf("Failed to read database iostats: %v", err)
   335  	}
   336  	fmt.Println(ioStats)
   337  
   338  	// Print the memory statistics used by the importing
   339  	mem := new(runtime.MemStats)
   340  	runtime.ReadMemStats(mem)
   341  
   342  	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
   343  	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
   344  	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
   345  	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))
   346  
   347  	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
   348  		return nil
   349  	}
   350  
   351  	// Compact the entire database to more accurately measure disk io and print the stats
   352  	start = time.Now()
   353  	fmt.Println("Compacting entire database...")
   354  	if err = db.Compact(nil, nil); err != nil {
   355  		utils.Fatalf("Compaction failed: %v", err)
   356  	}
   357  	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
   358  
   359  	stats, err = db.Stat("leveldb.stats")
   360  	if err != nil {
   361  		utils.Fatalf("Failed to read database stats: %v", err)
   362  	}
   363  	fmt.Println(stats)
   364  
   365  	ioStats, err = db.Stat("leveldb.iostats")
   366  	if err != nil {
   367  		utils.Fatalf("Failed to read database iostats: %v", err)
   368  	}
   369  	fmt.Println(ioStats)
   370  	return importErr
   371  }
   372  
   373  func exportChain(ctx *cli.Context) error {
   374  	if len(ctx.Args()) < 1 {
   375  		utils.Fatalf("This command requires an argument.")
   376  	}
   377  
   378  	stack, _ := makeConfigNode(ctx)
   379  	defer stack.Close()
   380  
   381  	chain, _ := utils.MakeChain(ctx, stack, true)
   382  	start := time.Now()
   383  
   384  	var err error
   385  	fp := ctx.Args().First()
   386  	if len(ctx.Args()) < 3 {
   387  		err = utils.ExportChain(chain, fp)
   388  	} else {
   389  		// This can be improved to allow for numbers larger than 9223372036854775807
   390  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   391  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   392  		if ferr != nil || lerr != nil {
   393  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   394  		}
   395  		if first < 0 || last < 0 {
   396  			utils.Fatalf("Export error: block number must be greater than 0\n")
   397  		}
   398  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   399  	}
   400  
   401  	if err != nil {
   402  		utils.Fatalf("Export error: %v\n", err)
   403  	}
   404  	fmt.Printf("Export done in %v\n", time.Since(start))
   405  	return nil
   406  }
   407  
   408  // importPreimages imports preimage data from the specified file.
   409  func importPreimages(ctx *cli.Context) error {
   410  	if len(ctx.Args()) < 1 {
   411  		utils.Fatalf("This command requires an argument.")
   412  	}
   413  
   414  	stack, _ := makeConfigNode(ctx)
   415  	defer stack.Close()
   416  
   417  	db := utils.MakeChainDatabase(ctx, stack)
   418  	start := time.Now()
   419  
   420  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   421  		utils.Fatalf("Import error: %v\n", err)
   422  	}
   423  	fmt.Printf("Import done in %v\n", time.Since(start))
   424  	return nil
   425  }
   426  
   427  // exportPreimages dumps the preimage data to specified json file in streaming way.
   428  func exportPreimages(ctx *cli.Context) error {
   429  	if len(ctx.Args()) < 1 {
   430  		utils.Fatalf("This command requires an argument.")
   431  	}
   432  
   433  	stack, _ := makeConfigNode(ctx)
   434  	defer stack.Close()
   435  
   436  	db := utils.MakeChainDatabase(ctx, stack)
   437  	start := time.Now()
   438  
   439  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   440  		utils.Fatalf("Export error: %v\n", err)
   441  	}
   442  	fmt.Printf("Export done in %v\n", time.Since(start))
   443  	return nil
   444  }
   445  
// copyDb synchronises a fresh local chain from an existing chaindata
// directory by running the downloader against a simulated local peer that
// serves blocks out of the source database. The first argument is the source
// chaindata directory, the second the source ancient (freezer) directory.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack, false)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	// Fast sync builds a bloom filter over trie nodes; half the cache
	// allowance is handed to it, mirroring the CacheFlag split below.
	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	// Register the fake peer under version 63 (presumably the eth wire
	// protocol version — NOTE(review): confirm against the downloader API).
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	// Synchronise returns before the sync fully completes; poll until done.
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
   501  
   502  func removeDB(ctx *cli.Context) error {
   503  	stack, config := makeConfigNode(ctx)
   504  
   505  	// Remove the full node state database
   506  	path := stack.ResolvePath("chaindata")
   507  	if common.FileExist(path) {
   508  		confirmAndRemoveDB(path, "full node state database")
   509  	} else {
   510  		log.Info("Full node state database missing", "path", path)
   511  	}
   512  	// Remove the full node ancient database
   513  	path = config.Eth.DatabaseFreezer
   514  	switch {
   515  	case path == "":
   516  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   517  	case !filepath.IsAbs(path):
   518  		path = config.Node.ResolvePath(path)
   519  	}
   520  	if common.FileExist(path) {
   521  		confirmAndRemoveDB(path, "full node ancient database")
   522  	} else {
   523  		log.Info("Full node ancient database missing", "path", path)
   524  	}
   525  	// Remove the light node database
   526  	path = stack.ResolvePath("lightchaindata")
   527  	if common.FileExist(path) {
   528  		confirmAndRemoveDB(path, "light node database")
   529  	} else {
   530  		log.Info("Light node database missing", "path", path)
   531  	}
   532  	return nil
   533  }
   534  
   535  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   536  // folder if accepted.
   537  func confirmAndRemoveDB(database string, kind string) {
   538  	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   539  	switch {
   540  	case err != nil:
   541  		utils.Fatalf("%v", err)
   542  	case !confirm:
   543  		log.Info("Database deletion skipped", "path", database)
   544  	default:
   545  		start := time.Now()
   546  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   547  			// If we're at the top level folder, recurse into
   548  			if path == database {
   549  				return nil
   550  			}
   551  			// Delete all the files, but not subfolders
   552  			if !info.IsDir() {
   553  				os.Remove(path)
   554  				return nil
   555  			}
   556  			return filepath.SkipDir
   557  		})
   558  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   559  	}
   560  }
   561  
   562  func dump(ctx *cli.Context) error {
   563  	stack, _ := makeConfigNode(ctx)
   564  	defer stack.Close()
   565  
   566  	chain, chainDb := utils.MakeChain(ctx, stack, true)
   567  	defer chainDb.Close()
   568  	for _, arg := range ctx.Args() {
   569  		var block *types.Block
   570  		if hashish(arg) {
   571  			block = chain.GetBlockByHash(common.HexToHash(arg))
   572  		} else {
   573  			num, _ := strconv.Atoi(arg)
   574  			block = chain.GetBlockByNumber(uint64(num))
   575  		}
   576  		if block == nil {
   577  			fmt.Println("{}")
   578  			utils.Fatalf("block not found")
   579  		} else {
   580  			state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil)
   581  			if err != nil {
   582  				utils.Fatalf("could not create new state: %v", err)
   583  			}
   584  			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
   585  			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
   586  			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
   587  			if ctx.Bool(utils.IterativeOutputFlag.Name) {
   588  				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
   589  			} else {
   590  				if includeMissing {
   591  					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
   592  						" otherwise the accounts will overwrite each other in the resulting mapping.")
   593  				}
   594  				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
   595  			}
   596  		}
   597  	}
   598  	return nil
   599  }
   600  
   601  func inspect(ctx *cli.Context) error {
   602  	node, _ := makeConfigNode(ctx)
   603  	defer node.Close()
   604  
   605  	_, chainDb := utils.MakeChain(ctx, node, true)
   606  	defer chainDb.Close()
   607  
   608  	return rawdb.InspectDatabase(chainDb)
   609  }
   610  
   611  // hashish returns true for strings that look like hashes.
   612  func hashish(x string) bool {
   613  	_, err := strconv.Atoi(x)
   614  	return err != nil
   615  }