github.com/snowblossomcoin/go-ethereum@v1.9.25/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"fmt"
    22  	"os"
    23  	"path/filepath"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/cmd/utils"
    30  	"github.com/ethereum/go-ethereum/common"
    31  	"github.com/ethereum/go-ethereum/console/prompt"
    32  	"github.com/ethereum/go-ethereum/core"
    33  	"github.com/ethereum/go-ethereum/core/rawdb"
    34  	"github.com/ethereum/go-ethereum/core/state"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/eth/downloader"
    37  	"github.com/ethereum/go-ethereum/event"
    38  	"github.com/ethereum/go-ethereum/log"
    39  	"github.com/ethereum/go-ethereum/metrics"
    40  	"github.com/ethereum/go-ethereum/trie"
    41  	"gopkg.in/urfave/cli.v1"
    42  )
    43  
// Command definitions for geth's blockchain management subcommands. Each
// entry binds a CLI command name to its handler function below and lists
// the flags that command honours; utils.MigrateFlags copies global flag
// values into the subcommand's flag context before the action runs.
var (
	// initCommand seeds a fresh data directory from a genesis JSON file.
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the active genesis spec as JSON.
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand ingests RLP-encoded block dumps into the local chain.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.TxLookupLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the chain (or a block range) out as RLP.
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand restores the hash->preimage table from a stream.
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
	The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand dumps the hash->preimage table to a stream.
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// copydbCommand syncs a new database from a local source chaindata dir.
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.TxLookupLimitFlag,
			utils.GoerliFlag,
			utils.YoloV2Flag,
			utils.LegacyTestnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	// removedbCommand interactively deletes the chain/state databases.
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	// dumpCommand prints the state of specific blocks as JSON.
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
	// inspectCommand reports per-category storage sizes of the database.
	inspectCommand = cli.Command{
		Action:    utils.MigrateFlags(inspect),
		Name:      "inspect",
		Usage:     "Inspect the storage size for each type of data in the database",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.AncientFlag,
			utils.CacheFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.YoloV2Flag,
			utils.LegacyTestnetFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
	}
)
   223  
   224  // initGenesis will initialise the given JSON format genesis file and writes it as
   225  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   226  func initGenesis(ctx *cli.Context) error {
   227  	// Make sure we have a valid genesis JSON
   228  	genesisPath := ctx.Args().First()
   229  	if len(genesisPath) == 0 {
   230  		utils.Fatalf("Must supply path to genesis JSON file")
   231  	}
   232  	file, err := os.Open(genesisPath)
   233  	if err != nil {
   234  		utils.Fatalf("Failed to read genesis file: %v", err)
   235  	}
   236  	defer file.Close()
   237  
   238  	genesis := new(core.Genesis)
   239  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   240  		utils.Fatalf("invalid genesis file: %v", err)
   241  	}
   242  	// Open and initialise both full and light databases
   243  	stack, _ := makeConfigNode(ctx)
   244  	defer stack.Close()
   245  
   246  	for _, name := range []string{"chaindata", "lightchaindata"} {
   247  		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
   248  		if err != nil {
   249  			utils.Fatalf("Failed to open database: %v", err)
   250  		}
   251  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   252  		if err != nil {
   253  			utils.Fatalf("Failed to write genesis block: %v", err)
   254  		}
   255  		chaindb.Close()
   256  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   257  	}
   258  	return nil
   259  }
   260  
   261  func dumpGenesis(ctx *cli.Context) error {
   262  	genesis := utils.MakeGenesis(ctx)
   263  	if genesis == nil {
   264  		genesis = core.DefaultGenesisBlock()
   265  	}
   266  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   267  		utils.Fatalf("could not encode genesis")
   268  	}
   269  	return nil
   270  }
   271  
// importChain imports one or more RLP-encoded block files into the local
// chain database, printing database and memory statistics afterwards. With a
// single file argument the command returns that file's import error; with
// several files, individual failures are logged and processing continues,
// with the last error returned at the end.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, false)
	defer db.Close()

	// Start periodically gathering memory profiles
	// NOTE(review): this sampler goroutine has no stop signal; it runs until
	// process exit, which is acceptable for a one-shot CLI command. Peaks are
	// tracked via atomics because the goroutine races with the final read.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	// Remember the most recent import error: multi-file imports keep going
	// on failure, but the command still exits non-nil if anything failed.
	var importErr error

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	// Optionally skip the expensive full-database compaction below.
	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	// Re-read the stats so the post-compaction numbers are visible too.
	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)
	return importErr
}
   370  
   371  func exportChain(ctx *cli.Context) error {
   372  	if len(ctx.Args()) < 1 {
   373  		utils.Fatalf("This command requires an argument.")
   374  	}
   375  
   376  	stack, _ := makeConfigNode(ctx)
   377  	defer stack.Close()
   378  
   379  	chain, _ := utils.MakeChain(ctx, stack, true)
   380  	start := time.Now()
   381  
   382  	var err error
   383  	fp := ctx.Args().First()
   384  	if len(ctx.Args()) < 3 {
   385  		err = utils.ExportChain(chain, fp)
   386  	} else {
   387  		// This can be improved to allow for numbers larger than 9223372036854775807
   388  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   389  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   390  		if ferr != nil || lerr != nil {
   391  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   392  		}
   393  		if first < 0 || last < 0 {
   394  			utils.Fatalf("Export error: block number must be greater than 0\n")
   395  		}
   396  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   397  	}
   398  
   399  	if err != nil {
   400  		utils.Fatalf("Export error: %v\n", err)
   401  	}
   402  	fmt.Printf("Export done in %v\n", time.Since(start))
   403  	return nil
   404  }
   405  
   406  // importPreimages imports preimage data from the specified file.
   407  func importPreimages(ctx *cli.Context) error {
   408  	if len(ctx.Args()) < 1 {
   409  		utils.Fatalf("This command requires an argument.")
   410  	}
   411  
   412  	stack, _ := makeConfigNode(ctx)
   413  	defer stack.Close()
   414  
   415  	db := utils.MakeChainDatabase(ctx, stack)
   416  	start := time.Now()
   417  
   418  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   419  		utils.Fatalf("Import error: %v\n", err)
   420  	}
   421  	fmt.Printf("Import done in %v\n", time.Since(start))
   422  	return nil
   423  }
   424  
   425  // exportPreimages dumps the preimage data to specified json file in streaming way.
   426  func exportPreimages(ctx *cli.Context) error {
   427  	if len(ctx.Args()) < 1 {
   428  		utils.Fatalf("This command requires an argument.")
   429  	}
   430  
   431  	stack, _ := makeConfigNode(ctx)
   432  	defer stack.Close()
   433  
   434  	db := utils.MakeChainDatabase(ctx, stack)
   435  	start := time.Now()
   436  
   437  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   438  		utils.Fatalf("Export error: %v\n", err)
   439  	}
   440  	fmt.Printf("Export done in %v\n", time.Since(start))
   441  	return nil
   442  }
   443  
// copyDb syncs a fresh local chain from an existing chaindata directory by
// spinning up a downloader fed from a simulated "local" peer that serves the
// source database, then compacts the result.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack, false)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	// Fast sync uses a bloom filter to speed up trie node lookups; half of
	// the configured cache allowance is handed to it.
	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	// (second CLI argument is the source's ancient/freezer directory).
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	// NOTE(review): 63 is the eth protocol version advertised by the fake
	// peer -- confirm it matches what this downloader release expects.
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	// Synchronise returns before the download fully completes; poll until
	// the downloader reports idle.
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
   499  
   500  func removeDB(ctx *cli.Context) error {
   501  	stack, config := makeConfigNode(ctx)
   502  
   503  	// Remove the full node state database
   504  	path := stack.ResolvePath("chaindata")
   505  	if common.FileExist(path) {
   506  		confirmAndRemoveDB(path, "full node state database")
   507  	} else {
   508  		log.Info("Full node state database missing", "path", path)
   509  	}
   510  	// Remove the full node ancient database
   511  	path = config.Eth.DatabaseFreezer
   512  	switch {
   513  	case path == "":
   514  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   515  	case !filepath.IsAbs(path):
   516  		path = config.Node.ResolvePath(path)
   517  	}
   518  	if common.FileExist(path) {
   519  		confirmAndRemoveDB(path, "full node ancient database")
   520  	} else {
   521  		log.Info("Full node ancient database missing", "path", path)
   522  	}
   523  	// Remove the light node database
   524  	path = stack.ResolvePath("lightchaindata")
   525  	if common.FileExist(path) {
   526  		confirmAndRemoveDB(path, "light node database")
   527  	} else {
   528  		log.Info("Light node database missing", "path", path)
   529  	}
   530  	return nil
   531  }
   532  
   533  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   534  // folder if accepted.
   535  func confirmAndRemoveDB(database string, kind string) {
   536  	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   537  	switch {
   538  	case err != nil:
   539  		utils.Fatalf("%v", err)
   540  	case !confirm:
   541  		log.Info("Database deletion skipped", "path", database)
   542  	default:
   543  		start := time.Now()
   544  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   545  			// If we're at the top level folder, recurse into
   546  			if path == database {
   547  				return nil
   548  			}
   549  			// Delete all the files, but not subfolders
   550  			if !info.IsDir() {
   551  				os.Remove(path)
   552  				return nil
   553  			}
   554  			return filepath.SkipDir
   555  		})
   556  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   557  	}
   558  }
   559  
   560  func dump(ctx *cli.Context) error {
   561  	stack, _ := makeConfigNode(ctx)
   562  	defer stack.Close()
   563  
   564  	chain, chainDb := utils.MakeChain(ctx, stack, true)
   565  	defer chainDb.Close()
   566  	for _, arg := range ctx.Args() {
   567  		var block *types.Block
   568  		if hashish(arg) {
   569  			block = chain.GetBlockByHash(common.HexToHash(arg))
   570  		} else {
   571  			num, _ := strconv.Atoi(arg)
   572  			block = chain.GetBlockByNumber(uint64(num))
   573  		}
   574  		if block == nil {
   575  			fmt.Println("{}")
   576  			utils.Fatalf("block not found")
   577  		} else {
   578  			state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil)
   579  			if err != nil {
   580  				utils.Fatalf("could not create new state: %v", err)
   581  			}
   582  			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
   583  			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
   584  			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
   585  			if ctx.Bool(utils.IterativeOutputFlag.Name) {
   586  				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
   587  			} else {
   588  				if includeMissing {
   589  					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
   590  						" otherwise the accounts will overwrite each other in the resulting mapping.")
   591  				}
   592  				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
   593  			}
   594  		}
   595  	}
   596  	return nil
   597  }
   598  
   599  func inspect(ctx *cli.Context) error {
   600  	node, _ := makeConfigNode(ctx)
   601  	defer node.Close()
   602  
   603  	_, chainDb := utils.MakeChain(ctx, node, true)
   604  	defer chainDb.Close()
   605  
   606  	return rawdb.InspectDatabase(chainDb)
   607  }
   608  
   609  // hashish returns true for strings that look like hashes.
   610  func hashish(x string) bool {
   611  	_, err := strconv.Atoi(x)
   612  	return err != nil
   613  }