github.com/zhiqiangxu/go-ethereum@v1.9.16-0.20210824055606-be91cfdebc48/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"fmt"
    22  	"os"
    23  	"path/filepath"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/zhiqiangxu/go-ethereum/cmd/utils"
    30  	"github.com/zhiqiangxu/go-ethereum/common"
    31  	"github.com/zhiqiangxu/go-ethereum/console/prompt"
    32  	"github.com/zhiqiangxu/go-ethereum/core"
    33  	"github.com/zhiqiangxu/go-ethereum/core/rawdb"
    34  	"github.com/zhiqiangxu/go-ethereum/core/state"
    35  	"github.com/zhiqiangxu/go-ethereum/core/types"
    36  	"github.com/zhiqiangxu/go-ethereum/eth/downloader"
    37  	"github.com/zhiqiangxu/go-ethereum/event"
    38  	"github.com/zhiqiangxu/go-ethereum/log"
    39  	"github.com/zhiqiangxu/go-ethereum/metrics"
    40  	"github.com/zhiqiangxu/go-ethereum/trie"
    41  	"gopkg.in/urfave/cli.v1"
    42  )
    43  
// Blockchain-management subcommands registered with the geth CLI. Every
// command routes through utils.MigrateFlags so global flags may also be
// supplied after the subcommand name.
var (
	// initCommand seeds a fresh data directory from a genesis JSON spec.
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the genesis spec of the selected network as JSON.
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand imports RLP-encoded block files into the local chain.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.TxLookupLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the chain (optionally a block range) to an RLP file.
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads hash->preimage mappings from an RLP stream.
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
	The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand dumps the preimage database to an RLP stream.
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// copydbCommand rebuilds a local chain by syncing from another chaindata dir.
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.TxLookupLimitFlag,
			utils.GoerliFlag,
			utils.YoloV1Flag,
			utils.LegacyTestnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	// removedbCommand interactively deletes the chain and state databases.
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	// dumpCommand prints the state at one or more blocks (by hash or number).
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
	// inspectCommand reports per-category storage usage of the database.
	inspectCommand = cli.Command{
		Action:    utils.MigrateFlags(inspect),
		Name:      "inspect",
		Usage:     "Inspect the storage size for each type of data in the database",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.AncientFlag,
			utils.CacheFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.YoloV1Flag,
			utils.LegacyTestnetFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
	}
)
   221  
   222  // initGenesis will initialise the given JSON format genesis file and writes it as
   223  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   224  func initGenesis(ctx *cli.Context) error {
   225  	// Make sure we have a valid genesis JSON
   226  	genesisPath := ctx.Args().First()
   227  	if len(genesisPath) == 0 {
   228  		utils.Fatalf("Must supply path to genesis JSON file")
   229  	}
   230  	file, err := os.Open(genesisPath)
   231  	if err != nil {
   232  		utils.Fatalf("Failed to read genesis file: %v", err)
   233  	}
   234  	defer file.Close()
   235  
   236  	genesis := new(core.Genesis)
   237  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   238  		utils.Fatalf("invalid genesis file: %v", err)
   239  	}
   240  	// Open an initialise both full and light databases
   241  	stack := makeFullNode(ctx)
   242  	defer stack.Close()
   243  
   244  	for _, name := range []string{"chaindata", "lightchaindata"} {
   245  		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
   246  		if err != nil {
   247  			utils.Fatalf("Failed to open database: %v", err)
   248  		}
   249  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   250  		if err != nil {
   251  			utils.Fatalf("Failed to write genesis block: %v", err)
   252  		}
   253  		chaindb.Close()
   254  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   255  	}
   256  	return nil
   257  }
   258  
   259  func dumpGenesis(ctx *cli.Context) error {
   260  	genesis := utils.MakeGenesis(ctx)
   261  	if genesis == nil {
   262  		genesis = core.DefaultGenesisBlock()
   263  	}
   264  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   265  		utils.Fatalf("could not encode genesis")
   266  	}
   267  	return nil
   268  }
   269  
// importChain imports one or more RLP-encoded block files into the local
// chain, then prints database and memory statistics. With a single file an
// import error is logged and the run stops there; with several files, failing
// files are logged and the remaining ones are still processed. Unless
// --nocompaction is given, the database is compacted afterwards and the
// post-compaction stats are printed as well.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, false)
	defer db.Close()

	// Start periodically gathering memory profiles. This goroutine is never
	// stopped on purpose: it samples for the lifetime of the command and its
	// peaks are reported at the end.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		// Multiple files: keep going past individual failures.
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	// Post-compaction stats mirror the pre-compaction block above.
	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)
	return nil
}
   363  
   364  func exportChain(ctx *cli.Context) error {
   365  	if len(ctx.Args()) < 1 {
   366  		utils.Fatalf("This command requires an argument.")
   367  	}
   368  	stack := makeFullNode(ctx)
   369  	defer stack.Close()
   370  
   371  	chain, _ := utils.MakeChain(ctx, stack, true)
   372  	start := time.Now()
   373  
   374  	var err error
   375  	fp := ctx.Args().First()
   376  	if len(ctx.Args()) < 3 {
   377  		err = utils.ExportChain(chain, fp)
   378  	} else {
   379  		// This can be improved to allow for numbers larger than 9223372036854775807
   380  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   381  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   382  		if ferr != nil || lerr != nil {
   383  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   384  		}
   385  		if first < 0 || last < 0 {
   386  			utils.Fatalf("Export error: block number must be greater than 0\n")
   387  		}
   388  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   389  	}
   390  
   391  	if err != nil {
   392  		utils.Fatalf("Export error: %v\n", err)
   393  	}
   394  	fmt.Printf("Export done in %v\n", time.Since(start))
   395  	return nil
   396  }
   397  
   398  // importPreimages imports preimage data from the specified file.
   399  func importPreimages(ctx *cli.Context) error {
   400  	if len(ctx.Args()) < 1 {
   401  		utils.Fatalf("This command requires an argument.")
   402  	}
   403  	stack := makeFullNode(ctx)
   404  	defer stack.Close()
   405  
   406  	db := utils.MakeChainDatabase(ctx, stack)
   407  	start := time.Now()
   408  
   409  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   410  		utils.Fatalf("Import error: %v\n", err)
   411  	}
   412  	fmt.Printf("Import done in %v\n", time.Since(start))
   413  	return nil
   414  }
   415  
   416  // exportPreimages dumps the preimage data to specified json file in streaming way.
   417  func exportPreimages(ctx *cli.Context) error {
   418  	if len(ctx.Args()) < 1 {
   419  		utils.Fatalf("This command requires an argument.")
   420  	}
   421  	stack := makeFullNode(ctx)
   422  	defer stack.Close()
   423  
   424  	db := utils.MakeChainDatabase(ctx, stack)
   425  	start := time.Now()
   426  
   427  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   428  		utils.Fatalf("Export error: %v\n", err)
   429  	}
   430  	fmt.Printf("Export done in %v\n", time.Since(start))
   431  	return nil
   432  }
   433  
// copyDb rebuilds a local chain by "syncing" from an existing chaindata
// directory: the source directory is opened read-side and served to the
// downloader through a simulated in-process peer, then the freshly written
// database is compacted. Arg 1 is the source chaindata dir, arg 2 the source
// ancient (freezer) dir.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack, false)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	// Fast sync gets a bloom filter (sized from half the cache allowance) to
	// speed up trie node presence checks.
	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	// Synchronise kicks the sync off; poll until the downloader settles.
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
   489  
   490  func removeDB(ctx *cli.Context) error {
   491  	stack, config := makeConfigNode(ctx)
   492  
   493  	// Remove the full node state database
   494  	path := stack.ResolvePath("chaindata")
   495  	if common.FileExist(path) {
   496  		confirmAndRemoveDB(path, "full node state database")
   497  	} else {
   498  		log.Info("Full node state database missing", "path", path)
   499  	}
   500  	// Remove the full node ancient database
   501  	path = config.Eth.DatabaseFreezer
   502  	switch {
   503  	case path == "":
   504  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   505  	case !filepath.IsAbs(path):
   506  		path = config.Node.ResolvePath(path)
   507  	}
   508  	if common.FileExist(path) {
   509  		confirmAndRemoveDB(path, "full node ancient database")
   510  	} else {
   511  		log.Info("Full node ancient database missing", "path", path)
   512  	}
   513  	// Remove the light node database
   514  	path = stack.ResolvePath("lightchaindata")
   515  	if common.FileExist(path) {
   516  		confirmAndRemoveDB(path, "light node database")
   517  	} else {
   518  		log.Info("Light node database missing", "path", path)
   519  	}
   520  	return nil
   521  }
   522  
   523  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   524  // folder if accepted.
   525  func confirmAndRemoveDB(database string, kind string) {
   526  	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   527  	switch {
   528  	case err != nil:
   529  		utils.Fatalf("%v", err)
   530  	case !confirm:
   531  		log.Info("Database deletion skipped", "path", database)
   532  	default:
   533  		start := time.Now()
   534  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   535  			// If we're at the top level folder, recurse into
   536  			if path == database {
   537  				return nil
   538  			}
   539  			// Delete all the files, but not subfolders
   540  			if !info.IsDir() {
   541  				os.Remove(path)
   542  				return nil
   543  			}
   544  			return filepath.SkipDir
   545  		})
   546  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   547  	}
   548  }
   549  
// dump writes the state (accounts and, unless excluded via flags, their code
// and storage) at each requested block to stdout. Arguments that parse as
// integers are treated as block numbers, anything else as a block hash.
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack, true)
	defer chainDb.Close()
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			// Non-parseable remainders default to block 0 (Atoi error ignored).
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			// NOTE(review): `state` here shadows the imported state package
			// for the rest of this branch.
			state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil)
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
			if ctx.Bool(utils.IterativeOutputFlag.Name) {
				// Streaming output: one JSON object per account; the only mode
				// where accounts with missing preimages can be included.
				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
			} else {
				if includeMissing {
					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
						" otherwise the accounts will overwrite each other in the resulting mapping.")
				}
				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
			}
		}
	}
	return nil
}
   588  
   589  func inspect(ctx *cli.Context) error {
   590  	node, _ := makeConfigNode(ctx)
   591  	defer node.Close()
   592  
   593  	_, chainDb := utils.MakeChain(ctx, node, true)
   594  	defer chainDb.Close()
   595  
   596  	return rawdb.InspectDatabase(chainDb)
   597  }
   598  
   599  // hashish returns true for strings that look like hashes.
   600  func hashish(x string) bool {
   601  	_, err := strconv.Atoi(x)
   602  	return err != nil
   603  }