github.com/Debrief-BC/go-debrief@v0.0.0-20200420203408-0c26ca968123/cmd/debrief/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/Debrief-BC/go-debrief/cmd/utils"
	"github.com/Debrief-BC/go-debrief/common"
	"github.com/Debrief-BC/go-debrief/console"
	"github.com/Debrief-BC/go-debrief/core"
	"github.com/Debrief-BC/go-debrief/core/rawdb"
	"github.com/Debrief-BC/go-debrief/core/state"
	"github.com/Debrief-BC/go-debrief/core/types"
	"github.com/Debrief-BC/go-debrief/eth/downloader"
	"github.com/Debrief-BC/go-debrief/event"
	"github.com/Debrief-BC/go-debrief/log"
	"github.com/Debrief-BC/go-debrief/metrics"
	"github.com/Debrief-BC/go-debrief/trie"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as its argument.`,
	}
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are
used, processing will continue even if an individual file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
to if it already exists. If the file ends with .gz, the output will
be gzipped.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP-encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP-encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir> <sourceAncientDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the chaindata directory to sync from; the second must be
its ancient (freezer) directory.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "debrief dump 0" to dump the genesis block.`,
	}
	inspectCommand = cli.Command{
		Action:    utils.MigrateFlags(inspect),
		Name:      "inspect",
		Usage:     "Inspect the storage size for each type of data in the database",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.AncientFlag,
			utils.CacheFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
	}
)
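
// All of the commands above are registered as subcommands of the main binary.
// A couple of illustrative invocations (a sketch: the binary name "debrief"
// is assumed from this package's path, and the genesis path is a placeholder):
//
//	debrief init ./genesis.json
//	debrief removedb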

// initGenesis will initialise the given JSON format genesis file and write it
// as the zeroth block (i.e. genesis), or fail hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	defer stack.Close()

	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		chaindb.Close()
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}
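
// For reference, a minimal genesis file that the decoder above will accept
// looks roughly like this (a sketch: the chainId and hex values are
// illustrative placeholders, not a known network configuration):
//
//	{
//	  "config":     { "chainId": 1337 },
//	  "difficulty": "0x400",
//	  "gasLimit":   "0x8000000",
//	  "alloc":      {}
//	}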

func dumpGenesis(ctx *cli.Context) error {
	genesis := utils.MakeGenesis(ctx)
	if genesis == nil {
		genesis = core.DefaultGenesisBlock()
	}
	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
		utils.Fatalf("could not encode genesis")
	}
	return nil
}
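
// json.Encoder emits the genesis on a single line; for human inspection it
// can be piped through a pretty-printer (a usage sketch, assuming jq is
// installed):
//
//	debrief dumpgenesis | jq .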

func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)
	return nil
}
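
// The metrics flags wired into importCommand above can stream a run into
// InfluxDB. A sketch of such an invocation (the endpoint, database name and
// file are placeholders; double-check the exact flag spellings against
// `debrief help import`):
//
//	debrief import --metrics --metrics.influxdb \
//	  --metrics.influxdb.endpoint http://localhost:8086 \
//	  --metrics.influxdb.database chaindb \
//	  chain.rlp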

func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}
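
// Two export forms follow from the code above (a usage sketch; the file
// names are arbitrary): a full export, and a ranged export that appends to
// the target file and gzips when the name ends in .gz:
//
//	debrief export chain.rlp.gz
//	debrief export chain.rlp 0 500000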

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages streams the preimage data out to the specified file as an
// RLP-encoded stream.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}
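
// The two preimage commands are designed to round-trip: the export on one
// node produces the stream the import on another consumes (a sketch; the
// file name is arbitrary):
//
//	debrief export-preimages preimages.rlp   # on the source node
//	debrief import-preimages preimages.rlp   # on the target node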

func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
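
// copyDb treats the source directory as a simulated remote peer, so a run
// amounts to pointing a fresh node at another node's on-disk data. A sketch
// (both source paths are placeholders for an existing node's datadir layout):
//
//	debrief copydb --datadir ./fresh-node \
//	  /old-node/chaindata /old-node/chaindata/ancient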

func removeDB(ctx *cli.Context) error {
	stack, config := makeConfigNode(ctx)

	// Remove the full node state database
	path := stack.ResolvePath("chaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node state database")
	} else {
		log.Info("Full node state database missing", "path", path)
	}
	// Remove the full node ancient database
	path = config.Eth.DatabaseFreezer
	switch {
	case path == "":
		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
	case !filepath.IsAbs(path):
		path = config.Node.ResolvePath(path)
	}
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node ancient database")
	} else {
		log.Info("Full node ancient database missing", "path", path)
	}
	// Remove the light node database
	path = stack.ResolvePath("lightchaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "light node database")
	} else {
		log.Info("Light node database missing", "path", path)
	}
	return nil
}

// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
	confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
	switch {
	case err != nil:
		utils.Fatalf("%v", err)
	case !confirm:
		log.Info("Database deletion skipped", "path", database)
	default:
		start := time.Now()
		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
			// If we're at the top level folder, recurse into it
			if path == database {
				return nil
			}
			// Delete all the files, but not subfolders
			if !info.IsDir() {
				os.Remove(path)
				return nil
			}
			return filepath.SkipDir
		})
		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
	}
}
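
// Note on the deletion above: the Walk callback removes only the files in the
// top-level folder and returns filepath.SkipDir for anything nested, so a
// subdirectory such as the ancient freezer under chaindata survives a
// chaindata wipe and is offered for removal as its own database by removeDB.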

func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil)
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
			if ctx.Bool(utils.IterativeOutputFlag.Name) {
				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
			} else {
				if includeMissing {
					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
						" otherwise the accounts will overwrite each other in the resulting mapping.\n")
				}
				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
			}
		}
	}
	return nil
}
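
// Example dump invocations (a sketch; the hash is a placeholder, and the
// flag spellings should be double-checked with `debrief help dump`):
//
//	debrief dump 0
//	debrief dump 0x<blockHash>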

func inspect(ctx *cli.Context) error {
	node, _ := makeConfigNode(ctx)
	defer node.Close()

	_, chainDb := utils.MakeChain(ctx, node)
	defer chainDb.Close()

	return rawdb.InspectDatabase(chainDb)
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}
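
// The heuristic above simply asks whether the argument parses as a decimal
// integer: hashish("1024") is false, while hashish("0xdead") and any other
// non-numeric string are true. That is why "debrief dump 0" reads the block
// by number, while a 0x-prefixed hash falls through to the hash lookup.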