github.com/ethereum/go-ethereum@v1.14.4-0.20240516095835-473ee8fc07a3/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"os"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/cmd/utils"
    30  	"github.com/ethereum/go-ethereum/common"
    31  	"github.com/ethereum/go-ethereum/common/hexutil"
    32  	"github.com/ethereum/go-ethereum/core"
    33  	"github.com/ethereum/go-ethereum/core/rawdb"
    34  	"github.com/ethereum/go-ethereum/core/state"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/crypto"
    37  	"github.com/ethereum/go-ethereum/ethdb"
    38  	"github.com/ethereum/go-ethereum/internal/era"
    39  	"github.com/ethereum/go-ethereum/internal/flags"
    40  	"github.com/ethereum/go-ethereum/log"
    41  	"github.com/ethereum/go-ethereum/metrics"
    42  	"github.com/ethereum/go-ethereum/node"
    43  	"github.com/ethereum/go-ethereum/params"
    44  	"github.com/urfave/cli/v2"
    45  )
    46  
// Command definitions for the chain-management subcommands of geth. Each
// entry wires a CLI action to its flags; the long-form help text lives in
// the Description fields below.
var (
	// initCommand writes a genesis block from a user-supplied JSON file.
	initCommand = &cli.Command{
		Action:    initGenesis,
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: flags.Merge([]cli.Flag{
			utils.CachePreimagesFlag,
			utils.OverrideCancun,
			utils.OverrideVerkle,
		}, utils.DatabaseFlags),
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the active genesis configuration as JSON.
	dumpGenesisCommand = &cli.Command{
		Action:    dumpGenesis,
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags:     append([]cli.Flag{utils.DataDirFlag}, utils.NetworkFlags...),
		Description: `
The dumpgenesis command prints the genesis configuration of the network preset
if one is set.  Otherwise it prints the genesis from the datadir.`,
	}
	// importCommand imports RLP-encoded blocks from one or more files.
	importCommand = &cli.Command{
		Action:    importChain,
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,
			utils.VMTraceFlag,
			utils.VMTraceJsonConfigFlag,
			utils.TransactionHistoryFlag,
			utils.StateHistoryFlag,
		}, utils.DatabaseFlags),
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand exports the chain (or a block range) to an RLP file.
	exportCommand = &cli.Command{
		Action:    exportChain,
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabaseFlags),
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importHistoryCommand imports blocks and receipts from Era archives.
	importHistoryCommand = &cli.Command{
		Action:    importHistory,
		Name:      "import-history",
		Usage:     "Import an Era archive",
		ArgsUsage: "<dir>",
		Flags: flags.Merge([]cli.Flag{
			utils.TxLookupLimitFlag,
		},
			utils.DatabaseFlags,
			utils.NetworkFlags,
		),
		Description: `
The import-history command will import blocks and their corresponding receipts
from Era archives.
`,
	}
	// exportHistoryCommand exports blocks and receipts into Era archives.
	exportHistoryCommand = &cli.Command{
		Action:    exportHistory,
		Name:      "export-history",
		Usage:     "Export blockchain history to Era archives",
		ArgsUsage: "<dir> <first> <last>",
		Flags:     flags.Merge(utils.DatabaseFlags),
		Description: `
The export-history command will export blocks and their corresponding receipts
into Era archives. Eras are typically packaged in steps of 8192 blocks.
`,
	}
	// importPreimagesCommand is a deprecated alias kept so that legacy
	// preimage dumps can still be imported; see "geth db import".
	importPreimagesCommand = &cli.Command{
		Action:    importPreimages,
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabaseFlags),
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
	}

	// dumpCommand dumps the state of a specific (or the latest) block.
	dumpCommand = &cli.Command{
		Action:    dump,
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		}, utils.DatabaseFlags),
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}
)
   191  
   192  // initGenesis will initialise the given JSON format genesis file and writes it as
   193  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   194  func initGenesis(ctx *cli.Context) error {
   195  	if ctx.Args().Len() != 1 {
   196  		utils.Fatalf("need genesis.json file as the only argument")
   197  	}
   198  	genesisPath := ctx.Args().First()
   199  	if len(genesisPath) == 0 {
   200  		utils.Fatalf("invalid path to genesis file")
   201  	}
   202  	file, err := os.Open(genesisPath)
   203  	if err != nil {
   204  		utils.Fatalf("Failed to read genesis file: %v", err)
   205  	}
   206  	defer file.Close()
   207  
   208  	genesis := new(core.Genesis)
   209  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   210  		utils.Fatalf("invalid genesis file: %v", err)
   211  	}
   212  	// Open and initialise both full and light databases
   213  	stack, _ := makeConfigNode(ctx)
   214  	defer stack.Close()
   215  
   216  	var overrides core.ChainOverrides
   217  	if ctx.IsSet(utils.OverrideCancun.Name) {
   218  		v := ctx.Uint64(utils.OverrideCancun.Name)
   219  		overrides.OverrideCancun = &v
   220  	}
   221  	if ctx.IsSet(utils.OverrideVerkle.Name) {
   222  		v := ctx.Uint64(utils.OverrideVerkle.Name)
   223  		overrides.OverrideVerkle = &v
   224  	}
   225  	for _, name := range []string{"chaindata", "lightchaindata"} {
   226  		chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
   227  		if err != nil {
   228  			utils.Fatalf("Failed to open database: %v", err)
   229  		}
   230  		defer chaindb.Close()
   231  
   232  		triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
   233  		defer triedb.Close()
   234  
   235  		_, hash, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
   236  		if err != nil {
   237  			utils.Fatalf("Failed to write genesis block: %v", err)
   238  		}
   239  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   240  	}
   241  	return nil
   242  }
   243  
// dumpGenesis prints a genesis configuration as JSON on stdout. A network
// preset selected by flags wins; otherwise the genesis stored in the datadir
// is used. It fails hard when neither source is available.
func dumpGenesis(ctx *cli.Context) error {
	// check if there is a testnet preset enabled
	var genesis *core.Genesis
	if utils.IsNetworkPreset(ctx) {
		genesis = utils.MakeGenesis(ctx)
	} else if ctx.IsSet(utils.DeveloperFlag.Name) && !ctx.IsSet(utils.DataDirFlag.Name) {
		// Dev mode without an explicit datadir: synthesize the developer
		// genesis (11.5M gas limit, nil faucet address).
		genesis = core.DeveloperGenesisBlock(11_500_000, nil)
	}

	if genesis != nil {
		if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
			utils.Fatalf("could not encode genesis: %s", err)
		}
		return nil
	}

	// dump whatever already exists in the datadir
	stack, _ := makeConfigNode(ctx)
	for _, name := range []string{"chaindata", "lightchaindata"} {
		db, err := stack.OpenDatabase(name, 0, 0, "", true)
		if err != nil {
			// A missing database is fine (try the next name); any other
			// open error is fatal for the command.
			if !os.IsNotExist(err) {
				return err
			}
			continue
		}
		// Note: this genesis shadows the preset variable above, which is
		// known to be nil at this point.
		genesis, err := core.ReadGenesis(db)
		if err != nil {
			// Fatalf terminates the process, so db is not leaked here.
			utils.Fatalf("failed to read genesis: %s", err)
		}
		db.Close()

		if err := json.NewEncoder(os.Stdout).Encode(*genesis); err != nil {
			utils.Fatalf("could not encode stored genesis: %s", err)
		}
		// Dump only the first database that exists.
		return nil
	}
	if ctx.IsSet(utils.DataDirFlag.Name) {
		utils.Fatalf("no existing datadir at %s", stack.Config().DataDir)
	}
	utils.Fatalf("no network preset provided, and no genesis exists in the default datadir")
	return nil
}
   287  
// importChain imports RLP-encoded blocks from one or more files into the
// local chain, reporting import time, memory statistics and database stats.
// With multiple files, an individual failure is logged and the import
// continues; the last error (if any) is returned at the end.
func importChain(ctx *cli.Context) error {
	if ctx.Args().Len() < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, false)
	defer db.Close()

	// Start periodically gathering memory profiles
	// (the sampler goroutine runs for the remainder of the process; it is
	// never stopped explicitly).
	var peakMemAlloc, peakMemSys atomic.Uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if peakMemAlloc.Load() < stats.Alloc {
				peakMemAlloc.Store(stats.Alloc)
			}
			if peakMemSys.Load() < stats.Sys {
				peakMemSys.Store(stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	// importErr keeps only the most recent failure; earlier ones are logged.
	var importErr error

	if ctx.Args().Len() == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args().Slice() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(peakMemAlloc.Load())/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(peakMemSys.Load())/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.Bool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}
   366  
   367  func exportChain(ctx *cli.Context) error {
   368  	if ctx.Args().Len() < 1 {
   369  		utils.Fatalf("This command requires an argument.")
   370  	}
   371  
   372  	stack, _ := makeConfigNode(ctx)
   373  	defer stack.Close()
   374  
   375  	chain, db := utils.MakeChain(ctx, stack, true)
   376  	defer db.Close()
   377  	start := time.Now()
   378  
   379  	var err error
   380  	fp := ctx.Args().First()
   381  	if ctx.Args().Len() < 3 {
   382  		err = utils.ExportChain(chain, fp)
   383  	} else {
   384  		// This can be improved to allow for numbers larger than 9223372036854775807
   385  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   386  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   387  		if ferr != nil || lerr != nil {
   388  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   389  		}
   390  		if first < 0 || last < 0 {
   391  			utils.Fatalf("Export error: block number must be greater than 0\n")
   392  		}
   393  		if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
   394  			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
   395  		}
   396  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   397  	}
   398  	if err != nil {
   399  		utils.Fatalf("Export error: %v\n", err)
   400  	}
   401  	fmt.Printf("Export done in %v\n", time.Since(start))
   402  	return nil
   403  }
   404  
// importHistory imports blocks and receipts from Era archives in the given
// directory. The target network is taken from a network flag when one is set;
// otherwise it is inferred from the era1 files present in the directory, and
// the import aborts when zero or multiple networks are found there.
func importHistory(ctx *cli.Context) error {
	if ctx.Args().Len() != 1 {
		utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, false)
	defer db.Close()

	var (
		start   = time.Now()
		dir     = ctx.Args().Get(0)
		network string
	)

	// Determine network.
	if utils.IsNetworkPreset(ctx) {
		switch {
		case ctx.Bool(utils.MainnetFlag.Name):
			network = "mainnet"
		case ctx.Bool(utils.SepoliaFlag.Name):
			network = "sepolia"
		case ctx.Bool(utils.GoerliFlag.Name):
			network = "goerli"
		}
	} else {
		// No network flag set, try to determine network based on files
		// present in directory.
		var networks []string
		for _, n := range params.NetworkNames {
			entries, err := era.ReadDir(dir, n)
			if err != nil {
				return fmt.Errorf("error reading %s: %w", dir, err)
			}
			if len(entries) > 0 {
				networks = append(networks, n)
			}
		}
		if len(networks) == 0 {
			return fmt.Errorf("no era1 files found in %s", dir)
		}
		// Refuse to guess when archives for several networks coexist.
		if len(networks) > 1 {
			return errors.New("multiple networks found, use a network flag to specify desired network")
		}
		network = networks[0]
	}

	if err := utils.ImportHistory(chain, db, dir, network); err != nil {
		return err
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}
   460  
   461  // exportHistory exports chain history in Era archives at a specified
   462  // directory.
   463  func exportHistory(ctx *cli.Context) error {
   464  	if ctx.Args().Len() != 3 {
   465  		utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
   466  	}
   467  
   468  	stack, _ := makeConfigNode(ctx)
   469  	defer stack.Close()
   470  
   471  	chain, _ := utils.MakeChain(ctx, stack, true)
   472  	start := time.Now()
   473  
   474  	var (
   475  		dir         = ctx.Args().Get(0)
   476  		first, ferr = strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   477  		last, lerr  = strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   478  	)
   479  	if ferr != nil || lerr != nil {
   480  		utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   481  	}
   482  	if first < 0 || last < 0 {
   483  		utils.Fatalf("Export error: block number must be greater than 0\n")
   484  	}
   485  	if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
   486  		utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
   487  	}
   488  	err := utils.ExportHistory(chain, dir, uint64(first), uint64(last), uint64(era.MaxEra1Size))
   489  	if err != nil {
   490  		utils.Fatalf("Export error: %v\n", err)
   491  	}
   492  	fmt.Printf("Export done in %v\n", time.Since(start))
   493  	return nil
   494  }
   495  
   496  // importPreimages imports preimage data from the specified file.
   497  // it is deprecated, and the export function has been removed, but
   498  // the import function is kept around for the time being so that
   499  // older file formats can still be imported.
   500  func importPreimages(ctx *cli.Context) error {
   501  	if ctx.Args().Len() < 1 {
   502  		utils.Fatalf("This command requires an argument.")
   503  	}
   504  
   505  	stack, _ := makeConfigNode(ctx)
   506  	defer stack.Close()
   507  
   508  	db := utils.MakeChainDatabase(ctx, stack, false)
   509  	defer db.Close()
   510  	start := time.Now()
   511  
   512  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   513  		utils.Fatalf("Import error: %v\n", err)
   514  	}
   515  	fmt.Printf("Import done in %v\n", time.Since(start))
   516  	return nil
   517  }
   518  
// parseDumpConfig resolves the dump target block from the command line — a
// block hash, a decimal block number, or the head block when no argument is
// given — and assembles a state.DumpConfig from the dump-related flags. It
// returns the config together with the state root of the chosen block.
// NOTE(review): the stack parameter is not referenced in this function body;
// presumably kept for signature compatibility — confirm before removing.
func parseDumpConfig(ctx *cli.Context, stack *node.Node, db ethdb.Database) (*state.DumpConfig, common.Hash, error) {
	var header *types.Header
	if ctx.NArg() > 1 {
		return nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
	}
	if ctx.NArg() == 1 {
		arg := ctx.Args().First()
		if hashish(arg) {
			// Argument does not parse as a decimal number: treat as a hash.
			hash := common.HexToHash(arg)
			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
				header = rawdb.ReadHeader(db, hash, *number)
			} else {
				return nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
			}
		} else {
			number, err := strconv.ParseUint(arg, 10, 64)
			if err != nil {
				return nil, common.Hash{}, err
			}
			if hash := rawdb.ReadCanonicalHash(db, number); hash != (common.Hash{}) {
				header = rawdb.ReadHeader(db, hash, number)
			} else {
				return nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
			}
		}
	} else {
		// Use latest
		header = rawdb.ReadHeadHeader(db)
	}
	if header == nil {
		return nil, common.Hash{}, errors.New("no head block found")
	}
	// The iteration start key may be given as a 32-byte hash or a 20-byte
	// address (which is hashed); empty means start from the zero hash.
	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
	var start common.Hash
	switch len(startArg) {
	case 0: // no start key given: iterate from the zero hash
	case 32:
		start = common.BytesToHash(startArg)
	case 20:
		start = crypto.Keccak256Hash(startArg)
		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
	default:
		return nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
	}
	var conf = &state.DumpConfig{
		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
		Start:             start.Bytes(),
		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
	}
	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
	return conf, header.Root, nil
}
   575  
   576  func dump(ctx *cli.Context) error {
   577  	stack, _ := makeConfigNode(ctx)
   578  	defer stack.Close()
   579  
   580  	db := utils.MakeChainDatabase(ctx, stack, true)
   581  	defer db.Close()
   582  
   583  	conf, root, err := parseDumpConfig(ctx, stack, db)
   584  	if err != nil {
   585  		return err
   586  	}
   587  	triedb := utils.MakeTrieDatabase(ctx, db, true, true, false) // always enable preimage lookup
   588  	defer triedb.Close()
   589  
   590  	state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
   591  	if err != nil {
   592  		return err
   593  	}
   594  	if ctx.Bool(utils.IterativeOutputFlag.Name) {
   595  		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
   596  	} else {
   597  		fmt.Println(string(state.Dump(conf)))
   598  	}
   599  	return nil
   600  }
   601  
   602  // hashish returns true for strings that look like hashes.
   603  func hashish(x string) bool {
   604  	_, err := strconv.Atoi(x)
   605  	return err != nil
   606  }