github.com/ethereum-optimism/optimism/l2geth@v0.0.0-20230612200230-50b04ade19e3/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"bytes"
    21  	"crypto/sha256"
    22  	"encoding/json"
    23  	"fmt"
    24  	"io"
    25  	"io/ioutil"
    26  	"net/http"
    27  	"os"
    28  	"path/filepath"
    29  	"regexp"
    30  	"runtime"
    31  	"strconv"
    32  	"sync/atomic"
    33  	"time"
    34  
    35  	"github.com/ethereum-optimism/optimism/l2geth/common/hexutil"
    36  
    37  	"github.com/ethereum-optimism/optimism/l2geth/cmd/utils"
    38  	"github.com/ethereum-optimism/optimism/l2geth/common"
    39  	"github.com/ethereum-optimism/optimism/l2geth/console"
    40  	"github.com/ethereum-optimism/optimism/l2geth/core"
    41  	"github.com/ethereum-optimism/optimism/l2geth/core/rawdb"
    42  	"github.com/ethereum-optimism/optimism/l2geth/core/state"
    43  	"github.com/ethereum-optimism/optimism/l2geth/core/types"
    44  	"github.com/ethereum-optimism/optimism/l2geth/eth/downloader"
    45  	"github.com/ethereum-optimism/optimism/l2geth/event"
    46  	"github.com/ethereum-optimism/optimism/l2geth/log"
    47  	"github.com/ethereum-optimism/optimism/l2geth/trie"
    48  	"gopkg.in/urfave/cli.v1"
    49  )
    50  
var (
	// initCommand bootstraps a genesis block from a local file or an HTTP(S)
	// URL; URL sources additionally require a SHA256 hash argument for
	// integrity verification.
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPathOrUrl> (<genesisHash>)",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.RollupGenesisTimeoutSecondsFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects either a path or an HTTP URL to the genesis file as an argument. If an
HTTP URL is specified for the genesis file, then a hex-encoded SHA256 hash of the
genesis file must be included as a second argument. The hash provided on the CLI
will be checked against the hash of the genesis file downloaded from the URL.`,
	}
	// dumpChainCfgCommand prints the configured chain config as indented JSON.
	dumpChainCfgCommand = cli.Command{
		Action: utils.MigrateFlags(dumpChainCfg),
		Name:   "dump-chain-cfg",
		Usage:  "Dumps the current chain config to standard out.",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
This command dumps the currently configured chain state to standard output. It
will fail if there is no genesis block configured.`,
	}
	// importCommand imports RLP-encoded block files into the local database.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the chain (optionally a block range) to an
	// RLP-encoded file, gzipped when the filename ends in .gz.
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads hash preimages from an RLP stream.
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
	The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand dumps hash preimages to an RLP stream.
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// copydbCommand syncs a fresh local chain out of an existing on-disk
	// chaindata directory via a simulated downloader peer.
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	// removedbCommand interactively deletes the chain/state databases.
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	// dumpCommand prints the state of specific blocks (by hash or number).
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
	// inspectCommand reports per-category storage usage of the database.
	inspectCommand = cli.Command{
		Action:    utils.MigrateFlags(inspect),
		Name:      "inspect",
		Usage:     "Inspect the storage size for each type of data in the database",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.AncientFlag,
			utils.CacheFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
	}
)
   216  
   217  // initGenesis will initialise the given JSON format genesis file and writes it as
   218  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   219  func initGenesis(ctx *cli.Context) error {
   220  	// Make sure we have a valid genesis JSON
   221  	genesisPathOrURL := ctx.Args().First()
   222  	if len(genesisPathOrURL) == 0 {
   223  		utils.Fatalf("Must supply path or URL to genesis JSON file")
   224  	}
   225  
   226  	var file io.ReadCloser
   227  	if matched, _ := regexp.MatchString("^http(s)?://", genesisPathOrURL); matched {
   228  		genesisHashStr := ctx.Args().Get(1)
   229  		if genesisHashStr == "" {
   230  			utils.Fatalf("Must specify a genesis hash argument if the genesis path argument is an URL.")
   231  		}
   232  
   233  		genesisHashData, err := hexutil.Decode(genesisHashStr)
   234  		if err != nil {
   235  			utils.Fatalf("Error decoding genesis hash: %v", err)
   236  		}
   237  
   238  		log.Info("Fetching genesis file", "url", genesisPathOrURL)
   239  
   240  		genesisData, err := fetchGenesis(genesisPathOrURL, time.Duration(ctx.GlobalInt(utils.RollupGenesisTimeoutSecondsFlag.Name)))
   241  		if err != nil {
   242  			utils.Fatalf("Failed to fetch genesis file: %v", err)
   243  		}
   244  
   245  		hash := sha256.New()
   246  		hash.Write(genesisData)
   247  		actualHash := hash.Sum(nil)
   248  		if !bytes.Equal(actualHash, genesisHashData) {
   249  			utils.Fatalf(
   250  				"Genesis hashes do not match. Need: %s, got: %s",
   251  				genesisHashStr,
   252  				hexutil.Encode(actualHash),
   253  			)
   254  		}
   255  
   256  		file = ioutil.NopCloser(bytes.NewReader(genesisData))
   257  	} else {
   258  		var err error
   259  		file, err = os.Open(genesisPathOrURL)
   260  		if err != nil {
   261  			utils.Fatalf("Failed to read genesis file: %v", err)
   262  		}
   263  		defer file.Close()
   264  	}
   265  
   266  	genesis := new(core.Genesis)
   267  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   268  		utils.Fatalf("invalid genesis file: %v", err)
   269  	}
   270  	// Open an initialise both full and light databases
   271  	stack := makeFullNode(ctx)
   272  	defer stack.Close()
   273  
   274  	for _, name := range []string{"chaindata", "lightchaindata"} {
   275  		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
   276  		if err != nil {
   277  			utils.Fatalf("Failed to open database: %v", err)
   278  		}
   279  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   280  		if err != nil {
   281  			utils.Fatalf("Failed to write genesis block: %v", err)
   282  		}
   283  		chaindb.Close()
   284  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   285  	}
   286  	return nil
   287  }
   288  
   289  // dumpChainCfg dumps chain config to standard output.
   290  func dumpChainCfg(ctx *cli.Context) error {
   291  	stack := makeFullNode(ctx)
   292  	defer stack.Close()
   293  
   294  	db, err := stack.OpenDatabase("chaindata", 0, 0, "")
   295  	if err != nil {
   296  		utils.Fatalf("Failed to open database: %v", err)
   297  	}
   298  
   299  	stored := rawdb.ReadCanonicalHash(db, 0)
   300  	var zeroHash common.Hash
   301  	if stored == zeroHash {
   302  		utils.Fatalf("No genesis block configured.")
   303  	}
   304  	chainCfg := rawdb.ReadChainConfig(db, stored)
   305  	out, err := json.MarshalIndent(chainCfg, "", "  ")
   306  	if err != nil {
   307  		utils.Fatalf("Failed to marshal chain config: %v", out)
   308  	}
   309  	fmt.Println(string(out))
   310  	return nil
   311  }
   312  
// importChain imports blocks from one or more RLP-encoded files into the
// node's chain database. Import errors are logged (per file when several are
// given) rather than aborting the run. Afterwards the command prints database
// and memory statistics and, unless --nocompaction is given, compacts the
// database and prints the statistics again.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	// NOTE(review): this sampler goroutine has no stop signal; it is reaped
	// only when the command's process exits.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			// Track high-water marks atomically; the main goroutine reads
			// them at the end of the import.
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		// With multiple files, keep going past individual failures.
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)
	return nil
}
   402  
   403  func exportChain(ctx *cli.Context) error {
   404  	if len(ctx.Args()) < 1 {
   405  		utils.Fatalf("This command requires an argument.")
   406  	}
   407  	stack := makeFullNode(ctx)
   408  	defer stack.Close()
   409  
   410  	chain, _ := utils.MakeChain(ctx, stack)
   411  	start := time.Now()
   412  
   413  	var err error
   414  	fp := ctx.Args().First()
   415  	if len(ctx.Args()) < 3 {
   416  		err = utils.ExportChain(chain, fp)
   417  	} else {
   418  		// This can be improved to allow for numbers larger than 9223372036854775807
   419  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   420  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   421  		if ferr != nil || lerr != nil {
   422  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   423  		}
   424  		if first < 0 || last < 0 {
   425  			utils.Fatalf("Export error: block number must be greater than 0\n")
   426  		}
   427  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   428  	}
   429  
   430  	if err != nil {
   431  		utils.Fatalf("Export error: %v\n", err)
   432  	}
   433  	fmt.Printf("Export done in %v\n", time.Since(start))
   434  	return nil
   435  }
   436  
   437  // importPreimages imports preimage data from the specified file.
   438  func importPreimages(ctx *cli.Context) error {
   439  	if len(ctx.Args()) < 1 {
   440  		utils.Fatalf("This command requires an argument.")
   441  	}
   442  	stack := makeFullNode(ctx)
   443  	defer stack.Close()
   444  
   445  	db := utils.MakeChainDatabase(ctx, stack)
   446  	start := time.Now()
   447  
   448  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   449  		utils.Fatalf("Import error: %v\n", err)
   450  	}
   451  	fmt.Printf("Import done in %v\n", time.Since(start))
   452  	return nil
   453  }
   454  
   455  // exportPreimages dumps the preimage data to specified json file in streaming way.
   456  func exportPreimages(ctx *cli.Context) error {
   457  	if len(ctx.Args()) < 1 {
   458  		utils.Fatalf("This command requires an argument.")
   459  	}
   460  	stack := makeFullNode(ctx)
   461  	defer stack.Close()
   462  
   463  	db := utils.MakeChainDatabase(ctx, stack)
   464  	start := time.Now()
   465  
   466  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   467  		utils.Fatalf("Export error: %v\n", err)
   468  	}
   469  	fmt.Printf("Export done in %v\n", time.Since(start))
   470  	return nil
   471  }
   472  
// copyDb bootstraps a fresh local chain database by "syncing" from an
// existing chaindata directory on disk: the source database is wired up as a
// fake downloader peer and a regular (fast or full, per --syncmode) sync is
// run against it, after which the freshly written database is compacted.
//
// Arguments: <sourceChaindataDir> <sourceAncientDir>.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	// The trie sync bloom is only needed for fast sync; size it from half
	// the configured cache allowance.
	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	// The fake peer answers protocol version 63 requests straight from the
	// source database.
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	// Poll until the downloader reports the sync as fully finished.
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
   528  
   529  func removeDB(ctx *cli.Context) error {
   530  	stack, config := makeConfigNode(ctx)
   531  
   532  	// Remove the full node state database
   533  	path := stack.ResolvePath("chaindata")
   534  	if common.FileExist(path) {
   535  		confirmAndRemoveDB(path, "full node state database")
   536  	} else {
   537  		log.Info("Full node state database missing", "path", path)
   538  	}
   539  	// Remove the full node ancient database
   540  	path = config.Eth.DatabaseFreezer
   541  	switch {
   542  	case path == "":
   543  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   544  	case !filepath.IsAbs(path):
   545  		path = config.Node.ResolvePath(path)
   546  	}
   547  	if common.FileExist(path) {
   548  		confirmAndRemoveDB(path, "full node ancient database")
   549  	} else {
   550  		log.Info("Full node ancient database missing", "path", path)
   551  	}
   552  	// Remove the light node database
   553  	path = stack.ResolvePath("lightchaindata")
   554  	if common.FileExist(path) {
   555  		confirmAndRemoveDB(path, "light node database")
   556  	} else {
   557  		log.Info("Light node database missing", "path", path)
   558  	}
   559  	return nil
   560  }
   561  
   562  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   563  // folder if accepted.
   564  func confirmAndRemoveDB(database string, kind string) {
   565  	confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   566  	switch {
   567  	case err != nil:
   568  		utils.Fatalf("%v", err)
   569  	case !confirm:
   570  		log.Info("Database deletion skipped", "path", database)
   571  	default:
   572  		start := time.Now()
   573  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   574  			// If we're at the top level folder, recurse into
   575  			if path == database {
   576  				return nil
   577  			}
   578  			// Delete all the files, but not subfolders
   579  			if !info.IsDir() {
   580  				os.Remove(path)
   581  				return nil
   582  			}
   583  			return filepath.SkipDir
   584  		})
   585  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   586  	}
   587  }
   588  
   589  func dump(ctx *cli.Context) error {
   590  	stack := makeFullNode(ctx)
   591  	defer stack.Close()
   592  
   593  	chain, chainDb := utils.MakeChain(ctx, stack)
   594  	defer chainDb.Close()
   595  	for _, arg := range ctx.Args() {
   596  		var block *types.Block
   597  		if hashish(arg) {
   598  			block = chain.GetBlockByHash(common.HexToHash(arg))
   599  		} else {
   600  			num, _ := strconv.Atoi(arg)
   601  			block = chain.GetBlockByNumber(uint64(num))
   602  		}
   603  		if block == nil {
   604  			fmt.Println("{}")
   605  			utils.Fatalf("block not found")
   606  		} else {
   607  			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
   608  			if err != nil {
   609  				utils.Fatalf("could not create new state: %v", err)
   610  			}
   611  			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
   612  			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
   613  			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
   614  			if ctx.Bool(utils.IterativeOutputFlag.Name) {
   615  				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
   616  			} else {
   617  				if includeMissing {
   618  					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
   619  						" otherwise the accounts will overwrite each other in the resulting mapping.")
   620  				}
   621  				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
   622  			}
   623  		}
   624  	}
   625  	return nil
   626  }
   627  
   628  func inspect(ctx *cli.Context) error {
   629  	node, _ := makeConfigNode(ctx)
   630  	defer node.Close()
   631  
   632  	_, chainDb := utils.MakeChain(ctx, node)
   633  	defer chainDb.Close()
   634  
   635  	return rawdb.InspectDatabase(chainDb)
   636  }
   637  
   638  // hashish returns true for strings that look like hashes.
   639  func hashish(x string) bool {
   640  	_, err := strconv.Atoi(x)
   641  	return err != nil
   642  }
   643  
   644  func fetchGenesis(url string, timeout time.Duration) ([]byte, error) {
   645  	client := &http.Client{
   646  		Timeout: timeout,
   647  	}
   648  	resp, err := client.Get(url)
   649  	if err != nil {
   650  		return nil, err
   651  	}
   652  	defer resp.Body.Close()
   653  	return ioutil.ReadAll(resp.Body)
   654  }