github.com/JFJun/bsc@v1.0.0/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"fmt"
    22  	"net"
    23  	"os"
    24  	"path"
    25  	"path/filepath"
    26  	"runtime"
    27  	"strconv"
    28  	"strings"
    29  	"sync/atomic"
    30  	"time"
    31  
    32  	"github.com/JFJun/bsc/cmd/utils"
    33  	"github.com/JFJun/bsc/common"
    34  	"github.com/JFJun/bsc/console"
    35  	"github.com/JFJun/bsc/core"
    36  	"github.com/JFJun/bsc/core/rawdb"
    37  	"github.com/JFJun/bsc/core/state"
    38  	"github.com/JFJun/bsc/core/types"
    39  	"github.com/JFJun/bsc/eth/downloader"
    40  	"github.com/JFJun/bsc/event"
    41  	"github.com/JFJun/bsc/log"
    42  	"github.com/JFJun/bsc/metrics"
    43  	"github.com/JFJun/bsc/node"
    44  	"github.com/JFJun/bsc/p2p/enode"
    45  	"github.com/JFJun/bsc/trie"
    46  	"gopkg.in/urfave/cli.v1"
    47  )
    48  
var (
	// initCommand bootstraps a new network: it decodes a genesis JSON file
	// (sole argument) and writes it as block zero via initGenesis.
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// initNetworkCommand generates per-node directories (nodekey + config.toml)
	// for a multi-node network from a genesis file and a base config file.
	initNetworkCommand = cli.Command{
		Action:    utils.MigrateFlags(initNetwork),
		Name:      "init-network",
		Usage:     "Bootstrap and initialize a new genesis block, and nodekey, config files for network nodes",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.InitNetworkDir,
			utils.InitNetworkPort,
			utils.InitNetworkSize,
			utils.InitNetworkIps,
			configFileFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init-network command initializes a new genesis block, definition for the network, config files for network nodes.
It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the selected genesis configuration as JSON.
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand imports RLP-encoded block files into the local chain,
	// with optional metrics reporting while the import runs.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the chain (optionally a block range) to a file.
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads hash preimages from an RLP stream file.
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
	The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand dumps the preimage database to an RLP stream file.
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// copydbCommand syncs a local chain from another chaindata directory by
	// driving the downloader against a simulated local peer.
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.LegacyTestnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	// removedbCommand deletes the chain/state/ancient/light databases after
	// interactive confirmation.
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	// dumpCommand prints the state of specific blocks, given by hash or number.
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
	// inspectCommand reports a per-category storage breakdown of the database.
	inspectCommand = cli.Command{
		Action:    utils.MigrateFlags(inspect),
		Name:      "inspect",
		Usage:     "Inspect the storage size for each type of data in the database",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.AncientFlag,
			utils.CacheFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.LegacyTestnetFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
	}
)
   239  
   240  // initGenesis will initialise the given JSON format genesis file and writes it as
   241  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   242  func initGenesis(ctx *cli.Context) error {
   243  	// Make sure we have a valid genesis JSON
   244  	genesisPath := ctx.Args().First()
   245  	if len(genesisPath) == 0 {
   246  		utils.Fatalf("Must supply path to genesis JSON file")
   247  	}
   248  	file, err := os.Open(genesisPath)
   249  	if err != nil {
   250  		utils.Fatalf("Failed to read genesis file: %v", err)
   251  	}
   252  	defer file.Close()
   253  
   254  	genesis := new(core.Genesis)
   255  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   256  		utils.Fatalf("invalid genesis file: %v", err)
   257  	}
   258  	// Open an initialise both full and light databases
   259  	stack := makeFullNode(ctx)
   260  	defer stack.Close()
   261  
   262  	for _, name := range []string{"chaindata", "lightchaindata"} {
   263  		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
   264  		if err != nil {
   265  			utils.Fatalf("Failed to open database: %v", err)
   266  		}
   267  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   268  		if err != nil {
   269  			utils.Fatalf("Failed to write genesis block: %v", err)
   270  		}
   271  		chaindb.Close()
   272  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   273  	}
   274  	return nil
   275  }
   276  
   277  // initNetwork will bootstrap and initialize a new genesis block, and nodekey, config files for network nodes
   278  func initNetwork(ctx *cli.Context) error {
   279  	initDir := ctx.String(utils.InitNetworkDir.Name)
   280  	if len(initDir) == 0 {
   281  		utils.Fatalf("init.dir is required")
   282  	}
   283  	size := ctx.Int(utils.InitNetworkSize.Name)
   284  	port := ctx.Int(utils.InitNetworkPort.Name)
   285  	ipStr := ctx.String(utils.InitNetworkIps.Name)
   286  	cfgFile := ctx.String(configFileFlag.Name)
   287  
   288  	if len(cfgFile) == 0 {
   289  		utils.Fatalf("config file is required")
   290  	}
   291  	var ips []string
   292  	if len(ipStr) != 0 {
   293  		ips = strings.Split(ipStr, ",")
   294  		if len(ips) != size {
   295  			utils.Fatalf("mismatch of size and length of ips")
   296  		}
   297  		for i := 0; i < size; i++ {
   298  			_, err := net.ResolveIPAddr("", ips[i])
   299  			if err != nil {
   300  				utils.Fatalf("invalid format of ip")
   301  				return err
   302  			}
   303  		}
   304  	} else {
   305  		ips = make([]string, size)
   306  		for i := 0; i < size; i++ {
   307  			ips[i] = "127.0.0.1"
   308  		}
   309  	}
   310  
   311  	// Make sure we have a valid genesis JSON
   312  	genesisPath := ctx.Args().First()
   313  	if len(genesisPath) == 0 {
   314  		utils.Fatalf("Must supply path to genesis JSON file")
   315  	}
   316  	file, err := os.Open(genesisPath)
   317  	if err != nil {
   318  		utils.Fatalf("Failed to read genesis file: %v", err)
   319  	}
   320  	defer file.Close()
   321  
   322  	genesis := new(core.Genesis)
   323  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   324  		utils.Fatalf("invalid genesis file: %v", err)
   325  	}
   326  	enodes := make([]*enode.Node, size)
   327  
   328  	// load config
   329  	var config gethConfig
   330  	err = loadConfig(cfgFile, &config)
   331  	if err != nil {
   332  		return err
   333  	}
   334  	config.Eth.Genesis = genesis
   335  
   336  	for i := 0; i < size; i++ {
   337  		stack, err := node.New(&config.Node)
   338  		if err != nil {
   339  			return err
   340  		}
   341  		stack.Config().DataDir = path.Join(initDir, fmt.Sprintf("node%d", i))
   342  		pk := stack.Config().NodeKey()
   343  		enodes[i] = enode.NewV4(&pk.PublicKey, net.ParseIP(ips[i]), port, port)
   344  	}
   345  
   346  	for i := 0; i < size; i++ {
   347  		config.Node.HTTPHost = ips[i]
   348  		config.Node.P2P.StaticNodes = make([]*enode.Node, size-1)
   349  		for j := 0; j < i; j++ {
   350  			config.Node.P2P.StaticNodes[j] = enodes[j]
   351  		}
   352  		for j := i + 1; j < size; j++ {
   353  			config.Node.P2P.StaticNodes[j-1] = enodes[j]
   354  		}
   355  		out, err := tomlSettings.Marshal(config)
   356  		if err != nil {
   357  			return err
   358  		}
   359  		dump, err := os.OpenFile(path.Join(initDir, fmt.Sprintf("node%d", i), "config.toml"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644)
   360  		if err != nil {
   361  			return err
   362  		}
   363  		defer dump.Close()
   364  		dump.Write(out)
   365  	}
   366  	return nil
   367  }
   368  
   369  func dumpGenesis(ctx *cli.Context) error {
   370  	genesis := utils.MakeGenesis(ctx)
   371  	if genesis == nil {
   372  		genesis = core.DefaultGenesisBlock()
   373  	}
   374  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   375  		utils.Fatalf("could not encode genesis")
   376  	}
   377  	return nil
   378  }
   379  
// importChain imports one or more RLP-encoded block files (the command's
// arguments) into the local chain database. It reports database and memory
// statistics after the import and, unless --nocompaction is set, compacts the
// whole database and prints the post-compaction statistics as well.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles. Note: this sampler loops
	// forever with no stop signal; it lives for the remainder of the process.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain. Import errors are logged but do not abort the command;
	// the statistics below are printed regardless.
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	// Skip compaction (and its statistics) when explicitly disabled.
	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	// Re-read the same stat keys to show the post-compaction picture.
	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)
	return nil
}
   473  
   474  func exportChain(ctx *cli.Context) error {
   475  	if len(ctx.Args()) < 1 {
   476  		utils.Fatalf("This command requires an argument.")
   477  	}
   478  	stack := makeFullNode(ctx)
   479  	defer stack.Close()
   480  
   481  	chain, _ := utils.MakeChain(ctx, stack)
   482  	start := time.Now()
   483  
   484  	var err error
   485  	fp := ctx.Args().First()
   486  	if len(ctx.Args()) < 3 {
   487  		err = utils.ExportChain(chain, fp)
   488  	} else {
   489  		// This can be improved to allow for numbers larger than 9223372036854775807
   490  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   491  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   492  		if ferr != nil || lerr != nil {
   493  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   494  		}
   495  		if first < 0 || last < 0 {
   496  			utils.Fatalf("Export error: block number must be greater than 0\n")
   497  		}
   498  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   499  	}
   500  
   501  	if err != nil {
   502  		utils.Fatalf("Export error: %v\n", err)
   503  	}
   504  	fmt.Printf("Export done in %v\n", time.Since(start))
   505  	return nil
   506  }
   507  
   508  // importPreimages imports preimage data from the specified file.
   509  func importPreimages(ctx *cli.Context) error {
   510  	if len(ctx.Args()) < 1 {
   511  		utils.Fatalf("This command requires an argument.")
   512  	}
   513  	stack := makeFullNode(ctx)
   514  	defer stack.Close()
   515  
   516  	db := utils.MakeChainDatabase(ctx, stack)
   517  	start := time.Now()
   518  
   519  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   520  		utils.Fatalf("Import error: %v\n", err)
   521  	}
   522  	fmt.Printf("Import done in %v\n", time.Since(start))
   523  	return nil
   524  }
   525  
   526  // exportPreimages dumps the preimage data to specified json file in streaming way.
   527  func exportPreimages(ctx *cli.Context) error {
   528  	if len(ctx.Args()) < 1 {
   529  		utils.Fatalf("This command requires an argument.")
   530  	}
   531  	stack := makeFullNode(ctx)
   532  	defer stack.Close()
   533  
   534  	db := utils.MakeChainDatabase(ctx, stack)
   535  	start := time.Now()
   536  
   537  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   538  		utils.Fatalf("Export error: %v\n", err)
   539  	}
   540  	fmt.Printf("Export done in %v\n", time.Since(start))
   541  	return nil
   542  }
   543  
// copyDb synchronises a fresh chain into this node's database from a source
// chaindata directory (arg 1) and its ancient/freezer directory (arg 2),
// driving the downloader against a simulated in-process peer, and finally
// compacts the destination database.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	// The sync bloom is only needed for fast sync; it is sized with half the
	// configured cache allowance.
	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	// Register the fake peer under protocol version 63.
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	// Target the source chain's current head and its total difficulty.
	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	// Busy-wait (with a short sleep) until the downloader reports completion.
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
   599  
   600  func removeDB(ctx *cli.Context) error {
   601  	stack, config := makeConfigNode(ctx)
   602  
   603  	// Remove the full node state database
   604  	path := stack.ResolvePath("chaindata")
   605  	if common.FileExist(path) {
   606  		confirmAndRemoveDB(path, "full node state database")
   607  	} else {
   608  		log.Info("Full node state database missing", "path", path)
   609  	}
   610  	// Remove the full node ancient database
   611  	path = config.Eth.DatabaseFreezer
   612  	switch {
   613  	case path == "":
   614  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   615  	case !filepath.IsAbs(path):
   616  		path = config.Node.ResolvePath(path)
   617  	}
   618  	if common.FileExist(path) {
   619  		confirmAndRemoveDB(path, "full node ancient database")
   620  	} else {
   621  		log.Info("Full node ancient database missing", "path", path)
   622  	}
   623  	// Remove the light node database
   624  	path = stack.ResolvePath("lightchaindata")
   625  	if common.FileExist(path) {
   626  		confirmAndRemoveDB(path, "light node database")
   627  	} else {
   628  		log.Info("Light node database missing", "path", path)
   629  	}
   630  	return nil
   631  }
   632  
   633  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   634  // folder if accepted.
   635  func confirmAndRemoveDB(database string, kind string) {
   636  	confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   637  	switch {
   638  	case err != nil:
   639  		utils.Fatalf("%v", err)
   640  	case !confirm:
   641  		log.Info("Database deletion skipped", "path", database)
   642  	default:
   643  		start := time.Now()
   644  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   645  			// If we're at the top level folder, recurse into
   646  			if path == database {
   647  				return nil
   648  			}
   649  			// Delete all the files, but not subfolders
   650  			if !info.IsDir() {
   651  				os.Remove(path)
   652  				return nil
   653  			}
   654  			return filepath.SkipDir
   655  		})
   656  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   657  	}
   658  }
   659  
// dump prints the state of each block named by the command's arguments. An
// argument that parses as a base-10 integer is treated as a block number,
// anything else as a block hash (see hashish). Output is either a streamed
// iterative dump or a single in-memory dump, controlled by flags.
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			// Parse errors are ignored; a bad number yields block 0.
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			// Fatalf exits the process, so only the first missing block
			// is reported.
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			// Open the state at the block's root (nil snapshot tree).
			state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil)
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
			if ctx.Bool(utils.IterativeOutputFlag.Name) {
				// Streamed JSON dump; missing-preimage accounts are only
				// representable in this mode.
				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
			} else {
				if includeMissing {
					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
						" otherwise the accounts will overwrite each other in the resulting mapping.")
				}
				// Note: the includeMissing flag value is printed in front of
				// the dump itself.
				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
			}
		}
	}
	return nil
}
   698  
   699  func inspect(ctx *cli.Context) error {
   700  	node, _ := makeConfigNode(ctx)
   701  	defer node.Close()
   702  
   703  	_, chainDb := utils.MakeChain(ctx, node)
   704  	defer chainDb.Close()
   705  
   706  	return rawdb.InspectDatabase(chainDb)
   707  }
   708  
   709  // hashish returns true for strings that look like hashes.
   710  func hashish(x string) bool {
   711  	_, err := strconv.Atoi(x)
   712  	return err != nil
   713  }