github.com/daeglee/go-ethereum@v0.0.0-20190504220456-cad3e8d18e9b/cmd/geth/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded file. The input can be a
single file containing several RLP-encoded blocks, or several such files.

If a single file is used, any import error aborts the command. If several files
are used, processing continues even if an individual file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument is the file to write to. Optional second and third arguments
control the first and last block to export; in that mode the output is appended
to the file if it already exists. If the filename ends with .gz, the output is
gzipped.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP-encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP-encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "geth dump 0" to dump the genesis block.`,
	}
)
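
// Example invocations of the commands defined above (illustrative only; the
// data directory and file names are hypothetical):
//
//	geth --datadir ./node init genesis.json
//	geth --datadir ./node import chain.rlp
//	geth --datadir ./node export chain.rlp 0 1000
//	geth --datadir ./node export-preimages preimages.rlp
//	geth --datadir ./node dump 0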

// initGenesis initialises the given JSON-format genesis file and writes it as
// the zero'th (genesis) block of the chain, or fails hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both the full and light databases
	stack := makeFullNode(ctx)
	defer stack.Close()

	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		chaindb.Close()
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}
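
// For reference, a minimal genesis file accepted by this command might look
// like the following (illustrative values only; the full field set is defined
// by core.Genesis):
//
//	{
//	  "config": {
//	    "chainId": 1337,
//	    "homesteadBlock": 0
//	  },
//	  "difficulty": "0x1",
//	  "gasLimit": "0x47b760",
//	  "alloc": {}
//	}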

func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the import
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)
	return nil
}
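
// The memory-sampling goroutine above runs for the remainder of the process.
// A variant that stops once the import completes could use a ticker and a stop
// channel, along these lines (illustrative sketch only, not wired in):
//
//	stop := make(chan struct{})
//	go func() {
//		ticker := time.NewTicker(5 * time.Second)
//		defer ticker.Stop()
//		for {
//			select {
//			case <-ticker.C:
//				runtime.ReadMemStats(stats) // update the peaks as above
//			case <-stop:
//				return
//			}
//		}
//	}()
//	// ... after the import finishes:
//	close(stop)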

func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block numbers must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}
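
// As the comment above notes, strconv.ParseInt caps the block range at int64.
// A possible refinement (sketch only, not wired in) is to parse with
// strconv.ParseUint, which accepts the full uint64 range and rejects negative
// input, making the explicit < 0 check unnecessary:
//
//	first, ferr := strconv.ParseUint(ctx.Args().Get(1), 10, 64)
//	last, lerr := strconv.ParseUint(ctx.Args().Get(2), 10, 64)
//	if ferr != nil || lerr != nil {
//		utils.Fatalf("Export error in parsing parameters: block number not a non-negative integer\n")
//	}
//	err = utils.ExportAppendChain(chain, fp, first, last)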

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified file in a streaming fashion.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256, "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
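
// copyDb re-imports an existing chaindata directory through the downloader by
// registering a fake "local" peer that serves data from the source database.
// An illustrative invocation (the paths and sync mode below are examples only):
//
//	geth --datadir ./fresh --syncmode fast copydb /path/to/old/chaindata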

func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
			logger.Info("Database doesn't exist, skipping", "path", dbdir)
			continue
		}
		// Confirm removal and execute
		fmt.Println(dbdir)
		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
		switch {
		case err != nil:
			utils.Fatalf("%v", err)
		case !confirm:
			logger.Warn("Database deletion aborted")
		default:
			start := time.Now()
			os.RemoveAll(dbdir)
			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
		}
	}
	return nil
}

func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}
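
// Note that hashish treats anything that does not parse as a base-10 integer
// as a hash, for example (illustrative):
//
//	hashish("0")      // false: interpreted as block number 0
//	hashish("0x1a2b") // true:  not a decimal integer, treated as a block hash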