github.com/DTFN/go-ethereum@v1.4.5/cmd/geth/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/syndtr/goleveldb/leveldb/util"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as its argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are used,
processing will proceed even if an individual RLP file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
to if it already exists.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP-encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP-encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "geth dump 0" to dump the genesis block.`,
	}
)

// initGenesis initialises the given JSON-format genesis file and writes it as
// the zeroth block (i.e. genesis), or fails hard if it cannot succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

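	// Decode the genesis specification from the JSON file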
	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}
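
// As an illustration only (names and values below are placeholders, not part of
// this repository), a minimal genesis specification that core.Genesis can decode
// might look like:
//
//	{
//	  "config":     { "chainId": 15 },
//	  "difficulty": "0x400",
//	  "gasLimit":   "0x8000000",
//	  "alloc":      {}
//	}
//
// which could then be applied with something like:
//
//	geth --datadir ./node init genesis.json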

func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

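	// Handle a single file and multiple files separately so errors can be reported per file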
	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	db := chainDb.(*ethdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

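	// Skip compaction entirely if it was disabled via the no-compaction flag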
	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	return nil
}
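
// For example (file names are illustrative), previously exported chain
// segments can be replayed into the local chain with:
//
//	geth import chain.rlp
//	geth import chain-0.rlp chain-1.rlp chain-2.rlp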

func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
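	// Export the whole chain if no block range was given, otherwise append just the requested range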
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}
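
// For example (the file name is illustrative), the whole chain or just a block
// range can be written out with:
//
//	geth export chain.rlp
//	geth export chain.rlp 0 1000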

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)

	start := time.Now()
	if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified RLP dump file in a streaming way.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)

	start := time.Now()
	if err := utils.ExportPreimages(diskdb, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)

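	// Create a downloader that will sync the source chain into this node's fresh database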
	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
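	// Wait for the synchronisation to complete before reporting the elapsed time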
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}

func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
			logger.Info("Database doesn't exist, skipping", "path", dbdir)
			continue
		}
		// Confirm removal and execute
		fmt.Println(dbdir)
		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
		switch {
		case err != nil:
			utils.Fatalf("%v", err)
		case !confirm:
			logger.Warn("Database deletion aborted")
		default:
			start := time.Now()
			os.RemoveAll(dbdir)
			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
		}
	}
	return nil
}

func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
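		// Interpret each argument as either a block hash or a block number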
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}