github.com/hyperion-hyn/go-ethereum@v2.4.0+incompatible/cmd/geth/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/syndtr/goleveldb/leveldb/util"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are
used, processing will proceed even if an individual RLP file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "geth dump 0" to dump the genesis block.`,
	}
)

// getIsQuorum reports whether the genesis file enables Quorum mode. In the
// regular Genesis / ChainConfig struct, due to the way Go deserializes JSON,
// IsQuorum defaults to false when not specified. Here we declare it as a
// pointer so we can make the distinction and default unspecified to true.
func getIsQuorum(file io.Reader) bool {
	altGenesis := new(struct {
		Config *struct {
			IsQuorum *bool `json:"isQuorum"`
		} `json:"config"`
	})

	if err := json.NewDecoder(file).Decode(altGenesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}

	// A missing config section or an unspecified isQuorum field defaults to true.
	return altGenesis.Config == nil || altGenesis.Config.IsQuorum == nil || *altGenesis.Config.IsQuorum
}

// initGenesis will initialise the given JSON-format genesis file and write it as
// the zero'd block (i.e. genesis), or will fail hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}

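	// Rewind the file so getIsQuorum can re-decode the genesis JSON from the start.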
	file.Seek(0, io.SeekStart)
	genesis.Config.IsQuorum = getIsQuorum(file)

	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

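// importChain imports one or more RLP-encoded block files into the chain
// database, printing database and memory statistics afterwards and optionally
// compacting the database.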
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	db := chainDb.(*ethdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

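	// Skip the post-import compaction if it was explicitly disabled on the command line.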
	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	return nil
}

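// exportChain writes the blockchain, or the block range given by the optional
// second and third arguments, to the file named by the first argument.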
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)

	start := time.Now()
	if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified file as an RLP stream.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)

	start := time.Now()
	if err := utils.ExportPreimages(diskdb, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

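// copyDb creates a local chain by synchronising, through an in-process fake
// peer, from the chaindata directory given as the first argument.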
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)

	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
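	// Register the fake peer with the downloader; 63 is the eth protocol version it advertises.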
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}

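// removeDB deletes the full-node and light-node chain databases, asking for
// confirmation before removing each one.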
func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
			logger.Info("Database doesn't exist, skipping", "path", dbdir)
			continue
		}
		// Confirm removal and execute
		fmt.Println(dbdir)
		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
		switch {
		case err != nil:
			utils.Fatalf("%v", err)
		case !confirm:
			logger.Warn("Database deletion aborted")
		default:
			start := time.Now()
			os.RemoveAll(dbdir)
			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
		}
	}
	return nil
}

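// dump prints the state dump of each block given by number or hash as JSON.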
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}