github.com/Elemental-core/elementalcore@v0.0.0-20191206075037-63891242267a/cmd/geth/chaincmd.go

// Copyright 2015 The elementalcore Authors
// This file is part of elementalcore.
//
// elementalcore is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// elementalcore is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with elementalcore. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/Elemental-core/elementalcore/cmd/utils"
	"github.com/Elemental-core/elementalcore/common"
	"github.com/Elemental-core/elementalcore/console"
	"github.com/Elemental-core/elementalcore/core"
	"github.com/Elemental-core/elementalcore/core/state"
	"github.com/Elemental-core/elementalcore/core/types"
	"github.com/Elemental-core/elementalcore/eth/downloader"
	"github.com/Elemental-core/elementalcore/ethdb"
	"github.com/Elemental-core/elementalcore/event"
	"github.com/Elemental-core/elementalcore/log"
	"github.com/Elemental-core/elementalcore/trie"
	"github.com/syndtr/goleveldb/leveldb/util"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as its argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are
used, processing will continue even if an individual RLP file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument is the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
to if it already exists.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
)

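// registerChainCommands is a minimal sketch, not part of the original file, showing
// how these cli.Command values would typically be wired into the urfave/cli
// application. In geth this registration lives in main.go, so the function name and
// the *cli.App parameter here are purely illustrative.
func registerChainCommands(app *cli.App) {
	// Append the blockchain commands to whatever commands the app already defines.
	app.Commands = append(app.Commands,
		initCommand,
		importCommand,
		exportCommand,
		copydbCommand,
		removedbCommand,
		dumpCommand,
	)
}
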
// initGenesis initialises the given JSON-format genesis file and writes it as
// the zero'd block (i.e. genesis), failing hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

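// Usage sketch (assuming the binary built from cmd/geth is called geth):
//
//	geth --datadir /path/to/datadir init /path/to/genesis.json
//
// Every node that should join the same network must be initialised with the same
// genesis file before it is started.
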
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			utils.Fatalf("Import error: %v", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}

	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	db := chainDb.(*ethdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)
	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	return nil
}

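// Usage sketch (assuming the binary built from cmd/geth is called geth): a single
// file aborts on the first import error, several files log failures and continue:
//
//	geth import chain.rlp
//	geth import part1.rlp part2.rlp part3.rlp
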
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

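// Usage sketch (assuming the binary built from cmd/geth is called geth): the first
// form exports the whole chain into the given file, the second appends only the
// given block range to it:
//
//	geth export chain.rlp
//	geth export chain.rlp 100 200
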
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)

	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}

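// Usage sketch (binary name and directory layout assumed): the argument points at
// the chaindata folder of another, already-synced data directory, which is then
// replayed through the downloader as if it were a remote peer:
//
//	geth --datadir /path/to/new/datadir copydb /path/to/old/datadir/geth/chaindata
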
func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
			logger.Info("Database doesn't exist, skipping", "path", dbdir)
			continue
		}
		// Confirm removal and execute
		fmt.Println(dbdir)
		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
		switch {
		case err != nil:
			utils.Fatalf("%v", err)
		case !confirm:
			logger.Warn("Database deletion aborted")
		default:
			start := time.Now()
			os.RemoveAll(dbdir)
			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
		}
	}
	return nil
}

func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes, i.e. anything that does
// not parse as a decimal block number.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}
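
// For example, hashish("1024") is false, so dump treats "1024" as a block number,
// while hashish("0xdeadbeef") is true, so "0xdeadbeef" is looked up as a block hash.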