github.com/luckypickle/go-ethereum-vet@v1.14.2/cmd/geth/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/luckypickle/go-ethereum-vet/cmd/utils"
	"github.com/luckypickle/go-ethereum-vet/common"
	"github.com/luckypickle/go-ethereum-vet/console"
	"github.com/luckypickle/go-ethereum-vet/core"
	"github.com/luckypickle/go-ethereum-vet/core/state"
	"github.com/luckypickle/go-ethereum-vet/core/types"
	"github.com/luckypickle/go-ethereum-vet/eth/downloader"
	"github.com/luckypickle/go-ethereum-vet/ethdb"
	"github.com/luckypickle/go-ethereum-vet/event"
	"github.com/luckypickle/go-ethereum-vet/log"
	"github.com/luckypickle/go-ethereum-vet/trie"
	"github.com/syndtr/goleveldb/leveldb/util"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as its argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded file. The input can be a single
file containing several RLP-encoded blocks, or several such files.

If only one file is used, an import error results in failure. If several files are used,
processing continues even if an individual file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended to
if it already exists. If the file name ends with .gz, the output will
be gzipped.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP-encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP-encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
)

// initGenesis initialises the given JSON-format genesis file and writes it as
// the zero'd block (i.e. the genesis block), or fails hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

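// importChain imports one or more RLP-encoded block files into the local chain,
// printing database, trie-cache and memory statistics afterwards and optionally
// compacting the database.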
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	db := chainDb.(*ethdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

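	// Skip the full-database compaction below if it was disabled via the no-compaction flag.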
	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	return nil
}

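// exportChain writes the chain, or the block range given by the optional second
// and third arguments, to an RLP-encoded file.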
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block numbers must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)

	start := time.Now()
	if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified file as an RLP stream.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)

	start := time.Now()
	if err := utils.ExportPreimages(diskdb, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

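// copyDb populates the local chain by running the downloader against a fake peer
// backed by the source chaindata directory, then compacts the database.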
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)

	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
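	// Register the fake peer with the downloader; 63 is the advertised eth protocol version.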
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
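	// Wait for the background synchronisation to finish before reporting timings.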
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}

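// removeDB deletes the full and light chain databases after asking the user for
// confirmation.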
func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
			logger.Info("Database doesn't exist, skipping", "path", dbdir)
			continue
		}
		// Confirm removal and execute
		fmt.Println(dbdir)
		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
		switch {
		case err != nil:
			utils.Fatalf("%v", err)
		case !confirm:
			logger.Warn("Database deletion aborted")
		default:
			start := time.Now()
			os.RemoveAll(dbdir)
			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
		}
	}
	return nil
}

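// dump prints the state dump of every block given on the command line, looked up
// by hash or by number.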
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}