// Source: github.com/juliankolbe/go-ethereum@v1.9.992/cmd/geth/chaincmd.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"fmt"
    22  	"os"
    23  	"runtime"
    24  	"strconv"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	"github.com/juliankolbe/go-ethereum/cmd/utils"
    29  	"github.com/juliankolbe/go-ethereum/common"
    30  	"github.com/juliankolbe/go-ethereum/core"
    31  	"github.com/juliankolbe/go-ethereum/core/rawdb"
    32  	"github.com/juliankolbe/go-ethereum/core/state"
    33  	"github.com/juliankolbe/go-ethereum/core/types"
    34  	"github.com/juliankolbe/go-ethereum/eth/downloader"
    35  	"github.com/juliankolbe/go-ethereum/event"
    36  	"github.com/juliankolbe/go-ethereum/log"
    37  	"github.com/juliankolbe/go-ethereum/metrics"
    38  	"github.com/juliankolbe/go-ethereum/trie"
    39  	"gopkg.in/urfave/cli.v1"
    40  )
    41  
var (
	// initCommand writes the genesis block described by a user-supplied JSON
	// file into both the full and light chain databases (see initGenesis).
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the genesis spec of the selected network as
	// JSON on stdout (see dumpGenesis).
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand imports RLP-encoded blocks from one or more files into the
	// local chain database (see importChain). Metrics flags are accepted so a
	// long-running import can be monitored.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.TxLookupLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the chain (or a block range of it) to an RLP file
	// (see exportChain).
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads hash preimages from an RLP stream into the
	// chain database (see importPreimages).
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
	The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand streams the preimage database out as RLP (see
	// exportPreimages).
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// copydbCommand syncs a fresh local chain from an on-disk chaindata folder
	// via the downloader and an in-process fake peer (see copyDb).
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.TxLookupLimitFlag,
			utils.GoerliFlag,
			utils.YoloV3Flag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	// dumpCommand prints the state of specific blocks, selected by hash or
	// number (see dump).
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
)
   191  
   192  // initGenesis will initialise the given JSON format genesis file and writes it as
   193  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   194  func initGenesis(ctx *cli.Context) error {
   195  	// Make sure we have a valid genesis JSON
   196  	genesisPath := ctx.Args().First()
   197  	if len(genesisPath) == 0 {
   198  		utils.Fatalf("Must supply path to genesis JSON file")
   199  	}
   200  	file, err := os.Open(genesisPath)
   201  	if err != nil {
   202  		utils.Fatalf("Failed to read genesis file: %v", err)
   203  	}
   204  	defer file.Close()
   205  
   206  	genesis := new(core.Genesis)
   207  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   208  		utils.Fatalf("invalid genesis file: %v", err)
   209  	}
   210  	// Open and initialise both full and light databases
   211  	stack, _ := makeConfigNode(ctx)
   212  	defer stack.Close()
   213  
   214  	for _, name := range []string{"chaindata", "lightchaindata"} {
   215  		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
   216  		if err != nil {
   217  			utils.Fatalf("Failed to open database: %v", err)
   218  		}
   219  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   220  		if err != nil {
   221  			utils.Fatalf("Failed to write genesis block: %v", err)
   222  		}
   223  		chaindb.Close()
   224  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   225  	}
   226  	return nil
   227  }
   228  
   229  func dumpGenesis(ctx *cli.Context) error {
   230  	genesis := utils.MakeGenesis(ctx)
   231  	if genesis == nil {
   232  		genesis = core.DefaultGenesisBlock()
   233  	}
   234  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   235  		utils.Fatalf("could not encode genesis")
   236  	}
   237  	return nil
   238  }
   239  
// importChain imports RLP-encoded blocks from one or more files into the local
// chain database. With a single file argument an import error is returned to
// the caller; with multiple files a failing file is logged and processing
// continues, returning the last error seen. After the import it prints
// database, memory and GC statistics and (unless --nocompaction is set)
// compacts the whole database.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, false)
	defer db.Close()

	// Start periodically gathering memory profiles.
	// NOTE(review): this sampler goroutine has no stop signal and runs until
	// process exit — acceptable for a one-shot command, confirm if reused.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	var importErr error

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		// Multiple files: keep going on individual failures, remember the last error.
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}
   318  
   319  func exportChain(ctx *cli.Context) error {
   320  	if len(ctx.Args()) < 1 {
   321  		utils.Fatalf("This command requires an argument.")
   322  	}
   323  
   324  	stack, _ := makeConfigNode(ctx)
   325  	defer stack.Close()
   326  
   327  	chain, _ := utils.MakeChain(ctx, stack, true)
   328  	start := time.Now()
   329  
   330  	var err error
   331  	fp := ctx.Args().First()
   332  	if len(ctx.Args()) < 3 {
   333  		err = utils.ExportChain(chain, fp)
   334  	} else {
   335  		// This can be improved to allow for numbers larger than 9223372036854775807
   336  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   337  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   338  		if ferr != nil || lerr != nil {
   339  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   340  		}
   341  		if first < 0 || last < 0 {
   342  			utils.Fatalf("Export error: block number must be greater than 0\n")
   343  		}
   344  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   345  	}
   346  
   347  	if err != nil {
   348  		utils.Fatalf("Export error: %v\n", err)
   349  	}
   350  	fmt.Printf("Export done in %v\n", time.Since(start))
   351  	return nil
   352  }
   353  
   354  // importPreimages imports preimage data from the specified file.
   355  func importPreimages(ctx *cli.Context) error {
   356  	if len(ctx.Args()) < 1 {
   357  		utils.Fatalf("This command requires an argument.")
   358  	}
   359  
   360  	stack, _ := makeConfigNode(ctx)
   361  	defer stack.Close()
   362  
   363  	db := utils.MakeChainDatabase(ctx, stack)
   364  	start := time.Now()
   365  
   366  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   367  		utils.Fatalf("Import error: %v\n", err)
   368  	}
   369  	fmt.Printf("Import done in %v\n", time.Since(start))
   370  	return nil
   371  }
   372  
   373  // exportPreimages dumps the preimage data to specified json file in streaming way.
   374  func exportPreimages(ctx *cli.Context) error {
   375  	if len(ctx.Args()) < 1 {
   376  		utils.Fatalf("This command requires an argument.")
   377  	}
   378  
   379  	stack, _ := makeConfigNode(ctx)
   380  	defer stack.Close()
   381  
   382  	db := utils.MakeChainDatabase(ctx, stack)
   383  	start := time.Now()
   384  
   385  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   386  		utils.Fatalf("Export error: %v\n", err)
   387  	}
   388  	fmt.Printf("Export done in %v\n", time.Since(start))
   389  	return nil
   390  }
   391  
   392  func copyDb(ctx *cli.Context) error {
   393  	// Ensure we have a source chain directory to copy
   394  	if len(ctx.Args()) < 1 {
   395  		utils.Fatalf("Source chaindata directory path argument missing")
   396  	}
   397  	if len(ctx.Args()) < 2 {
   398  		utils.Fatalf("Source ancient chain directory path argument missing")
   399  	}
   400  	// Initialize a new chain for the running node to sync into
   401  	stack, _ := makeConfigNode(ctx)
   402  	defer stack.Close()
   403  
   404  	chain, chainDb := utils.MakeChain(ctx, stack, false)
   405  	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
   406  
   407  	var syncBloom *trie.SyncBloom
   408  	if syncMode == downloader.FastSync {
   409  		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
   410  	}
   411  	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)
   412  
   413  	// Create a source peer to satisfy downloader requests from
   414  	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
   415  	if err != nil {
   416  		return err
   417  	}
   418  	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
   419  	if err != nil {
   420  		return err
   421  	}
   422  	peer := downloader.NewFakePeer("local", db, hc, dl)
   423  	if err = dl.RegisterPeer("local", 63, peer); err != nil {
   424  		return err
   425  	}
   426  	// Synchronise with the simulated peer
   427  	start := time.Now()
   428  
   429  	currentHeader := hc.CurrentHeader()
   430  	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
   431  		return err
   432  	}
   433  	for dl.Synchronising() {
   434  		time.Sleep(10 * time.Millisecond)
   435  	}
   436  	fmt.Printf("Database copy done in %v\n", time.Since(start))
   437  
   438  	// Compact the entire database to remove any sync overhead
   439  	start = time.Now()
   440  	fmt.Println("Compacting entire database...")
   441  	if err = db.Compact(nil, nil); err != nil {
   442  		utils.Fatalf("Compaction failed: %v", err)
   443  	}
   444  	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
   445  	return nil
   446  }
   447  
   448  func dump(ctx *cli.Context) error {
   449  	stack, _ := makeConfigNode(ctx)
   450  	defer stack.Close()
   451  
   452  	chain, chainDb := utils.MakeChain(ctx, stack, true)
   453  	defer chainDb.Close()
   454  	for _, arg := range ctx.Args() {
   455  		var block *types.Block
   456  		if hashish(arg) {
   457  			block = chain.GetBlockByHash(common.HexToHash(arg))
   458  		} else {
   459  			num, _ := strconv.Atoi(arg)
   460  			block = chain.GetBlockByNumber(uint64(num))
   461  		}
   462  		if block == nil {
   463  			fmt.Println("{}")
   464  			utils.Fatalf("block not found")
   465  		} else {
   466  			state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil)
   467  			if err != nil {
   468  				utils.Fatalf("could not create new state: %v", err)
   469  			}
   470  			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
   471  			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
   472  			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
   473  			if ctx.Bool(utils.IterativeOutputFlag.Name) {
   474  				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
   475  			} else {
   476  				if includeMissing {
   477  					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
   478  						" otherwise the accounts will overwrite each other in the resulting mapping.")
   479  				}
   480  				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
   481  			}
   482  		}
   483  	}
   484  	return nil
   485  }
   486  
   487  // hashish returns true for strings that look like hashes.
   488  func hashish(x string) bool {
   489  	_, err := strconv.Atoi(x)
   490  	return err != nil
   491  }