github.com/benorgera/go-ethereum@v1.10.18-0.20220401011646-b3f57b1a73ba/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"os"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/cmd/utils"
    30  	"github.com/ethereum/go-ethereum/common"
    31  	"github.com/ethereum/go-ethereum/common/hexutil"
    32  	"github.com/ethereum/go-ethereum/core"
    33  	"github.com/ethereum/go-ethereum/core/rawdb"
    34  	"github.com/ethereum/go-ethereum/core/state"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/crypto"
    37  	"github.com/ethereum/go-ethereum/ethdb"
    38  	"github.com/ethereum/go-ethereum/log"
    39  	"github.com/ethereum/go-ethereum/metrics"
    40  	"github.com/ethereum/go-ethereum/node"
    41  	"gopkg.in/urfave/cli.v1"
    42  )
    43  
var (
	// initCommand bootstraps a new network: it reads a genesis spec from the
	// JSON file given as argument and writes it into both the full
	// ("chaindata") and light ("lightchaindata") databases.
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the genesis spec selected by the network flags
	// (mainnet default when none is given) as JSON on stdout.
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand imports one or more RLP-encoded block files into the
	// local chain; the metrics flags allow monitoring the (long-running)
	// import process.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the chain (or a block range of it) to an
	// RLP-encoded file, gzipped when the filename ends in ".gz".
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand is the deprecated predecessor of "geth db import"
	// for loading hash preimages from an RLP stream.
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
	}
	// exportPreimagesCommand is the deprecated predecessor of "geth db export"
	// for dumping hash preimages to an RLP stream.
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
`,
	}
	// dumpCommand prints the state at a given block (hash or number; head
	// when omitted), with flags controlling output shape and range.
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}
)
   185  
   186  // initGenesis will initialise the given JSON format genesis file and writes it as
   187  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   188  func initGenesis(ctx *cli.Context) error {
   189  	// Make sure we have a valid genesis JSON
   190  	genesisPath := ctx.Args().First()
   191  	if len(genesisPath) == 0 {
   192  		utils.Fatalf("Must supply path to genesis JSON file")
   193  	}
   194  	file, err := os.Open(genesisPath)
   195  	if err != nil {
   196  		utils.Fatalf("Failed to read genesis file: %v", err)
   197  	}
   198  	defer file.Close()
   199  
   200  	genesis := new(core.Genesis)
   201  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   202  		utils.Fatalf("invalid genesis file: %v", err)
   203  	}
   204  	// Open and initialise both full and light databases
   205  	stack, _ := makeConfigNode(ctx)
   206  	defer stack.Close()
   207  
   208  	for _, name := range []string{"chaindata", "lightchaindata"} {
   209  		chaindb, err := stack.OpenDatabase(name, 0, 0, "", false)
   210  		if err != nil {
   211  			utils.Fatalf("Failed to open database: %v", err)
   212  		}
   213  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   214  		if err != nil {
   215  			utils.Fatalf("Failed to write genesis block: %v", err)
   216  		}
   217  		chaindb.Close()
   218  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   219  	}
   220  	return nil
   221  }
   222  
   223  func dumpGenesis(ctx *cli.Context) error {
   224  	// TODO(rjl493456442) support loading from the custom datadir
   225  	genesis := utils.MakeGenesis(ctx)
   226  	if genesis == nil {
   227  		genesis = core.DefaultGenesisBlock()
   228  	}
   229  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   230  		utils.Fatalf("could not encode genesis")
   231  	}
   232  	return nil
   233  }
   234  
   235  func importChain(ctx *cli.Context) error {
   236  	if len(ctx.Args()) < 1 {
   237  		utils.Fatalf("This command requires an argument.")
   238  	}
   239  	// Start metrics export if enabled
   240  	utils.SetupMetrics(ctx)
   241  	// Start system runtime metrics collection
   242  	go metrics.CollectProcessMetrics(3 * time.Second)
   243  
   244  	stack, _ := makeConfigNode(ctx)
   245  	defer stack.Close()
   246  
   247  	chain, db := utils.MakeChain(ctx, stack)
   248  	defer db.Close()
   249  
   250  	// Start periodically gathering memory profiles
   251  	var peakMemAlloc, peakMemSys uint64
   252  	go func() {
   253  		stats := new(runtime.MemStats)
   254  		for {
   255  			runtime.ReadMemStats(stats)
   256  			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
   257  				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
   258  			}
   259  			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
   260  				atomic.StoreUint64(&peakMemSys, stats.Sys)
   261  			}
   262  			time.Sleep(5 * time.Second)
   263  		}
   264  	}()
   265  	// Import the chain
   266  	start := time.Now()
   267  
   268  	var importErr error
   269  
   270  	if len(ctx.Args()) == 1 {
   271  		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
   272  			importErr = err
   273  			log.Error("Import error", "err", err)
   274  		}
   275  	} else {
   276  		for _, arg := range ctx.Args() {
   277  			if err := utils.ImportChain(chain, arg); err != nil {
   278  				importErr = err
   279  				log.Error("Import error", "file", arg, "err", err)
   280  			}
   281  		}
   282  	}
   283  	chain.Stop()
   284  	fmt.Printf("Import done in %v.\n\n", time.Since(start))
   285  
   286  	// Output pre-compaction stats mostly to see the import trashing
   287  	showLeveldbStats(db)
   288  
   289  	// Print the memory statistics used by the importing
   290  	mem := new(runtime.MemStats)
   291  	runtime.ReadMemStats(mem)
   292  
   293  	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
   294  	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
   295  	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
   296  	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))
   297  
   298  	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
   299  		return nil
   300  	}
   301  
   302  	// Compact the entire database to more accurately measure disk io and print the stats
   303  	start = time.Now()
   304  	fmt.Println("Compacting entire database...")
   305  	if err := db.Compact(nil, nil); err != nil {
   306  		utils.Fatalf("Compaction failed: %v", err)
   307  	}
   308  	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
   309  
   310  	showLeveldbStats(db)
   311  	return importErr
   312  }
   313  
   314  func exportChain(ctx *cli.Context) error {
   315  	if len(ctx.Args()) < 1 {
   316  		utils.Fatalf("This command requires an argument.")
   317  	}
   318  
   319  	stack, _ := makeConfigNode(ctx)
   320  	defer stack.Close()
   321  
   322  	chain, _ := utils.MakeChain(ctx, stack)
   323  	start := time.Now()
   324  
   325  	var err error
   326  	fp := ctx.Args().First()
   327  	if len(ctx.Args()) < 3 {
   328  		err = utils.ExportChain(chain, fp)
   329  	} else {
   330  		// This can be improved to allow for numbers larger than 9223372036854775807
   331  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   332  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   333  		if ferr != nil || lerr != nil {
   334  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   335  		}
   336  		if first < 0 || last < 0 {
   337  			utils.Fatalf("Export error: block number must be greater than 0\n")
   338  		}
   339  		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
   340  			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
   341  		}
   342  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   343  	}
   344  
   345  	if err != nil {
   346  		utils.Fatalf("Export error: %v\n", err)
   347  	}
   348  	fmt.Printf("Export done in %v\n", time.Since(start))
   349  	return nil
   350  }
   351  
   352  // importPreimages imports preimage data from the specified file.
   353  func importPreimages(ctx *cli.Context) error {
   354  	if len(ctx.Args()) < 1 {
   355  		utils.Fatalf("This command requires an argument.")
   356  	}
   357  
   358  	stack, _ := makeConfigNode(ctx)
   359  	defer stack.Close()
   360  
   361  	db := utils.MakeChainDatabase(ctx, stack, false)
   362  	start := time.Now()
   363  
   364  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   365  		utils.Fatalf("Import error: %v\n", err)
   366  	}
   367  	fmt.Printf("Import done in %v\n", time.Since(start))
   368  	return nil
   369  }
   370  
   371  // exportPreimages dumps the preimage data to specified json file in streaming way.
   372  func exportPreimages(ctx *cli.Context) error {
   373  	if len(ctx.Args()) < 1 {
   374  		utils.Fatalf("This command requires an argument.")
   375  	}
   376  	stack, _ := makeConfigNode(ctx)
   377  	defer stack.Close()
   378  
   379  	db := utils.MakeChainDatabase(ctx, stack, true)
   380  	start := time.Now()
   381  
   382  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   383  		utils.Fatalf("Export error: %v\n", err)
   384  	}
   385  	fmt.Printf("Export done in %v\n", time.Since(start))
   386  	return nil
   387  }
   388  
   389  func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
   390  	db := utils.MakeChainDatabase(ctx, stack, true)
   391  	var header *types.Header
   392  	if ctx.NArg() > 1 {
   393  		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
   394  	}
   395  	if ctx.NArg() == 1 {
   396  		arg := ctx.Args().First()
   397  		if hashish(arg) {
   398  			hash := common.HexToHash(arg)
   399  			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
   400  				header = rawdb.ReadHeader(db, hash, *number)
   401  			} else {
   402  				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
   403  			}
   404  		} else {
   405  			number, err := strconv.Atoi(arg)
   406  			if err != nil {
   407  				return nil, nil, common.Hash{}, err
   408  			}
   409  			if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) {
   410  				header = rawdb.ReadHeader(db, hash, uint64(number))
   411  			} else {
   412  				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
   413  			}
   414  		}
   415  	} else {
   416  		// Use latest
   417  		header = rawdb.ReadHeadHeader(db)
   418  	}
   419  	if header == nil {
   420  		return nil, nil, common.Hash{}, errors.New("no head block found")
   421  	}
   422  	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
   423  	var start common.Hash
   424  	switch len(startArg) {
   425  	case 0: // common.Hash
   426  	case 32:
   427  		start = common.BytesToHash(startArg)
   428  	case 20:
   429  		start = crypto.Keccak256Hash(startArg)
   430  		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
   431  	default:
   432  		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
   433  	}
   434  	var conf = &state.DumpConfig{
   435  		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
   436  		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
   437  		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
   438  		Start:             start.Bytes(),
   439  		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
   440  	}
   441  	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
   442  		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
   443  		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
   444  	return conf, db, header.Root, nil
   445  }
   446  
   447  func dump(ctx *cli.Context) error {
   448  	stack, _ := makeConfigNode(ctx)
   449  	defer stack.Close()
   450  
   451  	conf, db, root, err := parseDumpConfig(ctx, stack)
   452  	if err != nil {
   453  		return err
   454  	}
   455  	state, err := state.New(root, state.NewDatabase(db), nil)
   456  	if err != nil {
   457  		return err
   458  	}
   459  	if ctx.Bool(utils.IterativeOutputFlag.Name) {
   460  		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
   461  	} else {
   462  		if conf.OnlyWithAddresses {
   463  			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
   464  				" otherwise the accounts will overwrite each other in the resulting mapping.")
   465  			return fmt.Errorf("incompatible options")
   466  		}
   467  		fmt.Println(string(state.Dump(conf)))
   468  	}
   469  	return nil
   470  }
   471  
   472  // hashish returns true for strings that look like hashes.
   473  func hashish(x string) bool {
   474  	_, err := strconv.Atoi(x)
   475  	return err != nil
   476  }