github.com/palisadeinc/bor@v0.0.0-20230615125219-ab7196213d15/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"os"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/cmd/utils"
    30  	"github.com/ethereum/go-ethereum/common"
    31  	"github.com/ethereum/go-ethereum/common/hexutil"
    32  	"github.com/ethereum/go-ethereum/core"
    33  	"github.com/ethereum/go-ethereum/core/rawdb"
    34  	"github.com/ethereum/go-ethereum/core/state"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/crypto"
    37  	"github.com/ethereum/go-ethereum/ethdb"
    38  	"github.com/ethereum/go-ethereum/log"
    39  	"github.com/ethereum/go-ethereum/metrics"
    40  	"github.com/ethereum/go-ethereum/node"
    41  	"gopkg.in/urfave/cli.v1"
    42  )
    43  
var (
	// initCommand seeds both the full-node and light-client databases with a
	// user-supplied genesis specification (see initGenesis).
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the genesis spec selected by the network flags
	// (or the default mainnet spec) as JSON on stdout (see dumpGenesis).
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.MumbaiFlag,
			utils.BorMainnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand replays RLP-encoded block files into the local chain,
	// optionally reporting metrics while doing so (see importChain).
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) <genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,

			// bor related flags
			utils.HeimdallURLFlag,
			utils.WithoutHeimdallFlag,
			utils.HeimdallgRPCAddressFlag,
			utils.RunHeimdallFlag,
			utils.RunHeimdallArgsFlag,
			utils.UseHeimdallAppFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the local chain (whole or a block range) out as an
	// RLP stream, gzipped when the target file ends in .gz (see exportChain).
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand is the deprecated predecessor of "geth db import"
	// for hash-preimage data (see importPreimages).
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
	}
	// exportPreimagesCommand is the deprecated predecessor of "geth db export"
	// for hash-preimage data (see exportPreimages).
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
`,
	}
	// dumpCommand prints the full account state at a given block (hash, number
	// or latest) to stdout, with filtering flags (see dump/parseDumpConfig).
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}
)
   195  
   196  // initGenesis will initialise the given JSON format genesis file and writes it as
   197  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   198  func initGenesis(ctx *cli.Context) error {
   199  	// Make sure we have a valid genesis JSON
   200  	genesisPath := ctx.Args().First()
   201  	if len(genesisPath) == 0 {
   202  		utils.Fatalf("Must supply path to genesis JSON file")
   203  	}
   204  	file, err := os.Open(genesisPath)
   205  	if err != nil {
   206  		utils.Fatalf("Failed to read genesis file: %v", err)
   207  	}
   208  	defer file.Close()
   209  
   210  	genesis := new(core.Genesis)
   211  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   212  		utils.Fatalf("invalid genesis file: %v", err)
   213  	}
   214  	// Open and initialise both full and light databases
   215  	stack, _ := makeConfigNode(ctx)
   216  	defer stack.Close()
   217  
   218  	for _, name := range []string{"chaindata", "lightchaindata"} {
   219  		chaindb, err := stack.OpenDatabase(name, 0, 0, "", false)
   220  		if err != nil {
   221  			utils.Fatalf("Failed to open database: %v", err)
   222  		}
   223  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   224  		if err != nil {
   225  			utils.Fatalf("Failed to write genesis block: %v", err)
   226  		}
   227  		chaindb.Close()
   228  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   229  	}
   230  	return nil
   231  }
   232  
   233  func dumpGenesis(ctx *cli.Context) error {
   234  	// TODO(rjl493456442) support loading from the custom datadir
   235  	genesis := utils.MakeGenesis(ctx)
   236  	if genesis == nil {
   237  		genesis = core.DefaultGenesisBlock()
   238  	}
   239  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   240  		utils.Fatalf("could not encode genesis")
   241  	}
   242  	return nil
   243  }
   244  
// importChain imports one or more RLP-encoded block files into the local chain.
// With exactly two arguments the first is the single block file; with more, all
// arguments except the last are block files (per ArgsUsage the final argument
// is the genesis path). A single-file import failure is returned as the
// command's error; multi-file imports continue past individual failures but
// still return the last error. Prints timing, database and memory statistics,
// and compacts the database afterwards unless --nocompaction is set.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 2 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	// NOTE(review): this sampler goroutine runs for the process lifetime (no
	// stop signal); acceptable here since the command exits after importing.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	var importErr error

	// ArgsUsage: "<filename> (<filename 2> ... <filename N>) <genesisPath>",
	if len(ctx.Args()) == 2 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		// Multi-file mode: keep going on errors, remembering the last one.
		for _, arg := range ctx.Args()[:len(ctx.Args())-1] {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return importErr
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}
   324  
   325  func exportChain(ctx *cli.Context) error {
   326  	if len(ctx.Args()) < 1 {
   327  		utils.Fatalf("This command requires an argument.")
   328  	}
   329  
   330  	stack, _ := makeConfigNode(ctx)
   331  	defer stack.Close()
   332  
   333  	chain, _ := utils.MakeChain(ctx, stack)
   334  	start := time.Now()
   335  
   336  	var err error
   337  	fp := ctx.Args().First()
   338  	if len(ctx.Args()) < 3 {
   339  		err = utils.ExportChain(chain, fp)
   340  	} else {
   341  		// This can be improved to allow for numbers larger than 9223372036854775807
   342  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   343  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   344  		if ferr != nil || lerr != nil {
   345  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   346  		}
   347  		if first < 0 || last < 0 {
   348  			utils.Fatalf("Export error: block number must be greater than 0\n")
   349  		}
   350  		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
   351  			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
   352  		}
   353  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   354  	}
   355  
   356  	if err != nil {
   357  		utils.Fatalf("Export error: %v\n", err)
   358  	}
   359  	fmt.Printf("Export done in %v\n", time.Since(start))
   360  	return nil
   361  }
   362  
   363  // importPreimages imports preimage data from the specified file.
   364  func importPreimages(ctx *cli.Context) error {
   365  	if len(ctx.Args()) < 1 {
   366  		utils.Fatalf("This command requires an argument.")
   367  	}
   368  
   369  	stack, _ := makeConfigNode(ctx)
   370  	defer stack.Close()
   371  
   372  	db := utils.MakeChainDatabase(ctx, stack, false)
   373  	start := time.Now()
   374  
   375  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   376  		utils.Fatalf("Import error: %v\n", err)
   377  	}
   378  	fmt.Printf("Import done in %v\n", time.Since(start))
   379  	return nil
   380  }
   381  
   382  // exportPreimages dumps the preimage data to specified json file in streaming way.
   383  func exportPreimages(ctx *cli.Context) error {
   384  	if len(ctx.Args()) < 1 {
   385  		utils.Fatalf("This command requires an argument.")
   386  	}
   387  	stack, _ := makeConfigNode(ctx)
   388  	defer stack.Close()
   389  
   390  	db := utils.MakeChainDatabase(ctx, stack, true)
   391  	start := time.Now()
   392  
   393  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   394  		utils.Fatalf("Export error: %v\n", err)
   395  	}
   396  	fmt.Printf("Export done in %v\n", time.Since(start))
   397  	return nil
   398  }
   399  
// parseDumpConfig resolves the dump command's CLI arguments into a dump
// configuration, an open chain database and the state root of the selected
// block. The single optional argument is either a block hash (anything that
// doesn't parse as a decimal integer) or a block number; with no argument the
// current head header is used.
//
// NOTE(review): the opened database is returned to the caller, including on
// error paths where it is not closed here — ownership (and closing) rests with
// the caller; confirm callers handle the error cases.
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
	db := utils.MakeChainDatabase(ctx, stack, true)
	var header *types.Header
	if ctx.NArg() > 1 {
		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
	}
	if ctx.NArg() == 1 {
		arg := ctx.Args().First()
		if hashish(arg) {
			// Argument looks like a hash: map it to a number, then load the header.
			hash := common.HexToHash(arg)
			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
				header = rawdb.ReadHeader(db, hash, *number)
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
			}
		} else {
			// Plain decimal block number: resolve via the canonical chain.
			number, err := strconv.Atoi(arg)
			if err != nil {
				return nil, nil, common.Hash{}, err
			}
			if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) {
				header = rawdb.ReadHeader(db, hash, uint64(number))
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
			}
		}
	} else {
		// Use latest
		header = rawdb.ReadHeadHeader(db)
	}
	if header == nil {
		return nil, nil, common.Hash{}, errors.New("no head block found")
	}
	// The optional start key may be a 32-byte hash (used directly) or a
	// 20-byte address (hashed, since the state trie is keyed by hashes).
	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
	var start common.Hash
	switch len(startArg) {
	case 0: // common.Hash
	case 32:
		start = common.BytesToHash(startArg)
	case 20:
		start = crypto.Keccak256Hash(startArg)
		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
	default:
		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
	}
	var conf = &state.DumpConfig{
		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
		Start:             start.Bytes(),
		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
	}
	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
	return conf, db, header.Root, nil
}
   457  
   458  func dump(ctx *cli.Context) error {
   459  	stack, _ := makeConfigNode(ctx)
   460  	defer stack.Close()
   461  
   462  	conf, db, root, err := parseDumpConfig(ctx, stack)
   463  	if err != nil {
   464  		return err
   465  	}
   466  	state, err := state.New(root, state.NewDatabase(db), nil)
   467  	if err != nil {
   468  		return err
   469  	}
   470  	if ctx.Bool(utils.IterativeOutputFlag.Name) {
   471  		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
   472  	} else {
   473  		if conf.OnlyWithAddresses {
   474  			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
   475  				" otherwise the accounts will overwrite each other in the resulting mapping.")
   476  			return fmt.Errorf("incompatible options")
   477  		}
   478  		fmt.Println(string(state.Dump(conf)))
   479  	}
   480  	return nil
   481  }
   482  
   483  // hashish returns true for strings that look like hashes.
   484  func hashish(x string) bool {
   485  	_, err := strconv.Atoi(x)
   486  	return err != nil
   487  }