github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"os"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"gopkg.in/urfave/cli.v1"
    30  
    31  	"github.com/scroll-tech/go-ethereum/cmd/utils"
    32  	"github.com/scroll-tech/go-ethereum/common"
    33  	"github.com/scroll-tech/go-ethereum/common/hexutil"
    34  	"github.com/scroll-tech/go-ethereum/core"
    35  	"github.com/scroll-tech/go-ethereum/core/rawdb"
    36  	"github.com/scroll-tech/go-ethereum/core/state"
    37  	"github.com/scroll-tech/go-ethereum/core/types"
    38  	"github.com/scroll-tech/go-ethereum/crypto"
    39  	"github.com/scroll-tech/go-ethereum/ethdb"
    40  	"github.com/scroll-tech/go-ethereum/log"
    41  	"github.com/scroll-tech/go-ethereum/metrics"
    42  	"github.com/scroll-tech/go-ethereum/node"
    43  )
    44  
var (
	// initCommand bootstraps a new chain: it decodes a user-supplied genesis
	// JSON file and writes it as block zero into the node's databases.
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the genesis specification selected by the
	// network flags (or the default one) as JSON on stdout.
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.SepoliaFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.ScrollAlphaFlag,
			utils.ScrollSepoliaFlag,
			utils.ScrollFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand imports RLP-encoded block files into the local chain,
	// optionally exporting runtime metrics while doing so.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the local chain (or a block range of it) to an
	// RLP file, gzipped when the filename ends in .gz.
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads hash preimages from an RLP stream.
	// Deprecated in favour of "geth db import".
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
	}
	// exportPreimagesCommand dumps hash preimages to an RLP stream.
	// Deprecated in favour of "geth db export".
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
`,
	}
	// dumpCommand prints the state of a block (selected by hash or number,
	// defaulting to the head block) as JSON.
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}
)
   189  
   190  // initGenesis will initialise the given JSON format genesis file and writes it as
   191  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   192  func initGenesis(ctx *cli.Context) error {
   193  	// Make sure we have a valid genesis JSON
   194  	genesisPath := ctx.Args().First()
   195  	if len(genesisPath) == 0 {
   196  		utils.Fatalf("Must supply path to genesis JSON file")
   197  	}
   198  	file, err := os.Open(genesisPath)
   199  	if err != nil {
   200  		utils.Fatalf("Failed to read genesis file: %v", err)
   201  	}
   202  	defer file.Close()
   203  
   204  	genesis := new(core.Genesis)
   205  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   206  		utils.Fatalf("invalid genesis file: %v", err)
   207  	}
   208  	// Open and initialise both full and light databases
   209  	stack, _ := makeConfigNode(ctx)
   210  	defer stack.Close()
   211  
   212  	for _, name := range []string{"chaindata", "lightchaindata"} {
   213  		chaindb, err := stack.OpenDatabase(name, 0, 0, "", false)
   214  		if err != nil {
   215  			utils.Fatalf("Failed to open database: %v", err)
   216  		}
   217  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   218  		if err != nil {
   219  			utils.Fatalf("Failed to write genesis block: %v", err)
   220  		}
   221  		chaindb.Close()
   222  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   223  	}
   224  	return nil
   225  }
   226  
   227  func dumpGenesis(ctx *cli.Context) error {
   228  	// TODO(rjl493456442) support loading from the custom datadir
   229  	genesis := utils.MakeGenesis(ctx)
   230  	if genesis == nil {
   231  		genesis = core.DefaultGenesisBlock()
   232  	}
   233  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   234  		utils.Fatalf("could not encode genesis")
   235  	}
   236  	return nil
   237  }
   238  
// importChain imports one or more RLP-encoded block files into the local
// chain. With a single file any import error is returned; with multiple
// files, processing continues past per-file failures and the last error is
// returned. It also reports memory/disk statistics and, unless disabled,
// compacts the database afterwards.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles.
	// The sampler goroutine runs for the life of the process; peaks are read
	// atomically after the import completes.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	// Last import error seen; multi-file imports keep going on failure.
	var importErr error

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	// Skip compaction (and the post-compaction stats) when requested.
	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}
   317  
   318  func exportChain(ctx *cli.Context) error {
   319  	if len(ctx.Args()) < 1 {
   320  		utils.Fatalf("This command requires an argument.")
   321  	}
   322  
   323  	stack, _ := makeConfigNode(ctx)
   324  	defer stack.Close()
   325  
   326  	chain, _ := utils.MakeChain(ctx, stack)
   327  	start := time.Now()
   328  
   329  	var err error
   330  	fp := ctx.Args().First()
   331  	if len(ctx.Args()) < 3 {
   332  		err = utils.ExportChain(chain, fp)
   333  	} else {
   334  		// This can be improved to allow for numbers larger than 9223372036854775807
   335  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   336  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   337  		if ferr != nil || lerr != nil {
   338  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   339  		}
   340  		if first < 0 || last < 0 {
   341  			utils.Fatalf("Export error: block number must be greater than 0\n")
   342  		}
   343  		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
   344  			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
   345  		}
   346  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   347  	}
   348  
   349  	if err != nil {
   350  		utils.Fatalf("Export error: %v\n", err)
   351  	}
   352  	fmt.Printf("Export done in %v\n", time.Since(start))
   353  	return nil
   354  }
   355  
   356  // importPreimages imports preimage data from the specified file.
   357  func importPreimages(ctx *cli.Context) error {
   358  	if len(ctx.Args()) < 1 {
   359  		utils.Fatalf("This command requires an argument.")
   360  	}
   361  
   362  	stack, _ := makeConfigNode(ctx)
   363  	defer stack.Close()
   364  
   365  	db := utils.MakeChainDatabase(ctx, stack, false)
   366  	start := time.Now()
   367  
   368  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   369  		utils.Fatalf("Import error: %v\n", err)
   370  	}
   371  	fmt.Printf("Import done in %v\n", time.Since(start))
   372  	return nil
   373  }
   374  
   375  // exportPreimages dumps the preimage data to specified json file in streaming way.
   376  func exportPreimages(ctx *cli.Context) error {
   377  	if len(ctx.Args()) < 1 {
   378  		utils.Fatalf("This command requires an argument.")
   379  	}
   380  	stack, _ := makeConfigNode(ctx)
   381  	defer stack.Close()
   382  
   383  	db := utils.MakeChainDatabase(ctx, stack, true)
   384  	start := time.Now()
   385  
   386  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   387  		utils.Fatalf("Export error: %v\n", err)
   388  	}
   389  	fmt.Printf("Export done in %v\n", time.Since(start))
   390  	return nil
   391  }
   392  
   393  func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
   394  	db := utils.MakeChainDatabase(ctx, stack, true)
   395  	var header *types.Header
   396  	if ctx.NArg() > 1 {
   397  		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
   398  	}
   399  	if ctx.NArg() == 1 {
   400  		arg := ctx.Args().First()
   401  		if hashish(arg) {
   402  			hash := common.HexToHash(arg)
   403  			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
   404  				header = rawdb.ReadHeader(db, hash, *number)
   405  			} else {
   406  				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
   407  			}
   408  		} else {
   409  			number, err := strconv.Atoi(arg)
   410  			if err != nil {
   411  				return nil, nil, common.Hash{}, err
   412  			}
   413  			if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) {
   414  				header = rawdb.ReadHeader(db, hash, uint64(number))
   415  			} else {
   416  				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
   417  			}
   418  		}
   419  	} else {
   420  		// Use latest
   421  		header = rawdb.ReadHeadHeader(db)
   422  	}
   423  	if header == nil {
   424  		return nil, nil, common.Hash{}, errors.New("no head block found")
   425  	}
   426  	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
   427  	var start common.Hash
   428  	switch len(startArg) {
   429  	case 0: // common.Hash
   430  	case 32:
   431  		start = common.BytesToHash(startArg)
   432  	case 20:
   433  		start = crypto.Keccak256Hash(startArg)
   434  		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
   435  	default:
   436  		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
   437  	}
   438  	var conf = &state.DumpConfig{
   439  		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
   440  		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
   441  		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
   442  		Start:             start.Bytes(),
   443  		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
   444  	}
   445  	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
   446  		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
   447  		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
   448  	return conf, db, header.Root, nil
   449  }
   450  
   451  func dump(ctx *cli.Context) error {
   452  	stack, _ := makeConfigNode(ctx)
   453  	defer stack.Close()
   454  
   455  	conf, db, root, err := parseDumpConfig(ctx, stack)
   456  	if err != nil {
   457  		return err
   458  	}
   459  	state, err := state.New(root, state.NewDatabase(db), nil)
   460  	if err != nil {
   461  		return err
   462  	}
   463  	if ctx.Bool(utils.IterativeOutputFlag.Name) {
   464  		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
   465  	} else {
   466  		if conf.OnlyWithAddresses {
   467  			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
   468  				" otherwise the accounts will overwrite each other in the resulting mapping.")
   469  			return fmt.Errorf("incompatible options")
   470  		}
   471  		fmt.Println(string(state.Dump(conf)))
   472  	}
   473  	return nil
   474  }
   475  
   476  // hashish returns true for strings that look like hashes.
   477  func hashish(x string) bool {
   478  	_, err := strconv.Atoi(x)
   479  	return err != nil
   480  }