github.com/shrimpyuk/bor@v0.2.15-0.20220224151350-fb4ec6020bae/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"os"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/cmd/utils"
    30  	"github.com/ethereum/go-ethereum/common"
    31  	"github.com/ethereum/go-ethereum/common/hexutil"
    32  	"github.com/ethereum/go-ethereum/core"
    33  	"github.com/ethereum/go-ethereum/core/rawdb"
    34  	"github.com/ethereum/go-ethereum/core/state"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/crypto"
    37  	"github.com/ethereum/go-ethereum/ethdb"
    38  	"github.com/ethereum/go-ethereum/log"
    39  	"github.com/ethereum/go-ethereum/metrics"
    40  	"github.com/ethereum/go-ethereum/node"
    41  	"gopkg.in/urfave/cli.v1"
    42  )
    43  
var (
	// initCommand seeds a fresh data directory with the genesis block
	// described by a user-supplied JSON file.
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the genesis spec selected by the network
	// flags (mainnet/testnets/bor networks) as JSON on stdout.
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.MumbaiFlag,
			utils.BorMainnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand ingests RLP-encoded block files into the local chain.
	// It exposes the full set of cache/metrics flags because imports are
	// long-running, performance-sensitive operations.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) <genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,

			// bor related flags
			utils.HeimdallURLFlag,
			utils.WithoutHeimdallFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the chain (optionally a block range) to an
	// RLP file; a .gz suffix gzips the output.
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads hash preimages from an RLP stream
	// into the chain database.
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
	The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand is the inverse of import-preimages: it
	// streams the preimage table out as RLP.
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// dumpCommand prints the full state at a given block (hash or number,
	// defaulting to the head block) with filtering/limit flags.
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}
)
   186  
   187  // initGenesis will initialise the given JSON format genesis file and writes it as
   188  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   189  func initGenesis(ctx *cli.Context) error {
   190  	// Make sure we have a valid genesis JSON
   191  	genesisPath := ctx.Args().First()
   192  	if len(genesisPath) == 0 {
   193  		utils.Fatalf("Must supply path to genesis JSON file")
   194  	}
   195  	file, err := os.Open(genesisPath)
   196  	if err != nil {
   197  		utils.Fatalf("Failed to read genesis file: %v", err)
   198  	}
   199  	defer file.Close()
   200  
   201  	genesis := new(core.Genesis)
   202  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   203  		utils.Fatalf("invalid genesis file: %v", err)
   204  	}
   205  	// Open and initialise both full and light databases
   206  	stack, _ := makeConfigNode(ctx)
   207  	defer stack.Close()
   208  
   209  	for _, name := range []string{"chaindata", "lightchaindata"} {
   210  		chaindb, err := stack.OpenDatabase(name, 0, 0, "", false)
   211  		if err != nil {
   212  			utils.Fatalf("Failed to open database: %v", err)
   213  		}
   214  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   215  		if err != nil {
   216  			utils.Fatalf("Failed to write genesis block: %v", err)
   217  		}
   218  		chaindb.Close()
   219  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   220  	}
   221  	return nil
   222  }
   223  
   224  func dumpGenesis(ctx *cli.Context) error {
   225  	// TODO(rjl493456442) support loading from the custom datadir
   226  	genesis := utils.MakeGenesis(ctx)
   227  	if genesis == nil {
   228  		genesis = core.DefaultGenesisBlock()
   229  	}
   230  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   231  		utils.Fatalf("could not encode genesis")
   232  	}
   233  	return nil
   234  }
   235  
// importChain imports one or more RLP-encoded block files into the local
// chain database. With exactly two args (<file> <genesisPath>) a failure of
// the single file import is recorded; with more files, per-file failures are
// logged and remaining files are still processed. After the import it prints
// database and memory statistics and, unless --nocompaction is set, compacts
// the whole database. Returns the most recent import error (nil on success).
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 2 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	// NOTE(review): this sampler goroutine has no stop signal; it lives for
	// the remainder of the (short-lived) command process.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	// Records the most recent import failure; it becomes the command's
	// return value after the statistics are printed.
	var importErr error

	// ArgsUsage: "<filename> (<filename 2> ... <filename N>) <genesisPath>",
	if len(ctx.Args()) == 2 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		// The final argument is the genesis path, so import everything
		// before it and keep going past individual file failures.
		for _, arg := range ctx.Args()[:len(ctx.Args())-1] {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}
   315  
   316  func exportChain(ctx *cli.Context) error {
   317  	if len(ctx.Args()) < 1 {
   318  		utils.Fatalf("This command requires an argument.")
   319  	}
   320  
   321  	stack, _ := makeConfigNode(ctx)
   322  	defer stack.Close()
   323  
   324  	chain, _ := utils.MakeChain(ctx, stack)
   325  	start := time.Now()
   326  
   327  	var err error
   328  	fp := ctx.Args().First()
   329  	if len(ctx.Args()) < 3 {
   330  		err = utils.ExportChain(chain, fp)
   331  	} else {
   332  		// This can be improved to allow for numbers larger than 9223372036854775807
   333  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   334  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   335  		if ferr != nil || lerr != nil {
   336  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   337  		}
   338  		if first < 0 || last < 0 {
   339  			utils.Fatalf("Export error: block number must be greater than 0\n")
   340  		}
   341  		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
   342  			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
   343  		}
   344  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   345  	}
   346  
   347  	if err != nil {
   348  		utils.Fatalf("Export error: %v\n", err)
   349  	}
   350  	fmt.Printf("Export done in %v\n", time.Since(start))
   351  	return nil
   352  }
   353  
   354  // importPreimages imports preimage data from the specified file.
   355  func importPreimages(ctx *cli.Context) error {
   356  	if len(ctx.Args()) < 1 {
   357  		utils.Fatalf("This command requires an argument.")
   358  	}
   359  
   360  	stack, _ := makeConfigNode(ctx)
   361  	defer stack.Close()
   362  
   363  	db := utils.MakeChainDatabase(ctx, stack, false)
   364  	start := time.Now()
   365  
   366  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   367  		utils.Fatalf("Import error: %v\n", err)
   368  	}
   369  	fmt.Printf("Import done in %v\n", time.Since(start))
   370  	return nil
   371  }
   372  
   373  // exportPreimages dumps the preimage data to specified json file in streaming way.
   374  func exportPreimages(ctx *cli.Context) error {
   375  	if len(ctx.Args()) < 1 {
   376  		utils.Fatalf("This command requires an argument.")
   377  	}
   378  
   379  	stack, _ := makeConfigNode(ctx)
   380  	defer stack.Close()
   381  
   382  	db := utils.MakeChainDatabase(ctx, stack, true)
   383  	start := time.Now()
   384  
   385  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   386  		utils.Fatalf("Export error: %v\n", err)
   387  	}
   388  	fmt.Printf("Export done in %v\n", time.Since(start))
   389  	return nil
   390  }
   391  
// parseDumpConfig resolves the shared CLI inputs of the dump command into a
// state.DumpConfig, the opened (read-only) chain database, and the state root
// of the selected block. The optional positional argument selects the block
// by hash (hex-looking strings) or by decimal number; with no argument the
// current head header is used. An error is returned (with zero-value hash)
// when the argument is malformed or the referenced header cannot be found.
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
	db := utils.MakeChainDatabase(ctx, stack, true)
	var header *types.Header
	if ctx.NArg() > 1 {
		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
	}
	if ctx.NArg() == 1 {
		arg := ctx.Args().First()
		if hashish(arg) {
			// Argument is not a plain integer: treat it as a block hash and
			// look up its canonical number before loading the header.
			hash := common.HexToHash(arg)
			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
				header = rawdb.ReadHeader(db, hash, *number)
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
			}
		} else {
			// Argument parses as an integer: resolve the canonical hash at
			// that height, then load the header.
			number, err := strconv.Atoi(arg)
			if err != nil {
				return nil, nil, common.Hash{}, err
			}
			if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) {
				header = rawdb.ReadHeader(db, hash, uint64(number))
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
			}
		}
	} else {
		// Use latest
		header = rawdb.ReadHeadHeader(db)
	}
	if header == nil {
		return nil, nil, common.Hash{}, errors.New("no head block found")
	}
	// The --start flag accepts either a 32-byte hashed key or a 20-byte
	// address (which is hashed here); empty means start from the beginning.
	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
	var start common.Hash
	switch len(startArg) {
	case 0: // common.Hash
	case 32:
		start = common.BytesToHash(startArg)
	case 20:
		start = crypto.Keccak256Hash(startArg)
		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
	default:
		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
	}
	var conf = &state.DumpConfig{
		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
		Start:             start.Bytes(),
		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
	}
	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
	return conf, db, header.Root, nil
}
   449  
   450  func dump(ctx *cli.Context) error {
   451  	stack, _ := makeConfigNode(ctx)
   452  	defer stack.Close()
   453  
   454  	conf, db, root, err := parseDumpConfig(ctx, stack)
   455  	if err != nil {
   456  		return err
   457  	}
   458  	state, err := state.New(root, state.NewDatabase(db), nil)
   459  	if err != nil {
   460  		return err
   461  	}
   462  	if ctx.Bool(utils.IterativeOutputFlag.Name) {
   463  		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
   464  	} else {
   465  		if conf.OnlyWithAddresses {
   466  			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
   467  				" otherwise the accounts will overwrite each other in the resulting mapping.")
   468  			return fmt.Errorf("incompatible options")
   469  		}
   470  		fmt.Println(string(state.Dump(conf)))
   471  	}
   472  	return nil
   473  }
   474  
   475  // hashish returns true for strings that look like hashes.
   476  func hashish(x string) bool {
   477  	_, err := strconv.Atoi(x)
   478  	return err != nil
   479  }