github.com/ethxdao/go-ethereum@v0.0.0-20221218102228-5ae34a9cc189/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"os"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethxdao/go-ethereum/cmd/utils"
    30  	"github.com/ethxdao/go-ethereum/common"
    31  	"github.com/ethxdao/go-ethereum/common/hexutil"
    32  	"github.com/ethxdao/go-ethereum/core"
    33  	"github.com/ethxdao/go-ethereum/core/rawdb"
    34  	"github.com/ethxdao/go-ethereum/core/state"
    35  	"github.com/ethxdao/go-ethereum/core/types"
    36  	"github.com/ethxdao/go-ethereum/crypto"
    37  	"github.com/ethxdao/go-ethereum/ethdb"
    38  	"github.com/ethxdao/go-ethereum/internal/flags"
    39  	"github.com/ethxdao/go-ethereum/log"
    40  	"github.com/ethxdao/go-ethereum/metrics"
    41  	"github.com/ethxdao/go-ethereum/node"
    42  )
    43  
var (
	// initCommand writes a user-supplied genesis spec into both the full and
	// light client databases. See initGenesis below.
	initCommand = &cli.Command{
		Action:    initGenesis,
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags:     utils.DatabasePathFlags,
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the genesis spec selected by the network flags
	// (or the default mainnet spec) as JSON on stdout.
	dumpGenesisCommand = &cli.Command{
		Action:    dumpGenesis,
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags:     utils.NetworkFlags,
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand imports one or more RLP-encoded block files into the local
	// chain database. Metrics flags are included so long imports can be monitored.
	importCommand = &cli.Command{
		Action:    importChain,
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,
		}, utils.DatabasePathFlags),
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the local chain (or a block range of it) to an
	// RLP-encoded file, optionally gzipped.
	exportCommand = &cli.Command{
		Action:    exportChain,
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags),
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand is a deprecated alias for "geth db import".
	importPreimagesCommand = &cli.Command{
		Action:    importPreimages,
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags),
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
	}
	// exportPreimagesCommand is a deprecated alias for "geth db export".
	exportPreimagesCommand = &cli.Command{
		Action:    exportPreimages,
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags),
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
`,
	}
	// dumpCommand prints the full account state at a given block (by number or
	// hash, defaulting to the head block) as JSON.
	dumpCommand = &cli.Command{
		Action:    dump,
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		}, utils.DatabasePathFlags),
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}
)
   165  
   166  // initGenesis will initialise the given JSON format genesis file and writes it as
   167  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   168  func initGenesis(ctx *cli.Context) error {
   169  	if ctx.Args().Len() != 1 {
   170  		utils.Fatalf("need genesis.json file as the only argument")
   171  	}
   172  	genesisPath := ctx.Args().First()
   173  	if len(genesisPath) == 0 {
   174  		utils.Fatalf("invalid path to genesis file")
   175  	}
   176  	file, err := os.Open(genesisPath)
   177  	if err != nil {
   178  		utils.Fatalf("Failed to read genesis file: %v", err)
   179  	}
   180  	defer file.Close()
   181  
   182  	genesis := new(core.Genesis)
   183  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   184  		utils.Fatalf("invalid genesis file: %v", err)
   185  	}
   186  	// Open and initialise both full and light databases
   187  	stack, _ := makeConfigNode(ctx)
   188  	defer stack.Close()
   189  	for _, name := range []string{"chaindata", "lightchaindata"} {
   190  		chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
   191  		if err != nil {
   192  			utils.Fatalf("Failed to open database: %v", err)
   193  		}
   194  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   195  		if err != nil {
   196  			utils.Fatalf("Failed to write genesis block: %v", err)
   197  		}
   198  		chaindb.Close()
   199  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   200  	}
   201  	return nil
   202  }
   203  
   204  func dumpGenesis(ctx *cli.Context) error {
   205  	// TODO(rjl493456442) support loading from the custom datadir
   206  	genesis := utils.MakeGenesis(ctx)
   207  	if genesis == nil {
   208  		genesis = core.DefaultGenesisBlock()
   209  	}
   210  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   211  		utils.Fatalf("could not encode genesis")
   212  	}
   213  	return nil
   214  }
   215  
// importChain imports one or more RLP-encoded block files into the local chain
// database, reporting memory and database statistics along the way. With a
// single file argument any import error is returned; with multiple files,
// processing continues past per-file failures (only the last error is kept).
func importChain(ctx *cli.Context) error {
	if ctx.Args().Len() < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	// NOTE(review): this sampler goroutine has no stop signal; it leaks until
	// process exit, which is acceptable for a run-to-completion CLI command.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	// Last import error seen; returned after stats so a partial multi-file
	// import still reports its statistics before signalling failure.
	var importErr error

	if ctx.Args().Len() == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args().Slice() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.Bool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}
   294  
   295  func exportChain(ctx *cli.Context) error {
   296  	if ctx.Args().Len() < 1 {
   297  		utils.Fatalf("This command requires an argument.")
   298  	}
   299  
   300  	stack, _ := makeConfigNode(ctx)
   301  	defer stack.Close()
   302  
   303  	chain, _ := utils.MakeChain(ctx, stack)
   304  	start := time.Now()
   305  
   306  	var err error
   307  	fp := ctx.Args().First()
   308  	if ctx.Args().Len() < 3 {
   309  		err = utils.ExportChain(chain, fp)
   310  	} else {
   311  		// This can be improved to allow for numbers larger than 9223372036854775807
   312  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   313  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   314  		if ferr != nil || lerr != nil {
   315  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   316  		}
   317  		if first < 0 || last < 0 {
   318  			utils.Fatalf("Export error: block number must be greater than 0\n")
   319  		}
   320  		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
   321  			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
   322  		}
   323  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   324  	}
   325  
   326  	if err != nil {
   327  		utils.Fatalf("Export error: %v\n", err)
   328  	}
   329  	fmt.Printf("Export done in %v\n", time.Since(start))
   330  	return nil
   331  }
   332  
   333  // importPreimages imports preimage data from the specified file.
   334  func importPreimages(ctx *cli.Context) error {
   335  	if ctx.Args().Len() < 1 {
   336  		utils.Fatalf("This command requires an argument.")
   337  	}
   338  
   339  	stack, _ := makeConfigNode(ctx)
   340  	defer stack.Close()
   341  
   342  	db := utils.MakeChainDatabase(ctx, stack, false)
   343  	start := time.Now()
   344  
   345  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   346  		utils.Fatalf("Import error: %v\n", err)
   347  	}
   348  	fmt.Printf("Import done in %v\n", time.Since(start))
   349  	return nil
   350  }
   351  
   352  // exportPreimages dumps the preimage data to specified json file in streaming way.
   353  func exportPreimages(ctx *cli.Context) error {
   354  	if ctx.Args().Len() < 1 {
   355  		utils.Fatalf("This command requires an argument.")
   356  	}
   357  	stack, _ := makeConfigNode(ctx)
   358  	defer stack.Close()
   359  
   360  	db := utils.MakeChainDatabase(ctx, stack, true)
   361  	start := time.Now()
   362  
   363  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   364  		utils.Fatalf("Export error: %v\n", err)
   365  	}
   366  	fmt.Printf("Export done in %v\n", time.Since(start))
   367  	return nil
   368  }
   369  
   370  func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
   371  	db := utils.MakeChainDatabase(ctx, stack, true)
   372  	var header *types.Header
   373  	if ctx.NArg() > 1 {
   374  		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
   375  	}
   376  	if ctx.NArg() == 1 {
   377  		arg := ctx.Args().First()
   378  		if hashish(arg) {
   379  			hash := common.HexToHash(arg)
   380  			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
   381  				header = rawdb.ReadHeader(db, hash, *number)
   382  			} else {
   383  				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
   384  			}
   385  		} else {
   386  			number, err := strconv.ParseUint(arg, 10, 64)
   387  			if err != nil {
   388  				return nil, nil, common.Hash{}, err
   389  			}
   390  			if hash := rawdb.ReadCanonicalHash(db, number); hash != (common.Hash{}) {
   391  				header = rawdb.ReadHeader(db, hash, number)
   392  			} else {
   393  				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
   394  			}
   395  		}
   396  	} else {
   397  		// Use latest
   398  		header = rawdb.ReadHeadHeader(db)
   399  	}
   400  	if header == nil {
   401  		return nil, nil, common.Hash{}, errors.New("no head block found")
   402  	}
   403  	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
   404  	var start common.Hash
   405  	switch len(startArg) {
   406  	case 0: // common.Hash
   407  	case 32:
   408  		start = common.BytesToHash(startArg)
   409  	case 20:
   410  		start = crypto.Keccak256Hash(startArg)
   411  		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
   412  	default:
   413  		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
   414  	}
   415  	var conf = &state.DumpConfig{
   416  		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
   417  		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
   418  		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
   419  		Start:             start.Bytes(),
   420  		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
   421  	}
   422  	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
   423  		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
   424  		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
   425  	return conf, db, header.Root, nil
   426  }
   427  
   428  func dump(ctx *cli.Context) error {
   429  	stack, _ := makeConfigNode(ctx)
   430  	defer stack.Close()
   431  
   432  	conf, db, root, err := parseDumpConfig(ctx, stack)
   433  	if err != nil {
   434  		return err
   435  	}
   436  	state, err := state.New(root, state.NewDatabase(db), nil)
   437  	if err != nil {
   438  		return err
   439  	}
   440  	if ctx.Bool(utils.IterativeOutputFlag.Name) {
   441  		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
   442  	} else {
   443  		if conf.OnlyWithAddresses {
   444  			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
   445  				" otherwise the accounts will overwrite each other in the resulting mapping.")
   446  			return fmt.Errorf("incompatible options")
   447  		}
   448  		fmt.Println(string(state.Dump(conf)))
   449  	}
   450  	return nil
   451  }
   452  
   453  // hashish returns true for strings that look like hashes.
   454  func hashish(x string) bool {
   455  	_, err := strconv.Atoi(x)
   456  	return err != nil
   457  }