github.com/tirogen/go-ethereum@v1.10.12-0.20221226051715-250cfede41b6/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"os"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/tirogen/go-ethereum/cmd/utils"
    30  	"github.com/tirogen/go-ethereum/common"
    31  	"github.com/tirogen/go-ethereum/common/hexutil"
    32  	"github.com/tirogen/go-ethereum/core"
    33  	"github.com/tirogen/go-ethereum/core/rawdb"
    34  	"github.com/tirogen/go-ethereum/core/state"
    35  	"github.com/tirogen/go-ethereum/core/types"
    36  	"github.com/tirogen/go-ethereum/crypto"
    37  	"github.com/tirogen/go-ethereum/ethdb"
    38  	"github.com/tirogen/go-ethereum/internal/flags"
    39  	"github.com/tirogen/go-ethereum/log"
    40  	"github.com/tirogen/go-ethereum/metrics"
    41  	"github.com/tirogen/go-ethereum/node"
    42  	"github.com/tirogen/go-ethereum/trie"
    43  	"github.com/urfave/cli/v2"
    44  )
    45  
// Command definitions for the chain-management subcommands of geth
// (init, dumpgenesis, import, export, import-preimages, export-preimages, dump).
var (
	// initCommand seeds a fresh data directory with the genesis block
	// described by a user-supplied genesis.json file.
	initCommand = &cli.Command{
		Action:    initGenesis,
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags:     flags.Merge([]cli.Flag{utils.CachePreimagesFlag}, utils.DatabasePathFlags),
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the active genesis specification (preset or
	// stored in the datadir) to stdout as JSON.
	dumpGenesisCommand = &cli.Command{
		Action:    dumpGenesis,
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags:     append([]cli.Flag{utils.DataDirFlag}, utils.NetworkFlags...),
		Description: `
The dumpgenesis command prints the genesis configuration of the network preset
if one is set.  Otherwise it prints the genesis from the datadir.`,
	}
	// importCommand imports one or more RLP-encoded block files into the chain.
	// The long flag list includes the metrics flags so imports can be profiled.
	importCommand = &cli.Command{
		Action:    importChain,
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,
		}, utils.DatabasePathFlags),
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes a block range (or the whole chain) to an RLP file.
	exportCommand = &cli.Command{
		Action:    exportChain,
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags),
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand is the deprecated preimage importer
	// (superseded by "geth db import" per its own description).
	importPreimagesCommand = &cli.Command{
		Action:    importPreimages,
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags),
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
	}
	// exportPreimagesCommand is the deprecated preimage exporter
	// (superseded by "geth db export" per its own description).
	exportPreimagesCommand = &cli.Command{
		Action:    exportPreimages,
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags),
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
`,
	}
	// dumpCommand dumps the state trie of a block (by hash, number, or latest).
	dumpCommand = &cli.Command{
		Action:    dump,
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		}, utils.DatabasePathFlags),
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}
)
   168  
   169  // initGenesis will initialise the given JSON format genesis file and writes it as
   170  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   171  func initGenesis(ctx *cli.Context) error {
   172  	if ctx.Args().Len() != 1 {
   173  		utils.Fatalf("need genesis.json file as the only argument")
   174  	}
   175  	genesisPath := ctx.Args().First()
   176  	if len(genesisPath) == 0 {
   177  		utils.Fatalf("invalid path to genesis file")
   178  	}
   179  	file, err := os.Open(genesisPath)
   180  	if err != nil {
   181  		utils.Fatalf("Failed to read genesis file: %v", err)
   182  	}
   183  	defer file.Close()
   184  
   185  	genesis := new(core.Genesis)
   186  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   187  		utils.Fatalf("invalid genesis file: %v", err)
   188  	}
   189  	// Open and initialise both full and light databases
   190  	stack, _ := makeConfigNode(ctx)
   191  	defer stack.Close()
   192  
   193  	for _, name := range []string{"chaindata", "lightchaindata"} {
   194  		chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
   195  		if err != nil {
   196  			utils.Fatalf("Failed to open database: %v", err)
   197  		}
   198  		triedb := trie.NewDatabaseWithConfig(chaindb, &trie.Config{
   199  			Preimages: ctx.Bool(utils.CachePreimagesFlag.Name),
   200  		})
   201  		_, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis)
   202  		if err != nil {
   203  			utils.Fatalf("Failed to write genesis block: %v", err)
   204  		}
   205  		chaindb.Close()
   206  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   207  	}
   208  	return nil
   209  }
   210  
   211  func dumpGenesis(ctx *cli.Context) error {
   212  	// if there is a testnet preset enabled, dump that
   213  	if utils.IsNetworkPreset(ctx) {
   214  		genesis := utils.MakeGenesis(ctx)
   215  		if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   216  			utils.Fatalf("could not encode genesis: %s", err)
   217  		}
   218  		return nil
   219  	}
   220  	// dump whatever already exists in the datadir
   221  	stack, _ := makeConfigNode(ctx)
   222  	for _, name := range []string{"chaindata", "lightchaindata"} {
   223  		db, err := stack.OpenDatabase(name, 0, 0, "", true)
   224  		if err != nil {
   225  			if !os.IsNotExist(err) {
   226  				return err
   227  			}
   228  			continue
   229  		}
   230  		genesis, err := core.ReadGenesis(db)
   231  		if err != nil {
   232  			utils.Fatalf("failed to read genesis: %s", err)
   233  		}
   234  		db.Close()
   235  
   236  		if err := json.NewEncoder(os.Stdout).Encode(*genesis); err != nil {
   237  			utils.Fatalf("could not encode stored genesis: %s", err)
   238  		}
   239  		return nil
   240  	}
   241  	if ctx.IsSet(utils.DataDirFlag.Name) {
   242  		utils.Fatalf("no existing datadir at %s", stack.Config().DataDir)
   243  	}
   244  	utils.Fatalf("no network preset provided.  no exisiting genesis in the default datadir")
   245  	return nil
   246  }
   247  
// importChain imports one or more RLP-encoded block files into the chain
// database, printing memory/database statistics afterwards and (unless
// --nocompaction is set) compacting the database to measure disk IO.
// With a single file argument any import error aborts with that error; with
// multiple files, remaining files are still processed and the last error is
// returned at the end.
func importChain(ctx *cli.Context) error {
	if ctx.Args().Len() < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, false)
	defer db.Close()

	// Start periodically gathering memory profiles
	// NOTE(review): this sampler loops forever with no stop signal; presumably
	// acceptable because the process exits when importChain returns — confirm.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			// Track the high-water marks; atomics guard against the racing
			// reads performed in the stats printout below.
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	var importErr error

	if ctx.Args().Len() == 1 {
		// Single file: remember the error but still fall through to the
		// statistics/compaction phase before returning it.
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		// Multiple files: continue past individual failures; importErr keeps
		// the most recent one.
		for _, arg := range ctx.Args().Slice() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.Bool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}
   326  
   327  func exportChain(ctx *cli.Context) error {
   328  	if ctx.Args().Len() < 1 {
   329  		utils.Fatalf("This command requires an argument.")
   330  	}
   331  
   332  	stack, _ := makeConfigNode(ctx)
   333  	defer stack.Close()
   334  
   335  	chain, _ := utils.MakeChain(ctx, stack, true)
   336  	start := time.Now()
   337  
   338  	var err error
   339  	fp := ctx.Args().First()
   340  	if ctx.Args().Len() < 3 {
   341  		err = utils.ExportChain(chain, fp)
   342  	} else {
   343  		// This can be improved to allow for numbers larger than 9223372036854775807
   344  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   345  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   346  		if ferr != nil || lerr != nil {
   347  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   348  		}
   349  		if first < 0 || last < 0 {
   350  			utils.Fatalf("Export error: block number must be greater than 0\n")
   351  		}
   352  		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
   353  			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
   354  		}
   355  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   356  	}
   357  
   358  	if err != nil {
   359  		utils.Fatalf("Export error: %v\n", err)
   360  	}
   361  	fmt.Printf("Export done in %v\n", time.Since(start))
   362  	return nil
   363  }
   364  
   365  // importPreimages imports preimage data from the specified file.
   366  func importPreimages(ctx *cli.Context) error {
   367  	if ctx.Args().Len() < 1 {
   368  		utils.Fatalf("This command requires an argument.")
   369  	}
   370  
   371  	stack, _ := makeConfigNode(ctx)
   372  	defer stack.Close()
   373  
   374  	db := utils.MakeChainDatabase(ctx, stack, false)
   375  	start := time.Now()
   376  
   377  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   378  		utils.Fatalf("Import error: %v\n", err)
   379  	}
   380  	fmt.Printf("Import done in %v\n", time.Since(start))
   381  	return nil
   382  }
   383  
   384  // exportPreimages dumps the preimage data to specified json file in streaming way.
   385  func exportPreimages(ctx *cli.Context) error {
   386  	if ctx.Args().Len() < 1 {
   387  		utils.Fatalf("This command requires an argument.")
   388  	}
   389  	stack, _ := makeConfigNode(ctx)
   390  	defer stack.Close()
   391  
   392  	db := utils.MakeChainDatabase(ctx, stack, true)
   393  	start := time.Now()
   394  
   395  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   396  		utils.Fatalf("Export error: %v\n", err)
   397  	}
   398  	fmt.Printf("Export done in %v\n", time.Since(start))
   399  	return nil
   400  }
   401  
   402  func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
   403  	db := utils.MakeChainDatabase(ctx, stack, true)
   404  	var header *types.Header
   405  	if ctx.NArg() > 1 {
   406  		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
   407  	}
   408  	if ctx.NArg() == 1 {
   409  		arg := ctx.Args().First()
   410  		if hashish(arg) {
   411  			hash := common.HexToHash(arg)
   412  			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
   413  				header = rawdb.ReadHeader(db, hash, *number)
   414  			} else {
   415  				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
   416  			}
   417  		} else {
   418  			number, err := strconv.ParseUint(arg, 10, 64)
   419  			if err != nil {
   420  				return nil, nil, common.Hash{}, err
   421  			}
   422  			if hash := rawdb.ReadCanonicalHash(db, number); hash != (common.Hash{}) {
   423  				header = rawdb.ReadHeader(db, hash, number)
   424  			} else {
   425  				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
   426  			}
   427  		}
   428  	} else {
   429  		// Use latest
   430  		header = rawdb.ReadHeadHeader(db)
   431  	}
   432  	if header == nil {
   433  		return nil, nil, common.Hash{}, errors.New("no head block found")
   434  	}
   435  	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
   436  	var start common.Hash
   437  	switch len(startArg) {
   438  	case 0: // common.Hash
   439  	case 32:
   440  		start = common.BytesToHash(startArg)
   441  	case 20:
   442  		start = crypto.Keccak256Hash(startArg)
   443  		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
   444  	default:
   445  		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
   446  	}
   447  	var conf = &state.DumpConfig{
   448  		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
   449  		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
   450  		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
   451  		Start:             start.Bytes(),
   452  		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
   453  	}
   454  	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
   455  		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
   456  		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
   457  	return conf, db, header.Root, nil
   458  }
   459  
   460  func dump(ctx *cli.Context) error {
   461  	stack, _ := makeConfigNode(ctx)
   462  	defer stack.Close()
   463  
   464  	conf, db, root, err := parseDumpConfig(ctx, stack)
   465  	if err != nil {
   466  		return err
   467  	}
   468  	config := &trie.Config{
   469  		Preimages: true, // always enable preimage lookup
   470  	}
   471  	state, err := state.New(root, state.NewDatabaseWithConfig(db, config), nil)
   472  	if err != nil {
   473  		return err
   474  	}
   475  	if ctx.Bool(utils.IterativeOutputFlag.Name) {
   476  		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
   477  	} else {
   478  		if conf.OnlyWithAddresses {
   479  			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
   480  				" otherwise the accounts will overwrite each other in the resulting mapping.")
   481  			return fmt.Errorf("incompatible options")
   482  		}
   483  		fmt.Println(string(state.Dump(conf)))
   484  	}
   485  	return nil
   486  }
   487  
   488  // hashish returns true for strings that look like hashes.
   489  func hashish(x string) bool {
   490  	_, err := strconv.Atoi(x)
   491  	return err != nil
   492  }