github.com/bcnmy/go-ethereum@v1.10.27/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"os"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/ethereum/go-ethereum/cmd/utils"
    30  	"github.com/ethereum/go-ethereum/common"
    31  	"github.com/ethereum/go-ethereum/common/hexutil"
    32  	"github.com/ethereum/go-ethereum/core"
    33  	"github.com/ethereum/go-ethereum/core/rawdb"
    34  	"github.com/ethereum/go-ethereum/core/state"
    35  	"github.com/ethereum/go-ethereum/core/types"
    36  	"github.com/ethereum/go-ethereum/crypto"
    37  	"github.com/ethereum/go-ethereum/ethdb"
    38  	"github.com/ethereum/go-ethereum/internal/flags"
    39  	"github.com/ethereum/go-ethereum/log"
    40  	"github.com/ethereum/go-ethereum/metrics"
    41  	"github.com/ethereum/go-ethereum/node"
    42  	"github.com/urfave/cli/v2"
    43  )
    44  
var (
	// initCommand seeds a fresh data directory with a caller-supplied genesis
	// block, initialising both the full and light databases.
	initCommand = &cli.Command{
		Action:    initGenesis,
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags:     utils.DatabasePathFlags,
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the effective genesis specification (default
	// mainnet unless a network flag selects another preset) as JSON on stdout.
	dumpGenesisCommand = &cli.Command{
		Action:    dumpGenesis,
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags:     utils.NetworkFlags,
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand ingests RLP-encoded block files into the local chain. The
	// metrics flags are accepted so long-running imports can be monitored.
	importCommand = &cli.Command{
		Action:    importChain,
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,
		}, utils.DatabasePathFlags),
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the chain (or a block range) to an RLP file,
	// gzipped when the filename ends in ".gz".
	exportCommand = &cli.Command{
		Action:    exportChain,
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags),
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand is a deprecated alias for "geth db import".
	importPreimagesCommand = &cli.Command{
		Action:    importPreimages,
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags),
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
	}
	// exportPreimagesCommand is a deprecated alias for "geth db export".
	exportPreimagesCommand = &cli.Command{
		Action:    exportPreimages,
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags),
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
`,
	}
	// dumpCommand prints the full state at a given block (hash, number, or
	// latest when no argument is given), optionally as an iterative stream.
	dumpCommand = &cli.Command{
		Action:    dump,
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		}, utils.DatabasePathFlags),
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}
)
   166  
   167  // initGenesis will initialise the given JSON format genesis file and writes it as
   168  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   169  func initGenesis(ctx *cli.Context) error {
   170  	if ctx.Args().Len() != 1 {
   171  		utils.Fatalf("need genesis.json file as the only argument")
   172  	}
   173  	genesisPath := ctx.Args().First()
   174  	if len(genesisPath) == 0 {
   175  		utils.Fatalf("invalid path to genesis file")
   176  	}
   177  	file, err := os.Open(genesisPath)
   178  	if err != nil {
   179  		utils.Fatalf("Failed to read genesis file: %v", err)
   180  	}
   181  	defer file.Close()
   182  
   183  	genesis := new(core.Genesis)
   184  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   185  		utils.Fatalf("invalid genesis file: %v", err)
   186  	}
   187  	// Open and initialise both full and light databases
   188  	stack, _ := makeConfigNode(ctx)
   189  	defer stack.Close()
   190  	for _, name := range []string{"chaindata", "lightchaindata"} {
   191  		chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
   192  		if err != nil {
   193  			utils.Fatalf("Failed to open database: %v", err)
   194  		}
   195  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   196  		if err != nil {
   197  			utils.Fatalf("Failed to write genesis block: %v", err)
   198  		}
   199  		chaindb.Close()
   200  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   201  	}
   202  	return nil
   203  }
   204  
   205  func dumpGenesis(ctx *cli.Context) error {
   206  	// TODO(rjl493456442) support loading from the custom datadir
   207  	genesis := utils.MakeGenesis(ctx)
   208  	if genesis == nil {
   209  		genesis = core.DefaultGenesisBlock()
   210  	}
   211  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   212  		utils.Fatalf("could not encode genesis")
   213  	}
   214  	return nil
   215  }
   216  
// importChain reads one or more RLP-encoded block files and inserts them into
// the local chain, printing timing, memory and database statistics afterwards.
// With several input files, processing continues past per-file failures and the
// last error is returned; with a single file, any error is still returned.
func importChain(ctx *cli.Context) error {
	if ctx.Args().Len() < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles.
	// NOTE(review): this goroutine has no stop signal; it is intentionally left
	// running until the process exits, since the command is short-lived.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			// Atomics are required: the main goroutine reads these high-water
			// marks concurrently after the import finishes.
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	// Remember the most recent import error but keep going, so one bad file
	// does not abort a multi-file import; the error still fails the command.
	var importErr error

	if ctx.Args().Len() == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args().Slice() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.Bool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}
   295  
   296  func exportChain(ctx *cli.Context) error {
   297  	if ctx.Args().Len() < 1 {
   298  		utils.Fatalf("This command requires an argument.")
   299  	}
   300  
   301  	stack, _ := makeConfigNode(ctx)
   302  	defer stack.Close()
   303  
   304  	chain, _ := utils.MakeChain(ctx, stack)
   305  	start := time.Now()
   306  
   307  	var err error
   308  	fp := ctx.Args().First()
   309  	if ctx.Args().Len() < 3 {
   310  		err = utils.ExportChain(chain, fp)
   311  	} else {
   312  		// This can be improved to allow for numbers larger than 9223372036854775807
   313  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   314  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   315  		if ferr != nil || lerr != nil {
   316  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   317  		}
   318  		if first < 0 || last < 0 {
   319  			utils.Fatalf("Export error: block number must be greater than 0\n")
   320  		}
   321  		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
   322  			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
   323  		}
   324  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   325  	}
   326  
   327  	if err != nil {
   328  		utils.Fatalf("Export error: %v\n", err)
   329  	}
   330  	fmt.Printf("Export done in %v\n", time.Since(start))
   331  	return nil
   332  }
   333  
   334  // importPreimages imports preimage data from the specified file.
   335  func importPreimages(ctx *cli.Context) error {
   336  	if ctx.Args().Len() < 1 {
   337  		utils.Fatalf("This command requires an argument.")
   338  	}
   339  
   340  	stack, _ := makeConfigNode(ctx)
   341  	defer stack.Close()
   342  
   343  	db := utils.MakeChainDatabase(ctx, stack, false)
   344  	start := time.Now()
   345  
   346  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   347  		utils.Fatalf("Import error: %v\n", err)
   348  	}
   349  	fmt.Printf("Import done in %v\n", time.Since(start))
   350  	return nil
   351  }
   352  
   353  // exportPreimages dumps the preimage data to specified json file in streaming way.
   354  func exportPreimages(ctx *cli.Context) error {
   355  	if ctx.Args().Len() < 1 {
   356  		utils.Fatalf("This command requires an argument.")
   357  	}
   358  	stack, _ := makeConfigNode(ctx)
   359  	defer stack.Close()
   360  
   361  	db := utils.MakeChainDatabase(ctx, stack, true)
   362  	start := time.Now()
   363  
   364  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   365  		utils.Fatalf("Export error: %v\n", err)
   366  	}
   367  	fmt.Printf("Export done in %v\n", time.Since(start))
   368  	return nil
   369  }
   370  
// parseDumpConfig resolves the command line arguments and flags of the dump
// command into a state.DumpConfig, an open (read-only) chain database and the
// state root of the selected block. The block may be given as a hash or a
// decimal number; with no argument the current head header is used.
// NOTE(review): on error paths the opened database is returned as nil without
// an explicit Close; presumably the caller's stack.Close() releases it — verify.
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
	db := utils.MakeChainDatabase(ctx, stack, true)
	var header *types.Header
	if ctx.NArg() > 1 {
		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
	}
	if ctx.NArg() == 1 {
		arg := ctx.Args().First()
		if hashish(arg) {
			// Argument looks like a hash: resolve it to a number first, since
			// headers are keyed by (hash, number).
			hash := common.HexToHash(arg)
			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
				header = rawdb.ReadHeader(db, hash, *number)
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
			}
		} else {
			// Argument is a decimal block number: resolve via the canonical chain.
			number, err := strconv.ParseUint(arg, 10, 64)
			if err != nil {
				return nil, nil, common.Hash{}, err
			}
			if hash := rawdb.ReadCanonicalHash(db, number); hash != (common.Hash{}) {
				header = rawdb.ReadHeader(db, hash, number)
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
			}
		}
	} else {
		// Use latest
		header = rawdb.ReadHeadHeader(db)
	}
	if header == nil {
		return nil, nil, common.Hash{}, errors.New("no head block found")
	}
	// The optional start key may be a 32-byte hash, a 20-byte address (hashed
	// here, since the trie is keyed by hashes), or empty for the zero hash.
	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
	var start common.Hash
	switch len(startArg) {
	case 0: // no start key given: begin at the zero hash
	case 32:
		start = common.BytesToHash(startArg)
	case 20:
		start = crypto.Keccak256Hash(startArg)
		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
	default:
		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
	}
	var conf = &state.DumpConfig{
		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
		Start:             start.Bytes(),
		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
	}
	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
	return conf, db, header.Root, nil
}
   428  
   429  func dump(ctx *cli.Context) error {
   430  	stack, _ := makeConfigNode(ctx)
   431  	defer stack.Close()
   432  
   433  	conf, db, root, err := parseDumpConfig(ctx, stack)
   434  	if err != nil {
   435  		return err
   436  	}
   437  	state, err := state.New(root, state.NewDatabase(db), nil)
   438  	if err != nil {
   439  		return err
   440  	}
   441  	if ctx.Bool(utils.IterativeOutputFlag.Name) {
   442  		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
   443  	} else {
   444  		if conf.OnlyWithAddresses {
   445  			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
   446  				" otherwise the accounts will overwrite each other in the resulting mapping.")
   447  			return fmt.Errorf("incompatible options")
   448  		}
   449  		fmt.Println(string(state.Dump(conf)))
   450  	}
   451  	return nil
   452  }
   453  
   454  // hashish returns true for strings that look like hashes.
   455  func hashish(x string) bool {
   456  	_, err := strconv.Atoi(x)
   457  	return err != nil
   458  }