github.com/electroneum/electroneum-sc@v0.0.0-20230105223411-3bc1d078281e/cmd/etn-sc/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/electroneum/electroneum-sc/cmd/utils"
	"github.com/electroneum/electroneum-sc/common"
	"github.com/electroneum/electroneum-sc/common/hexutil"
	"github.com/electroneum/electroneum-sc/core"
	"github.com/electroneum/electroneum-sc/core/rawdb"
	"github.com/electroneum/electroneum-sc/core/state"
	"github.com/electroneum/electroneum-sc/core/types"
	"github.com/electroneum/electroneum-sc/crypto"
	"github.com/electroneum/electroneum-sc/ethdb"
	"github.com/electroneum/electroneum-sc/log"
	"github.com/electroneum/electroneum-sc/metrics"
	"github.com/electroneum/electroneum-sc/node"
	"gopkg.in/urfave/cli.v1"
)

var (
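	// Illustrative invocation of the init command (assuming the binary is
	// built as etn-sc, per cmd/etn-sc):
	//
	//	etn-sc init /path/to/genesis.json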
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags:     utils.DatabasePathFlags,
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
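	// Illustrative invocation; with no network flag this prints the compiled-in
	// default genesis (assuming an etn-sc binary):
	//
	//	etn-sc dumpgenesis > genesis.json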
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags:     utils.NetworkFlags,
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
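	// Illustrative invocation importing several RLP block files in one run
	// (assuming an etn-sc binary):
	//
	//	etn-sc import blocks-0.rlp blocks-1.rlp blocks-2.rlp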
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: append([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,
		}, utils.DatabasePathFlags...),
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded file. The input can be a single
file containing several RLP-encoded blocks, or several such files.

If only one file is used, an import error will result in failure. If several files are
used, processing will continue even if an individual RLP file fails to import.`,
	}
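	// Illustrative invocation exporting blocks 0 through 1000 to a gzipped
	// file (assuming an etn-sc binary):
	//
	//	etn-sc export chain.rlp.gz 0 1000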
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: append([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags...),
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
to if it already exists. If the file name ends with .gz, the
output will be gzipped.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: append([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags...),
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: append([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags...),
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
`,
	}
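	// Illustrative invocations dumping the state at block 0 or at a specific
	// block hash (assuming an etn-sc binary):
	//
	//	etn-sc dump 0
	//	etn-sc dump <blockHash>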
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: append([]cli.Flag{
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		}, utils.DatabasePathFlags...),
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}
)

// initGenesis parses the given JSON-format genesis file and writes it as the
// zero'd block (i.e. genesis) of both the full and light databases, or fails
// hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}

	// Open and initialise both full and light databases
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.GlobalString(utils.AncientFlag.Name), "", false)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		chaindb.Close()
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

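// dumpGenesis writes the genesis block configuration of the selected network,
// or the default genesis if no network flag is given, to stdout as JSON.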
func dumpGenesis(ctx *cli.Context) error {
	// TODO(rjl493456442) support loading from the custom datadir
	genesis := utils.MakeGenesis(ctx)
	if genesis == nil {
		genesis = core.DefaultGenesisBlock()
	}
	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
		utils.Fatalf("could not encode genesis")
	}
	return nil
}

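// importChain imports one or more RLP-encoded block files into the chain
// database, reporting memory and database statistics along the way and
// optionally compacting the database afterwards.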
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	var importErr error

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats, mostly to see the thrashing caused by the import
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}

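// exportChain writes the blockchain to the given file, either in full or, when
// first and last block numbers are supplied, as an appended range. Files whose
// name ends in .gz are written gzip-compressed.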
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block numbers must not be negative\n")
		}
		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	start := time.Now()

	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified file as an RLP stream.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	start := time.Now()

	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

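// parseDumpConfig resolves the target block (by hash, by number, or the head
// header if no argument is given) and assembles a state.DumpConfig from the
// command line flags, returning the config, the chain database and the state root.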
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
	db := utils.MakeChainDatabase(ctx, stack, true)
	var header *types.Header
	if ctx.NArg() > 1 {
		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
	}
	if ctx.NArg() == 1 {
		arg := ctx.Args().First()
		if hashish(arg) {
			hash := common.HexToHash(arg)
			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
				header = rawdb.ReadHeader(db, hash, *number)
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
			}
		} else {
			number, err := strconv.Atoi(arg)
			if err != nil {
				return nil, nil, common.Hash{}, err
			}
			if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) {
				header = rawdb.ReadHeader(db, hash, uint64(number))
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
			}
		}
	} else {
		// Use latest
		header = rawdb.ReadHeadHeader(db)
	}
	if header == nil {
		return nil, nil, common.Hash{}, errors.New("no head block found")
	}
	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
	var start common.Hash
	switch len(startArg) {
	case 0: // leave start as the zero hash, i.e. dump from the beginning of the trie
	case 32:
		start = common.BytesToHash(startArg)
	case 20:
		start = crypto.Keccak256Hash(startArg)
		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
	default:
		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
	}
	var conf = &state.DumpConfig{
		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
		Start:             start.Bytes(),
		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
	}
	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
	return conf, db, header.Root, nil
}

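// dump prints the state at the selected block, either as a single JSON object
// or, when iterative output is requested, as a stream of JSON-encoded accounts.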
func dump(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	conf, db, root, err := parseDumpConfig(ctx, stack)
	if err != nil {
		return err
	}
	state, err := state.New(root, state.NewDatabase(db), nil)
	if err != nil {
		return err
	}
	if ctx.Bool(utils.IterativeOutputFlag.Name) {
		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
	} else {
		if conf.OnlyWithAddresses {
			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
				" otherwise the accounts will overwrite each other in the resulting mapping.")
			return fmt.Errorf("incompatible options")
		}
		fmt.Println(string(state.Dump(conf)))
	}
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}