github.com/theQRL/go-zond@v0.1.1/cmd/gzond/chaincmd.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"os"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/theQRL/go-zond/cmd/utils"
    30  	"github.com/theQRL/go-zond/common"
    31  	"github.com/theQRL/go-zond/common/hexutil"
    32  	"github.com/theQRL/go-zond/core"
    33  	"github.com/theQRL/go-zond/core/rawdb"
    34  	"github.com/theQRL/go-zond/core/state"
    35  	"github.com/theQRL/go-zond/core/types"
    36  	"github.com/theQRL/go-zond/crypto"
    37  	"github.com/theQRL/go-zond/internal/flags"
    38  	"github.com/theQRL/go-zond/log"
    39  	"github.com/theQRL/go-zond/metrics"
    40  	"github.com/theQRL/go-zond/node"
    41  	"github.com/theQRL/go-zond/zonddb"
    42  	"github.com/urfave/cli/v2"
    43  )
    44  
    45  var (
    46  	initCommand = &cli.Command{
    47  		Action:    initGenesis,
    48  		Name:      "init",
    49  		Usage:     "Bootstrap and initialize a new genesis block",
    50  		ArgsUsage: "<genesisPath>",
    51  		Flags: flags.Merge([]cli.Flag{
    52  			utils.CachePreimagesFlag,
    53  			utils.StateSchemeFlag,
    54  		}, utils.DatabasePathFlags),
    55  		Description: `
    56  The init command initializes a new genesis block and definition for the network.
    57  This is a destructive action and changes the network in which you will be
    58  participating.
    59  
    60  It expects the genesis file as its only argument.`,
    61  	}
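
        	// A usage sketch (the datadir and genesis path below are illustrative,
        	// not taken from this repository): the data directory is initialised
        	// once, before the node is started for the first time.
        	//
        	//	gzond --datadir ./node1 init ./genesis.json
        	//
        	// Re-running init against the same datadir with a different genesis is
        	// expected to fail when the stored genesis is compared in
        	// core.SetupGenesisBlock, rather than silently rewrite the chain.
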
    62  	dumpGenesisCommand = &cli.Command{
    63  		Action:    dumpGenesis,
    64  		Name:      "dumpgenesis",
    65  		Usage:     "Dumps genesis block JSON configuration to stdout",
    66  		ArgsUsage: "",
    67  		Flags:     append([]cli.Flag{utils.DataDirFlag}, utils.NetworkFlags...),
    68  		Description: `
    69  The dumpgenesis command prints the genesis configuration of the network preset
    70  if one is set. Otherwise it prints the genesis from the datadir.`,
    71  	}
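
        	// Illustrative only (the datadir and the jq filter are assumptions, not
        	// part of this command): dumpgenesis writes a single JSON object to
        	// stdout, so it composes with ordinary shell tooling.
        	//
        	//	gzond --datadir ./node1 dumpgenesis | jq .config
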
    72  	importCommand = &cli.Command{
    73  		Action:    importChain,
    74  		Name:      "import",
    75  		Usage:     "Import a blockchain file",
    76  		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
    77  		Flags: flags.Merge([]cli.Flag{
    78  			utils.CacheFlag,
    79  			utils.SyncModeFlag,
    80  			utils.GCModeFlag,
    81  			utils.SnapshotFlag,
    82  			utils.CacheDatabaseFlag,
    83  			utils.CacheGCFlag,
    84  			utils.MetricsEnabledFlag,
    85  			utils.MetricsEnabledExpensiveFlag,
    86  			utils.MetricsHTTPFlag,
    87  			utils.MetricsPortFlag,
    88  			utils.MetricsEnableInfluxDBFlag,
    89  			utils.MetricsEnableInfluxDBV2Flag,
    90  			utils.MetricsInfluxDBEndpointFlag,
    91  			utils.MetricsInfluxDBDatabaseFlag,
    92  			utils.MetricsInfluxDBUsernameFlag,
    93  			utils.MetricsInfluxDBPasswordFlag,
    94  			utils.MetricsInfluxDBTagsFlag,
    95  			utils.MetricsInfluxDBTokenFlag,
    96  			utils.MetricsInfluxDBBucketFlag,
    97  			utils.MetricsInfluxDBOrganizationFlag,
    98  			utils.TxLookupLimitFlag,
    99  			utils.TransactionHistoryFlag,
   100  			utils.StateSchemeFlag,
   101  			utils.StateHistoryFlag,
   102  		}, utils.DatabasePathFlags),
   103  		Description: `
   104  The import command imports blocks from an RLP-encoded form. The form can be one file
   105  with several RLP-encoded blocks, or several files can be used.
   106  
   107  If only one file is used, an import error will cause the command to fail. If several files
   108  are used, processing will continue even if an individual file fails to import.`,
   109  	}
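
        	// A hedged example of both forms (the file names are hypothetical): a
        	// single file is imported as one stream, while several files are
        	// processed in order even when one of them fails to import.
        	//
        	//	gzond --datadir ./node1 import chain.rlp
        	//	gzond --datadir ./node1 import chunk-000.rlp chunk-001.rlp chunk-002.rlp
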
   110  	exportCommand = &cli.Command{
   111  		Action:    exportChain,
   112  		Name:      "export",
   113  		Usage:     "Export blockchain into file",
   114  		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
   115  		Flags: flags.Merge([]cli.Flag{
   116  			utils.CacheFlag,
   117  			utils.SyncModeFlag,
   118  			utils.StateSchemeFlag,
   119  		}, utils.DatabasePathFlags),
   120  		Description: `
   121  Requires a first argument of the file to write to.
   122  Optional second and third arguments control the first and
   123  last block to write. In this mode, the file will be appended to
   124  if it already exists. If the file ends with .gz, the output will
   125  be gzipped.`,
   126  	}
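
        	// Illustrative invocations (paths and block numbers are made up): with
        	// only a file name the whole chain is exported, while adding first and
        	// last block numbers appends just that range; a .gz suffix selects
        	// gzipped output.
        	//
        	//	gzond --datadir ./node1 export chain.rlp
        	//	gzond --datadir ./node1 export blocks-0-99999.rlp.gz 0 99999
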
   127  	importPreimagesCommand = &cli.Command{
   128  		Action:    importPreimages,
   129  		Name:      "import-preimages",
   130  		Usage:     "Import the preimage database from an RLP stream",
   131  		ArgsUsage: "<datafile>",
   132  		Flags: flags.Merge([]cli.Flag{
   133  			utils.CacheFlag,
   134  			utils.SyncModeFlag,
   135  		}, utils.DatabasePathFlags),
   136  		Description: `
   137  The import-preimages command imports hash preimages from an RLP encoded stream.
   138  It is deprecated; please use "gzond db import" instead.
   139  `,
   140  	}
   141  	exportPreimagesCommand = &cli.Command{
   142  		Action:    exportPreimages,
   143  		Name:      "export-preimages",
   144  		Usage:     "Export the preimage database into an RLP stream",
   145  		ArgsUsage: "<dumpfile>",
   146  		Flags: flags.Merge([]cli.Flag{
   147  			utils.CacheFlag,
   148  			utils.SyncModeFlag,
   149  		}, utils.DatabasePathFlags),
   150  		Description: `
   151  The export-preimages command exports hash preimages to an RLP encoded stream.
   152  It is deprecated; please use "gzond db export" instead.
   153  `,
   154  	}
   155  	dumpCommand = &cli.Command{
   156  		Action:    dump,
   157  		Name:      "dump",
   158  		Usage:     "Dump a specific block from storage",
   159  		ArgsUsage: "[? <blockHash> | <blockNum>]",
   160  		Flags: flags.Merge([]cli.Flag{
   161  			utils.CacheFlag,
   162  			utils.IterativeOutputFlag,
   163  			utils.ExcludeCodeFlag,
   164  			utils.ExcludeStorageFlag,
   165  			utils.IncludeIncompletesFlag,
   166  			utils.StartKeyFlag,
   167  			utils.DumpLimitFlag,
   168  			utils.StateSchemeFlag,
   169  		}, utils.DatabasePathFlags),
   170  		Description: `
   171  This command dumps out the state for a given block (or latest, if none provided).
   172  `,
   173  	}
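
        	// Example invocations (the block number is made up and the literal flag
        	// spelling for iterative output is assumed from utils.IterativeOutputFlag):
        	// a block can be selected by number or by hash, and the state dump is
        	// written to stdout.
        	//
        	//	gzond --datadir ./node1 dump 1000000
        	//	gzond --datadir ./node1 dump --iterative 1000000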
   174  )
   175  
   176  // initGenesis initialises the given JSON-format genesis file and writes it as
   177  // the zeroth (i.e. genesis) block, or fails hard if it cannot succeed.
   178  func initGenesis(ctx *cli.Context) error {
   179  	if ctx.Args().Len() != 1 {
   180  		utils.Fatalf("need genesis.json file as the only argument")
   181  	}
   182  	genesisPath := ctx.Args().First()
   183  	if len(genesisPath) == 0 {
   184  		utils.Fatalf("invalid path to genesis file")
   185  	}
   186  	file, err := os.Open(genesisPath)
   187  	if err != nil {
   188  		utils.Fatalf("Failed to read genesis file: %v", err)
   189  	}
   190  	defer file.Close()
   191  
   192  	genesis := new(core.Genesis)
   193  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   194  		utils.Fatalf("invalid genesis file: %v", err)
   195  	}
   196  	// Open and initialise both full and light databases
   197  	stack, _ := makeConfigNode(ctx)
   198  	defer stack.Close()
   199  
   200  	for _, name := range []string{"chaindata", "lightchaindata"} {
   201  		chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
   202  		if err != nil {
   203  			utils.Fatalf("Failed to open database: %v", err)
   204  		}
   205  		defer chaindb.Close()
   206  
   207  		triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false)
   208  		defer triedb.Close()
   209  
   210  		_, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis)
   211  		if err != nil {
   212  			utils.Fatalf("Failed to write genesis block: %v", err)
   213  		}
   214  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   215  	}
   216  	return nil
   217  }
   218  
   219  func dumpGenesis(ctx *cli.Context) error {
   220  	// If a network preset is enabled, dump its genesis
   221  	if utils.IsNetworkPreset(ctx) {
   222  		genesis := utils.MakeGenesis(ctx)
   223  		if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   224  			utils.Fatalf("could not encode genesis: %s", err)
   225  		}
   226  		return nil
   227  	}
   228  	// dump whatever already exists in the datadir
   229  	stack, _ := makeConfigNode(ctx)
   230  	for _, name := range []string{"chaindata", "lightchaindata"} {
   231  		db, err := stack.OpenDatabase(name, 0, 0, "", true)
   232  		if err != nil {
   233  			if !os.IsNotExist(err) {
   234  				return err
   235  			}
   236  			continue
   237  		}
   238  		genesis, err := core.ReadGenesis(db)
   239  		if err != nil {
   240  			utils.Fatalf("failed to read genesis: %s", err)
   241  		}
   242  		db.Close()
   243  
   244  		if err := json.NewEncoder(os.Stdout).Encode(*genesis); err != nil {
   245  			utils.Fatalf("could not encode stored genesis: %s", err)
   246  		}
   247  		return nil
   248  	}
   249  	if ctx.IsSet(utils.DataDirFlag.Name) {
   250  		utils.Fatalf("no existing datadir at %s", stack.Config().DataDir)
   251  	}
   252  	utils.Fatalf("no network preset provided, no existing genesis in the default datadir")
   253  	return nil
   254  }
   255  
   256  func importChain(ctx *cli.Context) error {
   257  	if ctx.Args().Len() < 1 {
   258  		utils.Fatalf("This command requires an argument.")
   259  	}
   260  	// Start metrics export if enabled
   261  	utils.SetupMetrics(ctx)
   262  	// Start system runtime metrics collection
   263  	go metrics.CollectProcessMetrics(3 * time.Second)
   264  
   265  	stack, _ := makeConfigNode(ctx)
   266  	defer stack.Close()
   267  
   268  	chain, db := utils.MakeChain(ctx, stack, false)
   269  	defer db.Close()
   270  
   271  	// Start periodically gathering memory profiles
   272  	var peakMemAlloc, peakMemSys atomic.Uint64
   273  	go func() {
   274  		stats := new(runtime.MemStats)
   275  		for {
   276  			runtime.ReadMemStats(stats)
   277  			if peakMemAlloc.Load() < stats.Alloc {
   278  				peakMemAlloc.Store(stats.Alloc)
   279  			}
   280  			if peakMemSys.Load() < stats.Sys {
   281  				peakMemSys.Store(stats.Sys)
   282  			}
   283  			time.Sleep(5 * time.Second)
   284  		}
   285  	}()
   286  	// Import the chain
   287  	start := time.Now()
   288  
   289  	var importErr error
   290  
   291  	if ctx.Args().Len() == 1 {
   292  		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
   293  			importErr = err
   294  			log.Error("Import error", "err", err)
   295  		}
   296  	} else {
   297  		for _, arg := range ctx.Args().Slice() {
   298  			if err := utils.ImportChain(chain, arg); err != nil {
   299  				importErr = err
   300  				log.Error("Import error", "file", arg, "err", err)
   301  			}
   302  		}
   303  	}
   304  	chain.Stop()
   305  	fmt.Printf("Import done in %v.\n\n", time.Since(start))
   306  
   307  	// Output pre-compaction stats mostly to see the import thrashing
   308  	showLeveldbStats(db)
   309  
   310  	// Print the memory statistics used by the importing
   311  	mem := new(runtime.MemStats)
   312  	runtime.ReadMemStats(mem)
   313  
   314  	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(peakMemAlloc.Load())/1024/1024)
   315  	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(peakMemSys.Load())/1024/1024)
   316  	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
   317  	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))
   318  
   319  	if ctx.Bool(utils.NoCompactionFlag.Name) {
   320  		return nil
   321  	}
   322  
   323  	// Compact the entire database to more accurately measure disk io and print the stats
   324  	start = time.Now()
   325  	fmt.Println("Compacting entire database...")
   326  	if err := db.Compact(nil, nil); err != nil {
   327  		utils.Fatalf("Compaction failed: %v", err)
   328  	}
   329  	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
   330  
   331  	showLeveldbStats(db)
   332  	return importErr
   333  }
   334  
   335  func exportChain(ctx *cli.Context) error {
   336  	if ctx.Args().Len() < 1 {
   337  		utils.Fatalf("This command requires an argument.")
   338  	}
   339  
   340  	stack, _ := makeConfigNode(ctx)
   341  	defer stack.Close()
   342  
   343  	chain, _ := utils.MakeChain(ctx, stack, true)
   344  	start := time.Now()
   345  
   346  	var err error
   347  	fp := ctx.Args().First()
   348  	if ctx.Args().Len() < 3 {
   349  		err = utils.ExportChain(chain, fp)
   350  	} else {
   351  		// This can be improved to allow for numbers larger than 9223372036854775807 (math.MaxInt64); see the sketch after this function
   352  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   353  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   354  		if ferr != nil || lerr != nil {
   355  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   356  		}
   357  		if first < 0 || last < 0 {
   358  			utils.Fatalf("Export error: block numbers must not be negative\n")
   359  		}
   360  		if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
   361  			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
   362  		}
   363  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   364  	}
   365  
   366  	if err != nil {
   367  		utils.Fatalf("Export error: %v\n", err)
   368  	}
   369  	fmt.Printf("Export done in %v\n", time.Since(start))
   370  	return nil
   371  }
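
        // The ParseInt calls above cap the exportable range at 9223372036854775807.
        // A minimal sketch of the improvement hinted at in exportChain, assuming the
        // surrounding command stays unchanged (parseBlockRange is a hypothetical
        // helper, not part of this package's API):
        func parseBlockRange(firstArg, lastArg string) (uint64, uint64, error) {
        	// ParseUint accepts the full uint64 range instead of stopping at MaxInt64.
        	first, err := strconv.ParseUint(firstArg, 10, 64)
        	if err != nil {
        		return 0, 0, fmt.Errorf("invalid first block %q: %w", firstArg, err)
        	}
        	last, err := strconv.ParseUint(lastArg, 10, 64)
        	if err != nil {
        		return 0, 0, fmt.Errorf("invalid last block %q: %w", lastArg, err)
        	}
        	if first > last {
        		return 0, 0, fmt.Errorf("first block %d is after last block %d", first, last)
        	}
        	return first, last, nil
        }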
   372  
   373  // importPreimages imports preimage data from the specified file.
   374  func importPreimages(ctx *cli.Context) error {
   375  	if ctx.Args().Len() < 1 {
   376  		utils.Fatalf("This command requires an argument.")
   377  	}
   378  
   379  	stack, _ := makeConfigNode(ctx)
   380  	defer stack.Close()
   381  
   382  	db := utils.MakeChainDatabase(ctx, stack, false)
   383  	start := time.Now()
   384  
   385  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   386  		utils.Fatalf("Import error: %v\n", err)
   387  	}
   388  	fmt.Printf("Import done in %v\n", time.Since(start))
   389  	return nil
   390  }
   391  
   392  // exportPreimages dumps the preimage data to the specified file as an RLP stream.
   393  func exportPreimages(ctx *cli.Context) error {
   394  	if ctx.Args().Len() < 1 {
   395  		utils.Fatalf("This command requires an argument.")
   396  	}
   397  	stack, _ := makeConfigNode(ctx)
   398  	defer stack.Close()
   399  
   400  	db := utils.MakeChainDatabase(ctx, stack, true)
   401  	start := time.Now()
   402  
   403  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   404  		utils.Fatalf("Export error: %v\n", err)
   405  	}
   406  	fmt.Printf("Export done in %v\n", time.Since(start))
   407  	return nil
   408  }
   409  
   410  func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, zonddb.Database, common.Hash, error) {
   411  	db := utils.MakeChainDatabase(ctx, stack, true)
   412  	var header *types.Header
   413  	if ctx.NArg() > 1 {
   414  		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
   415  	}
   416  	if ctx.NArg() == 1 {
   417  		arg := ctx.Args().First()
   418  		if hashish(arg) {
   419  			hash := common.HexToHash(arg)
   420  			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
   421  				header = rawdb.ReadHeader(db, hash, *number)
   422  			} else {
   423  				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
   424  			}
   425  		} else {
   426  			number, err := strconv.ParseUint(arg, 10, 64)
   427  			if err != nil {
   428  				return nil, nil, common.Hash{}, err
   429  			}
   430  			if hash := rawdb.ReadCanonicalHash(db, number); hash != (common.Hash{}) {
   431  				header = rawdb.ReadHeader(db, hash, number)
   432  			} else {
   433  				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
   434  			}
   435  		}
   436  	} else {
   437  		// Use latest
   438  		header = rawdb.ReadHeadHeader(db)
   439  	}
   440  	if header == nil {
   441  		return nil, nil, common.Hash{}, errors.New("no head block found")
   442  	}
   443  	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
   444  	var start common.Hash
   445  	switch len(startArg) {
   446  	case 0: // no start key given, start from the zero hash
   447  	case 32:
   448  		start = common.BytesToHash(startArg)
   449  	case 20:
   450  		start = crypto.Keccak256Hash(startArg)
   451  		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
   452  	default:
   453  		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
   454  	}
   455  	var conf = &state.DumpConfig{
   456  		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
   457  		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
   458  		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
   459  		Start:             start.Bytes(),
   460  		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
   461  	}
   462  	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
   463  		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
   464  		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
   465  	return conf, db, header.Root, nil
   466  }
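
        // Illustrative start keys for the dump command (the literal flag name is
        // assumed from utils.StartKeyFlag and the hex payloads are made up): a
        // 32-byte value is used verbatim as the starting account hash, while a
        // 20-byte value is treated as an address and hashed with Keccak256 first,
        // as in the switch above.
        //
        //	--start 0x00000000000000000000000000000000000000000000000000000000000000ff
        //	--start 0x00000000000000000000000000000000000000ff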
   467  
   468  func dump(ctx *cli.Context) error {
   469  	stack, _ := makeConfigNode(ctx)
   470  	defer stack.Close()
   471  
   472  	conf, db, root, err := parseDumpConfig(ctx, stack)
   473  	if err != nil {
   474  		return err
   475  	}
   476  	triedb := utils.MakeTrieDatabase(ctx, db, true, false) // always enable preimage lookup
   477  	defer triedb.Close()
   478  
   479  	state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
   480  	if err != nil {
   481  		return err
   482  	}
   483  	if ctx.Bool(utils.IterativeOutputFlag.Name) {
   484  		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
   485  	} else {
   486  		if conf.OnlyWithAddresses {
   487  			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
   488  				" otherwise the accounts will overwrite each other in the resulting mapping.")
   489  			return errors.New("incompatible options")
   490  		}
   491  		fmt.Println(string(state.Dump(conf)))
   492  	}
   493  	return nil
   494  }
   495  
   496  // hashish returns true for strings that look like hashes, i.e. anything that does not parse as a decimal integer.
   497  func hashish(x string) bool {
   498  	_, err := strconv.Atoi(x)
   499  	return err != nil
   500  }