github.com/theQRL/go-zond@v0.2.1/cmd/gzond/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"os"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/theQRL/go-zond/cmd/utils"
    30  	"github.com/theQRL/go-zond/common"
    31  	"github.com/theQRL/go-zond/common/hexutil"
    32  	"github.com/theQRL/go-zond/core"
    33  	"github.com/theQRL/go-zond/core/rawdb"
    34  	"github.com/theQRL/go-zond/core/state"
    35  	"github.com/theQRL/go-zond/core/types"
    36  	"github.com/theQRL/go-zond/crypto"
    37  	"github.com/theQRL/go-zond/internal/flags"
    38  	"github.com/theQRL/go-zond/log"
    39  	"github.com/theQRL/go-zond/metrics"
    40  	"github.com/theQRL/go-zond/node"
    41  	"github.com/theQRL/go-zond/zonddb"
    42  	"github.com/urfave/cli/v2"
    43  )
    44  
var (
	// initCommand seeds a fresh database with a user-supplied genesis
	// specification, fixing the network the node will participate in.
	initCommand = &cli.Command{
		Action:    initGenesis,
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: flags.Merge([]cli.Flag{
			utils.CachePreimagesFlag,
			utils.StateSchemeFlag,
		}, utils.DatabasePathFlags),
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the genesis spec of the selected network
	// preset, or of the chain already present in the datadir, as JSON.
	dumpGenesisCommand = &cli.Command{
		Action:    dumpGenesis,
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags:     append([]cli.Flag{utils.DataDirFlag}, utils.NetworkFlags...),
		Description: `
The dumpgenesis command prints the genesis configuration of the network preset
if one is set.  Otherwise it prints the genesis from the datadir.`,
	}
	// importCommand ingests one or more RLP-encoded block files into the
	// local chain database; the metrics flags allow profiling the import.
	importCommand = &cli.Command{
		Action:    importChain,
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TransactionHistoryFlag,
			utils.StateSchemeFlag,
			utils.StateHistoryFlag,
		}, utils.DatabasePathFlags),
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the whole chain, or a block range, from the local
	// database into an RLP file (gzipped when the filename ends in .gz).
	exportCommand = &cli.Command{
		Action:    exportChain,
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.StateSchemeFlag,
		}, utils.DatabasePathFlags),
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// dumpCommand prints the account state at a given block (or the latest
	// one) to stdout, with flags controlling iteration and filtering.
	dumpCommand = &cli.Command{
		Action:    dump,
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: flags.Merge([]cli.Flag{
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
			utils.StateSchemeFlag,
		}, utils.DatabasePathFlags),
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}
)
   146  
   147  // initGenesis will initialise the given JSON format genesis file and writes it as
   148  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   149  func initGenesis(ctx *cli.Context) error {
   150  	if ctx.Args().Len() != 1 {
   151  		utils.Fatalf("need genesis.json file as the only argument")
   152  	}
   153  	genesisPath := ctx.Args().First()
   154  	if len(genesisPath) == 0 {
   155  		utils.Fatalf("invalid path to genesis file")
   156  	}
   157  	file, err := os.Open(genesisPath)
   158  	if err != nil {
   159  		utils.Fatalf("Failed to read genesis file: %v", err)
   160  	}
   161  	defer file.Close()
   162  
   163  	genesis := new(core.Genesis)
   164  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   165  		utils.Fatalf("invalid genesis file: %v", err)
   166  	}
   167  	// Open and initialise both full node database
   168  	stack, _ := makeConfigNode(ctx)
   169  	defer stack.Close()
   170  
   171  	for _, name := range []string{"chaindata"} {
   172  		chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false)
   173  		if err != nil {
   174  			utils.Fatalf("Failed to open database: %v", err)
   175  		}
   176  		defer chaindb.Close()
   177  
   178  		triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false)
   179  		defer triedb.Close()
   180  
   181  		_, hash, err := core.SetupGenesisBlock(chaindb, triedb, genesis)
   182  		if err != nil {
   183  			utils.Fatalf("Failed to write genesis block: %v", err)
   184  		}
   185  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   186  	}
   187  	return nil
   188  }
   189  
   190  func dumpGenesis(ctx *cli.Context) error {
   191  	// if there is a testnet preset enabled, dump that
   192  	if utils.IsNetworkPreset(ctx) {
   193  		genesis := utils.MakeGenesis(ctx)
   194  		if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   195  			utils.Fatalf("could not encode genesis: %s", err)
   196  		}
   197  		return nil
   198  	}
   199  	// dump whatever already exists in the datadir
   200  	stack, _ := makeConfigNode(ctx)
   201  	for _, name := range []string{"chaindata"} {
   202  		db, err := stack.OpenDatabase(name, 0, 0, "", true)
   203  		if err != nil {
   204  			if !os.IsNotExist(err) {
   205  				return err
   206  			}
   207  			continue
   208  		}
   209  		genesis, err := core.ReadGenesis(db)
   210  		if err != nil {
   211  			utils.Fatalf("failed to read genesis: %s", err)
   212  		}
   213  		db.Close()
   214  
   215  		if err := json.NewEncoder(os.Stdout).Encode(*genesis); err != nil {
   216  			utils.Fatalf("could not encode stored genesis: %s", err)
   217  		}
   218  		return nil
   219  	}
   220  	if ctx.IsSet(utils.DataDirFlag.Name) {
   221  		utils.Fatalf("no existing datadir at %s", stack.Config().DataDir)
   222  	}
   223  	utils.Fatalf("no network preset provided, no existing genesis in the default datadir")
   224  	return nil
   225  }
   226  
// importChain imports one or more RLP-encoded block files into the local
// chain database, reporting timing, memory and database statistics.
// With a single file argument any import error aborts the run; with several
// files, failures are logged and the remaining files are still processed
// (the last error is returned). Unless --nocompaction is given, the whole
// database is compacted afterwards to measure post-import disk usage.
func importChain(ctx *cli.Context) error {
	if ctx.Args().Len() < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, false)
	defer db.Close()

	// Start periodically gathering memory profiles
	// NOTE(review): this sampler goroutine has no stop signal; it is
	// intentionally left running for the lifetime of the (short-lived) command.
	var peakMemAlloc, peakMemSys atomic.Uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if peakMemAlloc.Load() < stats.Alloc {
				peakMemAlloc.Store(stats.Alloc)
			}
			if peakMemSys.Load() < stats.Sys {
				peakMemSys.Store(stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	// Tracks the most recent import failure; returned at the end so the
	// process exit code reflects a partially failed multi-file import.
	var importErr error

	if ctx.Args().Len() == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		// Multiple files: keep going past individual failures.
		for _, arg := range ctx.Args().Slice() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(peakMemAlloc.Load())/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(peakMemSys.Load())/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.Bool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}
   305  
   306  func exportChain(ctx *cli.Context) error {
   307  	if ctx.Args().Len() < 1 {
   308  		utils.Fatalf("This command requires an argument.")
   309  	}
   310  
   311  	stack, _ := makeConfigNode(ctx)
   312  	defer stack.Close()
   313  
   314  	chain, _ := utils.MakeChain(ctx, stack, true)
   315  	start := time.Now()
   316  
   317  	var err error
   318  	fp := ctx.Args().First()
   319  	if ctx.Args().Len() < 3 {
   320  		err = utils.ExportChain(chain, fp)
   321  	} else {
   322  		// This can be improved to allow for numbers larger than 9223372036854775807
   323  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   324  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   325  		if ferr != nil || lerr != nil {
   326  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   327  		}
   328  		if first < 0 || last < 0 {
   329  			utils.Fatalf("Export error: block number must be greater than 0\n")
   330  		}
   331  		if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
   332  			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
   333  		}
   334  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   335  	}
   336  
   337  	if err != nil {
   338  		utils.Fatalf("Export error: %v\n", err)
   339  	}
   340  	fmt.Printf("Export done in %v\n", time.Since(start))
   341  	return nil
   342  }
   343  
// parseDumpConfig resolves the target block for a state dump and assembles
// the dump configuration from the CLI flags.
//
// The single optional argument selects the block: a hash-looking string is
// treated as a block hash, a decimal string as a block number, and no
// argument means the current head. It returns the dump config, the (read-only)
// chain database, the state root of the chosen header, and any error.
// The caller is responsible for the returned database's lifetime.
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, zonddb.Database, common.Hash, error) {
	db := utils.MakeChainDatabase(ctx, stack, true)
	var header *types.Header
	if ctx.NArg() > 1 {
		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
	}
	if ctx.NArg() == 1 {
		arg := ctx.Args().First()
		if hashish(arg) {
			// Argument is not a plain decimal: interpret as a block hash and
			// map it to its canonical number to load the header.
			hash := common.HexToHash(arg)
			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
				header = rawdb.ReadHeader(db, hash, *number)
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
			}
		} else {
			// Argument is a decimal block number: resolve the canonical hash.
			number, err := strconv.ParseUint(arg, 10, 64)
			if err != nil {
				return nil, nil, common.Hash{}, err
			}
			if hash := rawdb.ReadCanonicalHash(db, number); hash != (common.Hash{}) {
				header = rawdb.ReadHeader(db, hash, number)
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
			}
		}
	} else {
		// Use latest
		header = rawdb.ReadHeadHeader(db)
	}
	if header == nil {
		return nil, nil, common.Hash{}, errors.New("no head block found")
	}
	// The optional start key may be given as a hashed key (64/66 hex chars,
	// with or without 0x) or as an address string (41 chars — presumably the
	// Zond address format; verify against common.NewAddressFromString), which
	// is hashed into the trie key space.
	startArg := ctx.String(utils.StartKeyFlag.Name)
	var start common.Hash
	switch len(startArg) {
	case 0: // common.Hash
	case 64, 66:
		start = common.BytesToHash(common.FromHex(startArg))
	case 41:
		addr, err := common.NewAddressFromString(startArg)
		if err != nil {
			return nil, nil, common.Hash{}, err
		}
		start = crypto.Keccak256Hash(addr.Bytes())
		log.Info("Converting start-address to hash", "address", addr, "hash", start.Hex())
	default:
		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
	}
	var conf = &state.DumpConfig{
		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
		Start:             start.Bytes(),
		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
	}
	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
	return conf, db, header.Root, nil
}
   405  
   406  func dump(ctx *cli.Context) error {
   407  	stack, _ := makeConfigNode(ctx)
   408  	defer stack.Close()
   409  
   410  	conf, db, root, err := parseDumpConfig(ctx, stack)
   411  	if err != nil {
   412  		return err
   413  	}
   414  	triedb := utils.MakeTrieDatabase(ctx, db, true, false) // always enable preimage lookup
   415  	defer triedb.Close()
   416  
   417  	state, err := state.New(root, state.NewDatabaseWithNodeDB(db, triedb), nil)
   418  	if err != nil {
   419  		return err
   420  	}
   421  	if ctx.Bool(utils.IterativeOutputFlag.Name) {
   422  		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
   423  	} else {
   424  		if conf.OnlyWithAddresses {
   425  			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
   426  				" otherwise the accounts will overwrite each other in the resulting mapping.")
   427  			return errors.New("incompatible options")
   428  		}
   429  		fmt.Println(string(state.Dump(conf)))
   430  	}
   431  	return nil
   432  }
   433  
   434  // hashish returns true for strings that look like hashes.
   435  func hashish(x string) bool {
   436  	_, err := strconv.Atoi(x)
   437  	return err != nil
   438  }