gitee.com/liu-zhao234568/cntest@v1.0.0/cmd/geth/chaincmd.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"os"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"gitee.com/liu-zhao234568/cntest/cmd/utils"
    30  	"gitee.com/liu-zhao234568/cntest/common"
    31  	"gitee.com/liu-zhao234568/cntest/common/hexutil"
    32  	"gitee.com/liu-zhao234568/cntest/core"
    33  	"gitee.com/liu-zhao234568/cntest/core/rawdb"
    34  	"gitee.com/liu-zhao234568/cntest/core/state"
    35  	"gitee.com/liu-zhao234568/cntest/core/types"
    36  	"gitee.com/liu-zhao234568/cntest/crypto"
    37  	"gitee.com/liu-zhao234568/cntest/ethdb"
    38  	"gitee.com/liu-zhao234568/cntest/log"
    39  	"gitee.com/liu-zhao234568/cntest/metrics"
    40  	"gitee.com/liu-zhao234568/cntest/node"
    41  	"gopkg.in/urfave/cli.v1"
    42  )
    43  
    44  var (
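        	// The chain maintenance subcommands below mirror go-ethereum's cmd/geth
        	// layout. Each Action is wrapped in utils.MigrateFlags, which promotes
        	// flags given after the subcommand name to their global counterparts
        	// before the action runs.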
    45  	initCommand = cli.Command{
    46  		Action:    utils.MigrateFlags(initGenesis),
    47  		Name:      "init",
    48  		Usage:     "Bootstrap and initialize a new genesis block",
    49  		ArgsUsage: "<genesisPath>",
    50  		Flags: []cli.Flag{
    51  			utils.DataDirFlag,
    52  		},
    53  		Category: "BLOCKCHAIN COMMANDS",
    54  		Description: `
    55  The init command initializes a new genesis block and definition for the network.
    56  This is a destructive action and changes the network in which you will be
    57  participating.
    58  
    59  It expects the genesis file as its argument.`,
    60  	}
    61  	dumpGenesisCommand = cli.Command{
    62  		Action:    utils.MigrateFlags(dumpGenesis),
    63  		Name:      "dumpgenesis",
    64  		Usage:     "Dumps genesis block JSON configuration to stdout",
    65  		ArgsUsage: "",
    66  		Flags: []cli.Flag{
    67  			utils.MainnetFlag,
    68  			utils.RopstenFlag,
    69  			utils.RinkebyFlag,
    70  			utils.GoerliFlag,
    71  			utils.CalaverasFlag,
    72  		},
    73  		Category: "BLOCKCHAIN COMMANDS",
    74  		Description: `
    75  The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
    76  	}
    77  	importCommand = cli.Command{
    78  		Action:    utils.MigrateFlags(importChain),
    79  		Name:      "import",
    80  		Usage:     "Import a blockchain file",
    81  		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
    82  		Flags: []cli.Flag{
    83  			utils.DataDirFlag,
    84  			utils.CacheFlag,
    85  			utils.SyncModeFlag,
    86  			utils.GCModeFlag,
    87  			utils.SnapshotFlag,
    88  			utils.CacheDatabaseFlag,
    89  			utils.CacheGCFlag,
    90  			utils.MetricsEnabledFlag,
    91  			utils.MetricsEnabledExpensiveFlag,
    92  			utils.MetricsHTTPFlag,
    93  			utils.MetricsPortFlag,
    94  			utils.MetricsEnableInfluxDBFlag,
    95  			utils.MetricsInfluxDBEndpointFlag,
    96  			utils.MetricsInfluxDBDatabaseFlag,
    97  			utils.MetricsInfluxDBUsernameFlag,
    98  			utils.MetricsInfluxDBPasswordFlag,
    99  			utils.MetricsInfluxDBTagsFlag,
   100  			utils.TxLookupLimitFlag,
   101  		},
   102  		Category: "BLOCKCHAIN COMMANDS",
   103  		Description: `
   104  The import command imports blocks from an RLP-encoded file. The input can be a single
   105  file containing several RLP-encoded blocks, or several files can be used.
   106  
   107  If only one file is used, an import error will result in failure. If several files are
   108  used, processing will continue even if an individual file fails to import.`,
   109  	}
   110  	exportCommand = cli.Command{
   111  		Action:    utils.MigrateFlags(exportChain),
   112  		Name:      "export",
   113  		Usage:     "Export blockchain into file",
   114  		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
   115  		Flags: []cli.Flag{
   116  			utils.DataDirFlag,
   117  			utils.CacheFlag,
   118  			utils.SyncModeFlag,
   119  		},
   120  		Category: "BLOCKCHAIN COMMANDS",
   121  		Description: `
   122  Requires a first argument of the file to write to.
   123  Optional second and third arguments control the first and
   124  last block to write. In this mode, the file will be appended
   125  to if it already exists. If the file ends with .gz, the output will
   126  be gzipped.`,
   127  	}
   128  	importPreimagesCommand = cli.Command{
   129  		Action:    utils.MigrateFlags(importPreimages),
   130  		Name:      "import-preimages",
   131  		Usage:     "Import the preimage database from an RLP stream",
   132  		ArgsUsage: "<datafile>",
   133  		Flags: []cli.Flag{
   134  			utils.DataDirFlag,
   135  			utils.CacheFlag,
   136  			utils.SyncModeFlag,
   137  		},
   138  		Category: "BLOCKCHAIN COMMANDS",
   139  		Description: `
   140  The import-preimages command imports hash preimages from an RLP-encoded stream.`,
   141  	}
   142  	exportPreimagesCommand = cli.Command{
   143  		Action:    utils.MigrateFlags(exportPreimages),
   144  		Name:      "export-preimages",
   145  		Usage:     "Export the preimage database into an RLP stream",
   146  		ArgsUsage: "<dumpfile>",
   147  		Flags: []cli.Flag{
   148  			utils.DataDirFlag,
   149  			utils.CacheFlag,
   150  			utils.SyncModeFlag,
   151  		},
   152  		Category: "BLOCKCHAIN COMMANDS",
   153  		Description: `
   154  The export-preimages command exports hash preimages to an RLP-encoded stream.`,
   155  	}
   156  	dumpCommand = cli.Command{
   157  		Action:    utils.MigrateFlags(dump),
   158  		Name:      "dump",
   159  		Usage:     "Dump a specific block from storage",
   160  		ArgsUsage: "[? <blockHash> | <blockNum>]",
   161  		Flags: []cli.Flag{
   162  			utils.DataDirFlag,
   163  			utils.CacheFlag,
   164  			utils.IterativeOutputFlag,
   165  			utils.ExcludeCodeFlag,
   166  			utils.ExcludeStorageFlag,
   167  			utils.IncludeIncompletesFlag,
   168  			utils.StartKeyFlag,
   169  			utils.DumpLimitFlag,
   170  		},
   171  		Category: "BLOCKCHAIN COMMANDS",
   172  		Description: `
   173  This command dumps out the state for a given block (or latest, if none provided).
   174  `,
   175  	}
   176  )
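        // Typical invocations, assuming the binary is built as "geth" (the data
        // directory, file names and block numbers below are only illustrative):
        //
        //	geth init --datadir ./node0 genesis.json
        //	geth import --datadir ./node0 chain.rlp
        //	geth export --datadir ./node0 chain.rlp.gz 0 1000000
        //	geth dump --datadir ./node0 1000000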
   177  
   178  // initGenesis initialises the given JSON-format genesis file and writes it as
   179  // the zero'd block (i.e. genesis), or fails hard if it can't succeed.
   180  func initGenesis(ctx *cli.Context) error {
   181  	// Make sure we have a valid genesis JSON
   182  	genesisPath := ctx.Args().First()
   183  	if len(genesisPath) == 0 {
   184  		utils.Fatalf("Must supply path to genesis JSON file")
   185  	}
   186  	file, err := os.Open(genesisPath)
   187  	if err != nil {
   188  		utils.Fatalf("Failed to read genesis file: %v", err)
   189  	}
   190  	defer file.Close()
   191  
   192  	genesis := new(core.Genesis)
   193  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   194  		utils.Fatalf("invalid genesis file: %v", err)
   195  	}
   196  	// Open and initialise both full and light databases
   197  	stack, _ := makeConfigNode(ctx)
   198  	defer stack.Close()
   199  
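        	// The same genesis is committed to both the full-node ("chaindata") and
        	// light-client ("lightchaindata") databases, so the node can later be
        	// started in either mode and still find a matching genesis block.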
   200  	for _, name := range []string{"chaindata", "lightchaindata"} {
   201  		chaindb, err := stack.OpenDatabase(name, 0, 0, "", false)
   202  		if err != nil {
   203  			utils.Fatalf("Failed to open database: %v", err)
   204  		}
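        		// SetupGenesisBlock writes the genesis if the database is empty (and
        		// otherwise checks it for compatibility); only its hash is needed here,
        		// the returned chain config is discarded.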
   205  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   206  		if err != nil {
   207  			utils.Fatalf("Failed to write genesis block: %v", err)
   208  		}
   209  		chaindb.Close()
   210  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   211  	}
   212  	return nil
   213  }
   214  
   215  func dumpGenesis(ctx *cli.Context) error {
   216  	// TODO(rjl493456442) support loading from the custom datadir
   217  	genesis := utils.MakeGenesis(ctx)
   218  	if genesis == nil {
   219  		genesis = core.DefaultGenesisBlock()
   220  	}
   221  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   222  		utils.Fatalf("could not encode genesis: %v", err)
   223  	}
   224  	return nil
   225  }
   226  
   227  func importChain(ctx *cli.Context) error {
   228  	if len(ctx.Args()) < 1 {
   229  		utils.Fatalf("This command requires an argument.")
   230  	}
   231  	// Start metrics export if enabled
   232  	utils.SetupMetrics(ctx)
   233  	// Start system runtime metrics collection
   234  	go metrics.CollectProcessMetrics(3 * time.Second)
   235  
   236  	stack, _ := makeConfigNode(ctx)
   237  	defer stack.Close()
   238  
   239  	chain, db := utils.MakeChain(ctx, stack)
   240  	defer db.Close()
   241  
   242  	// Start periodically gathering memory profiles
   243  	var peakMemAlloc, peakMemSys uint64
   244  	go func() {
   245  		stats := new(runtime.MemStats)
   246  		for {
   247  			runtime.ReadMemStats(stats)
   248  			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
   249  				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
   250  			}
   251  			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
   252  				atomic.StoreUint64(&peakMemSys, stats.Sys)
   253  			}
   254  			time.Sleep(5 * time.Second)
   255  		}
   256  	}()
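        	// The sampling goroutine above never exits; the peaks it records are read
        	// back atomically after the import to print the memory summary below.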
   257  	// Import the chain
   258  	start := time.Now()
   259  
   260  	var importErr error
   261  
   262  	if len(ctx.Args()) == 1 {
   263  		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
   264  			importErr = err
   265  			log.Error("Import error", "err", err)
   266  		}
   267  	} else {
   268  		for _, arg := range ctx.Args() {
   269  			if err := utils.ImportChain(chain, arg); err != nil {
   270  				importErr = err
   271  				log.Error("Import error", "file", arg, "err", err)
   272  			}
   273  		}
   274  	}
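        	// importErr retains only the most recent failure, but any failure makes
        	// the command return a non-nil error once the stats below are printed.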
   275  	chain.Stop()
   276  	fmt.Printf("Import done in %v.\n\n", time.Since(start))
   277  
   278  	// Output pre-compaction stats, mostly to see the database thrashing caused by the import
   279  	showLeveldbStats(db)
   280  
   281  	// Print the memory statistics used by the importing
   282  	mem := new(runtime.MemStats)
   283  	runtime.ReadMemStats(mem)
   284  
   285  	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
   286  	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
   287  	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
   288  	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))
   289  
   290  	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
   291  		return nil
   292  	}
   293  
   294  	// Compact the entire database to more accurately measure disk io and print the stats
   295  	start = time.Now()
   296  	fmt.Println("Compacting entire database...")
   297  	if err := db.Compact(nil, nil); err != nil {
   298  		utils.Fatalf("Compaction failed: %v", err)
   299  	}
   300  	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
   301  
   302  	showLeveldbStats(db)
   303  	return importErr
   304  }
   305  
   306  func exportChain(ctx *cli.Context) error {
   307  	if len(ctx.Args()) < 1 {
   308  		utils.Fatalf("This command requires an argument.")
   309  	}
   310  
   311  	stack, _ := makeConfigNode(ctx)
   312  	defer stack.Close()
   313  
   314  	chain, _ := utils.MakeChain(ctx, stack)
   315  	start := time.Now()
   316  
   317  	var err error
   318  	fp := ctx.Args().First()
   319  	if len(ctx.Args()) < 3 {
   320  		err = utils.ExportChain(chain, fp)
   321  	} else {
   322  		// This can be improved to allow for numbers larger than 9223372036854775807
   323  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   324  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   325  		if ferr != nil || lerr != nil {
   326  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   327  		}
   328  		if first < 0 || last < 0 {
   329  			utils.Fatalf("Export error: block number must not be negative\n")
   330  		}
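        		// Bound the range by the fast-sync head, i.e. the highest block whose
        		// body is available locally (the associated state may trail behind it).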
   331  		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
   332  			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
   333  		}
   334  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   335  	}
   336  
   337  	if err != nil {
   338  		utils.Fatalf("Export error: %v\n", err)
   339  	}
   340  	fmt.Printf("Export done in %v\n", time.Since(start))
   341  	return nil
   342  }
   343  
   344  // importPreimages imports preimage data from the specified file.
   345  func importPreimages(ctx *cli.Context) error {
   346  	if len(ctx.Args()) < 1 {
   347  		utils.Fatalf("This command requires an argument.")
   348  	}
   349  
   350  	stack, _ := makeConfigNode(ctx)
   351  	defer stack.Close()
   352  
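        	// Open the chain database writable (the last MakeChainDatabase argument
        	// is the read-only flag), since the imported preimages are written into it.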
   353  	db := utils.MakeChainDatabase(ctx, stack, false)
   354  	start := time.Now()
   355  
   356  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   357  		utils.Fatalf("Import error: %v\n", err)
   358  	}
   359  	fmt.Printf("Import done in %v\n", time.Since(start))
   360  	return nil
   361  }
   362  
   363  // exportPreimages dumps the preimage data to the specified file as an RLP stream.
   364  func exportPreimages(ctx *cli.Context) error {
   365  	if len(ctx.Args()) < 1 {
   366  		utils.Fatalf("This command requires an argument.")
   367  	}
   368  
   369  	stack, _ := makeConfigNode(ctx)
   370  	defer stack.Close()
   371  
   372  	db := utils.MakeChainDatabase(ctx, stack, true)
   373  	start := time.Now()
   374  
   375  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   376  		utils.Fatalf("Export error: %v\n", err)
   377  	}
   378  	fmt.Printf("Export done in %v\n", time.Since(start))
   379  	return nil
   380  }
   381  
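        // parseDumpConfig resolves the optional block argument (hash, number or,
        // if omitted, the current head) plus the dump flags into a state.DumpConfig,
        // returning it together with the chain database and the state root to dump.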
   382  func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
   383  	db := utils.MakeChainDatabase(ctx, stack, true)
   384  	var header *types.Header
   385  	if ctx.NArg() > 1 {
   386  		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
   387  	}
   388  	if ctx.NArg() == 1 {
   389  		arg := ctx.Args().First()
   390  		if hashish(arg) {
   391  			hash := common.HexToHash(arg)
   392  			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
   393  				header = rawdb.ReadHeader(db, hash, *number)
   394  			} else {
   395  				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
   396  			}
   397  		} else {
   398  			number, err := strconv.Atoi(arg)
   399  			if err != nil {
   400  				return nil, nil, common.Hash{}, err
   401  			}
   402  			if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) {
   403  				header = rawdb.ReadHeader(db, hash, uint64(number))
   404  			} else {
   405  				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
   406  			}
   407  		}
   408  	} else {
   409  		// Use latest
   410  		header = rawdb.ReadHeadHeader(db)
   411  	}
   412  	if header == nil {
   413  		return nil, nil, common.Hash{}, errors.New("no head block found")
   414  	}
   415  	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
   416  	var start common.Hash
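        	// The state trie is keyed by Keccak256(address), so a 20-byte start value
        	// (a plain address) is hashed before use, while a 32-byte value is taken
        	// as an already-hashed key.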
   417  	switch len(startArg) {
   418  	case 0: // no start key given, dump from the beginning (zero hash)
   419  	case 32:
   420  		start = common.BytesToHash(startArg)
   421  	case 20:
   422  		start = crypto.Keccak256Hash(startArg)
   423  		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
   424  	default:
   425  		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
   426  	}
   427  	var conf = &state.DumpConfig{
   428  		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
   429  		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
   430  		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
   431  		Start:             start.Bytes(),
   432  		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
   433  	}
   434  	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
   435  		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
   436  		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
   437  	return conf, db, header.Root, nil
   438  }
   439  
   440  func dump(ctx *cli.Context) error {
   441  	stack, _ := makeConfigNode(ctx)
   442  	defer stack.Close()
   443  
   444  	conf, db, root, err := parseDumpConfig(ctx, stack)
   445  	if err != nil {
   446  		return err
   447  	}
   448  	state, err := state.New(root, state.NewDatabase(db), nil)
   449  	if err != nil {
   450  		return err
   451  	}
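        	// Iterative output streams one JSON object per account as it is reached;
        	// the default path collects everything into a single document keyed by
        	// address, which is why it cannot represent accounts whose address
        	// preimage is missing.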
   452  	if ctx.Bool(utils.IterativeOutputFlag.Name) {
   453  		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
   454  	} else {
   455  		if !conf.OnlyWithAddresses {
   456  			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
   457  				" otherwise the accounts will overwrite each other in the resulting mapping.\n")
   458  			return fmt.Errorf("incompatible options")
   459  		}
   460  		fmt.Println(string(state.Dump(conf)))
   461  	}
   462  	return nil
   463  }
   464  
   465  // hashish returns true for strings that look like hashes.
   466  func hashish(x string) bool {
   467  	_, err := strconv.Atoi(x)
   468  	return err != nil
   469  }