github.com/jincm/wesharechain@v0.0.0-20210122032815-1537409ce26a/chain/cmd/geth/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/syndtr/goleveldb/leveldb/util"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will cause the command to fail. If several
files are used, processing will continue even if an individual file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
to if it already exists. If the file ends with .gz, the output will
be gzipped.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP-encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP-encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
)
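
// The commands above are registered on the geth root CLI application
// (typically in this package's main.go). Wrapping each Action with
// utils.MigrateFlags copies flag values set on the subcommand into the
// global context, so global flags can also be given after the subcommand name.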

// initGenesis initialises the given JSON-format genesis file and writes it
// out as the zero'd (genesis) block, or fails hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	defer stack.Close()

	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

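// importChain imports one or more RLP-encoded block files into the node's
// chain database, printing LevelDB and memory statistics afterwards and,
// unless compaction is disabled (utils.NoCompactionFlag), compacting the
// whole database to get a more accurate picture of the disk I/O involved.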
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	db := chainDb.(*ethdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	return nil
}

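// exportChain writes the blockchain to the file given as the first argument,
// either in full or restricted to the optional <blockNumFirst>..<blockNumLast>
// range. If the file name ends in .gz, the output is written gzipped.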
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)
	start := time.Now()

	if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified file in a
// streaming fashion.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)
	start := time.Now()

	if err := utils.ExportPreimages(diskdb, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

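// copyDb bootstraps a fresh local chain database from an existing chaindata
// directory: the source directory is wrapped as a fake downloader peer and
// the node synchronises against it, after which the new database is compacted.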
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}

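// removeDB deletes the full-node and light-node chain databases ("chaindata"
// and "lightchaindata") from the data directory, asking for confirmation
// before removing each one.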
func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
			logger.Info("Database doesn't exist, skipping", "path", dbdir)
			continue
		}
		// Confirm removal and execute
		fmt.Println(dbdir)
		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
		switch {
		case err != nil:
			utils.Fatalf("%v", err)
		case !confirm:
			logger.Warn("Database deletion aborted")
		default:
			start := time.Now()
			os.RemoveAll(dbdir)
			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
		}
	}
	return nil
}

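// dump prints the world state at each block given as an argument; arguments
// that look like hashes are resolved as block hashes, everything else is
// parsed as a block number.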
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes, i.e. anything that
// does not parse as a decimal integer.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}