github.com/cheng762/platon-go@v1.8.17-0.20190529111256-7deff2d7be26/cmd/platon/chaincmd.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"fmt"
    22  	"os"
    23  	"runtime"
    24  	"strconv"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	"github.com/PlatONnetwork/PlatON-Go/cmd/utils"
    29  	"github.com/PlatONnetwork/PlatON-Go/common"
    30  	"github.com/PlatONnetwork/PlatON-Go/console"
    31  	"github.com/PlatONnetwork/PlatON-Go/core"
    32  	"github.com/PlatONnetwork/PlatON-Go/core/state"
    33  	"github.com/PlatONnetwork/PlatON-Go/core/types"
    34  	"github.com/PlatONnetwork/PlatON-Go/eth/downloader"
    35  	"github.com/PlatONnetwork/PlatON-Go/ethdb"
    36  	"github.com/PlatONnetwork/PlatON-Go/event"
    37  	"github.com/PlatONnetwork/PlatON-Go/log"
    38  	"github.com/PlatONnetwork/PlatON-Go/trie"
    39  	"github.com/syndtr/goleveldb/leveldb/util"
    40  	"gopkg.in/urfave/cli.v1"
    41  )
    42  
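        // Blockchain management subcommands exposed by the platon command-line tool.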
    43  var (
    44  	initCommand = cli.Command{
    45  		Action:    utils.MigrateFlags(initGenesis),
    46  		Name:      "init",
    47  		Usage:     "Bootstrap and initialize a new genesis block",
    48  		ArgsUsage: "<genesisPath>",
    49  		Flags: []cli.Flag{
    50  			utils.DataDirFlag,
    51  		},
    52  		Category: "BLOCKCHAIN COMMANDS",
    53  		Description: `
    54  The init command initializes a new genesis block and definition for the network.
    55  This is a destructive action and changes the network in which you will be
    56  participating.
    57  
    58  It expects the genesis file as argument.`,
    59  	}
    60  	importCommand = cli.Command{
    61  		Action:    utils.MigrateFlags(importChain),
    62  		Name:      "import",
    63  		Usage:     "Import a blockchain file",
    64  		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
    65  		Flags: []cli.Flag{
    66  			utils.DataDirFlag,
    67  			utils.CacheFlag,
    68  			utils.SyncModeFlag,
    69  			utils.GCModeFlag,
    70  			utils.CacheDatabaseFlag,
    71  			utils.CacheGCFlag,
    72  		},
    73  		Category: "BLOCKCHAIN COMMANDS",
    74  		Description: `
    75  The import command imports blocks from RLP-encoded files. The input can be a single
    76  file containing several RLP-encoded blocks, or several files can be used.
    77  
    78  If only one file is used, an import error will result in failure. If several files are used,
    79  processing will proceed even if an individual file fails to import.`,
    80  	}
    81  	exportCommand = cli.Command{
    82  		Action:    utils.MigrateFlags(exportChain),
    83  		Name:      "export",
    84  		Usage:     "Export blockchain into file",
    85  		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
    86  		Flags: []cli.Flag{
    87  			utils.DataDirFlag,
    88  			utils.CacheFlag,
    89  			utils.SyncModeFlag,
    90  		},
    91  		Category: "BLOCKCHAIN COMMANDS",
    92  		Description: `
    93  Requires a first argument of the file to write to.
    94  Optional second and third arguments control the first and
    95  last block to write. In this mode, the file will be appended to
    96  if it already exists. If the file ends with .gz, the output will
    97  be gzipped.`,
    98  	}
    99  	importPreimagesCommand = cli.Command{
   100  		Action:    utils.MigrateFlags(importPreimages),
   101  		Name:      "import-preimages",
   102  		Usage:     "Import the preimage database from an RLP stream",
   103  		ArgsUsage: "<datafile>",
   104  		Flags: []cli.Flag{
   105  			utils.DataDirFlag,
   106  			utils.CacheFlag,
   107  			utils.SyncModeFlag,
   108  		},
   109  		Category: "BLOCKCHAIN COMMANDS",
   110  		Description: `
   111  The import-preimages command imports hash preimages from an RLP encoded stream.`,
   112  	}
   113  	exportPreimagesCommand = cli.Command{
   114  		Action:    utils.MigrateFlags(exportPreimages),
   115  		Name:      "export-preimages",
   116  		Usage:     "Export the preimage database into an RLP stream",
   117  		ArgsUsage: "<dumpfile>",
   118  		Flags: []cli.Flag{
   119  			utils.DataDirFlag,
   120  			utils.CacheFlag,
   121  			utils.SyncModeFlag,
   122  		},
   123  		Category: "BLOCKCHAIN COMMANDS",
   124  		Description: `
   125  The export-preimages command exports hash preimages to an RLP encoded stream.`,
   126  	}
   127  	copydbCommand = cli.Command{
   128  		Action:    utils.MigrateFlags(copyDb),
   129  		Name:      "copydb",
   130  		Usage:     "Create a local chain from a target chaindata folder",
   131  		ArgsUsage: "<sourceChaindataDir>",
   132  		Flags: []cli.Flag{
   133  			utils.DataDirFlag,
   134  			utils.CacheFlag,
   135  			utils.SyncModeFlag,
   136  			utils.FakePoWFlag,
   137  			utils.TestnetFlag,
   138  			utils.BetanetFlag,
   139  			utils.InnerTestnetFlag,
   140  			utils.InnerDevnetFlag,
   141  			utils.InnerTimeFlag,
   142  		},
   143  		Category: "BLOCKCHAIN COMMANDS",
   144  		Description: `
   145  The first argument must be the directory containing the blockchain to download from.`,
   146  	}
   147  	removedbCommand = cli.Command{
   148  		Action:    utils.MigrateFlags(removeDB),
   149  		Name:      "removedb",
   150  		Usage:     "Remove blockchain and state databases",
   151  		ArgsUsage: " ",
   152  		Flags: []cli.Flag{
   153  			utils.DataDirFlag,
   154  		},
   155  		Category: "BLOCKCHAIN COMMANDS",
   156  		Description: `
   157  Remove blockchain and state databases`,
   158  	}
   159  	dumpCommand = cli.Command{
   160  		Action:    utils.MigrateFlags(dump),
   161  		Name:      "dump",
   162  		Usage:     "Dump a specific block from storage",
   163  		ArgsUsage: "[<blockHash> | <blockNum>]...",
   164  		Flags: []cli.Flag{
   165  			utils.DataDirFlag,
   166  			utils.CacheFlag,
   167  			utils.SyncModeFlag,
   168  		},
   169  		Category: "BLOCKCHAIN COMMANDS",
   170  		Description: `
   171  The arguments are interpreted as block numbers or hashes.
   172  Use "platon dump 0" to dump the genesis block.`,
   173  	}
   174  )
   175  
   176  // initGenesis will initialise the given JSON format genesis file and write it as
   177  // the zeroth block (i.e. genesis), or will fail hard if it can't succeed.
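        //
        // A typical invocation (assuming the binary is built as "platon"; the data directory
        // and genesis file paths below are only illustrative) would be:
        //
        //	platon init --datadir ./node0 ./genesis.json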
   178  func initGenesis(ctx *cli.Context) error {
   179  	// Make sure we have a valid genesis JSON
   180  	genesisPath := ctx.Args().First()
   181  	if len(genesisPath) == 0 {
   182  		utils.Fatalf("Must supply path to genesis JSON file")
   183  	}
   184  	file, err := os.Open(genesisPath)
   185  	if err != nil {
   186  		utils.Fatalf("Failed to read genesis file: %v", err)
   187  	}
   188  	defer file.Close()
   189  
   190  	genesis := new(core.Genesis)
   191  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   192  		utils.Fatalf("invalid genesis file: %v", err)
   193  	}
   194  	// Open and initialise both full and light databases
   195  	stack := makeFullNode(ctx)
   196  	for _, name := range []string{"chaindata", "lightchaindata"} {
   197  		chaindb, err := stack.OpenDatabase(name, 0, 0)
   198  		if err != nil {
   199  			utils.Fatalf("Failed to open database: %v", err)
   200  		}
   201  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   202  		if err != nil {
   203  			utils.Fatalf("Failed to write genesis block: %v", err)
   204  		}
   205  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   206  	}
   207  	return nil
   208  }
   209  
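        // importChain imports one or more RLP-encoded block files into the local chain and
        // prints memory, database and trie-cache statistics once the import finishes.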
   210  func importChain(ctx *cli.Context) error {
   211  	if len(ctx.Args()) < 1 {
   212  		utils.Fatalf("This command requires an argument.")
   213  	}
   214  	stack := makeFullNode(ctx)
   215  	chain, chainDb := utils.MakeChain(ctx, stack)
   216  	defer chainDb.Close()
   217  
   218  	// Start periodically gathering memory profiles
   219  	var peakMemAlloc, peakMemSys uint64
   220  	go func() {
   221  		stats := new(runtime.MemStats)
   222  		for {
   223  			runtime.ReadMemStats(stats)
   224  			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
   225  				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
   226  			}
   227  			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
   228  				atomic.StoreUint64(&peakMemSys, stats.Sys)
   229  			}
   230  			time.Sleep(5 * time.Second)
   231  		}
   232  	}()
   233  	// Import the chain
   234  	start := time.Now()
   235  
   236  	if len(ctx.Args()) == 1 {
   237  		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
   238  			log.Error("Import error", "err", err)
   239  		}
   240  	} else {
   241  		for _, arg := range ctx.Args() {
   242  			if err := utils.ImportChain(chain, arg); err != nil {
   243  				log.Error("Import error", "file", arg, "err", err)
   244  			}
   245  		}
   246  	}
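        	// Stop the chain before collecting database and memory statistics.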
   247  	chain.Stop()
   248  	fmt.Printf("Import done in %v.\n\n", time.Since(start))
   249  
   250  	// Output pre-compaction stats, mostly to see the import thrashing
   251  	db := chainDb.(*ethdb.LDBDatabase)
   252  
   253  	stats, err := db.LDB().GetProperty("leveldb.stats")
   254  	if err != nil {
   255  		utils.Fatalf("Failed to read database stats: %v", err)
   256  	}
   257  	fmt.Println(stats)
   258  
   259  	ioStats, err := db.LDB().GetProperty("leveldb.iostats")
   260  	if err != nil {
   261  		utils.Fatalf("Failed to read database iostats: %v", err)
   262  	}
   263  	fmt.Println(ioStats)
   264  
   265  	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
   266  	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())
   267  
   268  	// Print the memory statistics recorded during the import
   269  	mem := new(runtime.MemStats)
   270  	runtime.ReadMemStats(mem)
   271  
   272  	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
   273  	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
   274  	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
   275  	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))
   276  
   277  	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
   278  		return nil
   279  	}
   280  
   281  	// Compact the entire database to more accurately measure disk io and print the stats
   282  	start = time.Now()
   283  	fmt.Println("Compacting entire database...")
   284  	if err = db.LDB().CompactRange(util.Range{}); err != nil {
   285  		utils.Fatalf("Compaction failed: %v", err)
   286  	}
   287  	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
   288  
   289  	stats, err = db.LDB().GetProperty("leveldb.stats")
   290  	if err != nil {
   291  		utils.Fatalf("Failed to read database stats: %v", err)
   292  	}
   293  	fmt.Println(stats)
   294  
   295  	ioStats, err = db.LDB().GetProperty("leveldb.iostats")
   296  	if err != nil {
   297  		utils.Fatalf("Failed to read database iostats: %v", err)
   298  	}
   299  	fmt.Println(ioStats)
   300  
   301  	return nil
   302  }
   303  
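        // exportChain writes the local chain to the given file in RLP form. Optional second
        // and third arguments restrict the export to a specific block range.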
   304  func exportChain(ctx *cli.Context) error {
   305  	if len(ctx.Args()) < 1 {
   306  		utils.Fatalf("This command requires an argument.")
   307  	}
   308  	stack := makeFullNode(ctx)
   309  	chain, _ := utils.MakeChain(ctx, stack)
   310  	start := time.Now()
   311  
   312  	var err error
   313  	fp := ctx.Args().First()
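        	// Without an explicit block range, export the whole chain; otherwise parse and validate the bounds.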
   314  	if len(ctx.Args()) < 3 {
   315  		err = utils.ExportChain(chain, fp)
   316  	} else {
   317  		// This can be improved to allow for numbers larger than 9223372036854775807
   318  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   319  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   320  		if ferr != nil || lerr != nil {
   321  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   322  		}
   323  		if first < 0 || last < 0 {
   324  			utils.Fatalf("Export error: block number must not be negative\n")
   325  		}
   326  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   327  	}
   328  
   329  	if err != nil {
   330  		utils.Fatalf("Export error: %v\n", err)
   331  	}
   332  	fmt.Printf("Export done in %v\n", time.Since(start))
   333  	return nil
   334  }
   335  
   336  // importPreimages imports preimage data from the specified file.
   337  func importPreimages(ctx *cli.Context) error {
   338  	if len(ctx.Args()) < 1 {
   339  		utils.Fatalf("This command requires an argument.")
   340  	}
   341  	stack := makeFullNode(ctx)
   342  	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)
   343  
   344  	start := time.Now()
   345  	if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
   346  		utils.Fatalf("Import error: %v\n", err)
   347  	}
   348  	fmt.Printf("Import done in %v\n", time.Since(start))
   349  	return nil
   350  }
   351  
   352  // exportPreimages dumps the preimage data to the specified file as an RLP stream.
   353  func exportPreimages(ctx *cli.Context) error {
   354  	if len(ctx.Args()) < 1 {
   355  		utils.Fatalf("This command requires an argument.")
   356  	}
   357  	stack := makeFullNode(ctx)
   358  	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)
   359  
   360  	start := time.Now()
   361  	if err := utils.ExportPreimages(diskdb, ctx.Args().First()); err != nil {
   362  		utils.Fatalf("Export error: %v\n", err)
   363  	}
   364  	fmt.Printf("Export done in %v\n", time.Since(start))
   365  	return nil
   366  }
   367  
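        // copyDb builds a local chain by syncing from a source chaindata directory, which is
        // exposed to the downloader as a simulated local peer, then compacts the resulting database.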
   368  func copyDb(ctx *cli.Context) error {
   369  	// Ensure we have a source chain directory to copy
   370  	if len(ctx.Args()) != 1 {
   371  		utils.Fatalf("Source chaindata directory path argument missing")
   372  	}
   373  	// Initialize a new chain for the running node to sync into
   374  	stack := makeFullNode(ctx)
   375  	chain, chainDb := utils.MakeChain(ctx, stack)
   376  
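        	// Drive the downloader with the sync mode configured for the running node.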
   377  	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
   378  	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)
   379  
   380  	// Create a source peer to satisfy downloader requests from
   381  	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
   382  	if err != nil {
   383  		return err
   384  	}
   385  	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
   386  	if err != nil {
   387  		return err
   388  	}
   389  	peer := downloader.NewFakePeer("local", db, hc, dl)
   390  	if err = dl.RegisterPeer("local", 63, peer); err != nil {
   391  		return err
   392  	}
   393  	// Synchronise with the simulated peer
   394  	start := time.Now()
   395  
   396  	currentHeader := hc.CurrentHeader()
   397  	if err = dl.Synchronise("local", currentHeader.Hash(), currentHeader.Number, syncmode); err != nil {
   398  		return err
   399  	}
   400  	for dl.Synchronising() {
   401  		time.Sleep(10 * time.Millisecond)
   402  	}
   403  	fmt.Printf("Database copy done in %v\n", time.Since(start))
   404  
   405  	// Compact the entire database to remove any sync overhead
   406  	start = time.Now()
   407  	fmt.Println("Compacting entire database...")
   408  	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
   409  		utils.Fatalf("Compaction failed: %v", err)
   410  	}
   411  	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
   412  
   413  	return nil
   414  }
   415  
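        // removeDB deletes the full-node and light-node chain databases from the data
        // directory, asking for confirmation before removing each one.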
   416  func removeDB(ctx *cli.Context) error {
   417  	stack, _ := makeConfigNode(ctx)
   418  
   419  	for _, name := range []string{"chaindata", "lightchaindata"} {
   420  		// Ensure the database exists in the first place
   421  		logger := log.New("database", name)
   422  
   423  		dbdir := stack.ResolvePath(name)
   424  		if !common.FileExist(dbdir) {
   425  			logger.Info("Database doesn't exist, skipping", "path", dbdir)
   426  			continue
   427  		}
   428  		// Confirm removal and execute
   429  		fmt.Println(dbdir)
   430  		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
   431  		switch {
   432  		case err != nil:
   433  			utils.Fatalf("%v", err)
   434  		case !confirm:
   435  			logger.Warn("Database deletion aborted")
   436  		default:
   437  			start := time.Now()
   438  			os.RemoveAll(dbdir)
   439  			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
   440  		}
   441  	}
   442  	return nil
   443  }
   444  
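        // dump looks up each argument as a block hash or number and prints a dump of that
        // block's state.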
   445  func dump(ctx *cli.Context) error {
   446  	stack := makeFullNode(ctx)
   447  	chain, chainDb := utils.MakeChain(ctx, stack)
   448  	for _, arg := range ctx.Args() {
   449  		var block *types.Block
   450  		if hashish(arg) {
   451  			block = chain.GetBlockByHash(common.HexToHash(arg))
   452  		} else {
   453  			num, _ := strconv.Atoi(arg)
   454  			block = chain.GetBlockByNumber(uint64(num))
   455  		}
   456  		if block == nil {
   457  			fmt.Println("{}")
   458  			utils.Fatalf("block not found")
   459  		} else {
   460  			state, err := state.New(block.Root(), state.NewDatabase(chainDb), block.Number(), block.Hash())
   461  			if err != nil {
   462  				utils.Fatalf("could not create new state: %v", err)
   463  			}
   464  			fmt.Printf("%s\n", state.Dump())
   465  		}
   466  	}
   467  	chainDb.Close()
   468  	return nil
   469  }
   470  
   471  // hashish returns true for strings that look like hashes.
   472  func hashish(x string) bool {
   473  	_, err := strconv.Atoi(x)
   474  	return err != nil
   475  }