github.com/n1ghtfa1l/go-vnt@v0.6.4-alpha.6/cmd/gvnt/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/pkg/errors"
	"github.com/syndtr/goleveldb/leveldb/util"
	"github.com/vntchain/go-vnt/cmd/utils"
	"github.com/vntchain/go-vnt/common"
	"github.com/vntchain/go-vnt/console"
	"github.com/vntchain/go-vnt/core"
	"github.com/vntchain/go-vnt/core/state"
	"github.com/vntchain/go-vnt/core/types"
	"github.com/vntchain/go-vnt/event"
	"github.com/vntchain/go-vnt/log"
	"github.com/vntchain/go-vnt/trie"
	"github.com/vntchain/go-vnt/vnt/downloader"
	"github.com/vntchain/go-vnt/vntdb"
	cli "gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are used,
processing will proceed even if an individual RLP file import fails.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "gvnt dump 0" to dump the genesis block.`,
	}
)

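// The invocations below are a hedged usage sketch for the commands defined above,
// based only on their Name and ArgsUsage fields; every file and directory name is a
// hypothetical placeholder rather than anything shipped with this repository:
//
//	gvnt init ./genesis.json              bootstrap a chain from a genesis definition
//	gvnt import blocks-0.rlp blocks-1.rlp import one or more RLP block dumps
//	gvnt export backup.rlp 0 1000         append blocks 0..1000 to backup.rlp
//	gvnt dump 0                           print the genesis state
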
// initGenesis will initialise the given JSON format genesis file and write it as
// the zero'd block (i.e. genesis), or will fail hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}

	// For DPoS consensus, check that the configured witness count matches the number of witness URLs and witnesses
	if genesis.Config != nil && genesis.Config.Dpos != nil {
		witnessNum := genesis.Config.Dpos.WitnessesNum
		if witnessNum != len(genesis.Config.Dpos.WitnessesUrl) || witnessNum != len(genesis.Witnesses) {
			return fmt.Errorf("the length of witnessesUrl [%d] and witnesses [%d] must be equal to witnessNum [%d]", len(genesis.Config.Dpos.WitnessesUrl), len(genesis.Witnesses), witnessNum)
		}
	} else {
		return errors.New("Dpos config should not be empty")
	}

	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

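// The sketch below shows a genesis fragment that would satisfy the DPoS witness
// check in initGenesis, i.e. len(witnessesUrl) == len(witnesses) == witnessesNum.
// It is illustrative only: the exact JSON keys depend on the struct tags of
// core.Genesis and its DPoS config, and the addresses and node URLs are
// hypothetical placeholders.
//
//	{
//	  "config": {
//	    "dpos": {
//	      "witnessesNum": 2,
//	      "witnessesUrl": ["<node-url-1>", "<node-url-2>"]
//	    }
//	  },
//	  "witnesses": ["0x<address-1>", "0x<address-2>"]
//	}
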
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	db := chainDb.(*vntdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	return nil
}

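// A hedged usage sketch for the import path above (file names are hypothetical):
//
//	gvnt import blocks-0.rlp blocks-1.rlp
//
// With a single file an import error is simply logged; with several files each
// failing file is logged and the remaining ones are still processed, matching the
// two branches above. The closing compaction step is skipped when
// utils.NoCompactionFlag is set.
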
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

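// Hedged export sketches (the output file name is a hypothetical placeholder):
//
//	gvnt export chain.rlp            export the whole chain to the file
//	gvnt export chain.rlp 100 200    append blocks 100..200 via ExportAppendChain
//
// The ranged, appending form is selected by the argument-count check above.
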
// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	diskdb := utils.MakeChainDatabase(ctx, stack).(*vntdb.LDBDatabase)

	start := time.Now()
	if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified JSON file in a streaming way.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	diskdb := utils.MakeChainDatabase(ctx, stack).(*vntdb.LDBDatabase)

	start := time.Now()
	if err := utils.ExportPreimages(diskdb, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)

	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := vntdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*vntdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}

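// A hedged copydb sketch (the source path is a hypothetical placeholder):
//
//	gvnt copydb /path/to/other-node/gvnt/chaindata
//
// The source chaindata is wrapped in a fake local peer and pulled in through the
// downloader using the configured sync mode, so copying can be cheaper than
// re-importing and re-executing every block from an RLP dump.
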
func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
			logger.Info("Database doesn't exist, skipping", "path", dbdir)
			continue
		}
		// Confirm removal and execute
		fmt.Println(dbdir)
		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
		switch {
		case err != nil:
			utils.Fatalf("%v", err)
		case !confirm:
			logger.Warn("Database deletion aborted")
		default:
			start := time.Now()
			os.RemoveAll(dbdir)
			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
		}
	}
	return nil
}

func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}
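
// For illustration: hashish("4096") is false because strconv.Atoi succeeds, so dump
// treats "4096" as a block number, while hashish("deadbeef") and hashish of any
// 0x-prefixed hash are true because Atoi fails, so those arguments are looked up as
// block hashes via common.HexToHash.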