github.com/ubiq/go-ethereum@v3.0.1+incompatible/cmd/gubiq/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ubiq/go-ubiq/cmd/utils"
	"github.com/ubiq/go-ubiq/common"
	"github.com/ubiq/go-ubiq/console"
	"github.com/ubiq/go-ubiq/core"
	"github.com/ubiq/go-ubiq/core/state"
	"github.com/ubiq/go-ubiq/core/types"
	"github.com/ubiq/go-ubiq/eth/downloader"
	"github.com/ubiq/go-ubiq/ethdb"
	"github.com/ubiq/go-ubiq/event"
	"github.com/ubiq/go-ubiq/log"
	"github.com/ubiq/go-ubiq/trie"
	"github.com/syndtr/goleveldb/leveldb/util"
	"gopkg.in/urfave/cli.v1"
)

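// Blockchain management subcommands for gubiq (category "BLOCKCHAIN COMMANDS").
// They are expected to be registered on the root cli.App elsewhere in this
// package (typically main.go).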
var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded dump. The dump can be a single file
containing several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will cause the command to fail. If several files
are used, processing will proceed even if an individual file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended to
if it already exists. If the file ends with .gz, the output will
be gzipped.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP-encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP-encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "gubiq dump 0" to dump the genesis block.`,
	}
)

// initGenesis initialises the given JSON-format genesis file and writes it as
// the zero'th block (i.e. the genesis block), or fails hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

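// importChain imports blocks from one or more RLP-encoded chain files and prints
// database, trie-cache and memory statistics once the import completes.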
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	db := chainDb.(*ethdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	return nil
}

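// exportChain writes the chain to the given file as RLP, optionally limited to
// the block range supplied as the second and third arguments (in which case the
// file is appended to if it already exists).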
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must be greater than or equal to 0\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)

	start := time.Now()
	if err := utils.ImportPreimages(diskdb, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified file in a streaming fashion.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	diskdb := utils.MakeChainDatabase(ctx, stack).(*ethdb.LDBDatabase)

	start := time.Now()
	if err := utils.ExportPreimages(diskdb, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

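// copyDb syncs the local chain from a source chaindata directory by exposing it
// through a simulated downloader peer, then compacts the freshly written database.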
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)

	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}

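// removeDB prompts for confirmation and then removes the full and light chain
// databases from the node's data directory.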
func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
			logger.Info("Database doesn't exist, skipping", "path", dbdir)
			continue
		}
		// Confirm removal and execute
		fmt.Println(dbdir)
		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
		switch {
		case err != nil:
			utils.Fatalf("%v", err)
		case !confirm:
			logger.Warn("Database deletion aborted")
		default:
			start := time.Now()
			os.RemoveAll(dbdir)
			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
		}
	}
	return nil
}

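// dump prints the state of one or more blocks, each identified either by hash
// or by number (see hashish below).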
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}