github.com/sberex/go-sberex@v1.8.2-0.20181113200658-ed96ac38f7d7/cmd/geth/chaincmd.go (about)

     1  // This file is part of the go-sberex library. The go-sberex library is 
     2  // free software: you can redistribute it and/or modify it under the terms 
     3  // of the GNU Lesser General Public License as published by the Free 
     4  // Software Foundation, either version 3 of the License, or (at your option)
     5  // any later version.
     6  //
     7  // The go-sberex library is distributed in the hope that it will be useful, 
     8  // but WITHOUT ANY WARRANTY; without even the implied warranty of
     9  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser 
    10  // General Public License <http://www.gnu.org/licenses/> for more details.
    11  
    12  package main
    13  
    14  import (
    15  	"encoding/json"
    16  	"fmt"
    17  	"os"
    18  	"runtime"
    19  	"strconv"
    20  	"sync/atomic"
    21  	"time"
    22  
    23  	"github.com/Sberex/go-sberex/cmd/utils"
    24  	"github.com/Sberex/go-sberex/common"
    25  	"github.com/Sberex/go-sberex/console"
    26  	"github.com/Sberex/go-sberex/core"
    27  	"github.com/Sberex/go-sberex/core/state"
    28  	"github.com/Sberex/go-sberex/core/types"
    29  	"github.com/Sberex/go-sberex/eth/downloader"
    30  	"github.com/Sberex/go-sberex/ethdb"
    31  	"github.com/Sberex/go-sberex/event"
    32  	"github.com/Sberex/go-sberex/log"
    33  	"github.com/Sberex/go-sberex/trie"
    34  	"github.com/syndtr/goleveldb/leveldb/util"
    35  	"gopkg.in/urfave/cli.v1"
    36  )
    37  
// Command definitions for the blockchain-management subcommands of geth.
// Each entry wires a cli.Command to its handler via utils.MigrateFlags,
// which copies global flag values into the command context before dispatch.
var (
	// initCommand writes a genesis block from a user-supplied JSON spec
	// into both the full and light databases. Destructive: it determines
	// which network this node participates in.
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// importCommand ingests one or more RLP-encoded block files into the
	// local chain database. With a single file, any import error aborts;
	// with multiple files, errors are logged and processing continues.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand serializes the chain (optionally a block range) to an
	// RLP file, appending if the file already exists in range mode.
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing.`,
	}
	// copydbCommand syncs a fresh local chain directly from another node's
	// chaindata directory via a simulated (fake) downloader peer.
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	// removedbCommand deletes the chaindata and lightchaindata directories
	// after an interactive confirmation prompt for each.
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	// dumpCommand prints the full state of one or more blocks, addressed
	// by hash or by number, as JSON to stdout.
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "sberex dump 0" to dump the genesis block.`,
	}
)
   140  
   141  // initGenesis will initialise the given JSON format genesis file and writes it as
   142  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   143  func initGenesis(ctx *cli.Context) error {
   144  	// Make sure we have a valid genesis JSON
   145  	genesisPath := ctx.Args().First()
   146  	if len(genesisPath) == 0 {
   147  		utils.Fatalf("Must supply path to genesis JSON file")
   148  	}
   149  	file, err := os.Open(genesisPath)
   150  	if err != nil {
   151  		utils.Fatalf("Failed to read genesis file: %v", err)
   152  	}
   153  	defer file.Close()
   154  
   155  	genesis := new(core.Genesis)
   156  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   157  		utils.Fatalf("invalid genesis file: %v", err)
   158  	}
   159  	// Open an initialise both full and light databases
   160  	stack := makeFullNode(ctx)
   161  	for _, name := range []string{"chaindata", "lightchaindata"} {
   162  		chaindb, err := stack.OpenDatabase(name, 0, 0)
   163  		if err != nil {
   164  			utils.Fatalf("Failed to open database: %v", err)
   165  		}
   166  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   167  		if err != nil {
   168  			utils.Fatalf("Failed to write genesis block: %v", err)
   169  		}
   170  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   171  	}
   172  	return nil
   173  }
   174  
// importChain implements the "import" command: it ingests one or more
// RLP-encoded block files into the node's chain database, then prints
// leveldb, trie-cache and Go runtime statistics, and finally (unless
// --nocompaction is set) compacts the whole database and prints the
// post-compaction stats.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	// NOTE: this goroutine never terminates; it is reaped when the process
	// exits at the end of the command, which is acceptable for a one-shot CLI.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			// Track high-water marks with atomics since the main goroutine
			// reads them concurrently after the import finishes.
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	// Single file: the error is logged once. Multiple files: each file is
	// imported independently so one bad file does not abort the rest
	// (matches the behavior documented in importCommand's Description).
	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	// NOTE(review): the assertion assumes the backing store is always a
	// leveldb instance here (full node); it would panic for other backends.
	db := chainDb.(*ethdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)
	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	// An empty util.Range compacts the full key space.
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	return nil
}
   255  
   256  func exportChain(ctx *cli.Context) error {
   257  	if len(ctx.Args()) < 1 {
   258  		utils.Fatalf("This command requires an argument.")
   259  	}
   260  	stack := makeFullNode(ctx)
   261  	chain, _ := utils.MakeChain(ctx, stack)
   262  	start := time.Now()
   263  
   264  	var err error
   265  	fp := ctx.Args().First()
   266  	if len(ctx.Args()) < 3 {
   267  		err = utils.ExportChain(chain, fp)
   268  	} else {
   269  		// This can be improved to allow for numbers larger than 9223372036854775807
   270  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   271  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   272  		if ferr != nil || lerr != nil {
   273  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   274  		}
   275  		if first < 0 || last < 0 {
   276  			utils.Fatalf("Export error: block number must be greater than 0\n")
   277  		}
   278  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   279  	}
   280  
   281  	if err != nil {
   282  		utils.Fatalf("Export error: %v\n", err)
   283  	}
   284  	fmt.Printf("Export done in %v", time.Since(start))
   285  	return nil
   286  }
   287  
// copyDb implements the "copydb" command: it bootstraps a fresh local chain
// by running the normal downloader sync protocol against a "fake peer" that
// serves blocks straight out of another node's chaindata directory, then
// compacts the resulting database.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)

	// Resolve the requested sync mode (full/fast/light) from the CLI flag.
	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	// Register the fake peer under protocol version 63 so the downloader
	// treats the local directory like a remote eth/63 node.
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	// Sync up to the source chain's head, using its total difficulty as the
	// advertised TD of the fake peer.
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	// Synchronise returns once the sync is started; poll until it completes.
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}
   335  
   336  func removeDB(ctx *cli.Context) error {
   337  	stack, _ := makeConfigNode(ctx)
   338  
   339  	for _, name := range []string{"chaindata", "lightchaindata"} {
   340  		// Ensure the database exists in the first place
   341  		logger := log.New("database", name)
   342  
   343  		dbdir := stack.ResolvePath(name)
   344  		if !common.FileExist(dbdir) {
   345  			logger.Info("Database doesn't exist, skipping", "path", dbdir)
   346  			continue
   347  		}
   348  		// Confirm removal and execute
   349  		fmt.Println(dbdir)
   350  		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
   351  		switch {
   352  		case err != nil:
   353  			utils.Fatalf("%v", err)
   354  		case !confirm:
   355  			logger.Warn("Database deletion aborted")
   356  		default:
   357  			start := time.Now()
   358  			os.RemoveAll(dbdir)
   359  			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
   360  		}
   361  	}
   362  	return nil
   363  }
   364  
   365  func dump(ctx *cli.Context) error {
   366  	stack := makeFullNode(ctx)
   367  	chain, chainDb := utils.MakeChain(ctx, stack)
   368  	for _, arg := range ctx.Args() {
   369  		var block *types.Block
   370  		if hashish(arg) {
   371  			block = chain.GetBlockByHash(common.HexToHash(arg))
   372  		} else {
   373  			num, _ := strconv.Atoi(arg)
   374  			block = chain.GetBlockByNumber(uint64(num))
   375  		}
   376  		if block == nil {
   377  			fmt.Println("{}")
   378  			utils.Fatalf("block not found")
   379  		} else {
   380  			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
   381  			if err != nil {
   382  				utils.Fatalf("could not create new state: %v", err)
   383  			}
   384  			fmt.Printf("%s\n", state.Dump())
   385  		}
   386  	}
   387  	chainDb.Close()
   388  	return nil
   389  }
   390  
   391  // hashish returns true for strings that look like hashes.
   392  func hashish(x string) bool {
   393  	_, err := strconv.Atoi(x)
   394  	return err != nil
   395  }