github.com/samgwo/go-ethereum@v1.8.2-0.20180302101319-49bcb5fbd55e/cmd/geth/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/syndtr/goleveldb/leveldb/util"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are used,
processing will proceed even if an individual RLP file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
to if it already exists.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "geth dump 0" to dump the genesis block.`,
	}
)

// initGenesis initialises the given JSON-format genesis file and writes it as
// the zeroth block (i.e. genesis), or fails hard if it cannot succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}
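
// importChain imports one or more RLP-encoded block files into the node's chain
// database, printing database and memory statistics once the import is done.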
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	db := chainDb.(*ethdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)
	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	return nil
}
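
// exportChain writes the blockchain, or an explicit block range of it, to the
// given file in RLP-encoded form.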
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()
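
	// Export the entire chain, or only the requested block range if bounds were given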
	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}
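
// copyDb creates a fresh local chain database and fills it by synchronising,
// through a simulated local peer, from the source chaindata directory given
// as argument.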
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
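
	// Resolve the requested sync mode and set up a standalone downloader to sync into the new chain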
	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}
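
// removeDB deletes the full and light node chain databases, asking the user
// for confirmation before each removal.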
func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
			logger.Info("Database doesn't exist, skipping", "path", dbdir)
			continue
		}
		// Confirm removal and execute
		fmt.Println(dbdir)
		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
		switch {
		case err != nil:
			utils.Fatalf("%v", err)
		case !confirm:
			logger.Warn("Database deletion aborted")
		default:
			start := time.Now()
			os.RemoveAll(dbdir)
			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
		}
	}
	return nil
}
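
// dump prints the state dump of each block given as argument, which may be
// specified either by hash or by number.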
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}