github.com/clem109/go-ethereum@v1.8.3-0.20180316121352-fe6cf00f480a/cmd/geth/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/syndtr/goleveldb/leveldb/util"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as an argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are
used, processing will proceed even if an individual RLP file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be
appended to if it already exists.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "geth dump 0" to dump the genesis block.`,
	}
)
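
// Example invocations of the commands declared above (illustrative only; the
// genesis path, file names and block range are placeholders):
//
//	geth init /path/to/genesis.json
//	geth import blocks.rlp
//	geth export chain.rlp 0 1000
//	geth dump 0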

// initGenesis initialises the given JSON format genesis file and writes it as
// the zero'th block (i.e. the genesis block), or fails hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

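// importChain imports one or more RLP-encoded block files into the node's
// chain database, printing database, trie-cache and memory statistics once the
// import finishes, and compacting the database unless compaction is disabled
// via utils.NoCompactionFlag.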
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	db := chainDb.(*ethdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the import
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.LDB().GetProperty("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	return nil
}

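// exportChain writes the blockchain to the file named by the first argument,
// either in full or, when the optional second and third arguments are given,
// appending only the blocks in that number range.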
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

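// copyDb syncs the running node's chain database from a local source chaindata
// directory by registering that directory with the downloader as a fake peer,
// synchronising against it and finally compacting the freshly written database.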
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)

	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}

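// removeDB deletes the full and light node chain databases from the data
// directory, prompting the user for confirmation before removing each one.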
func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
			logger.Info("Database doesn't exist, skipping", "path", dbdir)
			continue
		}
		// Confirm removal and execute
		fmt.Println(dbdir)
		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
		switch {
		case err != nil:
			utils.Fatalf("%v", err)
		case !confirm:
			logger.Warn("Database deletion aborted")
		default:
			start := time.Now()
			os.RemoveAll(dbdir)
			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
		}
	}
	return nil
}

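// dump prints the state dump of each block given on the command line, where
// every argument is interpreted as either a block hash or a block number.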
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}