github.com/gilgames000/kcc-geth@v1.0.6/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"fmt"
    22  	"os"
    23  	"runtime"
    24  	"strconv"
    25  	"sync/atomic"
    26  	"time"
    27  
    28  	"github.com/ethereum/go-ethereum/cmd/utils"
    29  	"github.com/ethereum/go-ethereum/common"
    30  	"github.com/ethereum/go-ethereum/core"
    31  	"github.com/ethereum/go-ethereum/core/rawdb"
    32  	"github.com/ethereum/go-ethereum/core/state"
    33  	"github.com/ethereum/go-ethereum/core/types"
    34  	"github.com/ethereum/go-ethereum/eth/downloader"
    35  	"github.com/ethereum/go-ethereum/event"
    36  	"github.com/ethereum/go-ethereum/log"
    37  	"github.com/ethereum/go-ethereum/metrics"
    38  	"github.com/ethereum/go-ethereum/trie"
    39  	"gopkg.in/urfave/cli.v1"
    40  )
    41  
var (
	// initCommand bootstraps a chain database from a user-supplied genesis
	// JSON specification (see initGenesis below).
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the genesis spec selected by the flags (or
	// the built-in default) as JSON on stdout.
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand imports RLP-encoded block files into the local chain
	// (see importChain below).
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.TxLookupLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the local chain (or a block range of it) to an
	// RLP file (see exportChain below).
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads hash preimages from an RLP stream into
	// the chain database.
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
	The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand dumps the preimage database to an RLP stream.
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// copydbCommand syncs a fresh local chain from an existing chaindata
	// directory through a simulated downloader peer (see copyDb below).
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.MainnetFlag,
			utils.TxLookupLimitFlag,
			utils.TestnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	// dumpCommand prints the state of one or more blocks, addressed either
	// by hash or by number (see dump below).
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
)
   188  
   189  // initGenesis will initialise the given JSON format genesis file and writes it as
   190  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   191  func initGenesis(ctx *cli.Context) error {
   192  	// Make sure we have a valid genesis JSON
   193  	genesisPath := ctx.Args().First()
   194  	if len(genesisPath) == 0 {
   195  		utils.Fatalf("Must supply path to genesis JSON file")
   196  	}
   197  	file, err := os.Open(genesisPath)
   198  	if err != nil {
   199  		utils.Fatalf("Failed to read genesis file: %v", err)
   200  	}
   201  	defer file.Close()
   202  
   203  	genesis := new(core.Genesis)
   204  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   205  		utils.Fatalf("invalid genesis file: %v", err)
   206  	}
   207  	// Open and initialise both full and light databases
   208  	stack, _ := makeConfigNode(ctx)
   209  	defer stack.Close()
   210  
   211  	for _, name := range []string{"chaindata", "lightchaindata"} {
   212  		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
   213  		if err != nil {
   214  			utils.Fatalf("Failed to open database: %v", err)
   215  		}
   216  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   217  		if err != nil {
   218  			utils.Fatalf("Failed to write genesis block: %v", err)
   219  		}
   220  		chaindb.Close()
   221  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   222  	}
   223  	return nil
   224  }
   225  
   226  func dumpGenesis(ctx *cli.Context) error {
   227  	genesis := utils.MakeGenesis(ctx)
   228  	if genesis == nil {
   229  		genesis = core.DefaultGenesisBlock()
   230  	}
   231  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   232  		utils.Fatalf("could not encode genesis")
   233  	}
   234  	return nil
   235  }
   236  
// importChain imports one or more RLP-encoded block files into the local
// chain. With a single file any import error aborts with a non-nil result;
// with multiple files the remaining files are still processed and the last
// error is returned. Also reports memory/database statistics and, unless
// --nocompaction is set, compacts the database afterwards.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, false)
	defer db.Close()

	// Start periodically gathering memory profiles
	// NOTE(review): this sampler goroutine has no stop signal; it runs until
	// process exit, which is acceptable for a one-shot CLI command.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	// Remember the last failure but keep going across multiple input files.
	var importErr error

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	// Skip the expensive compaction pass if explicitly disabled.
	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}
   315  
   316  func exportChain(ctx *cli.Context) error {
   317  	if len(ctx.Args()) < 1 {
   318  		utils.Fatalf("This command requires an argument.")
   319  	}
   320  
   321  	stack, _ := makeConfigNode(ctx)
   322  	defer stack.Close()
   323  
   324  	chain, _ := utils.MakeChain(ctx, stack, true)
   325  	start := time.Now()
   326  
   327  	var err error
   328  	fp := ctx.Args().First()
   329  	if len(ctx.Args()) < 3 {
   330  		err = utils.ExportChain(chain, fp)
   331  	} else {
   332  		// This can be improved to allow for numbers larger than 9223372036854775807
   333  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   334  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   335  		if ferr != nil || lerr != nil {
   336  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   337  		}
   338  		if first < 0 || last < 0 {
   339  			utils.Fatalf("Export error: block number must be greater than 0\n")
   340  		}
   341  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   342  	}
   343  
   344  	if err != nil {
   345  		utils.Fatalf("Export error: %v\n", err)
   346  	}
   347  	fmt.Printf("Export done in %v\n", time.Since(start))
   348  	return nil
   349  }
   350  
   351  // importPreimages imports preimage data from the specified file.
   352  func importPreimages(ctx *cli.Context) error {
   353  	if len(ctx.Args()) < 1 {
   354  		utils.Fatalf("This command requires an argument.")
   355  	}
   356  
   357  	stack, _ := makeConfigNode(ctx)
   358  	defer stack.Close()
   359  
   360  	db := utils.MakeChainDatabase(ctx, stack)
   361  	start := time.Now()
   362  
   363  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   364  		utils.Fatalf("Import error: %v\n", err)
   365  	}
   366  	fmt.Printf("Import done in %v\n", time.Since(start))
   367  	return nil
   368  }
   369  
   370  // exportPreimages dumps the preimage data to specified json file in streaming way.
   371  func exportPreimages(ctx *cli.Context) error {
   372  	if len(ctx.Args()) < 1 {
   373  		utils.Fatalf("This command requires an argument.")
   374  	}
   375  
   376  	stack, _ := makeConfigNode(ctx)
   377  	defer stack.Close()
   378  
   379  	db := utils.MakeChainDatabase(ctx, stack)
   380  	start := time.Now()
   381  
   382  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   383  		utils.Fatalf("Export error: %v\n", err)
   384  	}
   385  	fmt.Printf("Export done in %v\n", time.Since(start))
   386  	return nil
   387  }
   388  
// copyDb syncs a fresh local chain from an existing chaindata/ancient
// directory pair by registering the source database as a simulated
// downloader peer and running a normal sync against it, then compacts
// the resulting database.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack, false)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	// Fast sync uses a bloom filter to speed up state trie lookups; give it
	// half of the configured cache budget (the other half goes to the source
	// database below).
	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	// NOTE(review): 63 is the advertised peer protocol version — confirm it
	// still matches the minimum the downloader accepts.
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	// Synchronise returns once the sync is started; poll until it finishes.
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
   444  
// dump prints the world state of each block named on the command line.
// Arguments are treated as block hashes if they are not parseable as
// decimal integers (see hashish), otherwise as block numbers.
func dump(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack, true)
	defer chainDb.Close()
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			// The Atoi error can be ignored: hashish(arg) being false
			// means arg already parsed as an integer.
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			// Open the state trie at this block's root; fails if the state
			// has been pruned or was never downloaded.
			state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil)
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
			if ctx.Bool(utils.IterativeOutputFlag.Name) {
				// Stream accounts one JSON object at a time; this is the
				// only mode that can safely include missing-preimage accounts.
				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
			} else {
				if includeMissing {
					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
						" otherwise the accounts will overwrite each other in the resulting mapping.")
				}
				// NOTE(review): printing includeMissing ahead of the JSON
				// dump makes the output non-JSON — looks like leftover debug
				// output; confirm whether callers depend on it.
				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
			}
		}
	}
	return nil
}
   483  
   484  // hashish returns true for strings that look like hashes.
   485  func hashish(x string) bool {
   486  	_, err := strconv.Atoi(x)
   487  	return err != nil
   488  }