git.pirl.io/community/pirl@v0.0.0-20201111064343-9d3d31ff74be/cmd/pirl/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"fmt"
    22  	"os"
    23  	"path/filepath"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"git.pirl.io/community/pirl/cmd/utils"
    30  	"git.pirl.io/community/pirl/common"
    31  	"git.pirl.io/community/pirl/console"
    32  	"git.pirl.io/community/pirl/core"
    33  	"git.pirl.io/community/pirl/core/rawdb"
    34  	"git.pirl.io/community/pirl/core/state"
    35  	"git.pirl.io/community/pirl/core/types"
    36  	"git.pirl.io/community/pirl/eth/downloader"
    37  	"git.pirl.io/community/pirl/event"
    38  	"git.pirl.io/community/pirl/log"
    39  	"git.pirl.io/community/pirl/trie"
    40  	"gopkg.in/urfave/cli.v1"
    41  )
    42  
var (
	// initCommand bootstraps fresh full and light-client databases from a
	// user-supplied genesis spec (see initGenesis).
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the active genesis spec as JSON (see dumpGenesis).
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand imports RLP-encoded block files into the local chain
	// (see importChain).
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand writes the chain (or a block range) to an RLP file
	// (see exportChain).
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads hash preimages from an RLP stream
	// (see importPreimages).
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
	The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand dumps the preimage database to an RLP stream
	// (see exportPreimages).
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// copydbCommand builds a local chain by syncing from another local
	// chaindata folder through a fake peer (see copyDb).
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from`,
	}
	// removedbCommand deletes the chain/state databases after confirmation
	// (see removeDB).
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	// dumpCommand prints the state at specific blocks, by hash or number
	// (see dump).
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
	// inspectCommand reports per-category storage usage of the database
	// (see inspect).
	inspectCommand = cli.Command{
		Action:    utils.MigrateFlags(inspect),
		Name:      "inspect",
		Usage:     "Inspect the storage size for each type of data in the database",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.AncientFlag,
			utils.CacheFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
	}
)
   204  
   205  // initGenesis will initialise the given JSON format genesis file and writes it as
   206  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   207  func initGenesis(ctx *cli.Context) error {
   208  	// Make sure we have a valid genesis JSON
   209  	genesisPath := ctx.Args().First()
   210  	if len(genesisPath) == 0 {
   211  		utils.Fatalf("Must supply path to genesis JSON file")
   212  	}
   213  	file, err := os.Open(genesisPath)
   214  	if err != nil {
   215  		utils.Fatalf("Failed to read genesis file: %v", err)
   216  	}
   217  	defer file.Close()
   218  
   219  	genesis := new(core.Genesis)
   220  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   221  		utils.Fatalf("invalid genesis file: %v", err)
   222  	}
   223  	// Open an initialise both full and light databases
   224  	stack := makeFullNode(ctx)
   225  	defer stack.Close()
   226  
   227  	for _, name := range []string{"chaindata", "lightchaindata"} {
   228  		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
   229  		if err != nil {
   230  			utils.Fatalf("Failed to open database: %v", err)
   231  		}
   232  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   233  		if err != nil {
   234  			utils.Fatalf("Failed to write genesis block: %v", err)
   235  		}
   236  		chaindb.Close()
   237  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   238  	}
   239  	return nil
   240  }
   241  
   242  func dumpGenesis(ctx *cli.Context) error {
   243  	genesis := utils.MakeGenesis(ctx)
   244  	if genesis == nil {
   245  		genesis = core.DefaultGenesisBlock()
   246  	}
   247  	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   248  		utils.Fatalf("could not encode genesis")
   249  	}
   250  	return nil
   251  }
   252  
// importChain imports blocks from one or more RLP-encoded files into the
// node's database, printing import timing, LevelDB statistics and Go memory
// statistics afterwards. Unless --nocompaction is set, the whole database is
// compacted at the end so the reported disk IO reflects steady-state size.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	// NOTE(review): this sampler goroutine has no stop signal; it runs until
	// process exit, which is acceptable for a one-shot CLI command. Peaks are
	// published via atomics because the main goroutine reads them below.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	// Single file: one failed import ends the whole run (logged, then we fall
	// through to stats). Multiple files: keep going past individual failures.
	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	// Users benchmarking raw import speed can skip the (slow) compaction pass.
	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	// Post-compaction stats for comparison with the pre-compaction snapshot.
	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)
	return nil
}
   342  
   343  func exportChain(ctx *cli.Context) error {
   344  	if len(ctx.Args()) < 1 {
   345  		utils.Fatalf("This command requires an argument.")
   346  	}
   347  	stack := makeFullNode(ctx)
   348  	defer stack.Close()
   349  
   350  	chain, _ := utils.MakeChain(ctx, stack)
   351  	start := time.Now()
   352  
   353  	var err error
   354  	fp := ctx.Args().First()
   355  	if len(ctx.Args()) < 3 {
   356  		err = utils.ExportChain(chain, fp)
   357  	} else {
   358  		// This can be improved to allow for numbers larger than 9223372036854775807
   359  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   360  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   361  		if ferr != nil || lerr != nil {
   362  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   363  		}
   364  		if first < 0 || last < 0 {
   365  			utils.Fatalf("Export error: block number must be greater than 0\n")
   366  		}
   367  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   368  	}
   369  
   370  	if err != nil {
   371  		utils.Fatalf("Export error: %v\n", err)
   372  	}
   373  	fmt.Printf("Export done in %v\n", time.Since(start))
   374  	return nil
   375  }
   376  
   377  // importPreimages imports preimage data from the specified file.
   378  func importPreimages(ctx *cli.Context) error {
   379  	if len(ctx.Args()) < 1 {
   380  		utils.Fatalf("This command requires an argument.")
   381  	}
   382  	stack := makeFullNode(ctx)
   383  	defer stack.Close()
   384  
   385  	db := utils.MakeChainDatabase(ctx, stack)
   386  	start := time.Now()
   387  
   388  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   389  		utils.Fatalf("Import error: %v\n", err)
   390  	}
   391  	fmt.Printf("Import done in %v\n", time.Since(start))
   392  	return nil
   393  }
   394  
   395  // exportPreimages dumps the preimage data to specified json file in streaming way.
   396  func exportPreimages(ctx *cli.Context) error {
   397  	if len(ctx.Args()) < 1 {
   398  		utils.Fatalf("This command requires an argument.")
   399  	}
   400  	stack := makeFullNode(ctx)
   401  	defer stack.Close()
   402  
   403  	db := utils.MakeChainDatabase(ctx, stack)
   404  	start := time.Now()
   405  
   406  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   407  		utils.Fatalf("Export error: %v\n", err)
   408  	}
   409  	fmt.Printf("Export done in %v\n", time.Since(start))
   410  	return nil
   411  }
   412  
// copyDb builds a fresh local chain by "syncing" from a source chaindata
// folder on disk: the source database is wrapped in a fake peer and the
// regular downloader is driven against it. Arguments:
// <sourceChaindataDir> <sourceAncientDir>.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	// Fast sync uses a bloom filter over the state trie; it is given half of
	// the configured cache allowance (the other half goes to the source db).
	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	// Register the fake peer; 63 is presumably the eth/63 protocol version —
	// TODO(review): confirm against downloader.RegisterPeer's expectations.
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	// Synchronise only kicks off the sync; poll until the downloader reports
	// completion.
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
   468  
   469  func removeDB(ctx *cli.Context) error {
   470  	stack, config := makeConfigNode(ctx)
   471  
   472  	// Remove the full node state database
   473  	path := stack.ResolvePath("chaindata")
   474  	if common.FileExist(path) {
   475  		confirmAndRemoveDB(path, "full node state database")
   476  	} else {
   477  		log.Info("Full node state database missing", "path", path)
   478  	}
   479  	// Remove the full node ancient database
   480  	path = config.Eth.DatabaseFreezer
   481  	switch {
   482  	case path == "":
   483  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   484  	case !filepath.IsAbs(path):
   485  		path = config.Node.ResolvePath(path)
   486  	}
   487  	if common.FileExist(path) {
   488  		confirmAndRemoveDB(path, "full node ancient database")
   489  	} else {
   490  		log.Info("Full node ancient database missing", "path", path)
   491  	}
   492  	// Remove the light node database
   493  	path = stack.ResolvePath("lightchaindata")
   494  	if common.FileExist(path) {
   495  		confirmAndRemoveDB(path, "light node database")
   496  	} else {
   497  		log.Info("Light node database missing", "path", path)
   498  	}
   499  	return nil
   500  }
   501  
   502  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   503  // folder if accepted.
   504  func confirmAndRemoveDB(database string, kind string) {
   505  	confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   506  	switch {
   507  	case err != nil:
   508  		utils.Fatalf("%v", err)
   509  	case !confirm:
   510  		log.Info("Database deletion skipped", "path", database)
   511  	default:
   512  		start := time.Now()
   513  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   514  			// If we're at the top level folder, recurse into
   515  			if path == database {
   516  				return nil
   517  			}
   518  			// Delete all the files, but not subfolders
   519  			if !info.IsDir() {
   520  				os.Remove(path)
   521  				return nil
   522  			}
   523  			return filepath.SkipDir
   524  		})
   525  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   526  	}
   527  }
   528  
// dump prints the world state at one or more blocks, each identified either
// by hash (hex-looking argument) or by decimal block number. Output shape is
// controlled by the iterative/exclude-code/exclude-storage/include-incompletes
// flags.
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			// NOTE(review): the parse error is ignored, so a malformed number
			// silently dumps block 0 instead of failing.
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			// Open the state at this block's root. NOTE(review): the local
			// `state` shadows the imported state package within this branch.
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
			if ctx.Bool(utils.IterativeOutputFlag.Name) {
				// Stream accounts one-by-one as JSON lines.
				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
			} else {
				// Non-iterative output cannot represent accounts with missing
				// preimages (keys would collide), so they are always excluded.
				if includeMissing {
					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
						" otherwise the accounts will overwrite each other in the resulting mapping.")
				}
				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
			}
		}
	}
	return nil
}
   567  
   568  func inspect(ctx *cli.Context) error {
   569  	node, _ := makeConfigNode(ctx)
   570  	defer node.Close()
   571  
   572  	_, chainDb := utils.MakeChain(ctx, node)
   573  	defer chainDb.Close()
   574  
   575  	return rawdb.InspectDatabase(chainDb)
   576  }
   577  
   578  // hashish returns true for strings that look like hashes.
   579  func hashish(x string) bool {
   580  	_, err := strconv.Atoi(x)
   581  	return err != nil
   582  }