github.com/SmartMeshFoundation/Spectrum@v0.0.0-20220621030607-452a266fee1e/cmd/smc/chaincmd.go

// Copyright 2015 The Spectrum Authors
// This file is part of Spectrum.
//
// Spectrum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// Spectrum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with Spectrum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/SmartMeshFoundation/Spectrum/cmd/utils"
	"github.com/SmartMeshFoundation/Spectrum/common"
	"github.com/SmartMeshFoundation/Spectrum/console"
	"github.com/SmartMeshFoundation/Spectrum/core"
	"github.com/SmartMeshFoundation/Spectrum/core/state"
	"github.com/SmartMeshFoundation/Spectrum/core/types"
	"github.com/SmartMeshFoundation/Spectrum/eth/downloader"
	"github.com/SmartMeshFoundation/Spectrum/ethdb"
	"github.com/SmartMeshFoundation/Spectrum/event"
	"github.com/SmartMeshFoundation/Spectrum/log"
	"github.com/SmartMeshFoundation/Spectrum/trie"
	"github.com/syndtr/goleveldb/leveldb/util"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from RLP-encoded files. Either a single file
containing several RLP-encoded blocks or several such files can be used.

If only one file is used, an import error results in failure. If several files
are used, processing continues even if an individual file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended to
if it already exists.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "smc dump 0" to dump the genesis block.`,
	}
)
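
// For reference, typical invocations of the commands above might look like the
// following (assuming the binary is built as "smc"; file and directory paths
// are illustrative):
//
//	smc init /path/to/genesis.json
//	smc import chain.rlp
//	smc export chain.rlp 0 1000
//	smc copydb /other/datadir/chaindata
//	smc removedb
//	smc dump 0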

// initGenesis initialises the given JSON-format genesis file and writes it as
// the zero'th block (i.e. the genesis block), or fails hard if it can't succeed.
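//
// A minimal genesis file might look like the following sketch (assuming the
// genesis JSON fields of upstream go-ethereum, such as config, difficulty,
// gasLimit and alloc, are unchanged in this fork; all values are illustrative):
//
//	{
//	  "config":     {"chainId": 1},
//	  "difficulty": "0x400",
//	  "gasLimit":   "0x47b760",
//	  "alloc":      {}
//	}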
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

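// importChain imports one or more RLP-encoded block files into the chain
// database, printing LevelDB, trie cache and memory statistics afterwards and,
// unless utils.NoCompactionFlag is set, compacting the database to measure disk IO.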
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			utils.Fatalf("Import error: %v", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}

	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	db := chainDb.(*ethdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)
	fmt.Printf("Trie cache misses:  %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	return nil
}

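// exportChain writes the blockchain to the given file in RLP-encoded form,
// either in full or, if a first and last block number are supplied, appending
// only that range to the file.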
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

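// copyDb syncs the running node's fresh chain database from an existing local
// chaindata directory by registering it as a fake downloader peer, then compacts
// the resulting database to remove any sync overhead.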
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)

	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}

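// removeDB deletes the full and light chain databases under the node's data
// directory, prompting for confirmation before each removal.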
func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
			logger.Info("Database doesn't exist, skipping", "path", dbdir)
			continue
		}
		// Confirm removal and execute
		fmt.Println(dbdir)
		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
		switch {
		case err != nil:
			utils.Fatalf("%v", err)
		case !confirm:
			logger.Warn("Database deletion aborted")
		default:
			start := time.Now()
			os.RemoveAll(dbdir)
			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
		}
	}
	return nil
}

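// dump prints the full state (accounts, balances, storage) at each block given
// as an argument; non-numeric arguments are treated as block hashes.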
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}