github.com/bigzoro/my_simplechain@v0.0.0-20240315012955-8ad0a2a29bb9/cmd/sipe/chaincmd.go (about)

     1  // Copyright 2015 The go-simplechain Authors
     2  // This file is part of go-simplechain.
     3  //
     4  // go-simplechain is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-simplechain is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-simplechain. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"fmt"
    22  	"os"
    23  	"path/filepath"
    24  	"runtime"
    25  	"strconv"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/bigzoro/my_simplechain/cmd/utils"
    30  	"github.com/bigzoro/my_simplechain/common"
    31  	"github.com/bigzoro/my_simplechain/common/hexutil"
    32  	"github.com/bigzoro/my_simplechain/consensus/hotstuff"
    33  	bls "github.com/bigzoro/my_simplechain/consensus/hotstuff/bls12-381"
    34  	hots "github.com/bigzoro/my_simplechain/consensus/hotstuff/common"
    35  	"github.com/bigzoro/my_simplechain/console"
    36  	"github.com/bigzoro/my_simplechain/core"
    37  	"github.com/bigzoro/my_simplechain/core/rawdb"
    38  	"github.com/bigzoro/my_simplechain/core/state"
    39  	"github.com/bigzoro/my_simplechain/core/types"
    40  	"github.com/bigzoro/my_simplechain/eth/downloader"
    41  	"github.com/bigzoro/my_simplechain/ethdb"
    42  	"github.com/bigzoro/my_simplechain/event"
    43  	"github.com/bigzoro/my_simplechain/log"
    44  	"github.com/bigzoro/my_simplechain/node"
    45  	"github.com/bigzoro/my_simplechain/trie"
    46  	"gopkg.in/urfave/cli.v1"
    47  )
    48  
    49  var (
    50  	initCommand = cli.Command{
    51  		Action:    utils.MigrateFlags(initGenesis),
    52  		Name:      "init",
    53  		Usage:     "Bootstrap and initialize a new genesis block",
    54  		ArgsUsage: "<genesisPath>",
    55  		Flags: []cli.Flag{
    56  			utils.DataDirFlag,
    57  		},
    58  		Category: "BLOCKCHAIN COMMANDS",
    59  		Description: `
    60  The init command initializes a new genesis block and definition for the network.
    61  This is a destructive action and changes the network in which you will be
    62  participating.
    63  
    64  It expects the genesis file as argument.`,
    65  	}
    66  	importCommand = cli.Command{
    67  		Action:    utils.MigrateFlags(importChain),
    68  		Name:      "import",
    69  		Usage:     "Import a blockchain file",
    70  		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
    71  		Flags: []cli.Flag{
    72  			utils.DataDirFlag,
    73  			utils.CacheFlag,
    74  			utils.SyncModeFlag,
    75  			utils.GCModeFlag,
    76  			utils.CacheDatabaseFlag,
    77  			utils.CacheGCFlag,
    78  		},
    79  		Category: "BLOCKCHAIN COMMANDS",
    80  		Description: `
    81  The import command imports blocks from an RLP-encoded form. The form can be one file
    82  with several RLP-encoded blocks, or several files can be used.
    83  
    84  If only one file is used, import error will result in failure. If several files are used,
    85  processing will proceed even if an individual RLP-file import failure occurs.`,
    86  	}
    87  	exportCommand = cli.Command{
    88  		Action:    utils.MigrateFlags(exportChain),
    89  		Name:      "export",
    90  		Usage:     "Export blockchain into file",
    91  		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
    92  		Flags: []cli.Flag{
    93  			utils.DataDirFlag,
    94  			utils.CacheFlag,
    95  			utils.SyncModeFlag,
    96  		},
    97  		Category: "BLOCKCHAIN COMMANDS",
    98  		Description: `
    99  Requires a first argument of the file to write to.
   100  Optional second and third arguments control the first and
   101  last block to write. In this mode, the file will be appended
   102  if already existing. If the file ends with .gz, the output will
   103  be gzipped.`,
   104  	}
   105  	importPreimagesCommand = cli.Command{
   106  		Action:    utils.MigrateFlags(importPreimages),
   107  		Name:      "import-preimages",
   108  		Usage:     "Import the preimage database from an RLP stream",
   109  		ArgsUsage: "<datafile>",
   110  		Flags: []cli.Flag{
   111  			utils.DataDirFlag,
   112  			utils.CacheFlag,
   113  			utils.SyncModeFlag,
   114  		},
   115  		Category: "BLOCKCHAIN COMMANDS",
   116  		Description: `
   117  	The import-preimages command imports hash preimages from an RLP encoded stream.`,
   118  	}
   119  	exportPreimagesCommand = cli.Command{
   120  		Action:    utils.MigrateFlags(exportPreimages),
   121  		Name:      "export-preimages",
   122  		Usage:     "Export the preimage database into an RLP stream",
   123  		ArgsUsage: "<dumpfile>",
   124  		Flags: []cli.Flag{
   125  			utils.DataDirFlag,
   126  			utils.CacheFlag,
   127  			utils.SyncModeFlag,
   128  		},
   129  		Category: "BLOCKCHAIN COMMANDS",
   130  		Description: `
   131  The export-preimages command export hash preimages to an RLP encoded stream`,
   132  	}
   133  	copydbCommand = cli.Command{
   134  		Action:    utils.MigrateFlags(copyDb),
   135  		Name:      "copydb",
   136  		Usage:     "Create a local chain from a target chaindata folder",
   137  		ArgsUsage: "<sourceChaindataDir>",
   138  		Flags: []cli.Flag{
   139  			utils.DataDirFlag,
   140  			utils.CacheFlag,
   141  			utils.SyncModeFlag,
   142  			utils.FakePoWFlag,
   143  			utils.TestnetFlag,
   144  		},
   145  		Category: "BLOCKCHAIN COMMANDS",
   146  		Description: `
   147  The first argument must be the directory containing the blockchain to download from`,
   148  	}
   149  	removedbCommand = cli.Command{
   150  		Action:    utils.MigrateFlags(removeDB),
   151  		Name:      "removedb",
   152  		Usage:     "Remove blockchain and state databases",
   153  		ArgsUsage: " ",
   154  		Flags: []cli.Flag{
   155  			utils.DataDirFlag,
   156  		},
   157  		Category: "BLOCKCHAIN COMMANDS",
   158  		Description: `
   159  Remove blockchain and state databases`,
   160  	}
   161  	dumpCommand = cli.Command{
   162  		Action:    utils.MigrateFlags(dump),
   163  		Name:      "dump",
   164  		Usage:     "Dump a specific block from storage",
   165  		ArgsUsage: "[<blockHash> | <blockNum>]...",
   166  		Flags: []cli.Flag{
   167  			utils.DataDirFlag,
   168  			utils.CacheFlag,
   169  			utils.SyncModeFlag,
   170  			utils.IterativeOutputFlag,
   171  			utils.ExcludeCodeFlag,
   172  			utils.ExcludeStorageFlag,
   173  			utils.IncludeIncompletesFlag,
   174  		},
   175  		Category: "BLOCKCHAIN COMMANDS",
   176  		Description: `
   177  The arguments are interpreted as block numbers or hashes.
   178  Use "ethereum dump 0" to dump the genesis block.`,
   179  	}
   180  	inspectCommand = cli.Command{
   181  		Action:    utils.MigrateFlags(inspect),
   182  		Name:      "inspect",
   183  		Usage:     "Inspect the storage size for each type of data in the database",
   184  		ArgsUsage: " ",
   185  		Flags: []cli.Flag{
   186  			utils.DataDirFlag,
   187  			utils.AncientFlag,
   188  			utils.CacheFlag,
   189  			utils.TestnetFlag,
   190  			utils.SyncModeFlag,
   191  		},
   192  		Category: "BLOCKCHAIN COMMANDS",
   193  	}
   194  	genScretKeyCommand = cli.Command{
   195  		Action:    utils.MigrateFlags(geneBLS12381Sec),
   196  		Name:      "genbls12sec",
   197  		Usage:     "generate a bls12-385 curve based secret key",
   198  		ArgsUsage: " ",
   199  		Flags: []cli.Flag{
   200  			utils.DataDirFlag,
   201  		},
   202  		Category: "BLOCKCHAIN COMMANDS",
   203  		Description: `
   204  		The genbls12sec command generate a bls12-385 curve based secret key and save it to the file "hotstuff-sec".
   205  		`,
   206  	}
   207  	aggregateCommand = cli.Command{
   208  		Action:   utils.MigrateFlags(aggregateBLS),
   209  		Name:     "aggregatebls",
   210  		Usage:    "Aggregate bls partial signature.",
   211  		Flags:    []cli.Flag{},
   212  		Category: "BLOCKCHAIN COMMANDS",
   213  	}
   214  )
   215  
   216  // initGenesis will initialise the given JSON format genesis file and writes it as
   217  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   218  func initGenesis(ctx *cli.Context) error {
   219  	// Make sure we have a valid genesis JSON
   220  	genesisPath := ctx.Args().First()
   221  	if len(genesisPath) == 0 {
   222  		utils.Fatalf("Must supply path to genesis JSON file")
   223  	}
   224  	file, err := os.Open(genesisPath)
   225  	if err != nil {
   226  		utils.Fatalf("Failed to read genesis file: %v", err)
   227  	}
   228  	defer file.Close()
   229  
   230  	genesis := new(core.Genesis)
   231  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   232  		utils.Fatalf("invalid genesis file: %v", err)
   233  	}
   234  
   235  	var snap interface {
   236  		Store(ethdb.Database) error
   237  		Hash() (hash common.Hash)
   238  	}
   239  	if genesis.Config.Hotstuff != nil {
   240  		conf := genesis.Config.Hotstuff
   241  		ids := make([]hots.ID, len(conf.Council))
   242  		pks := make([]*bls.PublicKey, 0, len(conf.Council))
   243  		for i := range conf.Council {
   244  			ids[i].SetUint32(conf.Council[i].ID)
   245  			pubkey := new(bls.PublicKey)
   246  			if err := pubkey.FromBytes(conf.Council[i].PublicKey); err != nil {
   247  				utils.Fatalf("public key recover: %v", err)
   248  			}
   249  			pks = append(pks, pubkey)
   250  		}
   251  
   252  		snap = hotstuff.NewSnapshot(ids, pks)
   253  		snaphash := snap.Hash()
   254  		genesis.ExtraData = append(snaphash[:], genesis.ExtraData...)
   255  	}
   256  	// Open an initialise both full and light databases
   257  	stack := makeFullNode(ctx)
   258  
   259  	defer stack.Close()
   260  
   261  	for _, name := range []string{"chaindata", "lightchaindata"} {
   262  		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
   263  		if err != nil {
   264  			utils.Fatalf("Failed to open database: %v", err)
   265  		}
   266  		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
   267  		if err != nil {
   268  			utils.Fatalf("Failed to write genesis block: %v", err)
   269  		}
   270  		if genesis.Config.Hotstuff != nil {
   271  			if err := snap.Store(chaindb); err != nil {
   272  				utils.Fatalf("Failed to write snapshot: %v", err)
   273  			}
   274  		}
   275  		chaindb.Close()
   276  		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
   277  	}
   278  	return nil
   279  }
   280  
// importChain imports one or more RLP-encoded block files into the local
// chain, printing database and memory statistics afterwards and finally
// (unless --nocompaction is set) compacting the database.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	// NOTE(review): this goroutine never terminates; acceptable here only
	// because the process exits when the command returns.
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	// A single file is treated as fatal on error; with multiple files the
	// import continues past individual failures (see command description).
	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	// Post-compaction stats, for comparison against the pre-compaction ones.
	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)
	return nil
}
   370  
   371  func exportChain(ctx *cli.Context) error {
   372  	if len(ctx.Args()) < 1 {
   373  		utils.Fatalf("This command requires an argument.")
   374  	}
   375  	stack := makeFullNode(ctx)
   376  	defer stack.Close()
   377  
   378  	chain, _ := utils.MakeChain(ctx, stack)
   379  	start := time.Now()
   380  
   381  	var err error
   382  	fp := ctx.Args().First()
   383  	if len(ctx.Args()) < 3 {
   384  		err = utils.ExportChain(chain, fp)
   385  	} else {
   386  		// This can be improved to allow for numbers larger than 9223372036854775807
   387  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   388  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   389  		if ferr != nil || lerr != nil {
   390  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   391  		}
   392  		if first < 0 || last < 0 {
   393  			utils.Fatalf("Export error: block number must be greater than 0\n")
   394  		}
   395  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   396  	}
   397  
   398  	if err != nil {
   399  		utils.Fatalf("Export error: %v\n", err)
   400  	}
   401  	fmt.Printf("Export done in %v\n", time.Since(start))
   402  	return nil
   403  }
   404  
   405  // importPreimages imports preimage data from the specified file.
   406  func importPreimages(ctx *cli.Context) error {
   407  	if len(ctx.Args()) < 1 {
   408  		utils.Fatalf("This command requires an argument.")
   409  	}
   410  	stack := makeFullNode(ctx)
   411  	defer stack.Close()
   412  
   413  	db := utils.MakeChainDatabase(ctx, stack)
   414  	start := time.Now()
   415  
   416  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   417  		utils.Fatalf("Import error: %v\n", err)
   418  	}
   419  	fmt.Printf("Import done in %v\n", time.Since(start))
   420  	return nil
   421  }
   422  
   423  // exportPreimages dumps the preimage data to specified json file in streaming way.
   424  func exportPreimages(ctx *cli.Context) error {
   425  	if len(ctx.Args()) < 1 {
   426  		utils.Fatalf("This command requires an argument.")
   427  	}
   428  	stack := makeFullNode(ctx)
   429  	defer stack.Close()
   430  
   431  	db := utils.MakeChainDatabase(ctx, stack)
   432  	start := time.Now()
   433  
   434  	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
   435  		utils.Fatalf("Export error: %v\n", err)
   436  	}
   437  	fmt.Printf("Export done in %v\n", time.Since(start))
   438  	return nil
   439  }
   440  
// copyDb bootstraps a fresh local chain by running the downloader against a
// simulated "local" peer backed by an existing chaindata/ancient directory,
// then compacts the resulting database.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	// Fast sync uses a bloom filter over the trie to avoid redundant lookups;
	// half of the --cache budget is dedicated to it.
	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	// 63 is presumably the eth protocol version advertised by the fake peer
	// — confirm against the downloader's supported versions.
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	// Synchronise returns before the sync fully completes; poll until done.
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
   496  
   497  func removeDB(ctx *cli.Context) error {
   498  	stack, config := makeConfigNode(ctx)
   499  
   500  	// Remove the full node state database
   501  	path := stack.ResolvePath("chaindata")
   502  	if common.FileExist(path) {
   503  		confirmAndRemoveDB(path, "full node state database")
   504  	} else {
   505  		log.Info("Full node state database missing", "path", path)
   506  	}
   507  	// Remove the full node ancient database
   508  	path = config.Eth.DatabaseFreezer
   509  	switch {
   510  	case path == "":
   511  		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
   512  	case !filepath.IsAbs(path):
   513  		path = config.Node.ResolvePath(path)
   514  	}
   515  	if common.FileExist(path) {
   516  		confirmAndRemoveDB(path, "full node ancient database")
   517  	} else {
   518  		log.Info("Full node ancient database missing", "path", path)
   519  	}
   520  	// Remove the light node database
   521  	path = stack.ResolvePath("lightchaindata")
   522  	if common.FileExist(path) {
   523  		confirmAndRemoveDB(path, "light node database")
   524  	} else {
   525  		log.Info("Light node database missing", "path", path)
   526  	}
   527  	return nil
   528  }
   529  
   530  // confirmAndRemoveDB prompts the user for a last confirmation and removes the
   531  // folder if accepted.
   532  func confirmAndRemoveDB(database string, kind string) {
   533  	confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
   534  	switch {
   535  	case err != nil:
   536  		utils.Fatalf("%v", err)
   537  	case !confirm:
   538  		log.Info("Database deletion skipped", "path", database)
   539  	default:
   540  		start := time.Now()
   541  		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
   542  			// If we're at the top level folder, recurse into
   543  			if path == database {
   544  				return nil
   545  			}
   546  			// Delete all the files, but not subfolders
   547  			if !info.IsDir() {
   548  				os.Remove(path)
   549  				return nil
   550  			}
   551  			return filepath.SkipDir
   552  		})
   553  		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
   554  	}
   555  }
   556  
   557  func dump(ctx *cli.Context) error {
   558  	stack := makeFullNode(ctx)
   559  	defer stack.Close()
   560  
   561  	chain, chainDb := utils.MakeChain(ctx, stack)
   562  	defer chainDb.Close()
   563  	for _, arg := range ctx.Args() {
   564  		var block *types.Block
   565  		if hashish(arg) {
   566  			block = chain.GetBlockByHash(common.HexToHash(arg))
   567  		} else {
   568  			num, _ := strconv.Atoi(arg)
   569  			block = chain.GetBlockByNumber(uint64(num))
   570  		}
   571  		if block == nil {
   572  			fmt.Println("{}")
   573  			utils.Fatalf("block not found")
   574  		} else {
   575  			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
   576  			if err != nil {
   577  				utils.Fatalf("could not create new state: %v", err)
   578  			}
   579  			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
   580  			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
   581  			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
   582  			if ctx.Bool(utils.IterativeOutputFlag.Name) {
   583  				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
   584  			} else {
   585  				if includeMissing {
   586  					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
   587  						" otherwise the accounts will overwrite each other in the resulting mapping.")
   588  				}
   589  				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
   590  			}
   591  		}
   592  	}
   593  	return nil
   594  }
   595  
   596  func inspect(ctx *cli.Context) error {
   597  	node, _ := makeConfigNode(ctx)
   598  	defer node.Close()
   599  
   600  	_, chainDb := utils.MakeChain(ctx, node)
   601  	defer chainDb.Close()
   602  
   603  	return rawdb.InspectDatabase(chainDb)
   604  }
   605  
   606  // hashish returns true for strings that look like hashes.
   607  func hashish(x string) bool {
   608  	_, err := strconv.Atoi(x)
   609  	return err != nil
   610  }
   611  
   612  func geneBLS12381Sec(ctx *cli.Context) error {
   613  	privkey := bls.GeneratePrivateKey()
   614  	path := node.DefaultDataDir()
   615  	if ctx.GlobalIsSet(utils.DataDirFlag.Name) {
   616  		path = ctx.GlobalString(utils.DataDirFlag.Name)
   617  	}
   618  	path = filepath.Join(path, "hotstuff-sec")
   619  
   620  	if err := os.WriteFile(path, privkey.ToBytes(), 0600); err != nil {
   621  		return err
   622  	}
   623  
   624  	fmt.Printf("%x", privkey.Public().ToBytes())
   625  	return nil
   626  }
   627  
   628  func aggregateBLS(ctx *cli.Context) error {
   629  
   630  	input := ctx.Args()
   631  	sigs := make([]*bls.PartialSignature, 0, len(input))
   632  	for i := range input {
   633  		sigbytes, err := hexutil.Decode(input[i])
   634  		if err != nil {
   635  			return err
   636  		}
   637  		sig := new(bls.PartialSignature)
   638  		if err := sig.FromBytes(sigbytes); err != nil {
   639  			return err
   640  		}
   641  		sigs = append(sigs, sig)
   642  	}
   643  	aggr, err := bls.Combine(sigs...)
   644  	if err != nil {
   645  		return err
   646  	}
   647  	aggrbytes, _ := aggr.ToBytes()
   648  	fmt.Printf("0x%x", aggrbytes)
   649  	return nil
   650  }