github.com/ethereum/go-ethereum@v1.16.1/cmd/geth/chaincmd.go (about)

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package main
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"os"
    24  	"path/filepath"
    25  	"regexp"
    26  	"runtime"
    27  	"slices"
    28  	"strconv"
    29  	"strings"
    30  	"sync/atomic"
    31  	"time"
    32  
    33  	"github.com/ethereum/go-ethereum/cmd/utils"
    34  	"github.com/ethereum/go-ethereum/common"
    35  	"github.com/ethereum/go-ethereum/common/hexutil"
    36  	"github.com/ethereum/go-ethereum/core"
    37  	"github.com/ethereum/go-ethereum/core/history"
    38  	"github.com/ethereum/go-ethereum/core/rawdb"
    39  	"github.com/ethereum/go-ethereum/core/state"
    40  	"github.com/ethereum/go-ethereum/core/types"
    41  	"github.com/ethereum/go-ethereum/crypto"
    42  	"github.com/ethereum/go-ethereum/ethdb"
    43  	"github.com/ethereum/go-ethereum/internal/debug"
    44  	"github.com/ethereum/go-ethereum/internal/era"
    45  	"github.com/ethereum/go-ethereum/internal/era/eradl"
    46  	"github.com/ethereum/go-ethereum/internal/flags"
    47  	"github.com/ethereum/go-ethereum/log"
    48  	"github.com/ethereum/go-ethereum/node"
    49  	"github.com/ethereum/go-ethereum/params"
    50  	"github.com/urfave/cli/v2"
    51  )
    52  
var (
	// initCommand bootstraps a new chain database from a JSON genesis spec.
	initCommand = &cli.Command{
		Action:    initGenesis,
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: slices.Concat([]cli.Flag{
			utils.CachePreimagesFlag,
			utils.OverrideOsaka,
			utils.OverrideVerkle,
		}, utils.DatabaseFlags),
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the active genesis spec (preset or stored).
	dumpGenesisCommand = &cli.Command{
		Action:    dumpGenesis,
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags:     slices.Concat([]cli.Flag{utils.DataDirFlag}, utils.NetworkFlags),
		Description: `
The dumpgenesis command prints the genesis configuration of the network preset
if one is set.  Otherwise it prints the genesis from the datadir.`,
	}
	// importCommand imports RLP-encoded block files into the local chain.
	importCommand = &cli.Command{
		Action:    importChain,
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: slices.Concat([]cli.Flag{
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheFlag,
			utils.CacheDatabaseFlag,
			utils.CacheTrieFlag,
			utils.CacheGCFlag,
			utils.CacheSnapshotFlag,
			utils.CacheNoPrefetchFlag,
			utils.CachePreimagesFlag,
			utils.NoCompactionFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,
			utils.VMTraceFlag,
			utils.VMTraceJsonConfigFlag,
			utils.TransactionHistoryFlag,
			utils.LogHistoryFlag,
			utils.LogNoHistoryFlag,
			utils.LogExportCheckpointsFlag,
			utils.StateHistoryFlag,
		}, utils.DatabaseFlags, debug.Flags),
		// import runs standalone, so global flags and logging must be set up
		// here rather than by the main geth entry point.
		Before: func(ctx *cli.Context) error {
			flags.MigrateGlobalFlags(ctx)
			return debug.Setup(ctx)
		},
		Description: `
The import command allows the import of blocks from an RLP-encoded format. This format can be a single file
containing multiple RLP-encoded blocks, or multiple files can be given.

If only one file is used, an import error will result in the entire import process failing. If
multiple files are processed, the import process will continue even if an individual RLP file fails
to import successfully.`,
	}
	// exportCommand writes (a range of) the chain out as an RLP file.
	exportCommand = &cli.Command{
		Action:    exportChain,
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags:     slices.Concat([]cli.Flag{utils.CacheFlag}, utils.DatabaseFlags),
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importHistoryCommand ingests blocks and receipts from Era archives.
	importHistoryCommand = &cli.Command{
		Action:    importHistory,
		Name:      "import-history",
		Usage:     "Import an Era archive",
		ArgsUsage: "<dir>",
		Flags:     slices.Concat([]cli.Flag{utils.TxLookupLimitFlag, utils.TransactionHistoryFlag}, utils.DatabaseFlags, utils.NetworkFlags),
		Description: `
The import-history command will import blocks and their corresponding receipts
from Era archives.
`,
	}
	// exportHistoryCommand packages chain history into Era archives.
	exportHistoryCommand = &cli.Command{
		Action:    exportHistory,
		Name:      "export-history",
		Usage:     "Export blockchain history to Era archives",
		ArgsUsage: "<dir> <first> <last>",
		Flags:     utils.DatabaseFlags,
		Description: `
The export-history command will export blocks and their corresponding receipts
into Era archives. Eras are typically packaged in steps of 8192 blocks.
`,
	}
	// importPreimagesCommand is the deprecated preimage importer, kept so old
	// export files remain loadable.
	importPreimagesCommand = &cli.Command{
		Action:    importPreimages,
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags:     slices.Concat([]cli.Flag{utils.CacheFlag}, utils.DatabaseFlags),
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
	}

	// dumpCommand prints the state of a given (or the latest) block.
	dumpCommand = &cli.Command{
		Action:    dump,
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: slices.Concat([]cli.Flag{
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		}, utils.DatabaseFlags),
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}

	// pruneHistoryCommand drops pre-merge bodies/receipts, keeping headers.
	pruneHistoryCommand = &cli.Command{
		Action:    pruneHistory,
		Name:      "prune-history",
		Usage:     "Prune blockchain history (block bodies and receipts) up to the merge block",
		ArgsUsage: "",
		Flags:     utils.DatabaseFlags,
		Description: `
The prune-history command removes historical block bodies and receipts from the
blockchain database up to the merge block, while preserving block headers. This
helps reduce storage requirements for nodes that don't need full historical data.`,
	}

	// downloadEraCommand fetches pre-merge era1 archives over HTTP.
	downloadEraCommand = &cli.Command{
		Action:    downloadEra,
		Name:      "download-era",
		Usage:     "Fetches era1 files (pre-merge history) from an HTTP endpoint",
		ArgsUsage: "",
		Flags: slices.Concat(
			utils.DatabaseFlags,
			utils.NetworkFlags,
			[]cli.Flag{
				eraBlockFlag,
				eraEpochFlag,
				eraAllFlag,
				eraServerFlag,
			},
		),
	}
)
   227  
// Flags for the download-era command. Exactly one of the block, epoch, or
// all selectors may be given (enforced in downloadEra via CheckExclusive).
var (
	eraBlockFlag = &cli.StringFlag{
		Name:  "block",
		Usage: "Block number to fetch. (can also be a range <start>-<end>)",
	}
	eraEpochFlag = &cli.StringFlag{
		Name:  "epoch",
		Usage: "Epoch number to fetch (can also be a range <start>-<end>)",
	}
	eraAllFlag = &cli.BoolFlag{
		Name:  "all",
		Usage: "Download all available era1 files",
	}
	eraServerFlag = &cli.StringFlag{
		Name:  "server",
		Usage: "era1 server URL",
	}
)
   246  
   247  // initGenesis will initialise the given JSON format genesis file and writes it as
   248  // the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
   249  func initGenesis(ctx *cli.Context) error {
   250  	if ctx.Args().Len() != 1 {
   251  		utils.Fatalf("need genesis.json file as the only argument")
   252  	}
   253  	genesisPath := ctx.Args().First()
   254  	if len(genesisPath) == 0 {
   255  		utils.Fatalf("invalid path to genesis file")
   256  	}
   257  	file, err := os.Open(genesisPath)
   258  	if err != nil {
   259  		utils.Fatalf("Failed to read genesis file: %v", err)
   260  	}
   261  	defer file.Close()
   262  
   263  	genesis := new(core.Genesis)
   264  	if err := json.NewDecoder(file).Decode(genesis); err != nil {
   265  		utils.Fatalf("invalid genesis file: %v", err)
   266  	}
   267  	// Open and initialise both full and light databases
   268  	stack, _ := makeConfigNode(ctx)
   269  	defer stack.Close()
   270  
   271  	var overrides core.ChainOverrides
   272  	if ctx.IsSet(utils.OverrideOsaka.Name) {
   273  		v := ctx.Uint64(utils.OverrideOsaka.Name)
   274  		overrides.OverrideOsaka = &v
   275  	}
   276  	if ctx.IsSet(utils.OverrideVerkle.Name) {
   277  		v := ctx.Uint64(utils.OverrideVerkle.Name)
   278  		overrides.OverrideVerkle = &v
   279  	}
   280  
   281  	chaindb := utils.MakeChainDatabase(ctx, stack, false)
   282  	defer chaindb.Close()
   283  
   284  	triedb := utils.MakeTrieDatabase(ctx, chaindb, ctx.Bool(utils.CachePreimagesFlag.Name), false, genesis.IsVerkle())
   285  	defer triedb.Close()
   286  
   287  	_, hash, compatErr, err := core.SetupGenesisBlockWithOverride(chaindb, triedb, genesis, &overrides)
   288  	if err != nil {
   289  		utils.Fatalf("Failed to write genesis block: %v", err)
   290  	}
   291  	if compatErr != nil {
   292  		utils.Fatalf("Failed to write chain config: %v", compatErr)
   293  	}
   294  	log.Info("Successfully wrote genesis state", "database", "chaindata", "hash", hash)
   295  
   296  	return nil
   297  }
   298  
   299  func dumpGenesis(ctx *cli.Context) error {
   300  	// check if there is a testnet preset enabled
   301  	var genesis *core.Genesis
   302  	if utils.IsNetworkPreset(ctx) {
   303  		genesis = utils.MakeGenesis(ctx)
   304  	} else if ctx.IsSet(utils.DeveloperFlag.Name) && !ctx.IsSet(utils.DataDirFlag.Name) {
   305  		genesis = core.DeveloperGenesisBlock(11_500_000, nil)
   306  	}
   307  
   308  	if genesis != nil {
   309  		if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
   310  			utils.Fatalf("could not encode genesis: %s", err)
   311  		}
   312  		return nil
   313  	}
   314  
   315  	// dump whatever already exists in the datadir
   316  	stack, _ := makeConfigNode(ctx)
   317  
   318  	db, err := stack.OpenDatabaseWithOptions("chaindata", node.DatabaseOptions{ReadOnly: true})
   319  	if err != nil {
   320  		return err
   321  	}
   322  	defer db.Close()
   323  
   324  	genesis, err = core.ReadGenesis(db)
   325  	if err != nil {
   326  		utils.Fatalf("failed to read genesis: %s", err)
   327  	}
   328  
   329  	if err := json.NewEncoder(os.Stdout).Encode(*genesis); err != nil {
   330  		utils.Fatalf("could not encode stored genesis: %s", err)
   331  	}
   332  
   333  	return nil
   334  }
   335  
   336  func importChain(ctx *cli.Context) error {
   337  	if ctx.Args().Len() < 1 {
   338  		utils.Fatalf("This command requires an argument.")
   339  	}
   340  	stack, cfg := makeConfigNode(ctx)
   341  	defer stack.Close()
   342  
   343  	// Start metrics export if enabled
   344  	utils.SetupMetrics(&cfg.Metrics)
   345  
   346  	chain, db := utils.MakeChain(ctx, stack, false)
   347  	defer db.Close()
   348  
   349  	// Start periodically gathering memory profiles
   350  	var peakMemAlloc, peakMemSys atomic.Uint64
   351  	go func() {
   352  		stats := new(runtime.MemStats)
   353  		for {
   354  			runtime.ReadMemStats(stats)
   355  			if peakMemAlloc.Load() < stats.Alloc {
   356  				peakMemAlloc.Store(stats.Alloc)
   357  			}
   358  			if peakMemSys.Load() < stats.Sys {
   359  				peakMemSys.Store(stats.Sys)
   360  			}
   361  			time.Sleep(5 * time.Second)
   362  		}
   363  	}()
   364  	// Import the chain
   365  	start := time.Now()
   366  
   367  	var importErr error
   368  
   369  	if ctx.Args().Len() == 1 {
   370  		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
   371  			importErr = err
   372  			log.Error("Import error", "err", err)
   373  		}
   374  	} else {
   375  		for _, arg := range ctx.Args().Slice() {
   376  			if err := utils.ImportChain(chain, arg); err != nil {
   377  				importErr = err
   378  				log.Error("Import error", "file", arg, "err", err)
   379  				if err == utils.ErrImportInterrupted {
   380  					break
   381  				}
   382  			}
   383  		}
   384  	}
   385  	chain.Stop()
   386  	fmt.Printf("Import done in %v.\n\n", time.Since(start))
   387  
   388  	// Output pre-compaction stats mostly to see the import trashing
   389  	showDBStats(db)
   390  
   391  	// Print the memory statistics used by the importing
   392  	mem := new(runtime.MemStats)
   393  	runtime.ReadMemStats(mem)
   394  
   395  	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(peakMemAlloc.Load())/1024/1024)
   396  	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(peakMemSys.Load())/1024/1024)
   397  	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
   398  	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))
   399  
   400  	if ctx.Bool(utils.NoCompactionFlag.Name) {
   401  		return nil
   402  	}
   403  
   404  	// Compact the entire database to more accurately measure disk io and print the stats
   405  	start = time.Now()
   406  	fmt.Println("Compacting entire database...")
   407  	if err := db.Compact(nil, nil); err != nil {
   408  		utils.Fatalf("Compaction failed: %v", err)
   409  	}
   410  	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
   411  
   412  	showDBStats(db)
   413  	return importErr
   414  }
   415  
   416  func exportChain(ctx *cli.Context) error {
   417  	if ctx.Args().Len() < 1 {
   418  		utils.Fatalf("This command requires an argument.")
   419  	}
   420  
   421  	stack, _ := makeConfigNode(ctx)
   422  	defer stack.Close()
   423  
   424  	chain, db := utils.MakeChain(ctx, stack, true)
   425  	defer db.Close()
   426  	start := time.Now()
   427  
   428  	var err error
   429  	fp := ctx.Args().First()
   430  	if ctx.Args().Len() < 3 {
   431  		err = utils.ExportChain(chain, fp)
   432  	} else {
   433  		// This can be improved to allow for numbers larger than 9223372036854775807
   434  		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   435  		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   436  		if ferr != nil || lerr != nil {
   437  			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   438  		}
   439  		if first < 0 || last < 0 {
   440  			utils.Fatalf("Export error: block number must be greater than 0\n")
   441  		}
   442  		if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
   443  			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
   444  		}
   445  		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
   446  	}
   447  	if err != nil {
   448  		utils.Fatalf("Export error: %v\n", err)
   449  	}
   450  	fmt.Printf("Export done in %v\n", time.Since(start))
   451  	return nil
   452  }
   453  
   454  func importHistory(ctx *cli.Context) error {
   455  	if ctx.Args().Len() != 1 {
   456  		utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
   457  	}
   458  
   459  	stack, _ := makeConfigNode(ctx)
   460  	defer stack.Close()
   461  
   462  	chain, db := utils.MakeChain(ctx, stack, false)
   463  	defer db.Close()
   464  
   465  	var (
   466  		start   = time.Now()
   467  		dir     = ctx.Args().Get(0)
   468  		network string
   469  	)
   470  
   471  	// Determine network.
   472  	if utils.IsNetworkPreset(ctx) {
   473  		switch {
   474  		case ctx.Bool(utils.MainnetFlag.Name):
   475  			network = "mainnet"
   476  		case ctx.Bool(utils.SepoliaFlag.Name):
   477  			network = "sepolia"
   478  		case ctx.Bool(utils.HoleskyFlag.Name):
   479  			network = "holesky"
   480  		case ctx.Bool(utils.HoodiFlag.Name):
   481  			network = "hoodi"
   482  		}
   483  	} else {
   484  		// No network flag set, try to determine network based on files
   485  		// present in directory.
   486  		var networks []string
   487  		for _, n := range params.NetworkNames {
   488  			entries, err := era.ReadDir(dir, n)
   489  			if err != nil {
   490  				return fmt.Errorf("error reading %s: %w", dir, err)
   491  			}
   492  			if len(entries) > 0 {
   493  				networks = append(networks, n)
   494  			}
   495  		}
   496  		if len(networks) == 0 {
   497  			return fmt.Errorf("no era1 files found in %s", dir)
   498  		}
   499  		if len(networks) > 1 {
   500  			return errors.New("multiple networks found, use a network flag to specify desired network")
   501  		}
   502  		network = networks[0]
   503  	}
   504  
   505  	if err := utils.ImportHistory(chain, dir, network); err != nil {
   506  		return err
   507  	}
   508  	fmt.Printf("Import done in %v\n", time.Since(start))
   509  	return nil
   510  }
   511  
   512  // exportHistory exports chain history in Era archives at a specified
   513  // directory.
   514  func exportHistory(ctx *cli.Context) error {
   515  	if ctx.Args().Len() != 3 {
   516  		utils.Fatalf("usage: %s", ctx.Command.ArgsUsage)
   517  	}
   518  
   519  	stack, _ := makeConfigNode(ctx)
   520  	defer stack.Close()
   521  
   522  	chain, _ := utils.MakeChain(ctx, stack, true)
   523  	start := time.Now()
   524  
   525  	var (
   526  		dir         = ctx.Args().Get(0)
   527  		first, ferr = strconv.ParseInt(ctx.Args().Get(1), 10, 64)
   528  		last, lerr  = strconv.ParseInt(ctx.Args().Get(2), 10, 64)
   529  	)
   530  	if ferr != nil || lerr != nil {
   531  		utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
   532  	}
   533  	if first < 0 || last < 0 {
   534  		utils.Fatalf("Export error: block number must be greater than 0\n")
   535  	}
   536  	if head := chain.CurrentSnapBlock(); uint64(last) > head.Number.Uint64() {
   537  		utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.Number.Uint64())
   538  	}
   539  	err := utils.ExportHistory(chain, dir, uint64(first), uint64(last), uint64(era.MaxEra1Size))
   540  	if err != nil {
   541  		utils.Fatalf("Export error: %v\n", err)
   542  	}
   543  	fmt.Printf("Export done in %v\n", time.Since(start))
   544  	return nil
   545  }
   546  
   547  // importPreimages imports preimage data from the specified file.
   548  // it is deprecated, and the export function has been removed, but
   549  // the import function is kept around for the time being so that
   550  // older file formats can still be imported.
   551  func importPreimages(ctx *cli.Context) error {
   552  	if ctx.Args().Len() < 1 {
   553  		utils.Fatalf("This command requires an argument.")
   554  	}
   555  
   556  	stack, _ := makeConfigNode(ctx)
   557  	defer stack.Close()
   558  
   559  	db := utils.MakeChainDatabase(ctx, stack, false)
   560  	defer db.Close()
   561  	start := time.Now()
   562  
   563  	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
   564  		utils.Fatalf("Import error: %v\n", err)
   565  	}
   566  	fmt.Printf("Import done in %v\n", time.Since(start))
   567  	return nil
   568  }
   569  
   570  func parseDumpConfig(ctx *cli.Context, db ethdb.Database) (*state.DumpConfig, common.Hash, error) {
   571  	var header *types.Header
   572  	if ctx.NArg() > 1 {
   573  		return nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
   574  	}
   575  	if ctx.NArg() == 1 {
   576  		arg := ctx.Args().First()
   577  		if hashish(arg) {
   578  			hash := common.HexToHash(arg)
   579  			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
   580  				header = rawdb.ReadHeader(db, hash, *number)
   581  			} else {
   582  				return nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
   583  			}
   584  		} else {
   585  			number, err := strconv.ParseUint(arg, 10, 64)
   586  			if err != nil {
   587  				return nil, common.Hash{}, err
   588  			}
   589  			if hash := rawdb.ReadCanonicalHash(db, number); hash != (common.Hash{}) {
   590  				header = rawdb.ReadHeader(db, hash, number)
   591  			} else {
   592  				return nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
   593  			}
   594  		}
   595  	} else {
   596  		// Use latest
   597  		header = rawdb.ReadHeadHeader(db)
   598  	}
   599  	if header == nil {
   600  		return nil, common.Hash{}, errors.New("no head block found")
   601  	}
   602  	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
   603  	var start common.Hash
   604  	switch len(startArg) {
   605  	case 0: // common.Hash
   606  	case 32:
   607  		start = common.BytesToHash(startArg)
   608  	case 20:
   609  		start = crypto.Keccak256Hash(startArg)
   610  		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
   611  	default:
   612  		return nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
   613  	}
   614  	conf := &state.DumpConfig{
   615  		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
   616  		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
   617  		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
   618  		Start:             start.Bytes(),
   619  		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
   620  	}
   621  	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
   622  		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
   623  		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
   624  	return conf, header.Root, nil
   625  }
   626  
   627  func dump(ctx *cli.Context) error {
   628  	stack, _ := makeConfigNode(ctx)
   629  	defer stack.Close()
   630  
   631  	db := utils.MakeChainDatabase(ctx, stack, true)
   632  	defer db.Close()
   633  
   634  	conf, root, err := parseDumpConfig(ctx, db)
   635  	if err != nil {
   636  		return err
   637  	}
   638  	triedb := utils.MakeTrieDatabase(ctx, db, true, true, false) // always enable preimage lookup
   639  	defer triedb.Close()
   640  
   641  	state, err := state.New(root, state.NewDatabase(triedb, nil))
   642  	if err != nil {
   643  		return err
   644  	}
   645  	if ctx.Bool(utils.IterativeOutputFlag.Name) {
   646  		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
   647  	} else {
   648  		fmt.Println(string(state.Dump(conf)))
   649  	}
   650  	return nil
   651  }
   652  
   653  // hashish returns true for strings that look like hashes.
   654  func hashish(x string) bool {
   655  	_, err := strconv.Atoi(x)
   656  	return err != nil
   657  }
   658  
// pruneHistory removes pre-merge block bodies and receipts from the chain
// freezer while keeping headers intact. It refuses to run unless the head is
// far enough past the merge that all prunable data has been moved into the
// freezer, and verifies the merge block hash before truncating anything.
func pruneHistory(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	// Open the chain database
	chain, chaindb := utils.MakeChain(ctx, stack, false)
	defer chaindb.Close()
	defer chain.Stop()

	// Determine the prune point. This will be the first PoS block.
	// Only networks with a registered prune point can be pruned.
	prunePoint, ok := history.PrunePoints[chain.Genesis().Hash()]
	if !ok || prunePoint == nil {
		return errors.New("prune point not found")
	}
	var (
		mergeBlock     = prunePoint.BlockNumber
		mergeBlockHash = prunePoint.BlockHash.Hex()
	)

	// Check we're far enough past merge to ensure all data is in freezer
	currentHeader := chain.CurrentHeader()
	if currentHeader == nil {
		return errors.New("current header not found")
	}
	if currentHeader.Number.Uint64() < mergeBlock+params.FullImmutabilityThreshold {
		return fmt.Errorf("chain not far enough past merge block, need %d more blocks",
			mergeBlock+params.FullImmutabilityThreshold-currentHeader.Number.Uint64())
	}

	// Double-check the prune block in db has the expected hash.
	hash := rawdb.ReadCanonicalHash(chaindb, mergeBlock)
	if hash != common.HexToHash(mergeBlockHash) {
		return fmt.Errorf("merge block hash mismatch: got %s, want %s", hash.Hex(), mergeBlockHash)
	}

	log.Info("Starting history pruning", "head", currentHeader.Number, "tail", mergeBlock, "tailHash", mergeBlockHash)
	start := time.Now()
	// Drop the transaction index entries first, then truncate the freezer
	// tail. The two steps are not atomic (see TODO below).
	rawdb.PruneTransactionIndex(chaindb, mergeBlock)
	if _, err := chaindb.TruncateTail(mergeBlock); err != nil {
		return fmt.Errorf("failed to truncate ancient data: %v", err)
	}
	log.Info("History pruning completed", "tail", mergeBlock, "elapsed", common.PrettyDuration(time.Since(start)))

	// TODO(s1na): what if there is a crash between the two prune operations?

	return nil
}
   706  
   707  // downladEra is the era1 file downloader tool.
   708  func downloadEra(ctx *cli.Context) error {
   709  	flags.CheckExclusive(ctx, eraBlockFlag, eraEpochFlag, eraAllFlag)
   710  
   711  	// Resolve the network.
   712  	var network = "mainnet"
   713  	if utils.IsNetworkPreset(ctx) {
   714  		switch {
   715  		case ctx.IsSet(utils.MainnetFlag.Name):
   716  		case ctx.IsSet(utils.SepoliaFlag.Name):
   717  			network = "sepolia"
   718  		default:
   719  			return fmt.Errorf("unsupported network, no known era1 checksums")
   720  		}
   721  	}
   722  
   723  	// Resolve the destination directory.
   724  	stack, _ := makeConfigNode(ctx)
   725  	defer stack.Close()
   726  
   727  	ancients := stack.ResolveAncient("chaindata", "")
   728  	dir := filepath.Join(ancients, rawdb.ChainFreezerName, "era")
   729  	if ctx.IsSet(utils.EraFlag.Name) {
   730  		dir = filepath.Join(ancients, ctx.String(utils.EraFlag.Name))
   731  	}
   732  
   733  	baseURL := ctx.String(eraServerFlag.Name)
   734  	if baseURL == "" {
   735  		return fmt.Errorf("need --%s flag to download", eraServerFlag.Name)
   736  	}
   737  
   738  	l, err := eradl.New(baseURL, network)
   739  	if err != nil {
   740  		return err
   741  	}
   742  	switch {
   743  	case ctx.IsSet(eraAllFlag.Name):
   744  		return l.DownloadAll(dir)
   745  
   746  	case ctx.IsSet(eraBlockFlag.Name):
   747  		s := ctx.String(eraBlockFlag.Name)
   748  		start, end, ok := parseRange(s)
   749  		if !ok {
   750  			return fmt.Errorf("invalid block range: %q", s)
   751  		}
   752  		return l.DownloadBlockRange(start, end, dir)
   753  
   754  	case ctx.IsSet(eraEpochFlag.Name):
   755  		s := ctx.String(eraEpochFlag.Name)
   756  		start, end, ok := parseRange(s)
   757  		if !ok {
   758  			return fmt.Errorf("invalid epoch range: %q", s)
   759  		}
   760  		return l.DownloadEpochRange(start, end, dir)
   761  
   762  	default:
   763  		return fmt.Errorf("specify one of --%s, --%s, or --%s to download", eraAllFlag.Name, eraBlockFlag.Name, eraEpochFlag.Name)
   764  	}
   765  }
   766  
   767  func parseRange(s string) (start uint64, end uint64, ok bool) {
   768  	log.Info("Parsing block range", "input", s)
   769  	if m, _ := regexp.MatchString("^[0-9]+-[0-9]+$", s); m {
   770  		s1, s2, _ := strings.Cut(s, "-")
   771  		start, err := strconv.ParseUint(s1, 10, 64)
   772  		if err != nil {
   773  			return 0, 0, false
   774  		}
   775  		end, err = strconv.ParseUint(s2, 10, 64)
   776  		if err != nil {
   777  			return 0, 0, false
   778  		}
   779  		if start > end {
   780  			return 0, 0, false
   781  		}
   782  		log.Info("Parsing block range", "start", start, "end", end)
   783  		return start, end, true
   784  	}
   785  	if m, _ := regexp.MatchString("^[0-9]+$", s); m {
   786  		start, err := strconv.ParseUint(s, 10, 64)
   787  		if err != nil {
   788  			return 0, 0, false
   789  		}
   790  		end = start
   791  		log.Info("Parsing single block range", "block", start)
   792  		return start, end, true
   793  	}
   794  	return 0, 0, false
   795  }