github.com/carter-ya/go-ethereum@v0.0.0-20230628080049-d2309be3983b/cmd/geth/chaincmd.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of go-ethereum. 3 // 4 // go-ethereum is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // go-ethereum is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU General Public License for more details. 13 // 14 // You should have received a copy of the GNU General Public License 15 // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. 16 17 package main 18 19 import ( 20 "encoding/json" 21 "errors" 22 "fmt" 23 "os" 24 "runtime" 25 "strconv" 26 "sync/atomic" 27 "time" 28 29 "github.com/ethereum/go-ethereum/cmd/utils" 30 "github.com/ethereum/go-ethereum/common" 31 "github.com/ethereum/go-ethereum/common/hexutil" 32 "github.com/ethereum/go-ethereum/core" 33 "github.com/ethereum/go-ethereum/core/rawdb" 34 "github.com/ethereum/go-ethereum/core/state" 35 "github.com/ethereum/go-ethereum/core/types" 36 "github.com/ethereum/go-ethereum/crypto" 37 "github.com/ethereum/go-ethereum/ethdb" 38 "github.com/ethereum/go-ethereum/internal/flags" 39 "github.com/ethereum/go-ethereum/log" 40 "github.com/ethereum/go-ethereum/metrics" 41 "github.com/ethereum/go-ethereum/node" 42 "github.com/urfave/cli/v2" 43 ) 44 45 var ( 46 initCommand = &cli.Command{ 47 Action: initGenesis, 48 Name: "init", 49 Usage: "Bootstrap and initialize a new genesis block", 50 ArgsUsage: "<genesisPath>", 51 Flags: utils.DatabasePathFlags, 52 Description: ` 53 The init command initializes a new genesis block and definition for the network. 54 This is a destructive action and changes the network in which you will be 55 participating. 56 57 It expects the genesis file as argument.`, 58 } 59 dumpGenesisCommand = &cli.Command{ 60 Action: dumpGenesis, 61 Name: "dumpgenesis", 62 Usage: "Dumps genesis block JSON configuration to stdout", 63 ArgsUsage: "", 64 Flags: append([]cli.Flag{utils.DataDirFlag}, utils.NetworkFlags...), 65 Description: ` 66 The dumpgenesis command prints the genesis configuration of the network preset 67 if one is set. Otherwise it prints the genesis from the datadir.`, 68 } 69 importCommand = &cli.Command{ 70 Action: importChain, 71 Name: "import", 72 Usage: "Import a blockchain file", 73 ArgsUsage: "<filename> (<filename 2> ... <filename N>) ", 74 Flags: flags.Merge([]cli.Flag{ 75 utils.CacheFlag, 76 utils.SyncModeFlag, 77 utils.GCModeFlag, 78 utils.SnapshotFlag, 79 utils.CacheDatabaseFlag, 80 utils.CacheGCFlag, 81 utils.MetricsEnabledFlag, 82 utils.MetricsEnabledExpensiveFlag, 83 utils.MetricsHTTPFlag, 84 utils.MetricsPortFlag, 85 utils.MetricsEnableInfluxDBFlag, 86 utils.MetricsEnableInfluxDBV2Flag, 87 utils.MetricsInfluxDBEndpointFlag, 88 utils.MetricsInfluxDBDatabaseFlag, 89 utils.MetricsInfluxDBUsernameFlag, 90 utils.MetricsInfluxDBPasswordFlag, 91 utils.MetricsInfluxDBTagsFlag, 92 utils.MetricsInfluxDBTokenFlag, 93 utils.MetricsInfluxDBBucketFlag, 94 utils.MetricsInfluxDBOrganizationFlag, 95 utils.TxLookupLimitFlag, 96 }, utils.DatabasePathFlags), 97 Description: ` 98 The import command imports blocks from an RLP-encoded form. The form can be one file 99 with several RLP-encoded blocks, or several files can be used. 100 101 If only one file is used, import error will result in failure. If several files are used, 102 processing will proceed even if an individual RLP-file import failure occurs.`, 103 } 104 exportCommand = &cli.Command{ 105 Action: exportChain, 106 Name: "export", 107 Usage: "Export blockchain into file", 108 ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]", 109 Flags: flags.Merge([]cli.Flag{ 110 utils.CacheFlag, 111 utils.SyncModeFlag, 112 }, utils.DatabasePathFlags), 113 Description: ` 114 Requires a first argument of the file to write to. 115 Optional second and third arguments control the first and 116 last block to write. In this mode, the file will be appended 117 if already existing. If the file ends with .gz, the output will 118 be gzipped.`, 119 } 120 importPreimagesCommand = &cli.Command{ 121 Action: importPreimages, 122 Name: "import-preimages", 123 Usage: "Import the preimage database from an RLP stream", 124 ArgsUsage: "<datafile>", 125 Flags: flags.Merge([]cli.Flag{ 126 utils.CacheFlag, 127 utils.SyncModeFlag, 128 }, utils.DatabasePathFlags), 129 Description: ` 130 The import-preimages command imports hash preimages from an RLP encoded stream. 131 It's deprecated, please use "geth db import" instead. 132 `, 133 } 134 exportPreimagesCommand = &cli.Command{ 135 Action: exportPreimages, 136 Name: "export-preimages", 137 Usage: "Export the preimage database into an RLP stream", 138 ArgsUsage: "<dumpfile>", 139 Flags: flags.Merge([]cli.Flag{ 140 utils.CacheFlag, 141 utils.SyncModeFlag, 142 }, utils.DatabasePathFlags), 143 Description: ` 144 The export-preimages command exports hash preimages to an RLP encoded stream. 145 It's deprecated, please use "geth db export" instead. 146 `, 147 } 148 dumpCommand = &cli.Command{ 149 Action: dump, 150 Name: "dump", 151 Usage: "Dump a specific block from storage", 152 ArgsUsage: "[? <blockHash> | <blockNum>]", 153 Flags: flags.Merge([]cli.Flag{ 154 utils.CacheFlag, 155 utils.IterativeOutputFlag, 156 utils.ExcludeCodeFlag, 157 utils.ExcludeStorageFlag, 158 utils.IncludeIncompletesFlag, 159 utils.StartKeyFlag, 160 utils.DumpLimitFlag, 161 }, utils.DatabasePathFlags), 162 Description: ` 163 This command dumps out the state for a given block (or latest, if none provided). 164 `, 165 } 166 ) 167 168 // initGenesis will initialise the given JSON format genesis file and writes it as 169 // the zero'd block (i.e. genesis) or will fail hard if it can't succeed. 170 func initGenesis(ctx *cli.Context) error { 171 if ctx.Args().Len() != 1 { 172 utils.Fatalf("need genesis.json file as the only argument") 173 } 174 genesisPath := ctx.Args().First() 175 if len(genesisPath) == 0 { 176 utils.Fatalf("invalid path to genesis file") 177 } 178 file, err := os.Open(genesisPath) 179 if err != nil { 180 utils.Fatalf("Failed to read genesis file: %v", err) 181 } 182 defer file.Close() 183 184 genesis := new(core.Genesis) 185 if err := json.NewDecoder(file).Decode(genesis); err != nil { 186 utils.Fatalf("invalid genesis file: %v", err) 187 } 188 // Open and initialise both full and light databases 189 stack, _ := makeConfigNode(ctx) 190 defer stack.Close() 191 for _, name := range []string{"chaindata", "lightchaindata"} { 192 chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.String(utils.AncientFlag.Name), "", false) 193 if err != nil { 194 utils.Fatalf("Failed to open database: %v", err) 195 } 196 _, hash, err := core.SetupGenesisBlock(chaindb, genesis) 197 if err != nil { 198 utils.Fatalf("Failed to write genesis block: %v", err) 199 } 200 chaindb.Close() 201 log.Info("Successfully wrote genesis state", "database", name, "hash", hash) 202 } 203 return nil 204 } 205 206 func dumpGenesis(ctx *cli.Context) error { 207 // if there is a testnet preset enabled, dump that 208 if utils.IsNetworkPreset(ctx) { 209 genesis := utils.MakeGenesis(ctx) 210 if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil { 211 utils.Fatalf("could not encode genesis: %s", err) 212 } 213 return nil 214 } 215 // dump whatever already exists in the datadir 216 stack, _ := makeConfigNode(ctx) 217 for _, name := range []string{"chaindata", "lightchaindata"} { 218 db, err := stack.OpenDatabase(name, 0, 0, "", true) 219 if err != nil { 220 if !os.IsNotExist(err) { 221 return err 222 } 223 continue 224 } 225 genesis, err := core.ReadGenesis(db) 226 if err != nil { 227 utils.Fatalf("failed to read genesis: %s", err) 228 } 229 db.Close() 230 231 if err := json.NewEncoder(os.Stdout).Encode(*genesis); err != nil { 232 utils.Fatalf("could not encode stored genesis: %s", err) 233 } 234 return nil 235 } 236 if ctx.IsSet(utils.DataDirFlag.Name) { 237 utils.Fatalf("no existing datadir at %s", stack.Config().DataDir) 238 } 239 utils.Fatalf("no network preset provided. no exisiting genesis in the default datadir") 240 return nil 241 } 242 243 func importChain(ctx *cli.Context) error { 244 if ctx.Args().Len() < 1 { 245 utils.Fatalf("This command requires an argument.") 246 } 247 // Start metrics export if enabled 248 utils.SetupMetrics(ctx) 249 // Start system runtime metrics collection 250 go metrics.CollectProcessMetrics(3 * time.Second) 251 252 stack, _ := makeConfigNode(ctx) 253 defer stack.Close() 254 255 chain, db := utils.MakeChain(ctx, stack) 256 defer db.Close() 257 258 // Start periodically gathering memory profiles 259 var peakMemAlloc, peakMemSys uint64 260 go func() { 261 stats := new(runtime.MemStats) 262 for { 263 runtime.ReadMemStats(stats) 264 if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc { 265 atomic.StoreUint64(&peakMemAlloc, stats.Alloc) 266 } 267 if atomic.LoadUint64(&peakMemSys) < stats.Sys { 268 atomic.StoreUint64(&peakMemSys, stats.Sys) 269 } 270 time.Sleep(5 * time.Second) 271 } 272 }() 273 // Import the chain 274 start := time.Now() 275 276 var importErr error 277 278 if ctx.Args().Len() == 1 { 279 if err := utils.ImportChain(chain, ctx.Args().First()); err != nil { 280 importErr = err 281 log.Error("Import error", "err", err) 282 } 283 } else { 284 for _, arg := range ctx.Args().Slice() { 285 if err := utils.ImportChain(chain, arg); err != nil { 286 importErr = err 287 log.Error("Import error", "file", arg, "err", err) 288 } 289 } 290 } 291 chain.Stop() 292 fmt.Printf("Import done in %v.\n\n", time.Since(start)) 293 294 // Output pre-compaction stats mostly to see the import trashing 295 showLeveldbStats(db) 296 297 // Print the memory statistics used by the importing 298 mem := new(runtime.MemStats) 299 runtime.ReadMemStats(mem) 300 301 fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024) 302 fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024) 303 fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000) 304 fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs)) 305 306 if ctx.Bool(utils.NoCompactionFlag.Name) { 307 return nil 308 } 309 310 // Compact the entire database to more accurately measure disk io and print the stats 311 start = time.Now() 312 fmt.Println("Compacting entire database...") 313 if err := db.Compact(nil, nil); err != nil { 314 utils.Fatalf("Compaction failed: %v", err) 315 } 316 fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) 317 318 showLeveldbStats(db) 319 return importErr 320 } 321 322 func exportChain(ctx *cli.Context) error { 323 if ctx.Args().Len() < 1 { 324 utils.Fatalf("This command requires an argument.") 325 } 326 327 stack, _ := makeConfigNode(ctx) 328 defer stack.Close() 329 330 chain, _ := utils.MakeChain(ctx, stack) 331 start := time.Now() 332 333 var err error 334 fp := ctx.Args().First() 335 if ctx.Args().Len() < 3 { 336 err = utils.ExportChain(chain, fp) 337 } else { 338 // This can be improved to allow for numbers larger than 9223372036854775807 339 first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64) 340 last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64) 341 if ferr != nil || lerr != nil { 342 utils.Fatalf("Export error in parsing parameters: block number not an integer\n") 343 } 344 if first < 0 || last < 0 { 345 utils.Fatalf("Export error: block number must be greater than 0\n") 346 } 347 if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() { 348 utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64()) 349 } 350 err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last)) 351 } 352 353 if err != nil { 354 utils.Fatalf("Export error: %v\n", err) 355 } 356 fmt.Printf("Export done in %v\n", time.Since(start)) 357 return nil 358 } 359 360 // importPreimages imports preimage data from the specified file. 361 func importPreimages(ctx *cli.Context) error { 362 if ctx.Args().Len() < 1 { 363 utils.Fatalf("This command requires an argument.") 364 } 365 366 stack, _ := makeConfigNode(ctx) 367 defer stack.Close() 368 369 db := utils.MakeChainDatabase(ctx, stack, false) 370 start := time.Now() 371 372 if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil { 373 utils.Fatalf("Import error: %v\n", err) 374 } 375 fmt.Printf("Import done in %v\n", time.Since(start)) 376 return nil 377 } 378 379 // exportPreimages dumps the preimage data to specified json file in streaming way. 380 func exportPreimages(ctx *cli.Context) error { 381 if ctx.Args().Len() < 1 { 382 utils.Fatalf("This command requires an argument.") 383 } 384 stack, _ := makeConfigNode(ctx) 385 defer stack.Close() 386 387 db := utils.MakeChainDatabase(ctx, stack, true) 388 start := time.Now() 389 390 if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil { 391 utils.Fatalf("Export error: %v\n", err) 392 } 393 fmt.Printf("Export done in %v\n", time.Since(start)) 394 return nil 395 } 396 397 func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) { 398 db := utils.MakeChainDatabase(ctx, stack, true) 399 var header *types.Header 400 if ctx.NArg() > 1 { 401 return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg()) 402 } 403 if ctx.NArg() == 1 { 404 arg := ctx.Args().First() 405 if hashish(arg) { 406 hash := common.HexToHash(arg) 407 if number := rawdb.ReadHeaderNumber(db, hash); number != nil { 408 header = rawdb.ReadHeader(db, hash, *number) 409 } else { 410 return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash) 411 } 412 } else { 413 number, err := strconv.ParseUint(arg, 10, 64) 414 if err != nil { 415 return nil, nil, common.Hash{}, err 416 } 417 if hash := rawdb.ReadCanonicalHash(db, number); hash != (common.Hash{}) { 418 header = rawdb.ReadHeader(db, hash, number) 419 } else { 420 return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number) 421 } 422 } 423 } else { 424 // Use latest 425 header = rawdb.ReadHeadHeader(db) 426 } 427 if header == nil { 428 return nil, nil, common.Hash{}, errors.New("no head block found") 429 } 430 startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name)) 431 var start common.Hash 432 switch len(startArg) { 433 case 0: // common.Hash 434 case 32: 435 start = common.BytesToHash(startArg) 436 case 20: 437 start = crypto.Keccak256Hash(startArg) 438 log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex()) 439 default: 440 return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg) 441 } 442 var conf = &state.DumpConfig{ 443 SkipCode: ctx.Bool(utils.ExcludeCodeFlag.Name), 444 SkipStorage: ctx.Bool(utils.ExcludeStorageFlag.Name), 445 OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name), 446 Start: start.Bytes(), 447 Max: ctx.Uint64(utils.DumpLimitFlag.Name), 448 } 449 log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(), 450 "skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage, 451 "start", hexutil.Encode(conf.Start), "limit", conf.Max) 452 return conf, db, header.Root, nil 453 } 454 455 func dump(ctx *cli.Context) error { 456 stack, _ := makeConfigNode(ctx) 457 defer stack.Close() 458 459 conf, db, root, err := parseDumpConfig(ctx, stack) 460 if err != nil { 461 return err 462 } 463 state, err := state.New(root, state.NewDatabase(db), nil) 464 if err != nil { 465 return err 466 } 467 if ctx.Bool(utils.IterativeOutputFlag.Name) { 468 state.IterativeDump(conf, json.NewEncoder(os.Stdout)) 469 } else { 470 if conf.OnlyWithAddresses { 471 fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+ 472 " otherwise the accounts will overwrite each other in the resulting mapping.") 473 return fmt.Errorf("incompatible options") 474 } 475 fmt.Println(string(state.Dump(conf))) 476 } 477 return nil 478 } 479 480 // hashish returns true for strings that look like hashes. 481 func hashish(x string) bool { 482 _, err := strconv.Atoi(x) 483 return err != nil 484 }