github.com/testinprod-io/op-erigon@v1.9.6/cmd/geth/chaincmd.go (about) 1 // Copyright 2015 The go-ethereum Authors 2 // This file is part of go-ethereum. 3 // 4 // go-ethereum is free software: you can redistribute it and/or modify 5 // it under the terms of the GNU General Public License as published by 6 // the Free Software Foundation, either version 3 of the License, or 7 // (at your option) any later version. 8 // 9 // go-ethereum is distributed in the hope that it will be useful, 10 // but WITHOUT ANY WARRANTY; without even the implied warranty of 11 // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the 12 // GNU General Public License for more details. 13 // 14 // You should have received a copy of the GNU General Public License 15 // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>. 16 17 package main 18 19 import ( 20 "encoding/json" 21 "fmt" 22 "os" 23 "path/filepath" 24 "runtime" 25 "strconv" 26 "sync/atomic" 27 "time" 28 29 "github.com/ethereum/go-ethereum/cmd/utils" 30 "github.com/ethereum/go-ethereum/common" 31 "github.com/ethereum/go-ethereum/console" 32 "github.com/ethereum/go-ethereum/core" 33 "github.com/ethereum/go-ethereum/core/rawdb" 34 "github.com/ethereum/go-ethereum/core/state" 35 "github.com/ethereum/go-ethereum/core/types" 36 "github.com/ethereum/go-ethereum/eth/downloader" 37 "github.com/ethereum/go-ethereum/event" 38 "github.com/ethereum/go-ethereum/log" 39 "github.com/ethereum/go-ethereum/trie" 40 "gopkg.in/urfave/cli.v1" 41 ) 42 43 var ( 44 initCommand = cli.Command{ 45 Action: utils.MigrateFlags(initGenesis), 46 Name: "init", 47 Usage: "Bootstrap and initialize a new genesis block", 48 ArgsUsage: "<genesisPath>", 49 Flags: []cli.Flag{ 50 utils.DataDirFlag, 51 }, 52 Category: "BLOCKCHAIN COMMANDS", 53 Description: ` 54 The init command initializes a new genesis block and definition for the network. 55 This is a destructive action and changes the network in which you will be 56 participating. 57 58 It expects the genesis file as argument.`, 59 } 60 importCommand = cli.Command{ 61 Action: utils.MigrateFlags(importChain), 62 Name: "import", 63 Usage: "Import a blockchain file", 64 ArgsUsage: "<filename> (<filename 2> ... <filename N>) ", 65 Flags: []cli.Flag{ 66 utils.DataDirFlag, 67 utils.CacheFlag, 68 utils.SyncModeFlag, 69 utils.GCModeFlag, 70 utils.CacheDatabaseFlag, 71 utils.CacheGCFlag, 72 }, 73 Category: "BLOCKCHAIN COMMANDS", 74 Description: ` 75 The import command imports blocks from an RLP-encoded form. The form can be one file 76 with several RLP-encoded blocks, or several files can be used. 77 78 If only one file is used, import error will result in failure. If several files are used, 79 processing will proceed even if an individual RLP-file import failure occurs.`, 80 } 81 exportCommand = cli.Command{ 82 Action: utils.MigrateFlags(exportChain), 83 Name: "export", 84 Usage: "Export blockchain into file", 85 ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]", 86 Flags: []cli.Flag{ 87 utils.DataDirFlag, 88 utils.CacheFlag, 89 utils.SyncModeFlag, 90 }, 91 Category: "BLOCKCHAIN COMMANDS", 92 Description: ` 93 Requires a first argument of the file to write to. 94 Optional second and third arguments control the first and 95 last block to write. In this mode, the file will be appended 96 if already existing. If the file ends with .gz, the output will 97 be gzipped.`, 98 } 99 importPreimagesCommand = cli.Command{ 100 Action: utils.MigrateFlags(importPreimages), 101 Name: "import-preimages", 102 Usage: "Import the preimage database from an RLP stream", 103 ArgsUsage: "<datafile>", 104 Flags: []cli.Flag{ 105 utils.DataDirFlag, 106 utils.CacheFlag, 107 utils.SyncModeFlag, 108 }, 109 Category: "BLOCKCHAIN COMMANDS", 110 Description: ` 111 The import-preimages command imports hash preimages from an RLP encoded stream.`, 112 } 113 exportPreimagesCommand = cli.Command{ 114 Action: utils.MigrateFlags(exportPreimages), 115 Name: "export-preimages", 116 Usage: "Export the preimage database into an RLP stream", 117 ArgsUsage: "<dumpfile>", 118 Flags: []cli.Flag{ 119 utils.DataDirFlag, 120 utils.CacheFlag, 121 utils.SyncModeFlag, 122 }, 123 Category: "BLOCKCHAIN COMMANDS", 124 Description: ` 125 The export-preimages command export hash preimages to an RLP encoded stream`, 126 } 127 copydbCommand = cli.Command{ 128 Action: utils.MigrateFlags(copyDb), 129 Name: "copydb", 130 Usage: "Create a local chain from a target chaindata folder", 131 ArgsUsage: "<sourceChaindataDir>", 132 Flags: []cli.Flag{ 133 utils.DataDirFlag, 134 utils.CacheFlag, 135 utils.SyncModeFlag, 136 utils.FakePoWFlag, 137 utils.TestnetFlag, 138 utils.RinkebyFlag, 139 }, 140 Category: "BLOCKCHAIN COMMANDS", 141 Description: ` 142 The first argument must be the directory containing the blockchain to download from`, 143 } 144 removedbCommand = cli.Command{ 145 Action: utils.MigrateFlags(removeDB), 146 Name: "removedb", 147 Usage: "Remove blockchain and state databases", 148 ArgsUsage: " ", 149 Flags: []cli.Flag{ 150 utils.DataDirFlag, 151 }, 152 Category: "BLOCKCHAIN COMMANDS", 153 Description: ` 154 Remove blockchain and state databases`, 155 } 156 dumpCommand = cli.Command{ 157 Action: utils.MigrateFlags(dump), 158 Name: "dump", 159 Usage: "Dump a specific block from storage", 160 ArgsUsage: "[<blockHash> | <blockNum>]...", 161 Flags: []cli.Flag{ 162 utils.DataDirFlag, 163 utils.CacheFlag, 164 utils.SyncModeFlag, 165 utils.IterativeOutputFlag, 166 utils.ExcludeCodeFlag, 167 utils.ExcludeStorageFlag, 168 utils.IncludeIncompletesFlag, 169 }, 170 Category: "BLOCKCHAIN COMMANDS", 171 Description: ` 172 The arguments are interpreted as block numbers or hashes. 173 Use "ethereum dump 0" to dump the genesis block.`, 174 } 175 inspectCommand = cli.Command{ 176 Action: utils.MigrateFlags(inspect), 177 Name: "inspect", 178 Usage: "Inspect the storage size for each type of data in the database", 179 ArgsUsage: " ", 180 Flags: []cli.Flag{ 181 utils.DataDirFlag, 182 utils.AncientFlag, 183 utils.CacheFlag, 184 utils.TestnetFlag, 185 utils.RinkebyFlag, 186 utils.GoerliFlag, 187 utils.SyncModeFlag, 188 }, 189 Category: "BLOCKCHAIN COMMANDS", 190 } 191 ) 192 193 // initGenesis will initialise the given JSON format genesis file and writes it as 194 // the zero'd block (i.e. genesis) or will fail hard if it can't succeed. 195 func initGenesis(ctx *cli.Context) error { 196 // Make sure we have a valid genesis JSON 197 genesisPath := ctx.Args().First() 198 if len(genesisPath) == 0 { 199 utils.Fatalf("Must supply path to genesis JSON file") 200 } 201 file, err := os.Open(genesisPath) 202 if err != nil { 203 utils.Fatalf("Failed to read genesis file: %v", err) 204 } 205 defer file.Close() 206 207 genesis := new(core.Genesis) 208 if err := json.NewDecoder(file).Decode(genesis); err != nil { 209 utils.Fatalf("invalid genesis file: %v", err) 210 } 211 // Open an initialise both full and light databases 212 stack := makeFullNode(ctx) 213 defer stack.Close() 214 215 for _, name := range []string{"chaindata", "lightchaindata"} { 216 chaindb, err := stack.OpenDatabase(name, 0, 0, "") 217 if err != nil { 218 utils.Fatalf("Failed to open database: %v", err) 219 } 220 _, hash, err := core.SetupGenesisBlock(chaindb, genesis) 221 if err != nil { 222 utils.Fatalf("Failed to write genesis block: %v", err) 223 } 224 chaindb.Close() 225 log.Info("Successfully wrote genesis state", "database", name, "hash", hash) 226 } 227 return nil 228 } 229 230 func importChain(ctx *cli.Context) error { 231 if len(ctx.Args()) < 1 { 232 utils.Fatalf("This command requires an argument.") 233 } 234 stack := makeFullNode(ctx) 235 defer stack.Close() 236 237 chain, db := utils.MakeChain(ctx, stack) 238 defer db.Close() 239 240 // Start periodically gathering memory profiles 241 var peakMemAlloc, peakMemSys uint64 242 go func() { 243 stats := new(runtime.MemStats) 244 for { 245 runtime.ReadMemStats(stats) 246 if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc { 247 atomic.StoreUint64(&peakMemAlloc, stats.Alloc) 248 } 249 if atomic.LoadUint64(&peakMemSys) < stats.Sys { 250 atomic.StoreUint64(&peakMemSys, stats.Sys) 251 } 252 time.Sleep(5 * time.Second) 253 } 254 }() 255 // Import the chain 256 start := time.Now() 257 258 if len(ctx.Args()) == 1 { 259 if err := utils.ImportChain(chain, ctx.Args().First()); err != nil { 260 log.Error("Import error", "err", err) 261 } 262 } else { 263 for _, arg := range ctx.Args() { 264 if err := utils.ImportChain(chain, arg); err != nil { 265 log.Error("Import error", "file", arg, "err", err) 266 } 267 } 268 } 269 chain.Stop() 270 fmt.Printf("Import done in %v.\n\n", time.Since(start)) 271 272 // Output pre-compaction stats mostly to see the import trashing 273 stats, err := db.Stat("leveldb.stats") 274 if err != nil { 275 utils.Fatalf("Failed to read database stats: %v", err) 276 } 277 fmt.Println(stats) 278 279 ioStats, err := db.Stat("leveldb.iostats") 280 if err != nil { 281 utils.Fatalf("Failed to read database iostats: %v", err) 282 } 283 fmt.Println(ioStats) 284 285 // Print the memory statistics used by the importing 286 mem := new(runtime.MemStats) 287 runtime.ReadMemStats(mem) 288 289 fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024) 290 fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024) 291 fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000) 292 fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs)) 293 294 if ctx.GlobalBool(utils.NoCompactionFlag.Name) { 295 return nil 296 } 297 298 // Compact the entire database to more accurately measure disk io and print the stats 299 start = time.Now() 300 fmt.Println("Compacting entire database...") 301 if err = db.Compact(nil, nil); err != nil { 302 utils.Fatalf("Compaction failed: %v", err) 303 } 304 fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) 305 306 stats, err = db.Stat("leveldb.stats") 307 if err != nil { 308 utils.Fatalf("Failed to read database stats: %v", err) 309 } 310 fmt.Println(stats) 311 312 ioStats, err = db.Stat("leveldb.iostats") 313 if err != nil { 314 utils.Fatalf("Failed to read database iostats: %v", err) 315 } 316 fmt.Println(ioStats) 317 return nil 318 } 319 320 func exportChain(ctx *cli.Context) error { 321 if len(ctx.Args()) < 1 { 322 utils.Fatalf("This command requires an argument.") 323 } 324 stack := makeFullNode(ctx) 325 defer stack.Close() 326 327 chain, _ := utils.MakeChain(ctx, stack) 328 start := time.Now() 329 330 var err error 331 fp := ctx.Args().First() 332 if len(ctx.Args()) < 3 { 333 err = utils.ExportChain(chain, fp) 334 } else { 335 // This can be improved to allow for numbers larger than 9223372036854775807 336 first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64) 337 last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64) 338 if ferr != nil || lerr != nil { 339 utils.Fatalf("Export error in parsing parameters: block number not an integer\n") 340 } 341 if first < 0 || last < 0 { 342 utils.Fatalf("Export error: block number must be greater than 0\n") 343 } 344 err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last)) 345 } 346 347 if err != nil { 348 utils.Fatalf("Export error: %v\n", err) 349 } 350 fmt.Printf("Export done in %v\n", time.Since(start)) 351 return nil 352 } 353 354 // importPreimages imports preimage data from the specified file. 355 func importPreimages(ctx *cli.Context) error { 356 if len(ctx.Args()) < 1 { 357 utils.Fatalf("This command requires an argument.") 358 } 359 stack := makeFullNode(ctx) 360 defer stack.Close() 361 362 db := utils.MakeChainDatabase(ctx, stack) 363 start := time.Now() 364 365 if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil { 366 utils.Fatalf("Import error: %v\n", err) 367 } 368 fmt.Printf("Import done in %v\n", time.Since(start)) 369 return nil 370 } 371 372 // exportPreimages dumps the preimage data to specified json file in streaming way. 373 func exportPreimages(ctx *cli.Context) error { 374 if len(ctx.Args()) < 1 { 375 utils.Fatalf("This command requires an argument.") 376 } 377 stack := makeFullNode(ctx) 378 defer stack.Close() 379 380 db := utils.MakeChainDatabase(ctx, stack) 381 start := time.Now() 382 383 if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil { 384 utils.Fatalf("Export error: %v\n", err) 385 } 386 fmt.Printf("Export done in %v\n", time.Since(start)) 387 return nil 388 } 389 390 func copyDb(ctx *cli.Context) error { 391 // Ensure we have a source chain directory to copy 392 if len(ctx.Args()) < 1 { 393 utils.Fatalf("Source chaindata directory path argument missing") 394 } 395 if len(ctx.Args()) < 2 { 396 utils.Fatalf("Source ancient chain directory path argument missing") 397 } 398 // Initialize a new chain for the running node to sync into 399 stack := makeFullNode(ctx) 400 defer stack.Close() 401 402 chain, chainDb := utils.MakeChain(ctx, stack) 403 syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode) 404 405 var syncBloom *trie.SyncBloom 406 if syncMode == downloader.FastSync { 407 syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb) 408 } 409 dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil) 410 411 // Create a source peer to satisfy downloader requests from 412 db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "") 413 if err != nil { 414 return err 415 } 416 hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false }) 417 if err != nil { 418 return err 419 } 420 peer := downloader.NewFakePeer("local", db, hc, dl) 421 if err = dl.RegisterPeer("local", 63, peer); err != nil { 422 return err 423 } 424 // Synchronise with the simulated peer 425 start := time.Now() 426 427 currentHeader := hc.CurrentHeader() 428 if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil { 429 return err 430 } 431 for dl.Synchronising() { 432 time.Sleep(10 * time.Millisecond) 433 } 434 fmt.Printf("Database copy done in %v\n", time.Since(start)) 435 436 // Compact the entire database to remove any sync overhead 437 start = time.Now() 438 fmt.Println("Compacting entire database...") 439 if err = db.Compact(nil, nil); err != nil { 440 utils.Fatalf("Compaction failed: %v", err) 441 } 442 fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) 443 return nil 444 } 445 446 func removeDB(ctx *cli.Context) error { 447 stack, config := makeConfigNode(ctx) 448 449 // Remove the full node state database 450 path := stack.ResolvePath("chaindata") 451 if common.FileExist(path) { 452 confirmAndRemoveDB(path, "full node state database") 453 } else { 454 log.Info("Full node state database missing", "path", path) 455 } 456 // Remove the full node ancient database 457 path = config.Eth.DatabaseFreezer 458 switch { 459 case path == "": 460 path = filepath.Join(stack.ResolvePath("chaindata"), "ancient") 461 case !filepath.IsAbs(path): 462 path = config.Node.ResolvePath(path) 463 } 464 if common.FileExist(path) { 465 confirmAndRemoveDB(path, "full node ancient database") 466 } else { 467 log.Info("Full node ancient database missing", "path", path) 468 } 469 // Remove the light node database 470 path = stack.ResolvePath("lightchaindata") 471 if common.FileExist(path) { 472 confirmAndRemoveDB(path, "light node database") 473 } else { 474 log.Info("Light node database missing", "path", path) 475 } 476 return nil 477 } 478 479 // confirmAndRemoveDB prompts the user for a last confirmation and removes the 480 // folder if accepted. 481 func confirmAndRemoveDB(database string, kind string) { 482 confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database)) 483 switch { 484 case err != nil: 485 utils.Fatalf("%v", err) 486 case !confirm: 487 log.Info("Database deletion skipped", "path", database) 488 default: 489 start := time.Now() 490 filepath.Walk(database, func(path string, info os.FileInfo, err error) error { 491 // If we're at the top level folder, recurse into 492 if path == database { 493 return nil 494 } 495 // Delete all the files, but not subfolders 496 if !info.IsDir() { 497 os.Remove(path) 498 return nil 499 } 500 return filepath.SkipDir 501 }) 502 log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start))) 503 } 504 } 505 506 func dump(ctx *cli.Context) error { 507 stack := makeFullNode(ctx) 508 defer stack.Close() 509 510 chain, chainDb := utils.MakeChain(ctx, stack) 511 defer chainDb.Close() 512 for _, arg := range ctx.Args() { 513 var block *types.Block 514 if hashish(arg) { 515 block = chain.GetBlockByHash(common.HexToHash(arg)) 516 } else { 517 num, _ := strconv.Atoi(arg) 518 block = chain.GetBlockByNumber(uint64(num)) 519 } 520 if block == nil { 521 fmt.Println("{}") 522 utils.Fatalf("block not found") 523 } else { 524 state, err := state.New(block.Root(), state.NewDatabase(chainDb)) 525 if err != nil { 526 utils.Fatalf("could not create new state: %v", err) 527 } 528 excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name) 529 excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name) 530 includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name) 531 if ctx.Bool(utils.IterativeOutputFlag.Name) { 532 state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout)) 533 } else { 534 if includeMissing { 535 fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" + 536 " otherwise the accounts will overwrite each other in the resulting mapping.") 537 } 538 fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false)) 539 } 540 } 541 } 542 return nil 543 } 544 545 func inspect(ctx *cli.Context) error { 546 node, _ := makeConfigNode(ctx) 547 defer node.Close() 548 549 _, chainDb := utils.MakeChain(ctx, node) 550 defer chainDb.Close() 551 552 return rawdb.InspectDatabase(chainDb) 553 } 554 555 // hashish returns true for strings that look like hashes. 556 func hashish(x string) bool { 557 _, err := strconv.Atoi(x) 558 return err != nil 559 }