github.com/core-coin/go-core/v2@v2.1.9/cmd/gocore/chaincmd.go

// Copyright 2015 by the Authors
// This file is part of go-core.
//
// go-core is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-core is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-core. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"gopkg.in/urfave/cli.v1"

	"github.com/core-coin/go-core/v2/cmd/utils"
	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/console/prompt"
	"github.com/core-coin/go-core/v2/core"
	"github.com/core-coin/go-core/v2/core/rawdb"
	"github.com/core-coin/go-core/v2/core/state"
	"github.com/core-coin/go-core/v2/core/types"
	"github.com/core-coin/go-core/v2/event"
	"github.com/core-coin/go-core/v2/log"
	"github.com/core-coin/go-core/v2/metrics"
	"github.com/core-coin/go-core/v2/trie"
	"github.com/core-coin/go-core/v2/xcb/downloader"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.NetworkIdFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.NetworkIdFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
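	// Illustrative usage sketch (not part of the original source; the binary
	// name is assumed to be gocore): init bootstraps a data directory from a
	// genesis definition, while dumpgenesis prints the default genesis for the
	// selected network to stdout:
	//
	//	gocore --datadir ./mynode init ./genesis.json
	//	gocore dumpgenesis > genesis.json
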
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.TxLookupLimitFlag,
			utils.NetworkIdFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are
used, processing will proceed even if an individual file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.NetworkIdFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.NetworkIdFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP-encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.NetworkIdFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP-encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir> <sourceAncientDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.DevinFlag,
			utils.TxLookupLimitFlag,
			utils.NetworkIdFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from,
the second the matching ancient (freezer) data directory.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.NetworkIdFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
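	// Illustrative usage sketch (not part of the original source; the binary
	// name is assumed to be gocore): exporting a gzipped block range and
	// re-importing it into a fresh data directory. Per the descriptions above,
	// export appends to an existing file and gzips output when the file name
	// ends in .gz:
	//
	//	gocore export chain.rlp.gz 0 100000
	//	gocore --datadir ./freshnode import chain.rlp.gz
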
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.NetworkIdFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "core dump 0" to dump the genesis block.`,
	}
	inspectCommand = cli.Command{
		Action:    utils.MigrateFlags(inspect),
		Name:      "inspect",
		Usage:     "Inspect the storage size for each type of data in the database",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.AncientFlag,
			utils.CacheFlag,
			utils.DevinFlag,
			utils.SyncModeFlag,
			utils.NetworkIdFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
	}
)

// initGenesis will initialise the given JSON format genesis file and write it as
// the zeroth block (i.e. genesis), or will fail hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	// Open and initialise both full and light databases
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}

	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		chaindb.Close()
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

func dumpGenesis(ctx *cli.Context) error {
	genesis := utils.MakeGenesis(ctx)
	if genesis == nil {
		genesis = core.DefaultGenesisBlock()
	}
	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
		utils.Fatalf("could not encode genesis")
	}
	return nil
}
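// genesisRoundTrip is a minimal illustrative sketch (not part of the original
// source): it shows that the same JSON codec used by initGenesis and
// dumpGenesis above can round-trip a genesis definition through its JSON
// encoding, here using the built-in default genesis as input.
func genesisRoundTrip() error {
	// Encode the default genesis block definition to JSON...
	blob, err := json.Marshal(core.DefaultGenesisBlock())
	if err != nil {
		return err
	}
	// ...and decode it back into a fresh core.Genesis, as initGenesis does
	// for a user-supplied file.
	genesis := new(core.Genesis)
	return json.Unmarshal(blob, genesis)
}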
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack, false)
	defer db.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	var importErr error

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats, mostly to see the import thrashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)
	return importErr
}

func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, _ := utils.MakeChain(ctx, stack, true)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}
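// Illustrative usage sketch (not part of the original source; the binary name
// is assumed to be gocore): moving the preimage database between nodes through
// an intermediate RLP file, using importPreimages above and exportPreimages
// below:
//
//	gocore export-preimages preimages.rlp
//	gocore --datadir ./othernode import-preimages preimages.rlp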
// exportPreimages dumps the preimage data to the specified RLP file in a streaming way.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack, false)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}
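// Illustrative usage sketch (not part of the original source; the binary name
// and the source node's layout are assumptions): copyDb above expects the
// source chaindata directory and its ancient store, which by default lives
// under chaindata/ancient as removeDB below resolves it:
//
//	gocore copydb /other/node/chaindata /other/node/chaindata/ancient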
func removeDB(ctx *cli.Context) error {
	stack, config := makeConfigNode(ctx)

	// Remove the full node state database
	path := stack.ResolvePath("chaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node state database")
	} else {
		log.Info("Full node state database missing", "path", path)
	}
	// Remove the full node ancient database
	path = config.Xcb.DatabaseFreezer
	switch {
	case path == "":
		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
	case !filepath.IsAbs(path):
		path = config.Node.ResolvePath(path)
	}
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node ancient database")
	} else {
		log.Info("Full node ancient database missing", "path", path)
	}
	// Remove the light node database
	path = stack.ResolvePath("lightchaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "light node database")
	} else {
		log.Info("Light node database missing", "path", path)
	}
	return nil
}

// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
	confirm, err := prompt.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
	switch {
	case err != nil:
		utils.Fatalf("%v", err)
	case !confirm:
		log.Info("Database deletion skipped", "path", database)
	default:
		start := time.Now()
		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
			// If we're at the top level folder, recurse into it
			if path == database {
				return nil
			}
			// Delete all the files, but not subfolders
			if !info.IsDir() {
				os.Remove(path)
				return nil
			}
			return filepath.SkipDir
		})
		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
	}
}

func dump(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack, true)
	defer chainDb.Close()
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb), nil)
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
			if ctx.Bool(utils.IterativeOutputFlag.Name) {
				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
			} else {
				if includeMissing {
					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
						" otherwise the accounts will overwrite each other in the resulting mapping.")
				}
				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
			}
		}
	}
	return nil
}

func inspect(ctx *cli.Context) error {
	node, _ := makeConfigNode(ctx)
	defer node.Close()

	_, chainDb := utils.MakeChain(ctx, node, true)
	defer chainDb.Close()

	return rawdb.InspectDatabase(chainDb)
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}
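// Illustrative behaviour of hashish (not part of the original source): any
// argument that does not parse as a decimal integer is treated as a hash, so
// for the dump command above:
//
//	hashish("0")      == false // decimal => interpreted as a block number
//	hashish("0xdead") == true  // not decimal => interpreted as a block hash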