github.com/notegio/go-ethereum@v1.9.5-4/cmd/geth/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"path/filepath"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.KafkaLogTopicFlag,
			utils.KafkaLogBrokerFlag,
			utils.KafkaTransactionTopicFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are used,
processing will proceed even if an individual RLP file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will be gzipped.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
	inspectCommand = cli.Command{
		Action:    utils.MigrateFlags(inspect),
		Name:      "inspect",
		Usage:     "Inspect the storage size for each type of data in the database",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.AncientFlag,
			utils.CacheFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
	}
	setHeadCommand = cli.Command{
		Action:    utils.MigrateFlags(setHead),
		Name:      "sethead",
		Usage:     "Sets the head block to a specific block",
		ArgsUsage: "[<blockHash> | <blockNum> | <-blockCount>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.KafkaLogBrokerFlag,
			utils.KafkaLogTopicFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers, hashes, or a number of blocks to be rolled back.
Use "ethereum sethead -2" to drop the two most recent blocks.`,
	}
	verifyStateTrieCommand = cli.Command{
		Action: utils.MigrateFlags(verifyStateTrie),
		Name:   "verifystatetrie",
		Usage:  "Verifies the state trie",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Verify proofs of the latest block state trie. Exit 0 if correct, else exit 1.`,
	}
	compactCommand = cli.Command{
		Action: utils.MigrateFlags(compact),
		Name:   "compactdb",
		Usage:  "Compacts the database",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Compacts the database`,
	}
)

// initGenesis will initialise the given JSON format genesis file and write it as
// the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	defer stack.Close()

	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		chaindb.Close()
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

// importChain imports one or more RLP-encoded block files into the local chain
// and reports import timing, database and memory statistics.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats, mostly to see the import thrashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)
	return nil
}

// exportChain writes the chain to the given file, either in full or over the
// requested block range, appending to the file if it already exists.
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must be greater than 0\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified file in a streaming fashion.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// copyDb syncs a fresh local chain from an existing chaindata (and ancient)
// directory by running the downloader against a simulated local peer.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) < 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source ancient chain directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	syncMode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)

	var syncBloom *trie.SyncBloom
	if syncMode == downloader.FastSync {
		syncBloom = trie.NewSyncBloom(uint64(ctx.GlobalInt(utils.CacheFlag.Name)/2), chainDb)
	}
	dl := downloader.New(0, chainDb, syncBloom, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabaseWithFreezer(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name)/2, 256, ctx.Args().Get(1), "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncMode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))
	return nil
}

// removeDB removes the full node state, ancient and light node databases,
// prompting for confirmation before each deletion.
func removeDB(ctx *cli.Context) error {
	stack, config := makeConfigNode(ctx)

	// Remove the full node state database
	path := stack.ResolvePath("chaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node state database")
	} else {
		log.Info("Full node state database missing", "path", path)
	}
	// Remove the full node ancient database
	path = config.Eth.DatabaseFreezer
	switch {
	case path == "":
		path = filepath.Join(stack.ResolvePath("chaindata"), "ancient")
	case !filepath.IsAbs(path):
		path = config.Node.ResolvePath(path)
	}
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "full node ancient database")
	} else {
		log.Info("Full node ancient database missing", "path", path)
	}
	// Remove the light node database
	path = stack.ResolvePath("lightchaindata")
	if common.FileExist(path) {
		confirmAndRemoveDB(path, "light node database")
	} else {
		log.Info("Light node database missing", "path", path)
	}
	return nil
}

// confirmAndRemoveDB prompts the user for a last confirmation and removes the
// folder if accepted.
func confirmAndRemoveDB(database string, kind string) {
	confirm, err := console.Stdin.PromptConfirm(fmt.Sprintf("Remove %s (%s)?", kind, database))
	switch {
	case err != nil:
		utils.Fatalf("%v", err)
	case !confirm:
		log.Info("Database deletion skipped", "path", database)
	default:
		start := time.Now()
		filepath.Walk(database, func(path string, info os.FileInfo, err error) error {
			// If we're at the top level folder, recurse into it
			if path == database {
				return nil
			}
			// Delete all the files, but not subfolders
			if !info.IsDir() {
				os.Remove(path)
				return nil
			}
			return filepath.SkipDir
		})
		log.Info("Database successfully deleted", "path", database, "elapsed", common.PrettyDuration(time.Since(start)))
	}
}

// dump writes the state of the requested blocks to stdout as JSON.
func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	defer stack.Close()

	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			excludeCode := ctx.Bool(utils.ExcludeCodeFlag.Name)
			excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name)
			includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name)
			if ctx.Bool(utils.IterativeOutputFlag.Name) {
				state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout))
			} else {
				if includeMissing {
					fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" +
						" otherwise the accounts will overwrite each other in the resulting mapping.")
				}
				fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false))
			}
		}
	}
	return nil
}

// setHead rewinds the local chain to the block given by hash, by absolute
// number, or by a negative offset from the current head.
func setHead(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, db := utils.MakeChain(ctx, stack)
	arg := ctx.Args()[0]
	blockNumber, err := strconv.Atoi(arg)
	if err != nil {
		block := chain.GetBlockByHash(common.HexToHash(arg))
		if block == nil {
			utils.Fatalf("block %v not found", arg)
		}
		blockNumber = int(block.Number().Int64())
	} else if blockNumber < 0 {
		latestHash := rawdb.ReadHeadBlockHash(db)
		block := chain.GetBlockByHash(latestHash)
		blockNumber = int(block.Number().Int64()) + blockNumber
	}
	if err := chain.SetHead(uint64(blockNumber)); err != nil {
		fmt.Printf("Failed to set head to %v", blockNumber)
		return err
	}
	chain.Stop()
	db.Close()
	fmt.Printf("Rolled back chain to block %v\n", blockNumber)
	return nil
}

// verifyStateTrie walks the latest block's state trie and returns an error if
// any referenced trie node cannot be resolved.
func verifyStateTrie(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	bc, db := utils.MakeChain(ctx, stack)
	latestHash := rawdb.ReadHeadBlockHash(db)
	block := bc.GetBlockByHash(latestHash)

	tr, err := trie.New(block.Root(), trie.NewDatabase(db))
	if err != nil {
error")) 627 return err 628 } 629 nodesToCheck := 1000000 630 if len(ctx.Args()) > 0 { 631 arg := ctx.Args()[0] 632 nodesToCheck, err = strconv.Atoi(arg) 633 if err != nil { return err } 634 } 635 636 iterators := []trie.NodeIterator{} 637 for i := 0; i < 256; i++ { 638 iterators = append(iterators, tr.NodeIterator([]byte{byte(i)})) 639 } 640 for i := 0; i < nodesToCheck; i += len(iterators) { 641 log.Info("Checking leaves", "checked", i, "limit", nodesToCheck) 642 for _, it := range iterators { 643 for it.Next(true) { 644 if it.Leaf() { 645 break 646 } 647 } 648 if err := it.Error(); err != nil { 649 return err 650 } 651 } 652 } 653 bc.Stop() 654 db.Close() 655 // fmt.Printf("Rolled back chain to block %v\n", blockNumber) 656 return nil 657 } 658 659 func compact(ctx *cli.Context) error { 660 stack := makeFullNode(ctx) 661 _, db := utils.MakeChain(ctx, stack) 662 start := time.Now() 663 err := db.Compact(nil, nil) 664 log.Info("Done", "time", time.Since(start)) 665 return err 666 } 667 668 669 func inspect(ctx *cli.Context) error { 670 node, _ := makeConfigNode(ctx) 671 defer node.Close() 672 673 _, chainDb := utils.MakeChain(ctx, node) 674 defer chainDb.Close() 675 676 return rawdb.InspectDatabase(chainDb) 677 } 678 679 // hashish returns true for strings that look like hashes. 680 func hashish(x string) bool { 681 _, err := strconv.Atoi(x) 682 return err != nil 683 }