// Copyright 2015 The go-ethereum Authors
// This file is part of go-neatchain.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-neatchain. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/Gessiux/neatchain/neatdb"
	neatptc "github.com/Gessiux/neatchain/neatptc"
	"github.com/Gessiux/neatchain/params"

	"github.com/Gessiux/neatchain/chain/core"
	"github.com/Gessiux/neatchain/chain/core/rawdb"
	"github.com/Gessiux/neatchain/chain/core/state"
	"github.com/Gessiux/neatchain/chain/core/types"
	"github.com/Gessiux/neatchain/chain/log"
	"github.com/Gessiux/neatchain/neatptc/downloader"
	"github.com/Gessiux/neatchain/utilities/common"
	"github.com/Gessiux/neatchain/utilities/console"
	"github.com/Gessiux/neatchain/utilities/event"
	"github.com/Gessiux/neatchain/utilities/rlp"
	"github.com/Gessiux/neatchain/utilities/utils"
	"gopkg.in/urfave/cli.v1"
)

var (
	initNEATGenesisCmd = cli.Command{
		Action:    utils.MigrateFlags(initNeatGenesis),
		Name:      "init-neatchain",
		Usage:     "Initialize NEAT genesis.json file. init-neatchain {\"1000000000000000000000000000\",\"100000000000000000000000\"}",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category:    "BLOCKCHAIN COMMANDS",
		Description: "The init-neatchain command initializes a new NEAT genesis.json file for the network.",
	}

	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initCmd),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	initSideChainCmd = cli.Command{
		Action:      utils.MigrateFlags(InitSideChainCmd),
		Name:        "init-side-chain",
		Usage:       "neatchain --sideChain=side_0,side_1,side_2 init-side-chain",
		Description: "Initialize side chain genesis from chain info db",
	}
	// initCommand = cli.Command{
	//	Action:    utils.MigrateFlags(initGenesis),
	//	Name:      "init",
	//	Usage:     "Bootstrap and initialize a new genesis block",
	//	ArgsUsage: "<genesisPath>",
	//	Flags: []cli.Flag{
	//		utils.DataDirFlag,
	//	},
	//	Category: "BLOCKCHAIN COMMANDS",
	//	Description: `
	//The init command initializes a new genesis block and definition for the network.
	//This is a destructive action and changes the network in which you will be
	//participating.

	//It expects the genesis file as argument.`,
	// }

	createValidatorCmd = cli.Command{
		//Action: GeneratePrivateValidatorCmd,
		Action: utils.MigrateFlags(CreatePrivateValidatorCmd),
		Name:   "create-validator",
		Usage:  "create-validator address", //create priv_validator.json for address
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Description: "Create priv_validator.json for address",
	}

	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<chainname> <filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are used,
processing will proceed even if an individual RLP file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<chainname> <filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.TestnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases.`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "neatchain dump 0" to dump the genesis block.`,
	}
	countBlockStateCommand = cli.Command{
		Action:    utils.MigrateFlags(countBlockState),
		Name:      "count-blockstate",
		Usage:     "Count the block state",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The count-blockstate command counts the block state from a given height.`,
	}

	versionCommand = cli.Command{
		Action:    utils.MigrateFlags(version),
		Name:      "version",
		Usage:     "Print version numbers",
		ArgsUsage: " ",
		Category:  "MISCELLANEOUS COMMANDS",
		Description: `
The output of this command is supposed to be machine-readable.
`,
	}
)

// initGenesis will initialise the given JSON format genesis file and writes it as
// the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
//func initGenesis(ctx *cli.Context) error {
//	// Make sure we have a valid genesis JSON
//	genesisPath := ctx.Args().First()
//	if len(genesisPath) == 0 {
//		utils.Fatalf("Must supply path to genesis JSON file")
//	}
//	file, err := os.Open(genesisPath)
//	if err != nil {
//		utils.Fatalf("Failed to read genesis file: %v", err)
//	}
//	defer file.Close()
//
//	genesis := new(core.Genesis)
//	if err := json.NewDecoder(file).Decode(genesis); err != nil {
//		utils.Fatalf("invalid genesis file: %v", err)
//	}
//	// Open and initialise both full and light databases
//	stack := makeFullNode(ctx)
//	for _, name := range []string{"chaindata", "lightchaindata"} {
//		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
//		if err != nil {
//			utils.Fatalf("Failed to open database: %v", err)
//		}
//		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
//		if err != nil {
//			utils.Fatalf("Failed to write genesis block: %v", err)
//		}
//		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
//	}
//	return nil
//}

// importChain imports one or more RLP-encoded block files into the named chain
// and prints database and memory statistics once the import has finished.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	chainName := ctx.Args().First()
	if chainName == "" {
		utils.Fatalf("This command requires the chain name to be specified.")
	}

	stack, cfg := makeConfigNode(ctx, chainName)
	cch := GetCMInstance(ctx).cch
	utils.RegisterIntService(stack, &cfg.Eth, ctx, cch)
	//stack := makeFullNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 2 {
		if err := utils.ImportChain(chain, ctx.Args().Get(1)); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for i, arg := range ctx.Args() {
			if i == 0 {
				continue // skip the chain name
			}
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import trashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	return nil
}

// exportChain writes the named chain, or an optional block range of it, to an
// RLP-encoded file.
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	chainName := ctx.Args().First()
	if chainName == "" {
		utils.Fatalf("This command requires the chain name to be specified.")
	}

	stack, cfg := makeConfigNode(ctx, chainName)
	utils.RegisterIntService(stack, &cfg.Eth, ctx, GetCMInstance(ctx).cch)
	//stack := makeFullNode(ctx)
	defer stack.Close()

	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().Get(1)
	if len(ctx.Args()) < 4 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(3), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must be greater than 0\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	chainName := ctx.Args().Get(1)
	if chainName == "" {
		utils.Fatalf("This command requires the chain name to be specified.")
	}

	stack, cfg := makeConfigNode(ctx, chainName)
	utils.RegisterIntService(stack, &cfg.Eth, ctx, GetCMInstance(ctx).cch)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified file in a streaming way.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	chainName := ctx.Args().Get(1)
	if chainName == "" {
		utils.Fatalf("This command requires the chain name to be specified.")
	}

	stack, cfg := makeConfigNode(ctx, chainName)
	utils.RegisterIntService(stack, &cfg.Eth, ctx, GetCMInstance(ctx).cch)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// copyDb syncs a local chain from an existing chaindata directory by serving it
// through a simulated peer, then compacts the resulting database.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}

	chainName := ctx.Args().Get(1)
	if chainName == "" {
		utils.Fatalf("This command requires the chain name to be specified.")
	}

	// Initialize a new chain for the running node to sync into
	stack, _ := makeConfigNode(ctx, chainName)
	chain, chainDb := utils.MakeChain(ctx, stack)

	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256, "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}

// removeDB interactively removes the chain and state databases of the named chain.
func removeDB(ctx *cli.Context) error {
	chainName := ctx.Args().Get(1)
	if chainName == "" {
utils.Fatalf("This command requires chain name specified.") 528 } 529 530 stack, _ := makeConfigNode(ctx, chainName) 531 532 for _, name := range []string{"chaindata", "lightchaindata"} { 533 // Ensure the database exists in the first place 534 logger := log.New("database", name) 535 536 dbdir := stack.ResolvePath(name) 537 if !common.FileExist(dbdir) { 538 logger.Info("Database doesn't exist, skipping", "path", dbdir) 539 continue 540 } 541 // Confirm removal and execute 542 fmt.Println(dbdir) 543 confirm, err := console.Stdin.PromptConfirm("Remove this database?") 544 switch { 545 case err != nil: 546 utils.Fatalf("%v", err) 547 case !confirm: 548 logger.Warn("Database deletion aborted") 549 default: 550 start := time.Now() 551 os.RemoveAll(dbdir) 552 logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start))) 553 } 554 } 555 return nil 556 } 557 558 func dump(ctx *cli.Context) error { 559 chainName := ctx.Args().Get(1) 560 if chainName == "" { 561 utils.Fatalf("This command requires chain name specified.") 562 } 563 564 stack, _ := makeConfigNode(ctx, chainName) 565 chain, chainDb := utils.MakeChain(ctx, stack) 566 for _, arg := range ctx.Args() { 567 var block *types.Block 568 if hashish(arg) { 569 block = chain.GetBlockByHash(common.HexToHash(arg)) 570 } else { 571 num, _ := strconv.Atoi(arg) 572 block = chain.GetBlockByNumber(uint64(num)) 573 } 574 if block == nil { 575 fmt.Println("{}") 576 utils.Fatalf("block not found") 577 } else { 578 state, err := state.New(block.Root(), state.NewDatabase(chainDb)) 579 if err != nil { 580 utils.Fatalf("could not create new state: %v", err) 581 } 582 fmt.Printf("%s\n", state.Dump()) 583 } 584 } 585 chainDb.Close() 586 return nil 587 } 588 589 // hashish returns true for strings that look like hashes. 
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}

// countBlockState walks the state trie of the block at the given height and
// reports the number of trie nodes and their total size.
func countBlockState(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	chainName := ctx.Args().Get(1)
	if chainName == "" {
		utils.Fatalf("This command requires the chain name to be specified.")
	}

	stack, cfg := makeConfigNode(ctx, chainName)
	utils.RegisterIntService(stack, &cfg.Eth, ctx, GetCMInstance(ctx).cch)
	defer stack.Close()

	chainDb := utils.MakeChainDatabase(ctx, stack)

	height, _ := strconv.ParseUint(ctx.Args().First(), 10, 64)

	blockhash := rawdb.ReadCanonicalHash(chainDb, height)
	block := rawdb.ReadBlock(chainDb, blockhash, height)
	bsize := block.Size()

	root := block.Header().Root
	statedb, _ := state.New(block.Root(), state.NewDatabase(chainDb))
	accountTrie, _ := statedb.Database().OpenTrie(root)

	count := CountSize{}
	countTrie(chainDb, accountTrie, &count, func(addr common.Address, account state.Account) {
		if account.Root != emptyRoot {
			storageTrie, _ := statedb.Database().OpenStorageTrie(common.Hash{}, account.Root)
			countTrie(chainDb, storageTrie, &count, nil)
		}

		if account.TX1Root != emptyRoot {
			tx1Trie, _ := statedb.Database().OpenTX1Trie(common.Hash{}, account.TX1Root)
			countTrie(chainDb, tx1Trie, &count, nil)
		}

		if account.TX3Root != emptyRoot {
			tx3Trie, _ := statedb.Database().OpenTX3Trie(common.Hash{}, account.TX3Root)
			countTrie(chainDb, tx3Trie, &count, nil)
		}

		if account.ProxiedRoot != emptyRoot {
			proxiedTrie, _ := statedb.Database().OpenProxiedTrie(common.Hash{}, account.ProxiedRoot)
			countTrie(chainDb, proxiedTrie, &count, nil)
		}

		if account.RewardRoot != emptyRoot {
			rewardTrie, _ := statedb.Database().OpenRewardTrie(common.Hash{}, account.RewardRoot)
			countTrie(chainDb, rewardTrie, &count, nil)
		}
	})

	// Open the file handle to dump the collected trie nodes into
	fh, err := os.OpenFile("blockstate_nodedump", os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	// Write Node Data into file
	for _, data := range count.Data {
		fh.WriteString(data.key + " " + data.value + "\n")
	}

	fmt.Printf("Block %d, block size %v, state node %v, state size %v\n", height, bsize, count.Totalnode, count.Totalnodevaluesize)
	return nil
}

// CountSize accumulates the number of visited trie nodes and their total value size.
type CountSize struct {
	Totalnodevaluesize, Totalnode int
	Data                          []nodeData
}

type nodeData struct {
	key, value string
}

// processLeafTrie is the callback invoked for every account leaf found while counting.
type processLeafTrie func(addr common.Address, account state.Account)

var emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

// countTrie iterates over every node of the given trie, recording node sizes, and
// invokes processLeaf for each account leaf it encounters.
func countTrie(db neatdb.Database, t state.Trie, count *CountSize, processLeaf processLeafTrie) {
	for it := t.NodeIterator(nil); it.Next(true); {
		if !it.Leaf() {
			// non leaf node -> count += value
			node, _ := db.Get(it.Hash().Bytes())
			count.Totalnodevaluesize += len(node)
			count.Totalnode++
			count.Data = append(count.Data, nodeData{it.Hash().String(), common.Bytes2Hex(node)})
		} else {
			// Process the Account -> Inner Trie
			if processLeaf != nil {
				addr := t.GetKey(it.LeafKey())
				if len(addr) == 20 {
					var data state.Account
					rlp.DecodeBytes(it.LeafBlob(), &data)

					processLeaf(common.BytesToAddress(addr), data)
				}
			}
		}
	}
}

// version prints build and runtime version information for the client.
func version(ctx *cli.Context) error {
	fmt.Println("Chain:", clientIdentifier)
	fmt.Println("Version:", params.VersionWithMeta)
	if gitCommit != "" {
		fmt.Println("Git Commit:", gitCommit)
	}
	if gitDate != "" {
		fmt.Println("Git Commit Date:", gitDate)
	}
	fmt.Println("Architecture:", runtime.GOARCH)
	fmt.Println("Protocol Versions:", neatptc.ProtocolVersions)
	fmt.Println("Go Version:", runtime.Version())
	fmt.Println("Operating System:", runtime.GOOS)
	fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
	fmt.Printf("GOROOT=%s\n", runtime.GOROOT())
	return nil
}