github.com/intfoundation/intchain@v0.0.0-20220727031208-4316ad31ca73/cmd/intchain/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/intfoundation/intchain/cmd/utils"
	"github.com/intfoundation/intchain/common"
	"github.com/intfoundation/intchain/console"
	"github.com/intfoundation/intchain/core"
	"github.com/intfoundation/intchain/core/rawdb"
	"github.com/intfoundation/intchain/core/state"
	"github.com/intfoundation/intchain/core/types"
	"github.com/intfoundation/intchain/event"
	"github.com/intfoundation/intchain/intdb"
	"github.com/intfoundation/intchain/intprotocol"
	"github.com/intfoundation/intchain/intprotocol/downloader"
	"github.com/intfoundation/intchain/log"
	"github.com/intfoundation/intchain/params"
	"github.com/intfoundation/intchain/rlp"
	"gopkg.in/urfave/cli.v1"
)

var (
	initINTGenesisCmd = cli.Command{
		Action:    utils.MigrateFlags(initIntGenesis),
		Name:      "init-intchain",
		Usage:     "Initialize INT genesis.json file. init-intchain {\"1000000000\",\"100000\"}",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category:    "BLOCKCHAIN COMMANDS",
		Description: "The init-intchain command initializes a new INT genesis.json file for the network.",
	}

	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initCmd),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	//initChildChainCmd = cli.Command{
	//	Action:      utils.MigrateFlags(InitChildChainCmd),
	//	Name:        "init-child-chain",
	//	Usage:       "intchain --childChain=child_0,child_1,child_2 init-child-chain",
	//	Description: "Initialize child chain genesis from chain info db",
	//}
	//	initCommand = cli.Command{
	//		Action:    utils.MigrateFlags(initGenesis),
	//		Name:      "init",
	//		Usage:     "Bootstrap and initialize a new genesis block",
	//		ArgsUsage: "<genesisPath>",
	//		Flags: []cli.Flag{
	//			utils.DataDirFlag,
	//		},
	//		Category: "BLOCKCHAIN COMMANDS",
	//		Description: `
	//The init command initializes a new genesis block and definition for the network.
	//This is a destructive action and changes the network in which you will be
	//participating.
	//
	//It expects the genesis file as argument.`,
	//	}

	createValidatorCmd = cli.Command{
		//Action: GeneratePrivateValidatorCmd,
		Action: utils.MigrateFlags(CreatePrivateValidatorCmd),
		Name:   "create-validator",
		Usage:  "create-validator address", // create priv_validator.json for address
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Description: "Create priv_validator.json for address",
	}

	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<chainname> <filename> (<filename 2> ... <filename N>)",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded file. The input can be a single
file with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are
used, processing will proceed even if an individual RLP file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<chainname> <filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP-encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP-encoded stream.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.TestnetFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "intchain dump 0" to dump the genesis block.`,
	}
	countBlockStateCommand = cli.Command{
		Action:    utils.MigrateFlags(countBlockState),
		Name:      "count-blockstate",
		Usage:     "Count the block state",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The count-blockstate command counts the block state from a given height.`,
	}

	versionCommand = cli.Command{
		Action:    utils.MigrateFlags(version),
		Name:      "version",
		Usage:     "Print version numbers",
		ArgsUsage: " ",
		Category:  "MISCELLANEOUS COMMANDS",
		Description: `
The output of this command is supposed to be machine-readable.
`,
	}
)
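// Illustrative invocations of the commands defined above. The binary name
// "intchain" and all argument values are examples derived from the Usage and
// ArgsUsage strings, not from this repository's documentation; several of the
// handlers below additionally read the chain name from the argument list:
//
//	intchain init /path/to/genesis.json
//	intchain create-validator <address>
//	intchain import <chainname> blocks.rlp
//	intchain export <chainname> chain.rlp 100 200
//	intchain count-blockstate <height>
//	intchain version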
// initGenesis will initialise the given JSON format genesis file and write it as
// the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
//func initGenesis(ctx *cli.Context) error {
//	// Make sure we have a valid genesis JSON
//	genesisPath := ctx.Args().First()
//	if len(genesisPath) == 0 {
//		utils.Fatalf("Must supply path to genesis JSON file")
//	}
//	file, err := os.Open(genesisPath)
//	if err != nil {
//		utils.Fatalf("Failed to read genesis file: %v", err)
//	}
//	defer file.Close()
//
//	genesis := new(core.Genesis)
//	if err := json.NewDecoder(file).Decode(genesis); err != nil {
//		utils.Fatalf("invalid genesis file: %v", err)
//	}
//	// Open and initialise both full and light databases
//	stack := makeFullNode(ctx)
//	for _, name := range []string{"chaindata", "lightchaindata"} {
//		chaindb, err := stack.OpenDatabase(name, 0, 0, "")
//		if err != nil {
//			utils.Fatalf("Failed to open database: %v", err)
//		}
//		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
//		if err != nil {
//			utils.Fatalf("Failed to write genesis block: %v", err)
//		}
//		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
//	}
//	return nil
//}

// importChain imports one or more RLP-encoded block files into the named chain.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	chainName := ctx.Args().First()
	if chainName == "" {
		utils.Fatalf("This command requires a chain name to be specified.")
	}

	stack, cfg := makeConfigNode(ctx, chainName)
	cch := GetCMInstance(ctx).cch
	utils.RegisterIntService(stack, &cfg.Eth, ctx, cch)
	//stack := makeFullNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 2 {
		if err := utils.ImportChain(chain, ctx.Args().Get(1)); err != nil {
			log.Error("Import error", "err", err)
		}
	} else {
		for i, arg := range ctx.Args() {
			if i == 0 {
				continue // skip the chain name
			}
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats, mostly to see the import thrashing
	stats, err := db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err := db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.Stat("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	ioStats, err = db.Stat("leveldb.iostats")
	if err != nil {
		utils.Fatalf("Failed to read database iostats: %v", err)
	}
	fmt.Println(ioStats)

	return nil
}

// exportChain exports the named chain to an RLP-encoded file, optionally
// limited to a block range.
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	chainName := ctx.Args().First()
	if chainName == "" {
		utils.Fatalf("This command requires a chain name to be specified.")
	}

	stack, cfg := makeConfigNode(ctx, chainName)
	utils.RegisterIntService(stack, &cfg.Eth, ctx, GetCMInstance(ctx).cch)
	//stack := makeFullNode(ctx)
	defer stack.Close()

	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().Get(1)
	if len(ctx.Args()) < 4 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(3), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}
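// Note on the ranged form of export: an invocation like
// "intchain export <chainname> chain.rlp 100 200" (block numbers illustrative)
// goes through utils.ExportAppendChain, so the selected blocks are appended to
// chain.rlp if that file already exists.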
// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	chainName := ctx.Args().Get(1)
	if chainName == "" {
		utils.Fatalf("This command requires a chain name to be specified.")
	}

	stack, cfg := makeConfigNode(ctx, chainName)
	utils.RegisterIntService(stack, &cfg.Eth, ctx, GetCMInstance(ctx).cch)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified file in a streaming way.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	chainName := ctx.Args().Get(1)
	if chainName == "" {
		utils.Fatalf("This command requires a chain name to be specified.")
	}

	stack, cfg := makeConfigNode(ctx, chainName)
	utils.RegisterIntService(stack, &cfg.Eth, ctx, GetCMInstance(ctx).cch)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack)
	start := time.Now()

	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// copyDb creates a fresh local chain database and fills it by syncing from the
// blockchain found in a source chaindata directory, served through a simulated
// local peer.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chaindata directory (arg 0) and a chain name (arg 1)
	if len(ctx.Args()) < 2 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}

	chainName := ctx.Args().Get(1)
	if chainName == "" {
		utils.Fatalf("This command requires a chain name to be specified.")
	}

	// Initialize a new chain for the running node to sync into
	stack, _ := makeConfigNode(ctx, chainName)
	chain, chainDb := utils.MakeChain(ctx, stack)

	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := rawdb.NewLevelDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256, "")
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}
// removeDB deletes the chaindata and lightchaindata databases after prompting
// the user for confirmation.
func removeDB(ctx *cli.Context) error {
	chainName := ctx.Args().Get(1)
	if chainName == "" {
		utils.Fatalf("This command requires a chain name to be specified.")
	}

	stack, _ := makeConfigNode(ctx, chainName)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
			logger.Info("Database doesn't exist, skipping", "path", dbdir)
			continue
		}
		// Confirm removal and execute
		fmt.Println(dbdir)
		confirm, err := console.Stdin.PromptConfirm("Remove this database?")
		switch {
		case err != nil:
			utils.Fatalf("%v", err)
		case !confirm:
			logger.Warn("Database deletion aborted")
		default:
			start := time.Now()
			os.RemoveAll(dbdir)
			logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
		}
	}
	return nil
}

// dump prints the state of one or more blocks, referenced by number or hash.
func dump(ctx *cli.Context) error {
	chainName := ctx.Args().Get(1)
	if chainName == "" {
		utils.Fatalf("This command requires a chain name to be specified.")
	}

	stack, _ := makeConfigNode(ctx, chainName)
	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}
// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}

// countBlockState walks every state trie reachable from the block at the given
// height, counts the trie nodes and their total size, and dumps the raw nodes
// to a file named "blockstate_nodedump".
func countBlockState(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	chainName := ctx.Args().Get(1)
	if chainName == "" {
		utils.Fatalf("This command requires a chain name to be specified.")
	}

	stack, cfg := makeConfigNode(ctx, chainName)
	utils.RegisterIntService(stack, &cfg.Eth, ctx, GetCMInstance(ctx).cch)
	defer stack.Close()

	chainDb := utils.MakeChainDatabase(ctx, stack)

	height, _ := strconv.ParseUint(ctx.Args().First(), 10, 64)

	blockhash := rawdb.ReadCanonicalHash(chainDb, height)
	block := rawdb.ReadBlock(chainDb, blockhash, height)
	bsize := block.Size()

	root := block.Header().Root
	statedb, _ := state.New(block.Root(), state.NewDatabase(chainDb))
	accountTrie, _ := statedb.Database().OpenTrie(root)

	count := CountSize{}
	countTrie(chainDb, accountTrie, &count, func(addr common.Address, account state.Account) {
		if account.Root != emptyRoot {
			storageTrie, _ := statedb.Database().OpenStorageTrie(common.Hash{}, account.Root)
			countTrie(chainDb, storageTrie, &count, nil)
		}

		if account.TX1Root != emptyRoot {
			tx1Trie, _ := statedb.Database().OpenTX1Trie(common.Hash{}, account.TX1Root)
			countTrie(chainDb, tx1Trie, &count, nil)
		}

		if account.TX3Root != emptyRoot {
			tx3Trie, _ := statedb.Database().OpenTX3Trie(common.Hash{}, account.TX3Root)
			countTrie(chainDb, tx3Trie, &count, nil)
		}

		if account.ProxiedRoot != emptyRoot {
			proxiedTrie, _ := statedb.Database().OpenProxiedTrie(common.Hash{}, account.ProxiedRoot)
			countTrie(chainDb, proxiedTrie, &count, nil)
		}

		if account.RewardRoot != emptyRoot {
			rewardTrie, _ := statedb.Database().OpenRewardTrie(common.Hash{}, account.RewardRoot)
			countTrie(chainDb, rewardTrie, &count, nil)
		}
	})

	// Open the file handle for the node dump
	fh, err := os.OpenFile("blockstate_nodedump", os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	// Write the collected node data into the file
	for _, data := range count.Data {
		fh.WriteString(data.key + " " + data.value + "\n")
	}

	fmt.Printf("Block %d, block size %v, state node %v, state size %v\n", height, bsize, count.Totalnode, count.Totalnodevaluesize)
	return nil
}
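// Each line written to "blockstate_nodedump" above has the form
// "<node hash> <hex-encoded node payload>", matching the WriteString call; for
// example (both values are fabricated for illustration only):
//
//	0xabc123...def456 f90211a0...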
// CountSize accumulates the number of state trie nodes visited, the total size
// of their payloads, and the raw node data itself.
type CountSize struct {
	Totalnodevaluesize, Totalnode int
	Data                          []nodeData
}

type nodeData struct {
	key, value string
}

// processLeafTrie is invoked for every account leaf found while walking the
// account trie, so that the account's inner tries can be counted as well.
type processLeafTrie func(addr common.Address, account state.Account)

// emptyRoot is the known root hash of an empty trie.
var emptyRoot = common.HexToHash("56e81f171bcc55a6ff8345e692c0f86e5b48e01b996cadc001622fb5e363b421")

// countTrie iterates over every node of the given trie, accumulating node
// counts and sizes into count, and calls processLeaf for every account leaf.
func countTrie(db intdb.Database, t state.Trie, count *CountSize, processLeaf processLeafTrie) {
	for it := t.NodeIterator(nil); it.Next(true); {
		if !it.Leaf() {
			// Non-leaf node: add its stored payload to the totals
			node, _ := db.Get(it.Hash().Bytes())
			count.Totalnodevaluesize += len(node)
			count.Totalnode++
			count.Data = append(count.Data, nodeData{it.Hash().String(), common.Bytes2Hex(node)})
		} else {
			// Leaf node: decode the account and descend into its inner tries
			if processLeaf != nil {
				addr := t.GetKey(it.LeafKey())
				if len(addr) == 20 {
					var data state.Account
					rlp.DecodeBytes(it.LeafBlob(), &data)

					processLeaf(common.BytesToAddress(addr), data)
				}
			}
		}
	}
}

// version prints build and runtime information about the client.
func version(ctx *cli.Context) error {
	fmt.Println("Chain:", clientIdentifier)
	fmt.Println("Version:", params.VersionWithMeta)
	if gitCommit != "" {
		fmt.Println("Git Commit:", gitCommit)
	}
	if gitDate != "" {
		fmt.Println("Git Commit Date:", gitDate)
	}
	fmt.Println("Architecture:", runtime.GOARCH)
	fmt.Println("Protocol Versions:", intprotocol.ProtocolVersions)
	fmt.Println("Go Version:", runtime.Version())
	fmt.Println("Operating System:", runtime.GOOS)
	fmt.Printf("GOPATH=%s\n", os.Getenv("GOPATH"))
	fmt.Printf("GOROOT=%s\n", runtime.GOROOT())
	return nil
}
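// For reference, the version command prints one field per line in the shape
// sketched below; every value here is illustrative and not taken from an
// actual build of this repository:
//
//	Chain: intchain
//	Version: 1.0.0
//	Architecture: amd64
//	Protocol Versions: [64]
//	Go Version: go1.16
//	Operating System: linux
//	GOPATH=/home/user/go
//	GOROOT=/usr/local/go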