// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"net"
	"os"
	"path"
	"runtime"
	"strconv"
	"strings"
	"sync/atomic"
	"time"

	"github.com/phillinzzz/newBsc/cmd/utils"
	"github.com/phillinzzz/newBsc/common"
	"github.com/phillinzzz/newBsc/core"
	"github.com/phillinzzz/newBsc/core/rawdb"
	"github.com/phillinzzz/newBsc/core/state"
	"github.com/phillinzzz/newBsc/core/types"
	"github.com/phillinzzz/newBsc/log"
	"github.com/phillinzzz/newBsc/metrics"
	"github.com/phillinzzz/newBsc/node"
	"github.com/phillinzzz/newBsc/p2p/enode"
	"gopkg.in/urfave/cli.v1"
)

// Declarations of all blockchain-related subcommands. Each cli.Command binds a
// name/usage string to an action function defined below in this file; the
// actions are wrapped by utils.MigrateFlags so global flags set after the
// subcommand name are still honored.
var (
	// initCommand writes a genesis block from a user-supplied JSON file.
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	// initNetworkCommand bootstraps genesis, node keys and config files for a
	// whole multi-node network in one shot.
	initNetworkCommand = cli.Command{
		Action:    utils.MigrateFlags(initNetwork),
		Name:      "init-network",
		Usage:     "Bootstrap and initialize a new genesis block, and nodekey, config files for network nodes",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.InitNetworkDir,
			utils.InitNetworkPort,
			utils.InitNetworkSize,
			utils.InitNetworkIps,
			configFileFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init-network command initializes a new genesis block, definition for the network, config files for network nodes.
It expects the genesis file as argument.`,
	}
	// dumpGenesisCommand prints the (built-in or flag-selected) genesis spec.
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
			utils.YoloV3Flag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	// importCommand imports RLP-encoded blocks from one or more files.
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.TxLookupLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, import error will result in failure. If several files are used,
processing will proceed even if an individual RLP-file import failure occurs.`,
	}
	// exportCommand exports the chain (optionally a block range) to a file.
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	// importPreimagesCommand loads hash preimages from an RLP stream.
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.`,
	}
	// exportPreimagesCommand writes hash preimages out as an RLP stream.
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command export hash preimages to an RLP encoded stream`,
	}
	// dumpCommand prints the full state of one or more blocks as JSON.
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
)

// initGenesis will initialise the given JSON format genesis file and writes it as
// the zero'd block (i.e. genesis) or will fail hard if it can't succeed.
196 func initGenesis(ctx *cli.Context) error { 197 // Make sure we have a valid genesis JSON 198 genesisPath := ctx.Args().First() 199 if len(genesisPath) == 0 { 200 utils.Fatalf("Must supply path to genesis JSON file") 201 } 202 file, err := os.Open(genesisPath) 203 if err != nil { 204 utils.Fatalf("Failed to read genesis file: %v", err) 205 } 206 defer file.Close() 207 208 genesis := new(core.Genesis) 209 if err := json.NewDecoder(file).Decode(genesis); err != nil { 210 utils.Fatalf("invalid genesis file: %v", err) 211 } 212 // Open and initialise both full and light databases 213 stack, _ := makeConfigNode(ctx) 214 defer stack.Close() 215 216 for _, name := range []string{"chaindata", "lightchaindata"} { 217 chaindb, err := stack.OpenDatabase(name, 0, 0, "", false) 218 if err != nil { 219 utils.Fatalf("Failed to open database: %v", err) 220 } 221 _, hash, err := core.SetupGenesisBlock(chaindb, genesis) 222 if err != nil { 223 utils.Fatalf("Failed to write genesis block: %v", err) 224 } 225 chaindb.Close() 226 log.Info("Successfully wrote genesis state", "database", name, "hash", hash) 227 } 228 return nil 229 } 230 231 // initNetwork will bootstrap and initialize a new genesis block, and nodekey, config files for network nodes 232 func initNetwork(ctx *cli.Context) error { 233 initDir := ctx.String(utils.InitNetworkDir.Name) 234 if len(initDir) == 0 { 235 utils.Fatalf("init.dir is required") 236 } 237 size := ctx.Int(utils.InitNetworkSize.Name) 238 port := ctx.Int(utils.InitNetworkPort.Name) 239 ipStr := ctx.String(utils.InitNetworkIps.Name) 240 cfgFile := ctx.String(configFileFlag.Name) 241 242 if len(cfgFile) == 0 { 243 utils.Fatalf("config file is required") 244 } 245 var ips []string 246 if len(ipStr) != 0 { 247 ips = strings.Split(ipStr, ",") 248 if len(ips) != size { 249 utils.Fatalf("mismatch of size and length of ips") 250 } 251 for i := 0; i < size; i++ { 252 _, err := net.ResolveIPAddr("", ips[i]) 253 if err != nil { 254 utils.Fatalf("invalid format of 
ip") 255 return err 256 } 257 } 258 } else { 259 ips = make([]string, size) 260 for i := 0; i < size; i++ { 261 ips[i] = "127.0.0.1" 262 } 263 } 264 265 // Make sure we have a valid genesis JSON 266 genesisPath := ctx.Args().First() 267 if len(genesisPath) == 0 { 268 utils.Fatalf("Must supply path to genesis JSON file") 269 } 270 file, err := os.Open(genesisPath) 271 if err != nil { 272 utils.Fatalf("Failed to read genesis file: %v", err) 273 } 274 defer file.Close() 275 276 genesis := new(core.Genesis) 277 if err := json.NewDecoder(file).Decode(genesis); err != nil { 278 utils.Fatalf("invalid genesis file: %v", err) 279 } 280 enodes := make([]*enode.Node, size) 281 282 // load config 283 var config gethConfig 284 err = loadConfig(cfgFile, &config) 285 if err != nil { 286 return err 287 } 288 config.Eth.Genesis = genesis 289 290 for i := 0; i < size; i++ { 291 stack, err := node.New(&config.Node) 292 if err != nil { 293 return err 294 } 295 stack.Config().DataDir = path.Join(initDir, fmt.Sprintf("node%d", i)) 296 pk := stack.Config().NodeKey() 297 enodes[i] = enode.NewV4(&pk.PublicKey, net.ParseIP(ips[i]), port, port) 298 } 299 300 for i := 0; i < size; i++ { 301 config.Node.HTTPHost = ips[i] 302 config.Node.P2P.StaticNodes = make([]*enode.Node, size-1) 303 for j := 0; j < i; j++ { 304 config.Node.P2P.StaticNodes[j] = enodes[j] 305 } 306 for j := i + 1; j < size; j++ { 307 config.Node.P2P.StaticNodes[j-1] = enodes[j] 308 } 309 out, err := tomlSettings.Marshal(config) 310 if err != nil { 311 return err 312 } 313 dump, err := os.OpenFile(path.Join(initDir, fmt.Sprintf("node%d", i), "config.toml"), os.O_RDWR|os.O_CREATE|os.O_TRUNC, 0644) 314 if err != nil { 315 return err 316 } 317 defer dump.Close() 318 dump.Write(out) 319 } 320 return nil 321 } 322 323 func dumpGenesis(ctx *cli.Context) error { 324 // TODO(rjl493456442) support loading from the custom datadir 325 genesis := utils.MakeGenesis(ctx) 326 if genesis == nil { 327 genesis = core.DefaultGenesisBlock() 328 } 
329 if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil { 330 utils.Fatalf("could not encode genesis") 331 } 332 return nil 333 } 334 335 func importChain(ctx *cli.Context) error { 336 if len(ctx.Args()) < 1 { 337 utils.Fatalf("This command requires an argument.") 338 } 339 // Start metrics export if enabled 340 utils.SetupMetrics(ctx) 341 // Start system runtime metrics collection 342 go metrics.CollectProcessMetrics(3 * time.Second) 343 344 stack, _ := makeConfigNode(ctx) 345 defer stack.Close() 346 347 chain, db := utils.MakeChain(ctx, stack) 348 defer db.Close() 349 350 // Start periodically gathering memory profiles 351 var peakMemAlloc, peakMemSys uint64 352 go func() { 353 stats := new(runtime.MemStats) 354 for { 355 runtime.ReadMemStats(stats) 356 if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc { 357 atomic.StoreUint64(&peakMemAlloc, stats.Alloc) 358 } 359 if atomic.LoadUint64(&peakMemSys) < stats.Sys { 360 atomic.StoreUint64(&peakMemSys, stats.Sys) 361 } 362 time.Sleep(5 * time.Second) 363 } 364 }() 365 // Import the chain 366 start := time.Now() 367 368 var importErr error 369 370 if len(ctx.Args()) == 1 { 371 if err := utils.ImportChain(chain, ctx.Args().First()); err != nil { 372 importErr = err 373 log.Error("Import error", "err", err) 374 } 375 } else { 376 for _, arg := range ctx.Args() { 377 if err := utils.ImportChain(chain, arg); err != nil { 378 importErr = err 379 log.Error("Import error", "file", arg, "err", err) 380 } 381 } 382 } 383 chain.Stop() 384 fmt.Printf("Import done in %v.\n\n", time.Since(start)) 385 386 // Output pre-compaction stats mostly to see the import trashing 387 showLeveldbStats(db) 388 389 // Print the memory statistics used by the importing 390 mem := new(runtime.MemStats) 391 runtime.ReadMemStats(mem) 392 393 fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024) 394 fmt.Printf("System memory: %.3f MB current, %.3f MB 
peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024) 395 fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000) 396 fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs)) 397 398 if ctx.GlobalBool(utils.NoCompactionFlag.Name) { 399 return nil 400 } 401 402 // Compact the entire database to more accurately measure disk io and print the stats 403 start = time.Now() 404 fmt.Println("Compacting entire database...") 405 if err := db.Compact(nil, nil); err != nil { 406 utils.Fatalf("Compaction failed: %v", err) 407 } 408 fmt.Printf("Compaction done in %v.\n\n", time.Since(start)) 409 410 showLeveldbStats(db) 411 return importErr 412 } 413 414 func exportChain(ctx *cli.Context) error { 415 if len(ctx.Args()) < 1 { 416 utils.Fatalf("This command requires an argument.") 417 } 418 419 stack, _ := makeConfigNode(ctx) 420 defer stack.Close() 421 422 chain, _ := utils.MakeChain(ctx, stack) 423 start := time.Now() 424 425 var err error 426 fp := ctx.Args().First() 427 if len(ctx.Args()) < 3 { 428 err = utils.ExportChain(chain, fp) 429 } else { 430 // This can be improved to allow for numbers larger than 9223372036854775807 431 first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64) 432 last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64) 433 if ferr != nil || lerr != nil { 434 utils.Fatalf("Export error in parsing parameters: block number not an integer\n") 435 } 436 if first < 0 || last < 0 { 437 utils.Fatalf("Export error: block number must be greater than 0\n") 438 } 439 if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() { 440 utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64()) 441 } 442 err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last)) 443 } 444 445 if err != nil { 446 utils.Fatalf("Export error: %v\n", err) 447 } 448 fmt.Printf("Export done in %v\n", time.Since(start)) 449 return nil 450 } 451 452 // importPreimages 
imports preimage data from the specified file. 453 func importPreimages(ctx *cli.Context) error { 454 if len(ctx.Args()) < 1 { 455 utils.Fatalf("This command requires an argument.") 456 } 457 458 stack, _ := makeConfigNode(ctx) 459 defer stack.Close() 460 461 db := utils.MakeChainDatabase(ctx, stack, false) 462 start := time.Now() 463 464 if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil { 465 utils.Fatalf("Import error: %v\n", err) 466 } 467 fmt.Printf("Import done in %v\n", time.Since(start)) 468 return nil 469 } 470 471 // exportPreimages dumps the preimage data to specified json file in streaming way. 472 func exportPreimages(ctx *cli.Context) error { 473 if len(ctx.Args()) < 1 { 474 utils.Fatalf("This command requires an argument.") 475 } 476 477 stack, _ := makeConfigNode(ctx) 478 defer stack.Close() 479 480 db := utils.MakeChainDatabase(ctx, stack, true) 481 start := time.Now() 482 483 if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil { 484 utils.Fatalf("Export error: %v\n", err) 485 } 486 fmt.Printf("Export done in %v\n", time.Since(start)) 487 return nil 488 } 489 490 func dump(ctx *cli.Context) error { 491 stack, _ := makeConfigNode(ctx) 492 defer stack.Close() 493 494 db := utils.MakeChainDatabase(ctx, stack, true) 495 for _, arg := range ctx.Args() { 496 var header *types.Header 497 if hashish(arg) { 498 hash := common.HexToHash(arg) 499 number := rawdb.ReadHeaderNumber(db, hash) 500 if number != nil { 501 header = rawdb.ReadHeader(db, hash, *number) 502 } 503 } else { 504 number, _ := strconv.Atoi(arg) 505 hash := rawdb.ReadCanonicalHash(db, uint64(number)) 506 if hash != (common.Hash{}) { 507 header = rawdb.ReadHeader(db, hash, uint64(number)) 508 } 509 } 510 if header == nil { 511 fmt.Println("{}") 512 utils.Fatalf("block not found") 513 } else { 514 state, err := state.New(header.Root, state.NewDatabase(db), nil) 515 if err != nil { 516 utils.Fatalf("could not create new state: %v", err) 517 } 518 excludeCode := 
ctx.Bool(utils.ExcludeCodeFlag.Name) 519 excludeStorage := ctx.Bool(utils.ExcludeStorageFlag.Name) 520 includeMissing := ctx.Bool(utils.IncludeIncompletesFlag.Name) 521 if ctx.Bool(utils.IterativeOutputFlag.Name) { 522 state.IterativeDump(excludeCode, excludeStorage, !includeMissing, json.NewEncoder(os.Stdout)) 523 } else { 524 if includeMissing { 525 fmt.Printf("If you want to include accounts with missing preimages, you need iterative output, since" + 526 " otherwise the accounts will overwrite each other in the resulting mapping.") 527 } 528 fmt.Printf("%v %s\n", includeMissing, state.Dump(excludeCode, excludeStorage, false)) 529 } 530 } 531 } 532 return nil 533 } 534 535 // hashish returns true for strings that look like hashes. 536 func hashish(x string) bool { 537 _, err := strconv.Atoi(x) 538 return err != nil 539 }