github.com/arjunbeliever/ignite@v0.0.0-20220406110515-46bbbbec2587/cmd/ignite/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/arjunbeliever/ignite/cmd/utils"
	"github.com/arjunbeliever/ignite/common"
	"github.com/arjunbeliever/ignite/common/hexutil"
	"github.com/arjunbeliever/ignite/core"
	"github.com/arjunbeliever/ignite/core/rawdb"
	"github.com/arjunbeliever/ignite/core/state"
	"github.com/arjunbeliever/ignite/core/types"
	"github.com/arjunbeliever/ignite/crypto"
	"github.com/arjunbeliever/ignite/ethdb"
	"github.com/arjunbeliever/ignite/log"
	"github.com/arjunbeliever/ignite/metrics"
	"github.com/arjunbeliever/ignite/node"
	"gopkg.in/urfave/cli.v1"
)
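// This file defines the blockchain maintenance subcommands: genesis
// initialisation and dumping, chain import/export, preimage database
// transfer, and state dumping. It originates from go-ethereum's
// cmd/geth/chaincmd.go (see the license header above).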
var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags: []cli.Flag{
			utils.MainnetFlag,
			utils.RopstenFlag,
			utils.RinkebyFlag,
			utils.GoerliFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>)",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded file. The input can be a
single file containing several RLP-encoded blocks, or several such files.

If only one file is used, an import error will result in failure. If several
files are used, processing will continue even if an individual file fails to
import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
to if it already exists. If the file ends with .gz, the output will
be gzipped.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP-encoded stream.`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP-encoded stream.`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}
)
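// Illustrative invocations of the commands above, assuming the built binary
// is named "ignite" (after this module's cmd path) and that all file paths
// and block numbers are placeholders:
//
//	ignite init path/to/genesis.json
//	ignite dumpgenesis > genesis.json
//	ignite import chain1.rlp chain2.rlp
//	ignite export chain-backup.rlp.gz 0 1000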
// initGenesis initialises the genesis block from the given JSON format
// genesis file and writes it as the zero'th block (i.e. the genesis block),
// or fails hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0, "", false)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		chaindb.Close()
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}
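// A minimal genesis file accepted by init might look like the following; the
// field set here is only a sketch, with core.Genesis defining the full schema
// (nonce, timestamp, extraData, mixHash, coinbase and so on):
//
//	{
//	  "config":     { "chainId": 1337 },
//	  "difficulty": "0x1",
//	  "gasLimit":   "0x8000000",
//	  "alloc":      {}
//	}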
// dumpGenesis dumps the configured genesis block as JSON to stdout.
func dumpGenesis(ctx *cli.Context) error {
	// TODO(rjl493456442) support loading from the custom datadir
	genesis := utils.MakeGenesis(ctx)
	if genesis == nil {
		genesis = core.DefaultGenesisBlock()
	}
	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
		utils.Fatalf("could not encode genesis: %v", err)
	}
	return nil
}

// importChain imports one or more RLP-encoded block files into the chain
// database, tracking peak memory usage and database statistics along the way.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles, recording the high-water
	// marks via atomics so the main goroutine can read them safely later
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	var importErr error

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats, mostly to see the import thrashing
	showLeveldbStats(db)

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations:   %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause:      %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}

// exportChain exports the chain, either in full or over an explicit block
// range, to the file given as the first argument.
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	start := time.Now()

	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}
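// The two preimage commands are symmetric, so moving the preimage database
// between two data directories might look like this (binary name and paths
// are illustrative):
//
//	ignite export-preimages preimages.rlp --datadir /old/datadir
//	ignite import-preimages preimages.rlp --datadir /new/datadir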
// exportPreimages dumps the preimage data to the specified file as a streamed
// RLP encoding.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	start := time.Now()

	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// parseDumpConfig resolves the block to dump (by hash, by number, or the
// current head if no argument is given) and assembles the state dump
// configuration from the command line flags.
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
	db := utils.MakeChainDatabase(ctx, stack, true)
	var header *types.Header
	if ctx.NArg() > 1 {
		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
	}
	if ctx.NArg() == 1 {
		arg := ctx.Args().First()
		if hashish(arg) {
			hash := common.HexToHash(arg)
			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
				header = rawdb.ReadHeader(db, hash, *number)
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
			}
		} else {
			number, err := strconv.Atoi(arg)
			if err != nil {
				return nil, nil, common.Hash{}, err
			}
			if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) {
				header = rawdb.ReadHeader(db, hash, uint64(number))
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
			}
		}
	} else {
		// Use latest
		header = rawdb.ReadHeadHeader(db)
	}
	if header == nil {
		return nil, nil, common.Hash{}, errors.New("no head block found")
	}
	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
	var start common.Hash
	switch len(startArg) {
	case 0: // no start key given, begin at the zero hash
	case 32:
		start = common.BytesToHash(startArg)
	case 20:
		start = crypto.Keccak256Hash(startArg)
		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
	default:
		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
	}
	var conf = &state.DumpConfig{
		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
		Start:             start.Bytes(),
		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
	}
	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
	return conf, db, header.Root, nil
}
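// The start key accepted by parseDumpConfig comes in two forms: a 32-byte
// value is used verbatim as the starting trie key, while a 20-byte value is
// treated as an account address and hashed first, because the state trie is
// keyed by Keccak256(address) rather than by the address itself. An empty
// value starts iteration from the zero hash.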
// dump dumps the state of the requested block (or the head block if none is
// given) to stdout, either iteratively or as a single JSON object.
func dump(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	conf, db, root, err := parseDumpConfig(ctx, stack)
	if err != nil {
		return err
	}
	state, err := state.New(root, state.NewDatabase(db), nil)
	if err != nil {
		return err
	}
	if ctx.Bool(utils.IterativeOutputFlag.Name) {
		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
	} else {
		if conf.OnlyWithAddresses {
			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
				" otherwise the accounts will overwrite each other in the resulting mapping.")
			return fmt.Errorf("incompatible options")
		}
		fmt.Println(string(state.Dump(conf)))
	}
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}