github.com/bearnetworkchain/go-bearnetwork@v1.10.19-0.20220604150648-d63890c2e42b/cmd/geth/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"errors"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/bearnetworkchain/go-bearnetwork/cmd/utils"
	"github.com/bearnetworkchain/go-bearnetwork/common"
	"github.com/bearnetworkchain/go-bearnetwork/common/hexutil"
	"github.com/bearnetworkchain/go-bearnetwork/core"
	"github.com/bearnetworkchain/go-bearnetwork/core/rawdb"
	"github.com/bearnetworkchain/go-bearnetwork/core/state"
	"github.com/bearnetworkchain/go-bearnetwork/core/types"
	"github.com/bearnetworkchain/go-bearnetwork/crypto"
	"github.com/bearnetworkchain/go-bearnetwork/ethdb"
	"github.com/bearnetworkchain/go-bearnetwork/log"
	"github.com/bearnetworkchain/go-bearnetwork/metrics"
	"github.com/bearnetworkchain/go-bearnetwork/node"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags:     utils.DatabasePathFlags,
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
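		// A minimal genesis file for init might look roughly like the sketch
		// below; the field names follow core.Genesis, but the values are
		// illustrative only, not taken from this repository:
		//
		//	{
		//	  "config":     { "chainId": 1234 },
		//	  "difficulty": "0x1",
		//	  "gasLimit":   "0x8000000",
		//	  "alloc":      {}
		//	}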
	}
	dumpGenesisCommand = cli.Command{
		Action:    utils.MigrateFlags(dumpGenesis),
		Name:      "dumpgenesis",
		Usage:     "Dumps genesis block JSON configuration to stdout",
		ArgsUsage: "",
		Flags:     utils.NetworkFlags,
		Category:  "BLOCKCHAIN COMMANDS",
		Description: `
The dumpgenesis command dumps the genesis block configuration in JSON format to stdout.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: append([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.GCModeFlag,
			utils.SnapshotFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
			utils.MetricsEnabledFlag,
			utils.MetricsEnabledExpensiveFlag,
			utils.MetricsHTTPFlag,
			utils.MetricsPortFlag,
			utils.MetricsEnableInfluxDBFlag,
			utils.MetricsEnableInfluxDBV2Flag,
			utils.MetricsInfluxDBEndpointFlag,
			utils.MetricsInfluxDBDatabaseFlag,
			utils.MetricsInfluxDBUsernameFlag,
			utils.MetricsInfluxDBPasswordFlag,
			utils.MetricsInfluxDBTagsFlag,
			utils.MetricsInfluxDBTokenFlag,
			utils.MetricsInfluxDBBucketFlag,
			utils.MetricsInfluxDBOrganizationFlag,
			utils.TxLookupLimitFlag,
		}, utils.DatabasePathFlags...),
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded file. A single file may
contain several RLP-encoded blocks, and several files can be given.

If only one file is used, an import error will result in failure. If several files
are used, processing will continue even if an individual file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: append([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags...),
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing. If the file ends with .gz, the output will
be gzipped.`,
	}
	importPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(importPreimages),
		Name:      "import-preimages",
		Usage:     "Import the preimage database from an RLP stream",
		ArgsUsage: "<datafile>",
		Flags: append([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags...),
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import-preimages command imports hash preimages from an RLP encoded stream.
It's deprecated, please use "geth db import" instead.
`,
	}
	exportPreimagesCommand = cli.Command{
		Action:    utils.MigrateFlags(exportPreimages),
		Name:      "export-preimages",
		Usage:     "Export the preimage database into an RLP stream",
		ArgsUsage: "<dumpfile>",
		Flags: append([]cli.Flag{
			utils.CacheFlag,
			utils.SyncModeFlag,
		}, utils.DatabasePathFlags...),
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The export-preimages command exports hash preimages to an RLP encoded stream.
It's deprecated, please use "geth db export" instead.
`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[? <blockHash> | <blockNum>]",
		Flags: append([]cli.Flag{
			utils.CacheFlag,
			utils.IterativeOutputFlag,
			utils.ExcludeCodeFlag,
			utils.ExcludeStorageFlag,
			utils.IncludeIncompletesFlag,
			utils.StartKeyFlag,
			utils.DumpLimitFlag,
		}, utils.DatabasePathFlags...),
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
This command dumps out the state for a given block (or latest, if none provided).
`,
	}
)
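
// Illustrative invocations of the commands defined above; the binary name and
// the file paths are assumptions for the sketch, not taken from this file:
//
//	geth init ./genesis.json
//	geth import ./blocks-0.rlp ./blocks-1.rlp
//	geth export ./chain.rlp.gz 0 1000
//	geth dump 12345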

// initGenesis will initialise the given JSON format genesis file and write it
// as the zero'd block (i.e. genesis), or will fail hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}
	// Open and initialise both full and light databases
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabaseWithFreezer(name, 0, 0, ctx.GlobalString(utils.AncientFlag.Name), "", false)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		chaindb.Close()
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

// dumpGenesis writes the selected network's genesis block (or the default one)
// to stdout as JSON.
func dumpGenesis(ctx *cli.Context) error {
	// TODO(rjl493456442) support loading from the custom datadir
	genesis := utils.MakeGenesis(ctx)
	if genesis == nil {
		genesis = core.DefaultGenesisBlock()
	}
	if err := json.NewEncoder(os.Stdout).Encode(genesis); err != nil {
		utils.Fatalf("could not encode genesis")
	}
	return nil
}
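
// importChain imports one or more RLP-encoded block files into the chain
// database, tracking peak memory usage during the import and printing
// database statistics before and after an optional final compaction.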
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	// Start metrics export if enabled
	utils.SetupMetrics(ctx)
	// Start system runtime metrics collection
	go metrics.CollectProcessMetrics(3 * time.Second)

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, db := utils.MakeChain(ctx, stack)
	defer db.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	var importErr error

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			importErr = err
			log.Error("Import error", "err", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				importErr = err
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats, mostly to see the import thrashing
	showLeveldbStats(db)

	// Print the memory statistics of the import
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalBool(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err := db.Compact(nil, nil); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	showLeveldbStats(db)
	return importErr
}
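
// exportChain writes the chain to an RLP file. When a first and last block
// are given, only that range is exported, appending to the file if it already
// exists; a .gz suffix gzips the output.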
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		if head := chain.CurrentFastBlock(); uint64(last) > head.NumberU64() {
			utils.Fatalf("Export error: block number %d larger than head block %d\n", uint64(last), head.NumberU64())
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

// importPreimages imports preimage data from the specified file.
func importPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}

	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, false)
	start := time.Now()

	if err := utils.ImportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Import error: %v\n", err)
	}
	fmt.Printf("Import done in %v\n", time.Since(start))
	return nil
}

// exportPreimages dumps the preimage data to the specified RLP file in a
// streaming fashion.
func exportPreimages(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	db := utils.MakeChainDatabase(ctx, stack, true)
	start := time.Now()

	if err := utils.ExportPreimages(db, ctx.Args().First()); err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}
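
// parseDumpConfig resolves the dump target (a block number, a block hash, or
// the current head when no argument is given) together with the start-key and
// limit flags, returning the dump configuration, the chain database and the
// state root to dump from.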
func parseDumpConfig(ctx *cli.Context, stack *node.Node) (*state.DumpConfig, ethdb.Database, common.Hash, error) {
	db := utils.MakeChainDatabase(ctx, stack, true)
	var header *types.Header
	if ctx.NArg() > 1 {
		return nil, nil, common.Hash{}, fmt.Errorf("expected 1 argument (number or hash), got %d", ctx.NArg())
	}
	if ctx.NArg() == 1 {
		arg := ctx.Args().First()
		if hashish(arg) {
			hash := common.HexToHash(arg)
			if number := rawdb.ReadHeaderNumber(db, hash); number != nil {
				header = rawdb.ReadHeader(db, hash, *number)
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("block %x not found", hash)
			}
		} else {
			number, err := strconv.Atoi(arg)
			if err != nil {
				return nil, nil, common.Hash{}, err
			}
			if hash := rawdb.ReadCanonicalHash(db, uint64(number)); hash != (common.Hash{}) {
				header = rawdb.ReadHeader(db, hash, uint64(number))
			} else {
				return nil, nil, common.Hash{}, fmt.Errorf("header for block %d not found", number)
			}
		}
	} else {
		// Use latest
		header = rawdb.ReadHeadHeader(db)
	}
	if header == nil {
		return nil, nil, common.Hash{}, errors.New("no head block found")
	}
	startArg := common.FromHex(ctx.String(utils.StartKeyFlag.Name))
	var start common.Hash
	switch len(startArg) {
	case 0: // no start key given, dump from the beginning of the trie
	case 32:
		start = common.BytesToHash(startArg)
	case 20:
		start = crypto.Keccak256Hash(startArg)
		log.Info("Converting start-address to hash", "address", common.BytesToAddress(startArg), "hash", start.Hex())
	default:
		return nil, nil, common.Hash{}, fmt.Errorf("invalid start argument: %x. 20 or 32 hex-encoded bytes required", startArg)
	}
	var conf = &state.DumpConfig{
		SkipCode:          ctx.Bool(utils.ExcludeCodeFlag.Name),
		SkipStorage:       ctx.Bool(utils.ExcludeStorageFlag.Name),
		OnlyWithAddresses: !ctx.Bool(utils.IncludeIncompletesFlag.Name),
		Start:             start.Bytes(),
		Max:               ctx.Uint64(utils.DumpLimitFlag.Name),
	}
	log.Info("State dump configured", "block", header.Number, "hash", header.Hash().Hex(),
		"skipcode", conf.SkipCode, "skipstorage", conf.SkipStorage,
		"start", hexutil.Encode(conf.Start), "limit", conf.Max)
	return conf, db, header.Root, nil
}

// dump writes the state of the selected block to stdout as JSON, either as a
// single object or as a newline-delimited stream when iterative output is
// requested.
func dump(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)
	defer stack.Close()

	conf, db, root, err := parseDumpConfig(ctx, stack)
	if err != nil {
		return err
	}
	state, err := state.New(root, state.NewDatabase(db), nil)
	if err != nil {
		return err
	}
	if ctx.Bool(utils.IterativeOutputFlag.Name) {
		state.IterativeDump(conf, json.NewEncoder(os.Stdout))
	} else {
		if conf.OnlyWithAddresses {
			fmt.Fprintf(os.Stderr, "If you want to include accounts with missing preimages, you need iterative output, since"+
				" otherwise the accounts will overwrite each other in the resulting mapping.")
			return fmt.Errorf("incompatible options")
		}
		fmt.Println(string(state.Dump(conf)))
	}
	return nil
}

// hashish returns true for strings that look like hashes.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}