github.com/pfcoder/quorum@v2.0.3-0.20180501191142-d4a1b0958135+incompatible/cmd/geth/chaincmd.go

// Copyright 2015 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"io"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/ethereum/go-ethereum/cmd/utils"
	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/console"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/state"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/eth/downloader"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/event"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/trie"
	"github.com/syndtr/goleveldb/leveldb/util"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are used,
processing will proceed even if an individual RLP file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
if already existing.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.RinkebyFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.LightModeFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "ethereum dump 0" to dump the genesis block.`,
	}
)

// In the regular Genesis / ChainConfig struct, due to the way Go deserializes
// JSON, IsQuorum defaults to false (when not specified). Here we specify it as
// a pointer so we can make the distinction and default unspecified to true.
func getIsQuorum(file io.Reader) bool {
	altGenesis := new(struct {
		Config *struct {
			IsQuorum *bool `json:"isQuorum"`
		} `json:"config"`
	})

	if err := json.NewDecoder(file).Decode(altGenesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}

	// unspecified defaults to true
	return altGenesis.Config.IsQuorum == nil || *altGenesis.Config.IsQuorum
}

// initGenesis will initialise the given JSON-format genesis file and write it as
// the zero'd block (i.e. genesis), or will fail hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}

	file.Seek(0, 0)
	genesis.Config.IsQuorum = getIsQuorum(file)

	// Open and initialise both full and light databases
	stack := makeFullNode(ctx)
	for _, name := range []string{"chaindata", "lightchaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}

// importChain imports blocks from one or more RLP-encoded files and reports
// import and database statistics.
func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			utils.Fatalf("Import error: %v", err)
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}

	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats mostly to see the import thrashing
	db := chainDb.(*ethdb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)
	fmt.Printf("Trie cache misses: %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	return nil
}

// exportChain exports the blockchain, or an optional inclusive block range of it,
// to an RLP-encoded file.
func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must not be negative\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v", time.Since(start))
	return nil
}

// copyDb syncs a fresh local chain from a source chaindata directory by serving
// it through a fake in-process peer.
func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)

	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := ethdb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*ethdb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}

// removeDB removes the full and light node databases after interactive confirmation.
func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	for _, name := range []string{"chaindata", "lightchaindata"} {
		// Ensure the database exists in the first place
		logger := log.New("database", name)

		dbdir := stack.ResolvePath(name)
		if !common.FileExist(dbdir) {
logger.Info("Database doesn't exist, skipping", "path", dbdir) 371 continue 372 } 373 // Confirm removal and execute 374 fmt.Println(dbdir) 375 confirm, err := console.Stdin.PromptConfirm("Remove this database?") 376 switch { 377 case err != nil: 378 utils.Fatalf("%v", err) 379 case !confirm: 380 logger.Warn("Database deletion aborted") 381 default: 382 start := time.Now() 383 os.RemoveAll(dbdir) 384 logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start))) 385 } 386 } 387 return nil 388 } 389 390 func dump(ctx *cli.Context) error { 391 stack := makeFullNode(ctx) 392 chain, chainDb := utils.MakeChain(ctx, stack) 393 for _, arg := range ctx.Args() { 394 var block *types.Block 395 if hashish(arg) { 396 block = chain.GetBlockByHash(common.HexToHash(arg)) 397 } else { 398 num, _ := strconv.Atoi(arg) 399 block = chain.GetBlockByNumber(uint64(num)) 400 } 401 if block == nil { 402 fmt.Println("{}") 403 utils.Fatalf("block not found") 404 } else { 405 state, err := state.New(block.Root(), state.NewDatabase(chainDb)) 406 if err != nil { 407 utils.Fatalf("could not create new state: %v", err) 408 } 409 fmt.Printf("%s\n", state.Dump()) 410 } 411 } 412 chainDb.Close() 413 return nil 414 } 415 416 // hashish returns true for strings that look like hashes. 417 func hashish(x string) bool { 418 _, err := strconv.Atoi(x) 419 return err != nil 420 }