github.com/aquanetwork/aquachain@v1.7.8/cmd/aquachain/chaincmd.go

// Copyright 2015 The aquachain Authors
// This file is part of aquachain.
//
// aquachain is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// aquachain is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with aquachain. If not, see <http://www.gnu.org/licenses/>.

package main

import (
	"encoding/json"
	"fmt"
	"os"
	"runtime"
	"strconv"
	"sync/atomic"
	"time"

	"github.com/syndtr/goleveldb/leveldb/util"
	"gitlab.com/aquachain/aquachain/aqua/downloader"
	"gitlab.com/aquachain/aquachain/aqua/event"
	"gitlab.com/aquachain/aquachain/aquadb"
	"gitlab.com/aquachain/aquachain/cmd/utils"
	"gitlab.com/aquachain/aquachain/common"
	"gitlab.com/aquachain/aquachain/common/log"
	"gitlab.com/aquachain/aquachain/core"
	"gitlab.com/aquachain/aquachain/core/state"
	"gitlab.com/aquachain/aquachain/core/types"
	"gitlab.com/aquachain/aquachain/opt/console"
	"gitlab.com/aquachain/aquachain/trie"
	"gopkg.in/urfave/cli.v1"
)

var (
	initCommand = cli.Command{
		Action:    utils.MigrateFlags(initGenesis),
		Name:      "init",
		Usage:     "Bootstrap and initialize a new genesis block",
		ArgsUsage: "<genesisPath>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The init command initializes a new genesis block and definition for the network.
This is a destructive action and changes the network in which you will be
participating.

It expects the genesis file as its argument.`,
	}
	importCommand = cli.Command{
		Action:    utils.MigrateFlags(importChain),
		Name:      "import",
		Usage:     "Import a blockchain file",
		ArgsUsage: "<filename> (<filename 2> ... <filename N>) ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.GCModeFlag,
			utils.CacheDatabaseFlag,
			utils.CacheGCFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The import command imports blocks from an RLP-encoded form. The form can be one file
with several RLP-encoded blocks, or several files can be used.

If only one file is used, an import error will result in failure. If several files are
used, processing will continue even if an individual RLP file fails to import.`,
	}
	exportCommand = cli.Command{
		Action:    utils.MigrateFlags(exportChain),
		Name:      "export",
		Usage:     "Export blockchain into file",
		ArgsUsage: "<filename> [<blockNumFirst> <blockNumLast>]",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Requires a first argument of the file to write to.
Optional second and third arguments control the first and
last block to write. In this mode, the file will be appended
to if it already exists.`,
	}
	copydbCommand = cli.Command{
		Action:    utils.MigrateFlags(copyDb),
		Name:      "copydb",
		Usage:     "Create a local chain from a target chaindata folder",
		ArgsUsage: "<sourceChaindataDir>",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
			utils.SyncModeFlag,
			utils.FakePoWFlag,
			utils.TestnetFlag,
			utils.Testnet2Flag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The first argument must be the directory containing the blockchain to download from.`,
	}
	removedbCommand = cli.Command{
		Action:    utils.MigrateFlags(removeDB),
		Name:      "removedb",
		Usage:     "Remove blockchain and state databases",
		ArgsUsage: " ",
		Flags: []cli.Flag{
			utils.DataDirFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
Remove blockchain and state databases`,
	}
	dumpCommand = cli.Command{
		Action:    utils.MigrateFlags(dump),
		Name:      "dump",
		Usage:     "Dump a specific block from storage",
		ArgsUsage: "[<blockHash> | <blockNum>]...",
		Flags: []cli.Flag{
			utils.DataDirFlag,
			utils.CacheFlag,
		},
		Category: "BLOCKCHAIN COMMANDS",
		Description: `
The arguments are interpreted as block numbers or hashes.
Use "aquachain dump 0" to dump the genesis block.`,
	}
)
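
// The commands defined above are typically invoked as shown below. The file
// names, paths and block numbers are illustrative placeholders only; the
// binary and subcommand names come from the definitions in this file.
//
//	aquachain init genesis.json
//	aquachain import chain-backup.rlp
//	aquachain export chain-backup.rlp 0 100000
//	aquachain copydb /path/to/other/datadir/aquachain/chaindata
//	aquachain dump 0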

// initGenesis initialises the given JSON-format genesis file and writes it as
// the zero'd block (i.e. genesis), failing hard if it can't succeed.
func initGenesis(ctx *cli.Context) error {
	// Make sure we have a valid genesis JSON
	genesisPath := ctx.Args().First()
	if len(genesisPath) == 0 {
		utils.Fatalf("Must supply path to genesis JSON file")
	}
	file, err := os.Open(genesisPath)
	if err != nil {
		utils.Fatalf("Failed to read genesis file: %v", err)
	}
	defer file.Close()

	genesis := new(core.Genesis)
	if err := json.NewDecoder(file).Decode(genesis); err != nil {
		utils.Fatalf("invalid genesis file: %v", err)
	}

	if genesis.Config == nil || genesis.Config.ChainId == nil {
		utils.Fatalf("invalid genesis file: no chainid")
	}

	// Open and initialise the chain database
	stack := makeFullNode(ctx)
	for _, name := range []string{"chaindata"} {
		chaindb, err := stack.OpenDatabase(name, 0, 0)
		if err != nil {
			utils.Fatalf("Failed to open database: %v", err)
		}
		_, hash, err := core.SetupGenesisBlock(chaindb, genesis)
		if err != nil {
			utils.Fatalf("Failed to write genesis block: %v", err)
		}
		log.Info("Successfully wrote genesis state", "database", name, "hash", hash)
	}
	return nil
}
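
// A genesis file accepted by the init command has roughly the following
// shape. This is an illustrative sketch only: the field names are assumed to
// match the JSON tags on core.Genesis, and every value (chain id, gas limit,
// difficulty, allocation address) is a placeholder, not a real network
// definition. The only hard requirement enforced above is that config.chainId
// is present.
//
//	{
//	  "config":     { "chainId": 12345 },
//	  "nonce":      "0x0000000000000042",
//	  "timestamp":  "0x0",
//	  "extraData":  "0x",
//	  "gasLimit":   "0x47b760",
//	  "difficulty": "0x1",
//	  "alloc": {
//	    "0x0000000000000000000000000000000000000001": { "balance": "1000000000000000000" }
//	  }
//	}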

func importChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	defer chainDb.Close()

	// Start periodically gathering memory profiles
	var peakMemAlloc, peakMemSys uint64
	go func() {
		stats := new(runtime.MemStats)
		for {
			runtime.ReadMemStats(stats)
			if atomic.LoadUint64(&peakMemAlloc) < stats.Alloc {
				atomic.StoreUint64(&peakMemAlloc, stats.Alloc)
			}
			if atomic.LoadUint64(&peakMemSys) < stats.Sys {
				atomic.StoreUint64(&peakMemSys, stats.Sys)
			}
			time.Sleep(5 * time.Second)
		}
	}()
	// Import the chain
	start := time.Now()
	exitcode := 0

	if len(ctx.Args()) == 1 {
		if err := utils.ImportChain(chain, ctx.Args().First()); err != nil {
			log.Error("Import error", "err", err)
			exitcode = 111
		}
	} else {
		for _, arg := range ctx.Args() {
			if err := utils.ImportChain(chain, arg); err != nil {
				log.Error("Import error", "file", arg, "err", err)
			}
		}
	}
	chain.Stop()
	fmt.Printf("Import done in %v.\n\n", time.Since(start))

	// Output pre-compaction stats, mostly to see the import thrashing
	db := chainDb.(*aquadb.LDBDatabase)

	stats, err := db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)
	fmt.Printf("Trie cache misses: %d\n", trie.CacheMisses())
	fmt.Printf("Trie cache unloads: %d\n\n", trie.CacheUnloads())

	// Print the memory statistics used by the importing
	mem := new(runtime.MemStats)
	runtime.ReadMemStats(mem)

	fmt.Printf("Object memory: %.3f MB current, %.3f MB peak\n", float64(mem.Alloc)/1024/1024, float64(atomic.LoadUint64(&peakMemAlloc))/1024/1024)
	fmt.Printf("System memory: %.3f MB current, %.3f MB peak\n", float64(mem.Sys)/1024/1024, float64(atomic.LoadUint64(&peakMemSys))/1024/1024)
	fmt.Printf("Allocations: %.3f million\n", float64(mem.Mallocs)/1000000)
	fmt.Printf("GC pause: %v\n\n", time.Duration(mem.PauseTotalNs))

	if ctx.GlobalIsSet(utils.NoCompactionFlag.Name) {
		return nil
	}

	// Compact the entire database to more accurately measure disk io and print the stats
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = db.LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	stats, err = db.LDB().GetProperty("leveldb.stats")
	if err != nil {
		utils.Fatalf("Failed to read database stats: %v", err)
	}
	fmt.Println(stats)

	if exitcode != 0 {
		utils.Fatalf("Exiting with error code: %v", exitcode)
	}
	return nil
}

func exportChain(ctx *cli.Context) error {
	if len(ctx.Args()) < 1 {
		utils.Fatalf("This command requires an argument.")
	}
	stack := makeFullNode(ctx)
	chain, _ := utils.MakeChain(ctx, stack)
	start := time.Now()

	var err error
	fp := ctx.Args().First()
	if len(ctx.Args()) < 3 {
		err = utils.ExportChain(chain, fp)
	} else {
		// This can be improved to allow for numbers larger than 9223372036854775807
		first, ferr := strconv.ParseInt(ctx.Args().Get(1), 10, 64)
		last, lerr := strconv.ParseInt(ctx.Args().Get(2), 10, 64)
		if ferr != nil || lerr != nil {
			utils.Fatalf("Export error in parsing parameters: block number not an integer\n")
		}
		if first < 0 || last < 0 {
			utils.Fatalf("Export error: block number must be 0 or greater\n")
		}
		err = utils.ExportAppendChain(chain, fp, uint64(first), uint64(last))
	}

	if err != nil {
		utils.Fatalf("Export error: %v\n", err)
	}
	fmt.Printf("Export done in %v\n", time.Since(start))
	return nil
}

func copyDb(ctx *cli.Context) error {
	// Ensure we have a source chain directory to copy
	if len(ctx.Args()) != 1 {
		utils.Fatalf("Source chaindata directory path argument missing")
	}
	// Initialize a new chain for the running node to sync into
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)

	syncmode := *utils.GlobalTextMarshaler(ctx, utils.SyncModeFlag.Name).(*downloader.SyncMode)
	dl := downloader.New(syncmode, chainDb, new(event.TypeMux), chain, nil, nil)

	// Create a source peer to satisfy downloader requests from
	db, err := aquadb.NewLDBDatabase(ctx.Args().First(), ctx.GlobalInt(utils.CacheFlag.Name), 256)
	if err != nil {
		return err
	}
	hc, err := core.NewHeaderChain(db, chain.Config(), chain.Engine(), func() bool { return false })
	if err != nil {
		return err
	}
	peer := downloader.NewFakePeer("local", db, hc, dl)
	if err = dl.RegisterPeer("local", 63, peer); err != nil {
		return err
	}
	// Synchronise with the simulated peer
	start := time.Now()

	currentHeader := hc.CurrentHeader()
	if err = dl.Synchronise("local", currentHeader.Hash(), hc.GetTd(currentHeader.Hash(), currentHeader.Number.Uint64()), syncmode); err != nil {
		return err
	}
	for dl.Synchronising() {
		time.Sleep(10 * time.Millisecond)
	}
	fmt.Printf("Database copy done in %v\n", time.Since(start))

	// Compact the entire database to remove any sync overhead
	start = time.Now()
	fmt.Println("Compacting entire database...")
	if err = chainDb.(*aquadb.LDBDatabase).LDB().CompactRange(util.Range{}); err != nil {
		utils.Fatalf("Compaction failed: %v", err)
	}
	fmt.Printf("Compaction done in %v.\n\n", time.Since(start))

	return nil
}

func removeDB(ctx *cli.Context) error {
	stack, _ := makeConfigNode(ctx)

	name := "chaindata"
	// Ensure the database exists in the first place
	logger := log.New("database", name)

	dbdir := stack.ResolvePath(name)
	if !common.FileExist(dbdir) {
		logger.Info("Database doesn't exist, skipping", "path", dbdir)
		return nil
	}
	// Confirm removal and execute
	fmt.Println(dbdir)
	confirm, err := console.Stdin.PromptConfirm("Remove this database?")
	switch {
	case err != nil:
		utils.Fatalf("%v", err)
	case !confirm:
		logger.Warn("Database deletion aborted")
	default:
		start := time.Now()
		os.RemoveAll(dbdir)
		logger.Info("Database successfully deleted", "elapsed", common.PrettyDuration(time.Since(start)))
	}

	return nil
}

func dump(ctx *cli.Context) error {
	stack := makeFullNode(ctx)
	chain, chainDb := utils.MakeChain(ctx, stack)
	for _, arg := range ctx.Args() {
		var block *types.Block
		if hashish(arg) {
			block = chain.GetBlockByHash(common.HexToHash(arg))
		} else {
			num, _ := strconv.Atoi(arg)
			block = chain.GetBlockByNumber(uint64(num))
		}
		if block == nil {
			fmt.Println("{}")
			utils.Fatalf("block not found")
		} else {
			state, err := state.New(block.Root(), state.NewDatabase(chainDb))
			if err != nil {
				utils.Fatalf("could not create new state: %v", err)
			}
			fmt.Printf("%s\n", state.Dump())
		}
	}
	chainDb.Close()
	return nil
}

// hashish returns true for strings that look like hashes: any argument that
// does not parse as a base-10 integer is treated as a block hash.
func hashish(x string) bool {
	_, err := strconv.Atoi(x)
	return err != nil
}