github.com/calmw/ethereum@v0.1.1/cmd/utils/cmd.go

// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
	"bufio"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/calmw/ethereum/common"
	"github.com/calmw/ethereum/core"
	"github.com/calmw/ethereum/core/rawdb"
	"github.com/calmw/ethereum/core/types"
	"github.com/calmw/ethereum/crypto"
	"github.com/calmw/ethereum/eth/ethconfig"
	"github.com/calmw/ethereum/ethdb"
	"github.com/calmw/ethereum/internal/debug"
	"github.com/calmw/ethereum/log"
	"github.com/calmw/ethereum/node"
	"github.com/calmw/ethereum/rlp"
	"github.com/urfave/cli/v2"
)

const (
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}

// StartNode boots up the given node stack and installs signal handlers for a
// graceful shutdown. It also starts a background watcher that shuts the node
// down when free disk space runs low.
func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)

		minFreeDiskSpace := 2 * ethconfig.Defaults.TrieDirtyCache // Default 2 * 256MB
		if ctx.IsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.Int(MinFreeDiskSpaceFlag.Name)
		} else if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = 2 * ctx.Int(CacheFlag.Name) * ctx.Int(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

		shutdown := func() {
			log.Info("Got interrupt, shutting down...")
			go stack.Close()
			for i := 10; i > 0; i-- {
				<-sigc
				if i > 1 {
					log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
				}
			}
			debug.Exit() // ensure trace and CPU profile data is flushed.
			debug.LoudPanic("boom")
		}

		if isConsole {
			// In JS console mode, SIGINT is ignored because it's handled by the console.
			// However, SIGTERM still shuts down the node.
			for {
				sig := <-sigc
				if sig == syscall.SIGTERM {
					shutdown()
					return
				}
			}
		} else {
			<-sigc
			shutdown()
		}
	}()
}
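// Example (illustrative sketch only, not part of the original file): typical
// wiring of StartNode from a command's action function. The data directory,
// function name and console flag here are hypothetical.
func startNodeExample(ctx *cli.Context) {
	stack, err := node.New(&node.Config{DataDir: "demo"})
	if err != nil {
		Fatalf("Failed to create the protocol stack: %v", err)
	}
	StartNode(ctx, stack, false) // false: not running the JS console
	stack.Wait()                 // block until the node is stopped
}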
// monitorFreeDiskSpace polls the free disk space of the given path and
// triggers a shutdown via sigc when it drops below the critical threshold.
func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace), "path", path)
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. Geth will shutdown if disk space runs below critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical), "path", path)
		}
		time.Sleep(30 * time.Second)
	}
}

// ImportChain imports blocks from an RLP-encoded chain dump (optionally
// gzipped) into the given blockchain, in batches of importBatchSize blocks.
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// don't import the first block (genesis)
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if failindex, err := chain.InsertChain(missing); err != nil {
			var failnumber uint64
			if failindex > 0 && failindex < len(missing) {
				failnumber = missing[failindex].NumberU64()
			} else {
				failnumber = missing[0].NumberU64()
			}
			return fmt.Errorf("invalid block %d: %v", failnumber, err)
		}
	}
	return nil
}
// missingBlocks returns the tail of the given blocks, starting from the first
// one that is not yet fully present (block and, above the chain head, state)
// in the chain. It returns nil if nothing is missing.
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check block presence; state is available at head
		if head.Number.Uint64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)

	return nil
}

// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain to", "file", fn)
	return nil
}
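// Example (illustrative only): a full export/import round trip using the two
// helpers above. The function name and file name are hypothetical; the ".gz"
// suffix opts into gzip framing on both sides.
func exportImportExample(src, dst *core.BlockChain) error {
	if err := ExportChain(src, "chain.rlp.gz"); err != nil {
		return err
	}
	return ImportChain(dst, "chain.rlp.gz")
}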
// ImportPreimages imports a batch of exported hash preimages into the database.
// It's part of the deprecated functionality and should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte

		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough were gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch of preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
// It's part of the deprecated functionality and should be removed in the future.
func ExportPreimages(db ethdb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}
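// Example (illustrative only): round-tripping preimages through the two
// helpers above. Both sides operate on a plain RLP stream of raw preimage
// blobs; the function name and file name are hypothetical.
func preimageRoundTripExample(src, dst ethdb.Database) error {
	if err := ExportPreimages(src, "preimages.rlp"); err != nil {
		return err
	}
	return ImportPreimages(dst, "preimages.rlp")
}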
// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
	Magic    string // Always set to 'gethdbdump' for disambiguation
	Version  uint64
	Kind     string
	UnixTime uint64
}

const exportMagic = "gethdbdump"
const (
	OpBatchAdd = 0
	OpBatchDel = 1
)

// ImportLDBData imports a batch of snapshot data into the database.
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
	log.Info("Importing leveldb data", "file", f)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(f)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(f, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Read the header
	var header exportHeader
	if err := stream.Decode(&header); err != nil {
		return fmt.Errorf("could not decode header: %v", err)
	}
	if header.Magic != exportMagic {
		return errors.New("incompatible data, wrong magic")
	}
	if header.Version != 0 {
		return fmt.Errorf("incompatible version %d (only 0 is supported)", header.Version)
	}
	log.Info("Importing data", "file", f, "type", header.Kind, "data age",
		common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))

	// Import the snapshot in batches to prevent disk thrashing
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
		batch  = db.NewBatch()
	)
	for {
		// Read the next entry
		var (
			op       byte
			key, val []byte
		)
		if err := stream.Decode(&op); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if err := stream.Decode(&key); err != nil {
			return err
		}
		if err := stream.Decode(&val); err != nil {
			return err
		}
		if count < startIndex {
			count++
			continue
		}
		switch op {
		case OpBatchDel:
			batch.Delete(key)
		case OpBatchAdd:
			batch.Put(key, val)
		default:
			return fmt.Errorf("unknown op %d", op)
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		// Check for interruption emitted by ctrl+c
		if count%1000 == 0 {
			select {
			case <-interrupt:
				if err := batch.Write(); err != nil {
					return err
				}
				log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
		}
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
		count++
	}
	// Flush the last batch of snapshot data
	if batch.ValueSize() > 0 {
		if err := batch.Write(); err != nil {
			return err
		}
	}
	log.Info("Imported chain data", "file", f, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}
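// Illustrative sketch (not part of the original tool) of the stream layout
// that ImportLDBData consumes: an RLP-encoded exportHeader followed by
// repeated op/key/value triples, each RLP-encoded separately. The helper
// name is hypothetical.
func writeExampleDump(w io.Writer, key, val []byte) error {
	if err := rlp.Encode(w, &exportHeader{
		Magic:    exportMagic,
		Version:  0,
		Kind:     "example",
		UnixTime: uint64(time.Now().Unix()),
	}); err != nil {
		return err
	}
	// One entry: add `key -> val` on import.
	for _, field := range []interface{}{byte(OpBatchAdd), key, val} {
		if err := rlp.Encode(w, field); err != nil {
			return err
		}
	}
	return nil
}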
// ChainDataIterator is an interface that wraps all the functions needed to
// iterate over the chain data being exported.
type ChainDataIterator interface {
	// Next returns the key-value pair for the next entry to export.
	// When the end is reached, it will return (0, nil, nil, false).
	Next() (byte, []byte, []byte, bool)

	// Release releases associated resources. Release should always succeed and
	// can be called multiple times without causing an error.
	Release()
}

// ExportChaindata exports the given data type into the file, truncating any
// data already present. If the file name has a '.gz' suffix, gzip compression
// is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
	log.Info("Exporting chain data", "file", fn, "kind", kind)
	defer iter.Release()

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Write the header
	if err := rlp.Encode(writer, &exportHeader{
		Magic:    exportMagic,
		Version:  0,
		Kind:     kind,
		UnixTime: uint64(time.Now().Unix()),
	}); err != nil {
		return err
	}
	// Extract data from the source iterator and dump it out to the file
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
	)
	for {
		op, key, val, ok := iter.Next()
		if !ok {
			break
		}
		if err := rlp.Encode(writer, op); err != nil {
			return err
		}
		if err := rlp.Encode(writer, key); err != nil {
			return err
		}
		if err := rlp.Encode(writer, val); err != nil {
			return err
		}
		if count%1000 == 0 {
			// Check for interruption emitted by ctrl+c
			select {
			case <-interrupt:
				log.Info("Chain data exporting interrupted", "file", fn,
					"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
			if time.Since(logged) > 8*time.Second {
				log.Info("Exporting chain data", "file", fn, "kind", kind,
					"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
		count++
	}
	log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}
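// Example (illustrative sketch, not part of the original file): a minimal
// ChainDataIterator backed by a raw ethdb iterator, assuming every visited
// entry should be re-added on import. The type name is hypothetical. Such an
// iterator could then be exported with, e.g.,
// ExportChaindata("dump.rlp.gz", "example", &ethdbChainDataIterator{it: db.NewIterator(nil, nil)}, make(chan struct{})).
type ethdbChainDataIterator struct {
	it ethdb.Iterator
}

func (i *ethdbChainDataIterator) Next() (byte, []byte, []byte, bool) {
	if !i.it.Next() {
		return 0, nil, nil, false
	}
	// Copy the key and value, as the underlying iterator may reuse its buffers.
	return OpBatchAdd, common.CopyBytes(i.it.Key()), common.CopyBytes(i.it.Value()), true
}

func (i *ethdbChainDataIterator) Release() { i.it.Release() }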