github.com/ethw3/go-ethereuma@v0.0.0-20221013053120-c14602a4c23c/cmd/utils/cmd.go

// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
	"bufio"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/ethw3/go-ethereuma/common"
	"github.com/ethw3/go-ethereuma/core"
	"github.com/ethw3/go-ethereuma/core/rawdb"
	"github.com/ethw3/go-ethereuma/core/types"
	"github.com/ethw3/go-ethereuma/crypto"
	"github.com/ethw3/go-ethereuma/eth/ethconfig"
	"github.com/ethw3/go-ethereuma/ethdb"
	"github.com/ethw3/go-ethereuma/internal/debug"
	"github.com/ethw3/go-ethereuma/log"
	"github.com/ethw3/go-ethereuma/node"
	"github.com/ethw3/go-ethereuma/rlp"
	"github.com/urfave/cli/v2"
)

const (
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}
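// Usage sketch (illustrative, not part of the original file): Fatalf is the
// standard bail-out for unrecoverable CLI errors in this package. The helper
// loadConfig below is hypothetical and only stands in for any fallible setup:
//
//	cfg, err := loadConfig(path) // hypothetical helper
//	if err != nil {
//		Fatalf("Failed to load config: %v", err)
//	}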
func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)

		minFreeDiskSpace := 2 * ethconfig.Defaults.TrieDirtyCache // Default 2 * 256MB
		if ctx.IsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.Int(MinFreeDiskSpaceFlag.Name)
		} else if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = 2 * ctx.Int(CacheFlag.Name) * ctx.Int(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

		shutdown := func() {
			log.Info("Got interrupt, shutting down...")
			go stack.Close()
			for i := 10; i > 0; i-- {
				<-sigc
				if i > 1 {
					log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
				}
			}
			debug.Exit() // ensure trace and CPU profile data is flushed.
			debug.LoudPanic("boom")
		}

		if isConsole {
			// In JS console mode, SIGINT is ignored because it's handled by the console.
			// However, SIGTERM still shuts down the node.
			for {
				sig := <-sigc
				if sig == syscall.SIGTERM {
					shutdown()
					return
				}
			}
		} else {
			<-sigc
			shutdown()
		}
	}()
}

func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace))
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. Geth will shut down if disk space runs below the critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical))
		}
		time.Sleep(30 * time.Second)
	}
}
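// Threshold sketch (illustrative, not part of the original file): with the
// assumed flags --cache=4096 and --cache.gc=25, StartNode derives a limit of
// 2 * 4096 * 25 / 100 = 2048 MB and hands it to the monitor in bytes:
//
//	sigc := make(chan os.Signal, 1)
//	go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(2048)*1024*1024)
//	<-sigc // receives syscall.SIGTERM once free space drops below 2048 MB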
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// don't import the genesis block (it's already present)
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if _, err := chain.InsertChain(missing); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}

func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check block presence; the state is available at head
		if head.NumberU64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)

	return nil
}

// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain to", "file", fn)
	return nil
}
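// Round-trip sketch (illustrative, not part of the original file): the ".gz"
// suffix alone selects gzip on both the export and import paths, so a range
// exported with ExportAppendChain can be replayed with ImportChain directly:
//
//	if err := ExportAppendChain(chain, "range.rlp.gz", 1, 1024); err != nil {
//		return err
//	}
//	if err := ImportChain(otherChain, "range.rlp.gz"); err != nil {
//		return err
//	}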
// ImportPreimages imports a batch of exported hash preimages into the database.
// It's part of the deprecated functionality and should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte

		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough data was gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch of preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
// It's part of the deprecated functionality and should be removed in the future.
func ExportPreimages(db ethdb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}
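// Data-format sketch (illustrative, not part of the original file): each
// exported entry is a single RLP byte string holding the raw preimage. The
// key is never stored on disk; the importer recomputes it as the Keccak256
// hash of the blob, so the file and the database always agree:
//
//	var blob []byte
//	if err := stream.Decode(&blob); err != nil {
//		return err
//	}
//	key := crypto.Keccak256Hash(blob) // stored under the "secure-key-" prefix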
// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
	Magic    string // Always set to 'gethdbdump' for disambiguation
	Version  uint64
	Kind     string
	UnixTime uint64
}

const exportMagic = "gethdbdump"
const (
	OpBatchAdd = 0
	OpBatchDel = 1
)

// ImportLDBData imports a batch of snapshot data into the database.
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
	log.Info("Importing leveldb data", "file", f)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(f)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(f, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Read the header
	var header exportHeader
	if err := stream.Decode(&header); err != nil {
		return fmt.Errorf("could not decode header: %v", err)
	}
	if header.Magic != exportMagic {
		return errors.New("incompatible data, wrong magic")
	}
	if header.Version != 0 {
		return fmt.Errorf("incompatible version %d (only 0 is supported)", header.Version)
	}
	log.Info("Importing data", "file", f, "type", header.Kind, "data age",
		common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))

	// Import the snapshot in batches to prevent disk thrashing
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
		batch  = db.NewBatch()
	)
	for {
		// Read the next entry
		var (
			op       byte
			key, val []byte
		)
		if err := stream.Decode(&op); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if err := stream.Decode(&key); err != nil {
			return err
		}
		if err := stream.Decode(&val); err != nil {
			return err
		}
		if count < startIndex {
			count++
			continue
		}
		switch op {
		case OpBatchDel:
			batch.Delete(key)
		case OpBatchAdd:
			batch.Put(key, val)
		default:
			return fmt.Errorf("unknown op %d", op)
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		// Check for interruption emitted by Ctrl-C
		if count%1000 == 0 {
			select {
			case <-interrupt:
				if err := batch.Write(); err != nil {
					return err
				}
				log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
		}
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
		count++
	}
	// Flush the last batch of snapshot data
	if batch.ValueSize() > 0 {
		if err := batch.Write(); err != nil {
			return err
		}
	}
	log.Info("Imported chain data", "file", f, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}
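// Wire-format sketch (illustrative, not part of the original file): after the
// RLP-encoded exportHeader, the stream is a flat sequence of (op, key, value)
// triples, each field encoded as a separate RLP value rather than as a list:
//
//	rlp.Encode(w, byte(OpBatchAdd)) // 1) operation byte
//	rlp.Encode(w, key)              // 2) raw key bytes
//	rlp.Encode(w, val)              // 3) raw value bytes
//
// ImportLDBData decodes them back in exactly that order, which is why the
// three stream.Decode calls above must never be reordered.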
// ChainDataIterator is an interface that wraps all necessary functions to
// iterate over the exported chain data.
type ChainDataIterator interface {
	// Next returns the key-value pair for the next exported entry in the iterator.
	// When the end is reached, it will return (0, nil, nil, false).
	Next() (byte, []byte, []byte, bool)

	// Release releases associated resources. Release should always succeed and
	// can be called multiple times without causing an error.
	Release()
}

// ExportChaindata exports the given data type into the specified file,
// truncating any data already present in the file. If the suffix is '.gz',
// gzip compression is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
	log.Info("Exporting chain data", "file", fn, "kind", kind)
	defer iter.Release()

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Write the header
	if err := rlp.Encode(writer, &exportHeader{
		Magic:    exportMagic,
		Version:  0,
		Kind:     kind,
		UnixTime: uint64(time.Now().Unix()),
	}); err != nil {
		return err
	}
	// Extract data from the source iterator and dump it out to the file
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
	)
	for {
		op, key, val, ok := iter.Next()
		if !ok {
			break
		}
		if err := rlp.Encode(writer, op); err != nil {
			return err
		}
		if err := rlp.Encode(writer, key); err != nil {
			return err
		}
		if err := rlp.Encode(writer, val); err != nil {
			return err
		}
		if count%1000 == 0 {
			// Check for interruption emitted by Ctrl-C
			select {
			case <-interrupt:
				log.Info("Chain data exporting interrupted", "file", fn,
					"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
			if time.Since(logged) > 8*time.Second {
				log.Info("Exporting chain data", "file", fn, "kind", kind,
					"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
		count++
	}
	log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}
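// Minimal ChainDataIterator sketch (illustrative, not part of the original
// file): adapts an in-memory slice of key/value pairs so it can be handed to
// ExportChaindata. The type and field names below are hypothetical.

type kvPair struct {
	key, val []byte
}

type sliceIterator struct {
	pairs []kvPair
	pos   int
}

// Next emits every pair as an OpBatchAdd record and signals the end with the
// documented (0, nil, nil, false) sentinel.
func (it *sliceIterator) Next() (byte, []byte, []byte, bool) {
	if it.pos >= len(it.pairs) {
		return 0, nil, nil, false
	}
	p := it.pairs[it.pos]
	it.pos++
	return OpBatchAdd, p.key, p.val, true
}

// Release is a no-op for the in-memory adapter; it is safe to call repeatedly.
func (it *sliceIterator) Release() {}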