github.com/core-coin/go-core/v2@v2.1.9/cmd/utils/cmd.go

// Copyright 2014 by the Authors
// This file is part of go-core.
//
// go-core is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-core is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-core. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-core commands.
package utils

import (
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"

	"github.com/core-coin/go-core/v2/xcbdb"

	"github.com/core-coin/go-core/v2/common"
	"github.com/core-coin/go-core/v2/core"
	"github.com/core-coin/go-core/v2/core/rawdb"
	"github.com/core-coin/go-core/v2/core/types"
	"github.com/core-coin/go-core/v2/crypto"
	"github.com/core-coin/go-core/v2/internal/debug"
	"github.com/core-coin/go-core/v2/log"
	"github.com/core-coin/go-core/v2/node"
	"github.com/core-coin/go-core/v2/rlp"
)

const (
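	// importBatchSize is the number of blocks decoded and inserted per batch during chain import.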
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}

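// StartNode boots up the given protocol stack and installs a signal handler:
// the first SIGINT or SIGTERM triggers a graceful shutdown, and after ten
// further interrupts the process panics to force termination.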
func StartNode(stack *node.Node) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)
		<-sigc
		log.Info("Got interrupt, shutting down...")
		go stack.Close()
		for i := 10; i > 0; i-- {
			<-sigc
			if i > 1 {
				log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
			}
		}
		debug.Exit() // ensure trace and CPU profile data is flushed.
		debug.LoudPanic("boom")
	}()
}

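// ImportChain imports RLP-encoded blocks from the named file (unwrapping a gzip
// stream if the name ends in .gz) into the given blockchain. Blocks are processed
// in batches of importBatchSize; the genesis block is skipped, batches whose
// blocks are all already known are not re-imported, and a SIGINT or SIGTERM stops
// the import at the next batch boundary.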
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// Don't import the genesis block
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if _, err := chain.InsertChain(missing); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}

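// missingBlocks returns the tail of the given block slice starting at the first
// block that is not already present in the local chain (for blocks at or above
// the current head, the associated state must be present as well).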
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check block presence; state is only guaranteed at head
		if head.NumberU64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)

	return nil
}

// ExportAppendChain exports the given block range of a blockchain into the
// specified file, appending to the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain to", "file", fn)
	return nil
}

// ImportPreimages imports a batch of exported hash preimages into the database.
func ImportPreimages(db xcbdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte

		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush once enough have been gathered
		preimages[crypto.SHA3Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch of preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
func ExportPreimages(db xcbdb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}