github.com/intfoundation/intchain@v0.0.0-20220727031208-4316ad31ca73/cmd/utils/cmd.go (about)

     1  // Copyright 2014 The go-ethereum Authors
     2  // This file is part of go-ethereum.
     3  //
     4  // go-ethereum is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // go-ethereum is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU General Public License
    15  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  // Package utils contains internal helper functions for go-ethereum commands.
    18  package utils
    19  
    20  import (
    21  	"compress/gzip"
    22  	"fmt"
    23  	"github.com/intfoundation/intchain/consensus"
    24  	"github.com/intfoundation/intchain/intprotocol"
    25  	"gopkg.in/urfave/cli.v1"
    26  	"io"
    27  	"os"
    28  	"os/signal"
    29  	"runtime"
    30  	"strings"
    31  	"syscall"
    32  
    33  	"github.com/intfoundation/intchain/common"
    34  	"github.com/intfoundation/intchain/core"
    35  	"github.com/intfoundation/intchain/core/rawdb"
    36  	"github.com/intfoundation/intchain/core/types"
    37  	"github.com/intfoundation/intchain/crypto"
    38  	"github.com/intfoundation/intchain/intdb"
    39  	"github.com/intfoundation/intchain/log"
    40  	"github.com/intfoundation/intchain/node"
    41  	"github.com/intfoundation/intchain/rlp"
    42  )
    43  
const (
	// importBatchSize is the number of blocks decoded from the input stream
	// and inserted into the chain per batch during ImportChain.
	importBatchSize = 2500
)
    47  
    48  // Fatalf formats a message to standard error and exits the program.
    49  // The message is also printed to standard output if standard error
    50  // is redirected to a different file.
    51  func Fatalf(format string, args ...interface{}) {
    52  	w := io.MultiWriter(os.Stdout, os.Stderr)
    53  	if runtime.GOOS == "windows" {
    54  		// The SameFile check below doesn't work on Windows.
    55  		// stdout is unlikely to get redirected though, so just print there.
    56  		w = os.Stdout
    57  	} else {
    58  		outf, _ := os.Stdout.Stat()
    59  		errf, _ := os.Stderr.Stat()
    60  		if outf != nil && errf != nil && os.SameFile(outf, errf) {
    61  			w = os.Stderr
    62  		}
    63  	}
    64  	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
    65  	os.Exit(1)
    66  }
    67  
    68  func StartNode(ctx *cli.Context, stack *node.Node) error {
    69  	if err := stack.Start1(); err != nil {
    70  		Fatalf("Error starting protocol stack: %v", err)
    71  	}
    72  	//go func() {
    73  	//	sigc := make(chan os.Signal, 1)
    74  	//	signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
    75  	//	defer signal.Stop(sigc)
    76  	//	<-sigc
    77  	//	log.Info("Got interrupt, shutting down...")
    78  	//	go stack.Stop()
    79  	//	for i := 10; i > 0; i-- {
    80  	//		<-sigc
    81  	//		if i > 1 {
    82  	//			log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
    83  	//		}
    84  	//	}
    85  	//	debug.Exit() // ensure trace and CPU profile data is flushed.
    86  	//	debug.LoudPanic("boom")
    87  	//}()
    88  
    89  	mining := false
    90  	var intchain *intprotocol.IntChain
    91  	if err := stack.Service(&intchain); err == nil {
    92  		if ipbft, ok := intchain.Engine().(consensus.IPBFT); ok {
    93  			mining = ipbft.ShouldStart()
    94  			if mining {
    95  				stack.GetLogger().Info("IPBFT Consensus Engine will be start shortly")
    96  			}
    97  		}
    98  	}
    99  
   100  	// Start auxiliary services if enabled
   101  	if mining || ctx.GlobalBool(DeveloperFlag.Name) {
   102  		stack.GetLogger().Info("Mine will be start shortly")
   103  		// Mining only makes sense if a full intchain node is running
   104  		var intchain *intprotocol.IntChain
   105  		if err := stack.Service(&intchain); err != nil {
   106  			Fatalf("INT Chain service not running: %v", err)
   107  		}
   108  
   109  		// Use a reduced number of threads if requested
   110  		if threads := ctx.GlobalInt(MinerThreadsFlag.Name); threads > 0 {
   111  			type threaded interface {
   112  				SetThreads(threads int)
   113  			}
   114  			if th, ok := intchain.Engine().(threaded); ok {
   115  				th.SetThreads(threads)
   116  			}
   117  		}
   118  		// Set the gas price to the limits from the CLI and start mining
   119  		intchain.TxPool().SetGasPrice(GlobalBig(ctx, MinerGasPriceFlag.Name))
   120  		if err := intchain.StartMining(true); err != nil {
   121  			Fatalf("Failed to start mining: %v", err)
   122  		}
   123  	}
   124  
   125  	return nil
   126  }
   127  
// ImportChain reads RLP-encoded blocks from the file fn (transparently
// gunzipped when the name ends in ".gz") and inserts them into chain in
// batches of importBatchSize. SIGINT/SIGTERM stops the import cleanly at
// the next batch boundary.
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		// When the import finishes normally, the deferred close(interrupt)
		// above unblocks this receive with ok == false, letting the
		// goroutine exit without logging.
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	// checkInterrupt reports (without blocking) whether a stop was requested.
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import. The blocks slice is reused across batches;
	// n counts every block decoded so far (for error reporting).
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// don't import first block (the genesis, number 0); i-- keeps
			// the current slot free for the next decoded block.
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		// An empty batch means the stream hit EOF immediately: done.
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		// Only insert the suffix of blocks the chain doesn't already have.
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if _, err := chain.InsertChain(missing); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}
   208  
   209  func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
   210  	head := chain.CurrentBlock()
   211  	for i, block := range blocks {
   212  		// If we're behind the chain head, only check block, state is available at head
   213  		if head.NumberU64() > block.NumberU64() {
   214  			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
   215  				return blocks[i:]
   216  			}
   217  			continue
   218  		}
   219  		// If we're above the chain head, state availability is a must
   220  		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
   221  			return blocks[i:]
   222  		}
   223  	}
   224  	return nil
   225  }
   226  
   227  func ExportChain(blockchain *core.BlockChain, fn string) error {
   228  	log.Info("Exporting blockchain", "file", fn)
   229  	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
   230  	if err != nil {
   231  		return err
   232  	}
   233  	defer fh.Close()
   234  
   235  	var writer io.Writer = fh
   236  	if strings.HasSuffix(fn, ".gz") {
   237  		writer = gzip.NewWriter(writer)
   238  		defer writer.(*gzip.Writer).Close()
   239  	}
   240  
   241  	if err := blockchain.Export(writer); err != nil {
   242  		return err
   243  	}
   244  	log.Info("Exported blockchain", "file", fn)
   245  
   246  	return nil
   247  }
   248  
   249  func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
   250  	log.Info("Exporting blockchain", "file", fn)
   251  	// TODO verify mode perms
   252  	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
   253  	if err != nil {
   254  		return err
   255  	}
   256  	defer fh.Close()
   257  
   258  	var writer io.Writer = fh
   259  	if strings.HasSuffix(fn, ".gz") {
   260  		writer = gzip.NewWriter(writer)
   261  		defer writer.(*gzip.Writer).Close()
   262  	}
   263  
   264  	if err := blockchain.ExportN(writer, first, last); err != nil {
   265  		return err
   266  	}
   267  	log.Info("Exported blockchain to", "file", fn)
   268  	return nil
   269  }
   270  
   271  // ImportPreimages imports a batch of exported hash preimages into the database.
   272  func ImportPreimages(db intdb.Database, fn string) error {
   273  	log.Info("Importing preimages", "file", fn)
   274  
   275  	// Open the file handle and potentially unwrap the gzip stream
   276  	fh, err := os.Open(fn)
   277  	if err != nil {
   278  		return err
   279  	}
   280  	defer fh.Close()
   281  
   282  	var reader io.Reader = fh
   283  	if strings.HasSuffix(fn, ".gz") {
   284  		if reader, err = gzip.NewReader(reader); err != nil {
   285  			return err
   286  		}
   287  	}
   288  	stream := rlp.NewStream(reader, 0)
   289  
   290  	// Import the preimages in batches to prevent disk trashing
   291  	preimages := make(map[common.Hash][]byte)
   292  
   293  	for {
   294  		// Read the next entry and ensure it's not junk
   295  		var blob []byte
   296  
   297  		if err := stream.Decode(&blob); err != nil {
   298  			if err == io.EOF {
   299  				break
   300  			}
   301  			return err
   302  		}
   303  		// Accumulate the preimages and flush when enough ws gathered
   304  		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
   305  		if len(preimages) > 1024 {
   306  			rawdb.WritePreimages(db, preimages)
   307  			preimages = make(map[common.Hash][]byte)
   308  		}
   309  	}
   310  	// Flush the last batch preimage data
   311  	if len(preimages) > 0 {
   312  		rawdb.WritePreimages(db, preimages)
   313  	}
   314  	return nil
   315  }
   316  
   317  // ExportPreimages exports all known hash preimages into the specified file,
   318  // truncating any data already present in the file.
   319  func ExportPreimages(db intdb.Database, fn string) error {
   320  	log.Info("Exporting preimages", "file", fn)
   321  
   322  	// Open the file handle and potentially wrap with a gzip stream
   323  	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
   324  	if err != nil {
   325  		return err
   326  	}
   327  	defer fh.Close()
   328  
   329  	var writer io.Writer = fh
   330  	if strings.HasSuffix(fn, ".gz") {
   331  		writer = gzip.NewWriter(writer)
   332  		defer writer.(*gzip.Writer).Close()
   333  	}
   334  	// Iterate over the preimages and export them
   335  	it := db.NewIteratorWithPrefix([]byte("secure-key-"))
   336  	defer it.Release()
   337  
   338  	for it.Next() {
   339  		if err := rlp.Encode(writer, it.Value()); err != nil {
   340  			return err
   341  		}
   342  	}
   343  	log.Info("Exported preimages", "file", fn)
   344  	return nil
   345  }