github.com/klaytn/klaytn@v1.12.1/cmd/utils/cmd.go (about)

     1  // Modifications Copyright 2018 The klaytn Authors
     2  // Copyright 2014 The go-ethereum Authors
     3  // This file is part of go-ethereum.
     4  //
     5  // go-ethereum is free software: you can redistribute it and/or modify
     6  // it under the terms of the GNU General Public License as published by
     7  // the Free Software Foundation, either version 3 of the License, or
     8  // (at your option) any later version.
     9  //
    10  // go-ethereum is distributed in the hope that it will be useful,
    11  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    12  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    13  // GNU General Public License for more details.
    14  //
    15  // You should have received a copy of the GNU General Public License
    16  // along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.
    17  //
    18  // This file is derived from cmd/utils/cmd.go (2018/06/04).
    19  // Modified and improved for the klaytn development.
    20  
    21  package utils
    22  
    23  import (
    24  	"compress/gzip"
    25  	"fmt"
    26  	"io"
    27  	"os"
    28  	"os/signal"
    29  	"strings"
    30  	"syscall"
    31  
    32  	"github.com/klaytn/klaytn/blockchain"
    33  	"github.com/klaytn/klaytn/blockchain/types"
    34  	"github.com/klaytn/klaytn/log"
    35  	"github.com/klaytn/klaytn/node"
    36  	"github.com/klaytn/klaytn/rlp"
    37  )
    38  
const (
	// importBatchSize is the number of blocks decoded from the input stream
	// and handed to InsertChain per iteration in ImportChain. It bounds the
	// size of the in-memory block buffer and sets the granularity at which
	// an interrupt (Ctrl-C) can stop a running import.
	importBatchSize = 2500
)
    42  
// logger is the package-wide logger for cmd/utils, tagged with the CMDUtils module.
var logger = log.NewModuleLogger(log.CMDUtils)
    44  
    45  func StartNode(stack *node.Node) {
    46  	if err := stack.Start(); err != nil {
    47  		log.Fatalf("Error starting protocol stack: %v", err)
    48  	}
    49  	go func() {
    50  		sigc := make(chan os.Signal, 1)
    51  		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
    52  		defer signal.Stop(sigc)
    53  		<-sigc
    54  		logger.Info("Got interrupt, shutting down...")
    55  		go stack.Stop()
    56  		for i := 10; i > 0; i-- {
    57  			<-sigc
    58  			if i > 1 {
    59  				logger.Info("Already shutting down, interrupt more to panic.", "times", i-1)
    60  			}
    61  		}
    62  	}()
    63  }
    64  
    65  func ImportChain(chain *blockchain.BlockChain, fn string) error {
    66  	// Watch for Ctrl-C while the import is running.
    67  	// If a signal is received, the import will stop at the next batch.
    68  	interrupt := make(chan os.Signal, 1)
    69  	stop := make(chan struct{})
    70  	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
    71  	defer signal.Stop(interrupt)
    72  	defer close(interrupt)
    73  	go func() {
    74  		if _, ok := <-interrupt; ok {
    75  			logger.Info("Interrupted during import, stopping at next batch")
    76  		}
    77  		close(stop)
    78  	}()
    79  	checkInterrupt := func() bool {
    80  		select {
    81  		case <-stop:
    82  			return true
    83  		default:
    84  			return false
    85  		}
    86  	}
    87  
    88  	logger.Info("Importing blockchain", "file", fn)
    89  
    90  	// Open the file handle and potentially unwrap the gzip stream
    91  	fh, err := os.Open(fn)
    92  	if err != nil {
    93  		return err
    94  	}
    95  	defer fh.Close()
    96  
    97  	var reader io.Reader = fh
    98  	if strings.HasSuffix(fn, ".gz") {
    99  		if reader, err = gzip.NewReader(reader); err != nil {
   100  			return err
   101  		}
   102  	}
   103  	stream := rlp.NewStream(reader, 0)
   104  
   105  	// Run actual the import.
   106  	blocks := make(types.Blocks, importBatchSize)
   107  	n := 0
   108  	for batch := 0; ; batch++ {
   109  		// Load a batch of RLP blocks.
   110  		if checkInterrupt() {
   111  			return fmt.Errorf("interrupted")
   112  		}
   113  		i := 0
   114  		for ; i < importBatchSize; i++ {
   115  			var b types.Block
   116  			if err := stream.Decode(&b); err == io.EOF {
   117  				break
   118  			} else if err != nil {
   119  				return fmt.Errorf("at block %d: %v", n, err)
   120  			}
   121  			// don't import first block
   122  			if b.NumberU64() == 0 {
   123  				i--
   124  				continue
   125  			}
   126  			blocks[i] = &b
   127  			n++
   128  		}
   129  		if i == 0 {
   130  			break
   131  		}
   132  		// Import the batch.
   133  		if checkInterrupt() {
   134  			return fmt.Errorf("interrupted")
   135  		}
   136  		missing := missingBlocks(chain, blocks[:i])
   137  		if len(missing) == 0 {
   138  			logger.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
   139  			continue
   140  		}
   141  		if _, err := chain.InsertChain(missing); err != nil {
   142  			return fmt.Errorf("invalid block %d: %v", n, err)
   143  		}
   144  	}
   145  	return nil
   146  }
   147  
   148  func missingBlocks(chain *blockchain.BlockChain, blocks []*types.Block) []*types.Block {
   149  	head := chain.CurrentBlock()
   150  	for i, block := range blocks {
   151  		// If we're behind the chain head, only check block, state is available at head
   152  		if head.NumberU64() > block.NumberU64() {
   153  			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
   154  				return blocks[i:]
   155  			}
   156  			continue
   157  		}
   158  		// If we're above the chain head, state availability is a must
   159  		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
   160  			return blocks[i:]
   161  		}
   162  	}
   163  	return nil
   164  }
   165  
   166  // ExportChain exports a blockchain into the specified file, truncating any data
   167  // already present in the file.
   168  func ExportChain(blockchain *blockchain.BlockChain, fn string) error {
   169  	logger.Info("Exporting blockchain", "file", fn)
   170  
   171  	// Open the file handle and potentially wrap with a gzip stream
   172  	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
   173  	if err != nil {
   174  		return err
   175  	}
   176  	defer fh.Close()
   177  
   178  	var writer io.Writer = fh
   179  	if strings.HasSuffix(fn, ".gz") {
   180  		writer = gzip.NewWriter(writer)
   181  		defer writer.(*gzip.Writer).Close()
   182  	}
   183  	// Iterate over the blocks and export them
   184  	if err := blockchain.Export(writer); err != nil {
   185  		return err
   186  	}
   187  	logger.Info("Exported blockchain", "file", fn)
   188  
   189  	return nil
   190  }
   191  
   192  // ExportAppendChain exports a blockchain into the specified file, appending to
   193  // the file if data already exists in it.
   194  func ExportAppendChain(blockchain *blockchain.BlockChain, fn string, first uint64, last uint64) error {
   195  	logger.Info("Exporting blockchain", "file", fn)
   196  
   197  	// Open the file handle and potentially wrap with a gzip stream
   198  	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
   199  	if err != nil {
   200  		return err
   201  	}
   202  	defer fh.Close()
   203  
   204  	var writer io.Writer = fh
   205  	if strings.HasSuffix(fn, ".gz") {
   206  		writer = gzip.NewWriter(writer)
   207  		defer writer.(*gzip.Writer).Close()
   208  	}
   209  	// Iterate over the blocks and export them
   210  	if err := blockchain.ExportN(writer, first, last); err != nil {
   211  		return err
   212  	}
   213  	logger.Info("Exported blockchain to", "file", fn)
   214  	return nil
   215  }
   216  
   217  // TODO-Klaytn Commented out due to mismatched interface.
   218  //// ImportPreimages imports a batch of exported hash preimages into the database.
   219  //func ImportPreimages(db *database.LevelDB, fn string) error {
   220  //	logger.Info("Importing preimages", "file", fn)
   221  //
   222  //	// Open the file handle and potentially unwrap the gzip stream
   223  //	fh, err := os.Open(fn)
   224  //	if err != nil {
   225  //		return err
   226  //	}
   227  //	defer fh.Close()
   228  //
   229  //	var reader io.Reader = fh
   230  //	if strings.HasSuffix(fn, ".gz") {
   231  //		if reader, err = gzip.NewReader(reader); err != nil {
   232  //			return err
   233  //		}
   234  //	}
   235  //	stream := rlp.NewStream(reader, 0)
   236  //
    237  //	// Import the preimages in batches to prevent disk thrashing
   238  //	preimages := make(map[common.Hash][]byte)
   239  //
   240  //	for {
   241  //		// Read the next entry and ensure it's not junk
   242  //		var blob []byte
   243  //
   244  //		if err := stream.Decode(&blob); err != nil {
   245  //			if err == io.EOF {
   246  //				break
   247  //			}
   248  //			return err
   249  //		}
   250  //		// Accumulate the preimages and flush when enough ws gathered
    251  //		// Accumulate the preimages and flush when enough was gathered
   252  //		if len(preimages) > 1024 {
   253  //			rawdb.WritePreimages(db, 0, preimages)
   254  //			preimages = make(map[common.Hash][]byte)
   255  //		}
   256  //	}
   257  //	// Flush the last batch preimage data
   258  //	if len(preimages) > 0 {
   259  //		rawdb.WritePreimages(db, 0, preimages)
   260  //	}
   261  //	return nil
   262  //}
   263  //
   264  //// ExportPreimages exports all known hash preimages into the specified file,
   265  //// truncating any data already present in the file.
   266  //func ExportPreimages(db *database.LevelDB, fn string) error {
   267  //	logger.Info("Exporting preimages", "file", fn)
   268  //
   269  //	// Open the file handle and potentially wrap with a gzip stream
   270  //	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
   271  //	if err != nil {
   272  //		return err
   273  //	}
   274  //	defer fh.Close()
   275  //
   276  //	var writer io.Writer = fh
   277  //	if strings.HasSuffix(fn, ".gz") {
   278  //		writer = gzip.NewWriter(writer)
   279  //		defer writer.(*gzip.Writer).Close()
   280  //	}
   281  //	// Iterate over the preimages and export them
   282  //	it := db.NewIteratorWithPrefix([]byte("secure-key-"))
   283  //	for it.Next() {
   284  //		if err := rlp.Encode(writer, it.Value()); err != nil {
   285  //			return err
   286  //		}
   287  //	}
   288  //	logger.Info("Exported preimages", "file", fn)
   289  //	return nil
   290  //}