github.com/theQRL/go-zond@v0.1.1/cmd/utils/cmd.go

// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-zond commands.
package utils

import (
	"bufio"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/theQRL/go-zond/common"
	"github.com/theQRL/go-zond/core"
	"github.com/theQRL/go-zond/core/rawdb"
	"github.com/theQRL/go-zond/core/types"
	"github.com/theQRL/go-zond/crypto"
	"github.com/theQRL/go-zond/internal/debug"
	"github.com/theQRL/go-zond/log"
	"github.com/theQRL/go-zond/node"
	"github.com/theQRL/go-zond/rlp"
	"github.com/theQRL/go-zond/zond/ethconfig"
	"github.com/theQRL/go-zond/zonddb"
	"github.com/urfave/cli/v2"
)

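// importBatchSize bounds how many RLP-decoded blocks are buffered per batch
// during chain import, keeping memory use flat while amortizing the cost of
// InsertChain calls.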
const (
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}

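// StartNode boots up the given protocol stack and spawns a goroutine that
// handles shutdown signals and, when configured, monitors free disk space.
// A minimal usage sketch (hypothetical caller; the real cli actions supply
// their own *cli.Context and node configuration):
//
//	stack, err := node.New(&node.Config{})
//	if err != nil {
//		Fatalf("Failed to create node: %v", err)
//	}
//	StartNode(ctx, stack, false)
//	stack.Wait()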
func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)

		minFreeDiskSpace := 2 * ethconfig.Defaults.TrieDirtyCache // Default 2 * 256MB
		if ctx.IsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.Int(MinFreeDiskSpaceFlag.Name)
		} else if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = 2 * ctx.Int(CacheFlag.Name) * ctx.Int(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

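		// Closing the stack can block; run it in the background and drain up
		// to ten further signals so an operator can escalate a stuck shutdown
		// into a loud panic (with stack dump) by interrupting repeatedly.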
		shutdown := func() {
			log.Info("Got interrupt, shutting down...")
			go stack.Close()
			for i := 10; i > 0; i-- {
				<-sigc
				if i > 1 {
					log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
				}
			}
			debug.Exit() // ensure trace and CPU profile data is flushed.
			debug.LoudPanic("boom")
		}

		if isConsole {
			// In JS console mode, SIGINT is ignored because it's handled by the console.
			// However, SIGTERM still shuts down the node.
			for {
				sig := <-sigc
				if sig == syscall.SIGTERM {
					shutdown()
					return
				}
			}
		} else {
			<-sigc
			shutdown()
		}
	}()
}

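// monitorFreeDiskSpace polls the free disk space under path every 30 seconds
// and sends SIGTERM on sigc once it drops below freeDiskSpaceCritical, so the
// node can shut down before the database is corrupted.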
func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	if path == "" {
		return
	}
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down the node to prevent database corruption.", "available", common.StorageSize(freeSpace), "path", path)
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. The node will shut down if disk space drops below the critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical), "path", path)
		}
		time.Sleep(30 * time.Second)
	}
}

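// ImportChain reads a (possibly gzip-compressed) RLP-encoded chain dump from
// fn and inserts the blocks into chain in batches of importBatchSize, skipping
// batches whose blocks are already fully present.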
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
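	// A zero inputLimit leaves the stream's top-level value size unchecked,
	// which is acceptable here since the dump is trusted local input.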
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// Don't import the genesis block (number 0).
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if failindex, err := chain.InsertChain(missing); err != nil {
			var failnumber uint64
			if failindex > 0 && failindex < len(missing) {
				failnumber = missing[failindex].NumberU64()
			} else {
				failnumber = missing[0].NumberU64()
			}
			return fmt.Errorf("invalid block %d: %v", failnumber, err)
		}
	}
	return nil
}

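// missingBlocks returns the tail of blocks that is not yet fully present in
// chain: below the current head only the block itself has to exist, while at
// or above the head the associated state must be available as well.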
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check the block itself; state
		// is only guaranteed to be available around the head.
		if head.Number.Uint64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
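		// Defers run last-in, first-out, so the gzip stream is flushed and
		// closed before the underlying file handle.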
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)

	return nil
}

// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)
	return nil
}

// ImportPreimages imports a batch of exported hash preimages into the database.
// It's part of the deprecated functionality and should be removed in the future.
func ImportPreimages(db zonddb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte

		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough was gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch of preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
// It's part of the deprecated functionality and should be removed in the future.
func ExportPreimages(db zonddb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
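	// "secure-key-" is the legacy database prefix under which the preimages
	// of hashed trie keys are stored.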
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}

// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
	Magic    string // Always set to exportMagic ('gzonddbdump') for disambiguation
	Version  uint64
	Kind     string
	UnixTime uint64
}

const exportMagic = "gzonddbdump"
const (
	OpBatchAdd = 0
	OpBatchDel = 1
)

// ImportLDBData imports a batch of snapshot data into the database
func ImportLDBData(db zonddb.Database, f string, startIndex int64, interrupt chan struct{}) error {
	log.Info("Importing leveldb data", "file", f)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(f)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(f, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Read the header
	var header exportHeader
	if err := stream.Decode(&header); err != nil {
		return fmt.Errorf("could not decode header: %v", err)
	}
	if header.Magic != exportMagic {
		return errors.New("incompatible data, wrong magic")
	}
	if header.Version != 0 {
		return fmt.Errorf("incompatible version %d (only 0 is supported)", header.Version)
	}
	log.Info("Importing data", "file", f, "type", header.Kind, "data age",
		common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))

	// Import the snapshot in batches to prevent disk thrashing
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
		batch  = db.NewBatch()
	)
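	// After the header, the dump is a flat sequence of (op, key, value)
	// triples, each encoded as an individual RLP value; OpBatchDel entries
	// carry a value that is decoded but ignored.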
	for {
		// Read the next entry
		var (
			op       byte
			key, val []byte
		)
		if err := stream.Decode(&op); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if err := stream.Decode(&key); err != nil {
			return err
		}
		if err := stream.Decode(&val); err != nil {
			return err
		}
		if count < startIndex {
			count++
			continue
		}
		switch op {
		case OpBatchDel:
			batch.Delete(key)
		case OpBatchAdd:
			batch.Put(key, val)
		default:
			return fmt.Errorf("unknown op %d", op)
		}
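		// Flush once the pending writes exceed the database's ideal batch
		// size, keeping memory bounded for arbitrarily large dumps.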
		if batch.ValueSize() > zonddb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		// Check interruption emitted by ctrl+c
		if count%1000 == 0 {
			select {
			case <-interrupt:
				if err := batch.Write(); err != nil {
					return err
				}
				log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
		}
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
		count++
	}
	// Flush the last batch of snapshot data
	if batch.ValueSize() > 0 {
		if err := batch.Write(); err != nil {
			return err
		}
	}
	log.Info("Imported chain data", "file", f, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}

// ChainDataIterator is an interface that wraps all the functions needed to
// iterate over the chain data being exported.
type ChainDataIterator interface {
	// Next returns the op code and key-value pair for the next entry to
	// export. When the end is reached, it returns (0, nil, nil, false).
	Next() (byte, []byte, []byte, bool)

	// Release releases associated resources. Release should always succeed and
	// can be called multiple times without causing an error.
	Release()
}
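
// A minimal sketch of a ChainDataIterator implementation over in-memory
// slices (a hypothetical helper, e.g. for tests; not part of this package's
// API):
//
//	type sliceIter struct {
//		ops        []byte
//		keys, vals [][]byte
//		pos        int
//	}
//
//	func (s *sliceIter) Next() (byte, []byte, []byte, bool) {
//		if s.pos >= len(s.ops) {
//			return 0, nil, nil, false
//		}
//		i := s.pos
//		s.pos++
//		return s.ops[i], s.keys[i], s.vals[i], true
//	}
//
//	func (s *sliceIter) Release() {}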

// ExportChaindata exports the given data type into the specified file,
// truncating any data already present in the file. If fn has the suffix
// '.gz', the output is gzip-compressed.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
	log.Info("Exporting chain data", "file", fn, "kind", kind)
	defer iter.Release()

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Write the header
	if err := rlp.Encode(writer, &exportHeader{
		Magic:    exportMagic,
		Version:  0,
		Kind:     kind,
		UnixTime: uint64(time.Now().Unix()),
	}); err != nil {
		return err
	}
	// Extract data from the source iterator and dump it out to the file
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
	)
	for {
		op, key, val, ok := iter.Next()
		if !ok {
			break
		}
		if err := rlp.Encode(writer, op); err != nil {
			return err
		}
		if err := rlp.Encode(writer, key); err != nil {
			return err
		}
		if err := rlp.Encode(writer, val); err != nil {
			return err
		}
		if count%1000 == 0 {
			// Check interruption emitted by ctrl+c
			select {
			case <-interrupt:
				log.Info("Chain data exporting interrupted", "file", fn,
					"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
			if time.Since(logged) > 8*time.Second {
				log.Info("Exporting chain data", "file", fn, "kind", kind,
					"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
		count++
	}
	log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}