github.com/ethxdao/go-ethereum@v0.0.0-20221218102228-5ae34a9cc189/cmd/utils/cmd.go

// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
	"bufio"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/ethxdao/go-ethereum/common"
	"github.com/ethxdao/go-ethereum/core"
	"github.com/ethxdao/go-ethereum/core/rawdb"
	"github.com/ethxdao/go-ethereum/core/types"
	"github.com/ethxdao/go-ethereum/crypto"
	"github.com/ethxdao/go-ethereum/eth/ethconfig"
	"github.com/ethxdao/go-ethereum/ethdb"
	"github.com/ethxdao/go-ethereum/internal/debug"
	"github.com/ethxdao/go-ethereum/log"
	"github.com/ethxdao/go-ethereum/node"
	"github.com/ethxdao/go-ethereum/rlp"

	"github.com/urfave/cli/v2" // needed for the *cli.Context parameter of StartNode; missing in the original listing
)

const (
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}
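
// Fatalf is typically invoked from the command packages on unrecoverable
// errors; an illustrative call site (a sketch, not part of the original file):
//
//	datadir := ctx.String("datadir")
//	if datadir == "" {
//	    utils.Fatalf("no data directory specified")
//	}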

func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)

		minFreeDiskSpace := 2 * ethconfig.Defaults.TrieDirtyCache // Default 2 * 256MB
		if ctx.IsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.Int(MinFreeDiskSpaceFlag.Name)
		} else if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = 2 * ctx.Int(CacheFlag.Name) * ctx.Int(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

		shutdown := func() {
			log.Info("Got interrupt, shutting down...")
			go stack.Close()
			for i := 10; i > 0; i-- {
				<-sigc
				if i > 1 {
					log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
				}
			}
			debug.Exit() // ensure trace and CPU profile data is flushed.
			debug.LoudPanic("boom")
		}

		if isConsole {
			// In JS console mode, SIGINT is ignored because it's handled by the console.
			// However, SIGTERM still shuts down the node.
			for {
				sig := <-sigc
				if sig == syscall.SIGTERM {
					shutdown()
					return
				}
			}
		} else {
			<-sigc
			shutdown()
		}
	}()
}
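
// An illustrative call site (a sketch, not part of the original file): a
// urfave/cli action that boots a node and blocks until StartNode's signal
// handler shuts it down. The empty node.Config is a placeholder.
//
//	func runNode(ctx *cli.Context) error {
//	    stack, err := node.New(&node.Config{})
//	    if err != nil {
//	        return err
//	    }
//	    StartNode(ctx, stack, false)
//	    stack.Wait() // block until the node is stopped
//	    return nil
//	}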

func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace))
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. Geth will shut down if disk space runs below the critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical))
		}
		time.Sleep(30 * time.Second)
	}
}

func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// Don't import the genesis block, it's assumed to be present already
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if _, err := chain.InsertChain(missing); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}
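
// The input format is simply consecutive RLP-encoded blocks, optionally
// gzip-compressed, as produced by ExportChain below. A minimal sketch of
// writing such a file by hand, assuming a blocks []*types.Block slice
// (illustrative, not part of the original file):
//
//	fh, err := os.Create("chain.rlp")
//	if err != nil {
//	    return err
//	}
//	defer fh.Close()
//	for _, block := range blocks {
//	    if err := rlp.Encode(fh, block); err != nil {
//	        return err
//	    }
//	}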

func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check block presence; the state is available at head
		if head.NumberU64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)

	return nil
}
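
// ExportChain and ImportChain are symmetric; a round trip between two chains
// (illustrative sketch, assuming two initialised *core.BlockChain instances)
// would be:
//
//	if err := ExportChain(source, "chain.rlp.gz"); err != nil {
//	    log.Crit("Export failed", "err", err)
//	}
//	if err := ImportChain(target, "chain.rlp.gz"); err != nil {
//	    log.Crit("Import failed", "err", err)
//	}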

// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain to", "file", fn)
	return nil
}
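
// The [first, last] range makes ExportAppendChain suitable for incremental
// dumps, e.g. exporting a chain in fixed-size windows (illustrative sketch,
// with head assumed to be chain.CurrentBlock().NumberU64()):
//
//	step := uint64(1000)
//	for first := uint64(0); first <= head; first += step {
//	    last := first + step - 1
//	    if last > head {
//	        last = head
//	    }
//	    if err := ExportAppendChain(chain, "chain.rlp", first, last); err != nil {
//	        log.Crit("Export failed", "err", err)
//	    }
//	}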

// ImportPreimages imports a batch of exported hash preimages into the database.
// It's part of the deprecated functionality and should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte

		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough were gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch of preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}
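
// The preimage dump format is a plain sequence of RLP-encoded byte strings,
// one per preimage; the database key is recomputed as Keccak256(blob) on
// import, so only the raw preimage bytes are stored. Writing one entry by
// hand would look like (illustrative):
//
//	if err := rlp.Encode(writer, preimage); err != nil {
//	    return err
//	}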

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
// It's part of the deprecated functionality and should be removed in the future.
func ExportPreimages(db ethdb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}

// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
	Magic    string // Always set to 'gethdbdump' for disambiguation
	Version  uint64
	Kind     string
	UnixTime uint64
}

const exportMagic = "gethdbdump"
const (
	OpBatchAdd = 0
	OpBatchDel = 1
)
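
// The resulting dump is a single RLP stream laid out as follows (a sketch of
// the format implemented by ExportChaindata/ImportLDBData below):
//
//	RLP(exportHeader) | RLP(op) RLP(key) RLP(value) | RLP(op) RLP(key) RLP(value) | ...
//
// where op is OpBatchAdd (0) or OpBatchDel (1); a value field is present in
// every record, even deletions.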

// ImportLDBData imports a batch of snapshot data into the database
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
	log.Info("Importing leveldb data", "file", f)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(f)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(f, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Read the header
	var header exportHeader
	if err := stream.Decode(&header); err != nil {
		return fmt.Errorf("could not decode header: %v", err)
	}
	if header.Magic != exportMagic {
		return errors.New("incompatible data, wrong magic")
	}
	if header.Version != 0 {
		return fmt.Errorf("incompatible version %d (only 0 is supported)", header.Version)
	}
	log.Info("Importing data", "file", f, "type", header.Kind, "data age",
		common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))

	// Import the snapshot in batches to prevent disk thrashing
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
		batch  = db.NewBatch()
	)
	for {
		// Read the next entry
		var (
			op       byte
			key, val []byte
		)
		if err := stream.Decode(&op); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if err := stream.Decode(&key); err != nil {
			return err
		}
		if err := stream.Decode(&val); err != nil {
			return err
		}
		if count < startIndex {
			count++
			continue
		}
		switch op {
		case OpBatchDel:
			batch.Delete(key)
		case OpBatchAdd:
			batch.Put(key, val)
		default:
			return fmt.Errorf("unknown op %d", op)
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		// Check for interruption emitted by ctrl+c
		if count%1000 == 0 {
			select {
			case <-interrupt:
				if err := batch.Write(); err != nil {
					return err
				}
				log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
		}
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
		count++
	}
	// Flush the last batch of snapshot data
	if batch.ValueSize() > 0 {
		if err := batch.Write(); err != nil {
			return err
		}
	}
	log.Info("Imported chain data", "file", f, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}

// ChainDataIterator is an interface that wraps all necessary functions to
// iterate over the chain data being exported.
type ChainDataIterator interface {
	// Next returns the op-key-value tuple for the next entry to export.
	// When the end is reached, it will return (0, nil, nil, false).
	Next() (byte, []byte, []byte, bool)

	// Release releases associated resources. Release should always succeed and
	// can be called multiple times without causing an error.
	Release()
}
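
// A minimal sketch of an implementation (illustrative, not part of the
// original file): wrapping a raw ethdb.Iterator so that every database entry
// is exported as an OpBatchAdd record.
//
//	type rawIterator struct {
//	    it ethdb.Iterator
//	}
//
//	func (r *rawIterator) Next() (byte, []byte, []byte, bool) {
//	    if !r.it.Next() {
//	        return 0, nil, nil, false
//	    }
//	    return OpBatchAdd, r.it.Key(), r.it.Value(), true
//	}
//
//	func (r *rawIterator) Release() { r.it.Release() }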

// ExportChaindata exports the given kind of chain data into the specified file,
// truncating any data already present in the file. If the file name has the
// '.gz' suffix, gzip compression is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
	log.Info("Exporting chain data", "file", fn, "kind", kind)
	defer iter.Release()

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Write the header
	if err := rlp.Encode(writer, &exportHeader{
		Magic:    exportMagic,
		Version:  0,
		Kind:     kind,
		UnixTime: uint64(time.Now().Unix()),
	}); err != nil {
		return err
	}
	// Extract data from the source iterator and dump it out to the file
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
	)
	for {
		op, key, val, ok := iter.Next()
		if !ok {
			break
		}
		if err := rlp.Encode(writer, op); err != nil {
			return err
		}
		if err := rlp.Encode(writer, key); err != nil {
			return err
		}
		if err := rlp.Encode(writer, val); err != nil {
			return err
		}
		if count%1000 == 0 {
			// Check for interruption emitted by ctrl+c
			select {
			case <-interrupt:
				log.Info("Chain data exporting interrupted", "file", fn,
					"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
			if time.Since(logged) > 8*time.Second {
				log.Info("Exporting chain data", "file", fn, "kind", kind,
					"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
		count++
	}
	log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}
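
// ImportLDBData above is the inverse of ExportChaindata; a full round trip
// (illustrative sketch, assuming a ChainDataIterator over the source data)
// would be:
//
//	stop := make(chan struct{})
//	if err := ExportChaindata("dump.gz", "snapshot", iter, stop); err != nil {
//	    log.Crit("Export failed", "err", err)
//	}
//	if err := ImportLDBData(db, "dump.gz", 0, stop); err != nil {
//	    log.Crit("Import failed", "err", err)
//	}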