github.com/cryptotooltop/go-ethereum@v0.0.0-20231103184714-151d1922f3e5/cmd/utils/cmd.go

// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
	"bufio"
	"compress/gzip"
	"errors"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"
	"time"

	"gopkg.in/urfave/cli.v1"

	"github.com/scroll-tech/go-ethereum/common"
	"github.com/scroll-tech/go-ethereum/core"
	"github.com/scroll-tech/go-ethereum/core/rawdb"
	"github.com/scroll-tech/go-ethereum/core/types"
	"github.com/scroll-tech/go-ethereum/crypto"
	"github.com/scroll-tech/go-ethereum/eth/ethconfig"
	"github.com/scroll-tech/go-ethereum/ethdb"
	"github.com/scroll-tech/go-ethereum/internal/debug"
	"github.com/scroll-tech/go-ethereum/log"
	"github.com/scroll-tech/go-ethereum/node"
	"github.com/scroll-tech/go-ethereum/rlp"
)

const (
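	// importBatchSize is the number of blocks decoded from the input stream
	// and inserted into the chain per import batch.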
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}

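// StartNode boots up the given protocol stack and spawns a goroutine that
// waits for SIGINT/SIGTERM to trigger a graceful shutdown, optionally
// monitoring the free disk space of the instance directory as well.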
func StartNode(ctx *cli.Context, stack *node.Node) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)

		minFreeDiskSpace := ethconfig.Defaults.TrieDirtyCache
		if ctx.GlobalIsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.GlobalInt(MinFreeDiskSpaceFlag.Name)
		} else if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

		<-sigc
		log.Info("Got interrupt, shutting down...")
		go stack.Close()
		for i := 10; i > 0; i-- {
			<-sigc
			if i > 1 {
				log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
			}
		}
		debug.Exit() // ensure trace and CPU profile data is flushed.
		debug.LoudPanic("boom")
	}()
}

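// monitorFreeDiskSpace polls the free disk space under the given path once a
// minute and sends SIGTERM on sigc when it drops below freeDiskSpaceCritical,
// warning beforehand when it drops below twice that threshold.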
func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace))
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. Geth will shut down if disk space runs below the critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical))
		}
		time.Sleep(60 * time.Second)
	}
}

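// ImportChain imports blocks from an RLP-encoded export file (gzipped if the
// file name ends in ".gz") into the given chain, in batches of
// importBatchSize blocks. Batches whose blocks are all already present in the
// chain are skipped.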
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// Don't import the genesis block
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if _, err := chain.InsertChain(missing); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}

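// missingBlocks returns the suffix of blocks that is not yet present in the
// chain: for blocks below the current head only block presence is checked,
// while for blocks at or above the head the state must be available as well.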
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check block presence; the state
		// is available at the head.
		if head.NumberU64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)

	return nil
}

// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain to", "file", fn)
	return nil
}

// ImportPreimages imports a batch of exported hash preimages into the database.
// It's part of the deprecated functionality and should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte

		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough have been gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch of preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
// It's part of the deprecated functionality and should be removed in the future.
func ExportPreimages(db ethdb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}

// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
	Magic    string // Always set to 'gethdbdump' for disambiguation
	Version  uint64
	Kind     string
	UnixTime uint64
}

const exportMagic = "gethdbdump"
const (
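	// OpBatchAdd and OpBatchDel identify, for every entry in an export
	// stream, whether the key-value pair should be added to or deleted
	// from the database on import.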
	OpBatchAdd = 0
	OpBatchDel = 1
)

// ImportLDBData imports a batch of snapshot data into the database
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
	log.Info("Importing leveldb data", "file", f)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(f)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(f, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Read the header
	var header exportHeader
	if err := stream.Decode(&header); err != nil {
		return fmt.Errorf("could not decode header: %v", err)
	}
	if header.Magic != exportMagic {
		return errors.New("incompatible data, wrong magic")
	}
	if header.Version != 0 {
		return fmt.Errorf("incompatible version %d (only 0 is supported)", header.Version)
	}
	log.Info("Importing data", "file", f, "type", header.Kind, "data age",
		common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))

	// Import the snapshot in batches to prevent disk thrashing
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
		batch  = db.NewBatch()
	)
	for {
		// Read the next entry
		var (
			op       byte
			key, val []byte
		)
		if err := stream.Decode(&op); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if err := stream.Decode(&key); err != nil {
			return err
		}
		if err := stream.Decode(&val); err != nil {
			return err
		}
		if count < startIndex {
			count++
			continue
		}
		switch op {
		case OpBatchDel:
			batch.Delete(key)
		case OpBatchAdd:
			batch.Put(key, val)
		default:
			return fmt.Errorf("unknown op %d", op)
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		// Check interruption emitted by ctrl+c
		if count%1000 == 0 {
			select {
			case <-interrupt:
				if err := batch.Write(); err != nil {
					return err
				}
				log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
		}
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
		count++
	}
	// Flush the last batch of snapshot data
	if batch.ValueSize() > 0 {
		if err := batch.Write(); err != nil {
			return err
		}
	}
	log.Info("Imported chain data", "file", f, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}

// ChainDataIterator is an interface that wraps all necessary functions to
// iterate over the chain data to be exported.
type ChainDataIterator interface {
	// Next returns the key-value pair for the next entry to export.
	// When the end is reached, it will return (0, nil, nil, false).
	Next() (byte, []byte, []byte, bool)

	// Release releases associated resources. Release should always succeed and can
	// be called multiple times without causing errors.
	Release()
}

// ExportChaindata exports the given kind of chain data into the specified
// file, truncating any data already present in it. If the file name ends in
// ".gz", gzip compression is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
	log.Info("Exporting chain data", "file", fn, "kind", kind)
	defer iter.Release()

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Write the header
	if err := rlp.Encode(writer, &exportHeader{
		Magic:    exportMagic,
		Version:  0,
		Kind:     kind,
		UnixTime: uint64(time.Now().Unix()),
	}); err != nil {
		return err
	}
	// Extract data from the source iterator and dump it out to the file
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
	)
	for {
		op, key, val, ok := iter.Next()
		if !ok {
			break
		}
		if err := rlp.Encode(writer, op); err != nil {
			return err
		}
		if err := rlp.Encode(writer, key); err != nil {
			return err
		}
		if err := rlp.Encode(writer, val); err != nil {
			return err
		}
		if count%1000 == 0 {
			// Check interruption emitted by ctrl+c
			select {
			case <-interrupt:
				log.Info("Chain data exporting interrupted", "file", fn,
					"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
			if time.Since(logged) > 8*time.Second {
				log.Info("Exporting chain data", "file", fn, "kind", kind,
					"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
		count++
	}
	log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}