github.com/aidoskuneen/adk-node@v0.0.0-20220315131952-2e32567cb7f4/cmd/utils/cmd.go

// Copyright 2021 The adkgo Authors
// This file is part of adkgo.
//
// adkgo is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// adkgo is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with adkgo. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for adkgo commands.
package utils

import (
	"compress/gzip"
	"fmt"
	"io"
	"os"
	"os/signal"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/aidoskuneen/adk-node/common"
	"github.com/aidoskuneen/adk-node/core"
	"github.com/aidoskuneen/adk-node/core/rawdb"
	"github.com/aidoskuneen/adk-node/core/types"
	"github.com/aidoskuneen/adk-node/crypto"
	"github.com/aidoskuneen/adk-node/eth/ethconfig"
	"github.com/aidoskuneen/adk-node/ethdb"
	"github.com/aidoskuneen/adk-node/internal/debug"
	"github.com/aidoskuneen/adk-node/log"
	"github.com/aidoskuneen/adk-node/node"
	"github.com/aidoskuneen/adk-node/rlp"
	"gopkg.in/urfave/cli.v1"
)

const (
	// importBatchSize is the number of blocks decoded and inserted per
	// batch during chain import.
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}
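
// Illustrative sketch (not part of the original file): command code aborts on
// unrecoverable errors through Fatalf. The helper below is hypothetical and
// only demonstrates the calling convention.
func exampleMustOpen(path string) *os.File {
	f, err := os.Open(path)
	if err != nil {
		// Prints "Fatal: ..." and terminates the process with exit code 1.
		Fatalf("Failed to open %s: %v", path, err)
	}
	return f
}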

// StartNode boots up the protocol stack and spawns a handler that shuts the
// node down cleanly on SIGINT/SIGTERM, force-panicking if interrupted
// repeatedly during shutdown.
func StartNode(ctx *cli.Context, stack *node.Node) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)

		// Work out the minimum free disk space to require: an explicit
		// flag wins, otherwise derive it from the cache flags.
		minFreeDiskSpace := ethconfig.Defaults.TrieDirtyCache
		if ctx.GlobalIsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.GlobalInt(MinFreeDiskSpaceFlag.Name)
		} else if ctx.GlobalIsSet(CacheFlag.Name) || ctx.GlobalIsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = ctx.GlobalInt(CacheFlag.Name) * ctx.GlobalInt(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

		<-sigc
		log.Info("Got interrupt, shutting down...")
		go stack.Close()
		for i := 10; i > 0; i-- {
			<-sigc
			if i > 1 {
				log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
			}
		}
		debug.Exit() // ensure trace and CPU profile data is flushed.
		debug.LoudPanic("boom")
	}()
}
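
// Illustrative sketch (not part of the original file): a urfave/cli command
// action would start the assembled stack through StartNode and then block
// until the node is torn down. Building the stack itself is omitted here.
func exampleRunNode(ctx *cli.Context, stack *node.Node) {
	StartNode(ctx, stack) // installs the interrupt handler above
	stack.Wait()          // returns once the node has been terminated
}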

// monitorFreeDiskSpace polls the free disk space of path once a minute. It
// warns once free space drops below twice the critical level and requests a
// graceful shutdown through sigc once it drops below the critical level
// itself.
func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down adkgo to prevent database corruption.", "available", common.StorageSize(freeSpace))
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. adkgo will shut down if disk space runs below the critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical))
		}
		time.Sleep(60 * time.Second)
	}
}
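
// Worked example (illustrative numbers, not in the original file): with
// CacheFlag set to 4096 (MB) and CacheGCFlag to 25 (%), StartNode derives
// 4096 * 25 / 100 = 1024 MB, so the monitor requests shutdown below ~1 GiB
// of free space and warns below ~2 GiB.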

// ImportChain imports blocks from an RLP-encoded dump file (optionally
// gzipped) into the given blockchain, inserting importBatchSize blocks at a
// time and aborting at the next batch boundary on SIGINT/SIGTERM.
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// Don't import the genesis block (number 0).
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return fmt.Errorf("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if _, err := chain.InsertChain(missing); err != nil {
			return fmt.Errorf("invalid block %d: %v", n, err)
		}
	}
	return nil
}
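
// Illustrative sketch (not part of the original file): importing a dump that
// was produced by ExportChain below; the ".gz" suffix selects gzip framing on
// both sides. The file name is made up.
func exampleImport(chain *core.BlockChain) {
	if err := ImportChain(chain, "chain.rlp.gz"); err != nil {
		Fatalf("Import error: %v", err)
	}
}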

// missingBlocks returns the trailing portion of blocks that is not yet
// present in the local chain, starting at the first gap.
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// Below the chain head, only the block itself needs to be present;
		// state is only maintained around the head.
		if head.NumberU64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// At or above the chain head, state availability is a must.
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)

	return nil
}

// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain to", "file", fn)
	return nil
}
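
// Illustrative sketch (not part of the original file): range exports let a
// dump grow incrementally, e.g. appending blocks 1000-1999 to a file that
// already holds 0-999. The numbers and file name are made up.
func exampleExportRange(chain *core.BlockChain) {
	if err := ExportAppendChain(chain, "chain.rlp", 1000, 1999); err != nil {
		Fatalf("Export error: %v", err)
	}
}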

// ImportPreimages imports a batch of exported hash preimages into the database.
func ImportPreimages(db ethdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte

		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush once enough were gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch of preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}
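
// Illustrative sketch (not part of the original file): the invariant upheld
// by the import loop above is that each entry is keyed by the Keccak-256
// hash of its payload, so the database maps hash -> preimage.
func examplePreimageKey(blob []byte) common.Hash {
	return crypto.Keccak256Hash(blob) // the key the blob is written under
}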

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
func ExportPreimages(db ethdb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages (stored under the "secure-key-" prefix) and export them
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}
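
// Illustrative sketch (not part of the original file): a dump written here
// can be restored with ImportPreimages above; both honour the ".gz" suffix.
// The file name is made up.
func exampleExportPreimages(db ethdb.Database) {
	if err := ExportPreimages(db, "preimages.rlp.gz"); err != nil {
		Fatalf("Export error: %v", err)
	}
}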