github.com/ethereum/go-ethereum@v1.16.1/cmd/utils/cmd.go

// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
	"math"
	"math/big"
	"os"
	"os/signal"
	"path/filepath"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/internal/era"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/urfave/cli/v2"
)

const (
	importBatchSize = 2500
)

// ErrImportInterrupted is returned when the user interrupts the import process.
var ErrImportInterrupted = errors.New("interrupted")

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" || runtime.GOOS == "openbsd" {
		// The SameFile check below doesn't work on Windows or OpenBSD.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}

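// StartNode boots up the given protocol stack and spawns a goroutine that
// handles shutdown signals: SIGINT and SIGTERM trigger a graceful close, and
// a background watcher shuts the node down if free disk space under the
// instance directory falls below the configured minimum.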
func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)

		minFreeDiskSpace := 2 * ethconfig.Defaults.TrieDirtyCache // Default 2 * 256MB
		if ctx.IsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.Int(MinFreeDiskSpaceFlag.Name)
		} else if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = 2 * ctx.Int(CacheFlag.Name) * ctx.Int(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

		shutdown := func() {
			log.Info("Got interrupt, shutting down...")
			go stack.Close()
			for i := 10; i > 0; i-- {
				<-sigc
				if i > 1 {
					log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
				}
			}
			debug.Exit() // ensure trace and CPU profile data is flushed.
			debug.LoudPanic("boom")
		}

		if isConsole {
			// In JS console mode, SIGINT is ignored because it's handled by the console.
			// However, SIGTERM still shuts down the node.
			for {
				sig := <-sigc
				if sig == syscall.SIGTERM {
					shutdown()
					return
				}
			}
		} else {
			<-sigc
			shutdown()
		}
	}()
}

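// monitorFreeDiskSpace polls the free disk space under path every 30 seconds,
// warns once it drops below twice the critical level, and sends SIGTERM on
// sigc when it falls below freeDiskSpaceCritical (in bytes).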
func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	if path == "" {
		return
	}
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace), "path", path)
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. Geth will shut down if disk space runs below the critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical), "path", path)
		}
		time.Sleep(30 * time.Second)
	}
}

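// ImportChain reads RLP-encoded blocks from the named file (decompressing on
// the fly when the name ends in ".gz") and inserts them into chain in batches
// of importBatchSize, skipping batches whose blocks are all already known.
// A minimal usage sketch, assuming an already-constructed *core.BlockChain:
//
//	if err := utils.ImportChain(chain, "export.rlp.gz"); err != nil {
//		utils.Fatalf("Import error: %v", err)
//	}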
func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return ErrImportInterrupted
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// don't import the genesis block
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return ErrImportInterrupted
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if failindex, err := chain.InsertChain(missing); err != nil {
			var failnumber uint64
			if failindex > 0 && failindex < len(missing) {
				failnumber = missing[failindex].NumberU64()
			} else {
				failnumber = missing[0].NumberU64()
			}
			return fmt.Errorf("invalid block %d: %v", failnumber, err)
		}
	}
	return nil
}

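// readList returns the contents of the given file split into one string per
// line.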
func readList(filename string) ([]string, error) {
	b, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return strings.Split(string(b), "\n"), nil
}

// ImportHistory imports Era1 files containing historical block information,
// starting from genesis. It assumes the chain segments in the provided Era1
// files are all canonical and verified.
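// The expected directory layout is the one produced by ExportHistory: a set
// of Era1 files plus a checksums.txt listing one hash per file.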
func ImportHistory(chain *core.BlockChain, dir string, network string) error {
	if chain.CurrentSnapBlock().Number.BitLen() != 0 {
		return errors.New("history import only supported when starting from genesis")
	}
	entries, err := era.ReadDir(dir, network)
	if err != nil {
		return fmt.Errorf("error reading %s: %w", dir, err)
	}
	checksums, err := readList(filepath.Join(dir, "checksums.txt"))
	if err != nil {
		return fmt.Errorf("unable to read checksums.txt: %w", err)
	}
	if len(checksums) != len(entries) {
		return fmt.Errorf("expected equal number of checksums and entries, have: %d checksums, %d entries", len(checksums), len(entries))
	}
	var (
		start    = time.Now()
		reported = time.Now()
		imported = 0
		h        = sha256.New()
		buf      = bytes.NewBuffer(nil)
	)
	for i, filename := range entries {
		err := func() error {
			f, err := os.Open(filepath.Join(dir, filename))
			if err != nil {
				return fmt.Errorf("unable to open era: %w", err)
			}
			defer f.Close()

			// Validate checksum.
			if _, err := io.Copy(h, f); err != nil {
				return fmt.Errorf("unable to recalculate checksum: %w", err)
			}
			if have, want := common.BytesToHash(h.Sum(buf.Bytes())).Hex(), checksums[i]; have != want {
				return fmt.Errorf("checksum mismatch: have %s, want %s", have, want)
			}
			h.Reset()
			buf.Reset()

			// Import all block data from Era1.
			e, err := era.From(f)
			if err != nil {
				return fmt.Errorf("error opening era: %w", err)
			}
			it, err := era.NewIterator(e)
			if err != nil {
				return fmt.Errorf("error making era reader: %w", err)
			}
			for it.Next() {
				block, err := it.Block()
				if err != nil {
					return fmt.Errorf("error reading block %d: %w", it.Number(), err)
				}
				if block.Number().BitLen() == 0 {
					continue // skip genesis
				}
				receipts, err := it.Receipts()
				if err != nil {
					return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
				}
				encReceipts := types.EncodeBlockReceiptLists([]types.Receipts{receipts})
				if _, err := chain.InsertReceiptChain([]*types.Block{block}, encReceipts, math.MaxUint64); err != nil {
					return fmt.Errorf("error inserting body %d: %w", it.Number(), err)
				}
				imported++

				// Give the user some feedback that something is happening.
				if time.Since(reported) >= 8*time.Second {
					log.Info("Importing Era files", "head", it.Number(), "imported", imported, "elapsed", common.PrettyDuration(time.Since(start)))
					imported = 0
					reported = time.Now()
				}
			}
			return nil
		}()
		if err != nil {
			return err
		}
	}

	return nil
}

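// missingBlocks returns the tail of the given slice, starting at the first
// block that is not yet present in the local chain. Blocks below the current
// head only need to be known by hash, whereas blocks at or above the head
// must also have their state available.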
func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check block presence; the state
		// is only available at the head.
		if head.Number.Uint64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
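// If fn ends in ".gz", the output is gzip-compressed; such a file can be fed
// back through ImportChain.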
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)

	return nil
}

// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)
	return nil
}

// ExportHistory exports blockchain history into the specified directory,
// following the Era format.
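// Blocks in the range [first, last] are written in groups of step blocks per
// Era1 file; callers typically pass era.MaxEra1Size as step (an assumption
// about standard Era1 files, not something enforced here).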
func ExportHistory(bc *core.BlockChain, dir string, first, last, step uint64) error {
	log.Info("Exporting blockchain history", "dir", dir)
	if head := bc.CurrentBlock().Number.Uint64(); head < last {
		log.Warn("Last block beyond head, setting last = head", "head", head, "last", last)
		last = head
	}
	network := "unknown"
	if name, ok := params.NetworkNames[bc.Config().ChainID.String()]; ok {
		network = name
	}
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return fmt.Errorf("error creating output directory: %w", err)
	}
	var (
		start     = time.Now()
		reported  = time.Now()
		h         = sha256.New()
		buf       = bytes.NewBuffer(nil)
		checksums []string
	)
	td := new(big.Int)
	for i := uint64(0); i < first; i++ {
		td.Add(td, bc.GetHeaderByNumber(i).Difficulty)
	}
	for i := first; i <= last; i += step {
		err := func() error {
			filename := filepath.Join(dir, era.Filename(network, int(i/step), common.Hash{}))
			f, err := os.Create(filename)
			if err != nil {
				return fmt.Errorf("could not create era file: %w", err)
			}
			defer f.Close()

			w := era.NewBuilder(f)
			for j := uint64(0); j < step && j <= last-i; j++ {
				var (
					n     = i + j
					block = bc.GetBlockByNumber(n)
				)
				if block == nil {
					return fmt.Errorf("export failed on #%d: not found", n)
				}
				receipts := bc.GetReceiptsByHash(block.Hash())
				if receipts == nil {
					return fmt.Errorf("export failed on #%d: receipts not found", n)
				}
				td.Add(td, block.Difficulty())
				if err := w.Add(block, receipts, new(big.Int).Set(td)); err != nil {
					return err
				}
			}
			root, err := w.Finalize()
			if err != nil {
				return fmt.Errorf("export failed to finalize %d: %w", i/step, err)
			}
			// Set correct filename with root.
			if err := os.Rename(filename, filepath.Join(dir, era.Filename(network, int(i/step), root))); err != nil {
				return fmt.Errorf("unable to rename era file: %w", err)
			}

			// Compute checksum of entire Era1.
			if _, err := f.Seek(0, io.SeekStart); err != nil {
				return err
			}
			if _, err := io.Copy(h, f); err != nil {
				return fmt.Errorf("unable to calculate checksum: %w", err)
			}
			checksums = append(checksums, common.BytesToHash(h.Sum(buf.Bytes())).Hex())
			h.Reset()
			buf.Reset()
			return nil
		}()
		if err != nil {
			return err
		}
		if time.Since(reported) >= 8*time.Second {
			log.Info("Exporting blocks", "exported", i, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}

	if err := os.WriteFile(filepath.Join(dir, "checksums.txt"), []byte(strings.Join(checksums, "\n")), os.ModePerm); err != nil {
		return fmt.Errorf("unable to write checksums.txt: %w", err)
	}

	log.Info("Exported blockchain", "dir", dir)

	return nil
}

// ImportPreimages imports a batch of exported hash preimages into the database.
// It is part of the deprecated functionality and should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte

		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough were gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch of preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
// It is part of the deprecated functionality and should be removed in the future.
func ExportPreimages(db ethdb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}

// ExportSnapshotPreimages exports the preimages of all account and storage
// hashes found by enumerating the snapshot for the given root.
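// The snapshot is walked in a separate goroutine that streams account and
// storage slot hashes over a channel, while this goroutine resolves each hash
// to its preimage and writes the RLP encoding to the file.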
func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn string, root common.Hash) error {
	log.Info("Exporting preimages", "file", fn)

	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	// Enable gzip compression if the file name has a .gz suffix.
	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		gz := gzip.NewWriter(writer)
		defer gz.Close()
		writer = gz
	}
	buf := bufio.NewWriter(writer)
	defer buf.Flush()
	writer = buf

	type hashAndPreimageSize struct {
		Hash common.Hash
		Size int
	}
	hashCh := make(chan hashAndPreimageSize)

	var (
		start     = time.Now()
		logged    = time.Now()
		preimages int
	)
	go func() {
		defer close(hashCh)
		accIt, err := snaptree.AccountIterator(root, common.Hash{})
		if err != nil {
			log.Error("Failed to create account iterator", "error", err)
			return
		}
		defer accIt.Release()

		for accIt.Next() {
			acc, err := types.FullAccount(accIt.Account())
			if err != nil {
				log.Error("Failed to get full account", "error", err)
				return
			}
			preimages++
			hashCh <- hashAndPreimageSize{Hash: accIt.Hash(), Size: common.AddressLength}

			if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
				stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{})
				if err != nil {
					log.Error("Failed to create storage iterator", "error", err)
					return
				}
				for stIt.Next() {
					preimages++
					hashCh <- hashAndPreimageSize{Hash: stIt.Hash(), Size: common.HashLength}

					if time.Since(logged) > time.Second*8 {
						logged = time.Now()
						log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
					}
				}
				stIt.Release()
			}
			if time.Since(logged) > time.Second*8 {
				logged = time.Now()
				log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
			}
		}
	}()

	for item := range hashCh {
		preimage := rawdb.ReadPreimage(chaindb, item.Hash)
		if len(preimage) == 0 {
			return fmt.Errorf("missing preimage for %v", item.Hash)
		}
		if len(preimage) != item.Size {
			return fmt.Errorf("invalid preimage size, have %d", len(preimage))
		}
		rlpenc, err := rlp.EncodeToBytes(preimage)
		if err != nil {
			return fmt.Errorf("error encoding preimage: %w", err)
		}
		if _, err := writer.Write(rlpenc); err != nil {
			return fmt.Errorf("failed to write preimage: %w", err)
		}
	}
	log.Info("Exported preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)), "file", fn)
	return nil
}

// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
	Magic    string // Always set to 'gethdbdump' for disambiguation
	Version  uint64
	Kind     string
	UnixTime uint64
}

const exportMagic = "gethdbdump"
const (
	OpBatchAdd = 0
	OpBatchDel = 1
)
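
// An export stream, as produced by ExportChaindata and consumed by
// ImportLDBData, is the RLP encoding of an exportHeader followed by a
// sequence of individually RLP-encoded (op, key, value) triples, where op is
// one of the OpBatch* constants above.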

// ImportLDBData imports a batch of exported key/value data into the database.
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
	log.Info("Importing leveldb data", "file", f)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(f)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(f, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Read the header
	var header exportHeader
	if err := stream.Decode(&header); err != nil {
		return fmt.Errorf("could not decode header: %v", err)
	}
	if header.Magic != exportMagic {
		return errors.New("incompatible data, wrong magic")
	}
	if header.Version != 0 {
		return fmt.Errorf("incompatible version %d (only 0 is supported)", header.Version)
	}
	log.Info("Importing data", "file", f, "type", header.Kind, "data age",
		common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))

	// Import the snapshot in batches to prevent disk thrashing
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
		batch  = db.NewBatch()
	)
	for {
		// Read the next entry
		var (
			op       byte
			key, val []byte
		)
		if err := stream.Decode(&op); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if err := stream.Decode(&key); err != nil {
			return err
		}
		if err := stream.Decode(&val); err != nil {
			return err
		}
		if count < startIndex {
			count++
			continue
		}
		switch op {
		case OpBatchDel:
			batch.Delete(key)
		case OpBatchAdd:
			batch.Put(key, val)
		default:
			return fmt.Errorf("unknown op %d", op)
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		// Check for an interrupt emitted by Ctrl-C
		if count%1000 == 0 {
			select {
			case <-interrupt:
				if err := batch.Write(); err != nil {
					return err
				}
				log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
		}
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
		count++
	}
	// Flush the last batch of snapshot data
	if batch.ValueSize() > 0 {
		if err := batch.Write(); err != nil {
			return err
		}
	}
	log.Info("Imported chain data", "file", f, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}

// ChainDataIterator is an interface that wraps the functions needed to iterate
// over the chain data being exported.
type ChainDataIterator interface {
	// Next returns the key-value pair for the next entry to export.
	// When the end is reached, it returns (0, nil, nil, false).
	Next() (byte, []byte, []byte, bool)

	// Release releases associated resources. Release should always succeed and
	// can be called multiple times without error.
	Release()
}

// ExportChaindata exports the given kind of chain data into the specified file,
// truncating any data already present in the file. If the file name has a .gz
// suffix, gzip compression is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
	log.Info("Exporting chain data", "file", fn, "kind", kind)
	defer iter.Release()

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Write the header
	if err := rlp.Encode(writer, &exportHeader{
		Magic:    exportMagic,
		Version:  0,
		Kind:     kind,
		UnixTime: uint64(time.Now().Unix()),
	}); err != nil {
		return err
	}
	// Extract data from the source iterator and dump it out to the file
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
	)
	for {
		op, key, val, ok := iter.Next()
		if !ok {
			break
		}
		if err := rlp.Encode(writer, op); err != nil {
			return err
		}
		if err := rlp.Encode(writer, key); err != nil {
			return err
		}
		if err := rlp.Encode(writer, val); err != nil {
			return err
		}
		if count%1000 == 0 {
			// Check for an interrupt emitted by Ctrl-C
			select {
			case <-interrupt:
				log.Info("Chain data export interrupted", "file", fn,
					"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
			if time.Since(logged) > 8*time.Second {
				log.Info("Exporting chain data", "file", fn, "kind", kind,
					"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
		count++
	}
	log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}