github.com/ethereum/go-ethereum@v1.14.4-0.20240516095835-473ee8fc07a3/cmd/utils/cmd.go

// Copyright 2014 The go-ethereum Authors
// This file is part of go-ethereum.
//
// go-ethereum is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
//
// go-ethereum is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
//
// You should have received a copy of the GNU General Public License
// along with go-ethereum. If not, see <http://www.gnu.org/licenses/>.

// Package utils contains internal helper functions for go-ethereum commands.
package utils

import (
	"bufio"
	"bytes"
	"compress/gzip"
	"crypto/sha256"
	"errors"
	"fmt"
	"io"
	"math"
	"os"
	"os/signal"
	"path/filepath"
	"runtime"
	"strings"
	"syscall"
	"time"

	"github.com/ethereum/go-ethereum/common"
	"github.com/ethereum/go-ethereum/core"
	"github.com/ethereum/go-ethereum/core/rawdb"
	"github.com/ethereum/go-ethereum/core/state/snapshot"
	"github.com/ethereum/go-ethereum/core/types"
	"github.com/ethereum/go-ethereum/crypto"
	"github.com/ethereum/go-ethereum/eth/ethconfig"
	"github.com/ethereum/go-ethereum/ethdb"
	"github.com/ethereum/go-ethereum/internal/debug"
	"github.com/ethereum/go-ethereum/internal/era"
	"github.com/ethereum/go-ethereum/log"
	"github.com/ethereum/go-ethereum/node"
	"github.com/ethereum/go-ethereum/params"
	"github.com/ethereum/go-ethereum/rlp"
	"github.com/urfave/cli/v2"
)

const (
	importBatchSize = 2500
)

// Fatalf formats a message to standard error and exits the program.
// The message is also printed to standard output if standard error
// is redirected to a different file.
func Fatalf(format string, args ...interface{}) {
	w := io.MultiWriter(os.Stdout, os.Stderr)
	if runtime.GOOS == "windows" {
		// The SameFile check below doesn't work on Windows.
		// stdout is unlikely to get redirected though, so just print there.
		w = os.Stdout
	} else {
		outf, _ := os.Stdout.Stat()
		errf, _ := os.Stderr.Stat()
		if outf != nil && errf != nil && os.SameFile(outf, errf) {
			w = os.Stderr
		}
	}
	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
	os.Exit(1)
}
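
// A minimal usage sketch (illustrative, not part of the original file):
// callers use Fatalf for unrecoverable CLI errors, e.g. after a failed
// flag sanity check. The flag names here are hypothetical.
//
//	if ctx.IsSet(DevModeFlag.Name) && ctx.IsSet(MainnetFlag.Name) {
//		Fatalf("Flags --%s and --%s are mutually exclusive", DevModeFlag.Name, MainnetFlag.Name)
//	}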

func StartNode(ctx *cli.Context, stack *node.Node, isConsole bool) {
	if err := stack.Start(); err != nil {
		Fatalf("Error starting protocol stack: %v", err)
	}
	go func() {
		sigc := make(chan os.Signal, 1)
		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
		defer signal.Stop(sigc)

		minFreeDiskSpace := 2 * ethconfig.Defaults.TrieDirtyCache // Default 2 * 256Mb
		if ctx.IsSet(MinFreeDiskSpaceFlag.Name) {
			minFreeDiskSpace = ctx.Int(MinFreeDiskSpaceFlag.Name)
		} else if ctx.IsSet(CacheFlag.Name) || ctx.IsSet(CacheGCFlag.Name) {
			minFreeDiskSpace = 2 * ctx.Int(CacheFlag.Name) * ctx.Int(CacheGCFlag.Name) / 100
		}
		if minFreeDiskSpace > 0 {
			go monitorFreeDiskSpace(sigc, stack.InstanceDir(), uint64(minFreeDiskSpace)*1024*1024)
		}

		shutdown := func() {
			log.Info("Got interrupt, shutting down...")
			go stack.Close()
			for i := 10; i > 0; i-- {
				<-sigc
				if i > 1 {
					log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
				}
			}
			debug.Exit() // ensure trace and CPU profile data is flushed.
			debug.LoudPanic("boom")
		}

		if isConsole {
			// In JS console mode, SIGINT is ignored because it's handled by the console.
			// However, SIGTERM still shuts down the node.
			for {
				sig := <-sigc
				if sig == syscall.SIGTERM {
					shutdown()
					return
				}
			}
		} else {
			<-sigc
			shutdown()
		}
	}()
}
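
// A minimal usage sketch (illustrative, not part of the original file):
// a command action typically builds the node, starts it via StartNode and
// blocks until it is torn down. makeFullNode is a hypothetical helper.
//
//	func runNode(ctx *cli.Context) error {
//		stack := makeFullNode(ctx)
//		defer stack.Close()
//		StartNode(ctx, stack, false) // false: no JS console attached
//		stack.Wait()
//		return nil
//	}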

func monitorFreeDiskSpace(sigc chan os.Signal, path string, freeDiskSpaceCritical uint64) {
	if path == "" {
		return
	}
	for {
		freeSpace, err := getFreeDiskSpace(path)
		if err != nil {
			log.Warn("Failed to get free disk space", "path", path, "err", err)
			break
		}
		if freeSpace < freeDiskSpaceCritical {
			log.Error("Low disk space. Gracefully shutting down Geth to prevent database corruption.", "available", common.StorageSize(freeSpace), "path", path)
			sigc <- syscall.SIGTERM
			break
		} else if freeSpace < 2*freeDiskSpaceCritical {
			log.Warn("Disk space is running low. Geth will shutdown if disk space runs below critical level.", "available", common.StorageSize(freeSpace), "critical_level", common.StorageSize(freeDiskSpaceCritical), "path", path)
		}
		time.Sleep(30 * time.Second)
	}
}

func ImportChain(chain *core.BlockChain, fn string) error {
	// Watch for Ctrl-C while the import is running.
	// If a signal is received, the import will stop at the next batch.
	interrupt := make(chan os.Signal, 1)
	stop := make(chan struct{})
	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
	defer signal.Stop(interrupt)
	defer close(interrupt)
	go func() {
		if _, ok := <-interrupt; ok {
			log.Info("Interrupted during import, stopping at next batch")
		}
		close(stop)
	}()
	checkInterrupt := func() bool {
		select {
		case <-stop:
			return true
		default:
			return false
		}
	}

	log.Info("Importing blockchain", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = fh
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Run the actual import.
	blocks := make(types.Blocks, importBatchSize)
	n := 0
	for batch := 0; ; batch++ {
		// Load a batch of RLP blocks.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		i := 0
		for ; i < importBatchSize; i++ {
			var b types.Block
			if err := stream.Decode(&b); err == io.EOF {
				break
			} else if err != nil {
				return fmt.Errorf("at block %d: %v", n, err)
			}
			// don't import the genesis (first) block
			if b.NumberU64() == 0 {
				i--
				continue
			}
			blocks[i] = &b
			n++
		}
		if i == 0 {
			break
		}
		// Import the batch.
		if checkInterrupt() {
			return errors.New("interrupted")
		}
		missing := missingBlocks(chain, blocks[:i])
		if len(missing) == 0 {
			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
			continue
		}
		if failindex, err := chain.InsertChain(missing); err != nil {
			var failnumber uint64
			if failindex > 0 && failindex < len(missing) {
				failnumber = missing[failindex].NumberU64()
			} else {
				failnumber = missing[0].NumberU64()
			}
			return fmt.Errorf("invalid block %d: %v", failnumber, err)
		}
	}
	return nil
}
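
// A minimal usage sketch (illustrative, not part of the original file):
// importing a chain dump produced by ExportChain below. Batching and
// Ctrl-C handling are internal; the caller only supplies the file, and a
// ".gz" suffix selects gzip decompression. `chain` is an assumed,
// already-initialized *core.BlockChain.
//
//	if err := ImportChain(chain, "chain.rlp.gz"); err != nil {
//		Fatalf("Import error: %v", err)
//	}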

func readList(filename string) ([]string, error) {
	b, err := os.ReadFile(filename)
	if err != nil {
		return nil, err
	}
	return strings.Split(string(b), "\n"), nil
}

// ImportHistory imports Era1 files containing historical block information,
// starting from genesis.
func ImportHistory(chain *core.BlockChain, db ethdb.Database, dir string, network string) error {
	if chain.CurrentSnapBlock().Number.BitLen() != 0 {
		return errors.New("history import only supported when starting from genesis")
	}
	entries, err := era.ReadDir(dir, network)
	if err != nil {
		return fmt.Errorf("error reading %s: %w", dir, err)
	}
	checksums, err := readList(filepath.Join(dir, "checksums.txt"))
	if err != nil {
		return fmt.Errorf("unable to read checksums.txt: %w", err)
	}
	if len(checksums) != len(entries) {
		return fmt.Errorf("expected equal number of checksums and entries, have: %d checksums, %d entries", len(checksums), len(entries))
	}
	var (
		start    = time.Now()
		reported = time.Now()
		imported = 0
		forker   = core.NewForkChoice(chain, nil)
		h        = sha256.New()
		buf      = bytes.NewBuffer(nil)
	)
	for i, filename := range entries {
		err := func() error {
			f, err := os.Open(filepath.Join(dir, filename))
			if err != nil {
				return fmt.Errorf("unable to open era: %w", err)
			}
			defer f.Close()

			// Validate checksum.
			if _, err := io.Copy(h, f); err != nil {
				return fmt.Errorf("unable to recalculate checksum: %w", err)
			}
			if have, want := common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex(), checksums[i]; have != want {
				return fmt.Errorf("checksum mismatch: have %s, want %s", have, want)
			}
			h.Reset()
			buf.Reset()

			// Import all block data from Era1.
			e, err := era.From(f)
			if err != nil {
				return fmt.Errorf("error opening era: %w", err)
			}
			it, err := era.NewIterator(e)
			if err != nil {
				return fmt.Errorf("error making era reader: %w", err)
			}
			for it.Next() {
				block, err := it.Block()
				if err != nil {
					return fmt.Errorf("error reading block %d: %w", it.Number(), err)
				}
				if block.Number().BitLen() == 0 {
					continue // skip genesis
				}
				receipts, err := it.Receipts()
				if err != nil {
					return fmt.Errorf("error reading receipts %d: %w", it.Number(), err)
				}
				if status, err := chain.HeaderChain().InsertHeaderChain([]*types.Header{block.Header()}, start, forker); err != nil {
					return fmt.Errorf("error inserting header %d: %w", it.Number(), err)
				} else if status != core.CanonStatTy {
					return fmt.Errorf("error inserting header %d, not canon: %v", it.Number(), status)
				}
				// Use math.MaxUint64 as the ancient limit so every imported block
				// lands in the freezer. Note that Go parses "2^64-1" as an XOR
				// expression, not as two to the 64th power minus one, so it
				// cannot be used here.
				if _, err := chain.InsertReceiptChain([]*types.Block{block}, []types.Receipts{receipts}, math.MaxUint64); err != nil {
					return fmt.Errorf("error inserting body %d: %w", it.Number(), err)
				}
				imported += 1

				// Give the user some feedback that something is happening.
				if time.Since(reported) >= 8*time.Second {
					log.Info("Importing Era files", "head", it.Number(), "imported", imported, "elapsed", common.PrettyDuration(time.Since(start)))
					imported = 0
					reported = time.Now()
				}
			}
			return nil
		}()
		if err != nil {
			return err
		}
	}

	return nil
}
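
// A minimal usage sketch (illustrative, not part of the original file):
// replaying history from a directory of Era1 archives into a chain that
// is still at genesis. The directory path is hypothetical.
//
//	if err := ImportHistory(chain, db, "/data/era1", "mainnet"); err != nil {
//		Fatalf("History import error: %v", err)
//	}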

func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
	head := chain.CurrentBlock()
	for i, block := range blocks {
		// If we're behind the chain head, only check for block presence;
		// state is only guaranteed to be available at the head.
		if head.Number.Uint64() > block.NumberU64() {
			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
				return blocks[i:]
			}
			continue
		}
		// If we're above the chain head, state availability is a must
		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
			return blocks[i:]
		}
	}
	return nil
}

// ExportChain exports a blockchain into the specified file, truncating any data
// already present in the file.
func ExportChain(blockchain *core.BlockChain, fn string) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.Export(writer); err != nil {
		return err
	}
	log.Info("Exported blockchain", "file", fn)

	return nil
}
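
// A minimal usage sketch (illustrative, not part of the original file):
// writing a full chain dump that ImportChain above can read back; the
// ".gz" suffix selects gzip compression of the RLP stream.
//
//	if err := ExportChain(chain, "chain.rlp.gz"); err != nil {
//		Fatalf("Export error: %v", err)
//	}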

// ExportAppendChain exports a blockchain into the specified file, appending to
// the file if data already exists in it.
func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
	log.Info("Exporting blockchain", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the blocks and export them
	if err := blockchain.ExportN(writer, first, last); err != nil {
		return err
	}
	log.Info("Exported blockchain to", "file", fn)
	return nil
}
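
// A minimal usage sketch (illustrative, not part of the original file):
// exporting a chain in resumable windows by appending successive block
// ranges to the same file. The window size is arbitrary.
//
//	head := chain.CurrentBlock().Number.Uint64()
//	for first := uint64(0); first <= head; first += 100_000 {
//		last := min(first+100_000-1, head) // min builtin, Go 1.21+
//		if err := ExportAppendChain(chain, "chain.rlp", first, last); err != nil {
//			Fatalf("Export error: %v", err)
//		}
//	}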

// ExportHistory exports blockchain history into the specified directory,
// following the Era format.
func ExportHistory(bc *core.BlockChain, dir string, first, last, step uint64) error {
	log.Info("Exporting blockchain history", "dir", dir)
	if head := bc.CurrentBlock().Number.Uint64(); head < last {
		log.Warn("Last block beyond head, setting last = head", "head", head, "last", last)
		last = head
	}
	network := "unknown"
	if name, ok := params.NetworkNames[bc.Config().ChainID.String()]; ok {
		network = name
	}
	if err := os.MkdirAll(dir, os.ModePerm); err != nil {
		return fmt.Errorf("error creating output directory: %w", err)
	}
	var (
		start     = time.Now()
		reported  = time.Now()
		h         = sha256.New()
		buf       = bytes.NewBuffer(nil)
		checksums []string
	)
	for i := first; i <= last; i += step {
		err := func() error {
			filename := filepath.Join(dir, era.Filename(network, int(i/step), common.Hash{}))
			f, err := os.Create(filename)
			if err != nil {
				return fmt.Errorf("could not create era file: %w", err)
			}
			defer f.Close()

			w := era.NewBuilder(f)
			for j := uint64(0); j < step && j <= last-i; j++ {
				var (
					n     = i + j
					block = bc.GetBlockByNumber(n)
				)
				if block == nil {
					return fmt.Errorf("export failed on #%d: not found", n)
				}
				receipts := bc.GetReceiptsByHash(block.Hash())
				if receipts == nil {
					return fmt.Errorf("export failed on #%d: receipts not found", n)
				}
				td := bc.GetTd(block.Hash(), block.NumberU64())
				if td == nil {
					return fmt.Errorf("export failed on #%d: total difficulty not found", n)
				}
				if err := w.Add(block, receipts, td); err != nil {
					return err
				}
			}
			root, err := w.Finalize()
			if err != nil {
				return fmt.Errorf("export failed to finalize era %d: %w", i/step, err)
			}
			// Set correct filename with root.
			if err := os.Rename(filename, filepath.Join(dir, era.Filename(network, int(i/step), root))); err != nil {
				return fmt.Errorf("unable to rename era file: %w", err)
			}

			// Compute checksum of entire Era1.
			if _, err := f.Seek(0, io.SeekStart); err != nil {
				return err
			}
			if _, err := io.Copy(h, f); err != nil {
				return fmt.Errorf("unable to calculate checksum: %w", err)
			}
			checksums = append(checksums, common.BytesToHash(h.Sum(buf.Bytes()[:])).Hex())
			h.Reset()
			buf.Reset()
			return nil
		}()
		if err != nil {
			return err
		}
		if time.Since(reported) >= 8*time.Second {
			log.Info("Exporting blocks", "exported", i, "elapsed", common.PrettyDuration(time.Since(start)))
			reported = time.Now()
		}
	}

	if err := os.WriteFile(filepath.Join(dir, "checksums.txt"), []byte(strings.Join(checksums, "\n")), os.ModePerm); err != nil {
		return fmt.Errorf("unable to write checksums.txt: %w", err)
	}

	log.Info("Exported blockchain to", "dir", dir)

	return nil
}
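
// A minimal usage sketch (illustrative, not part of the original file):
// exporting the first million blocks as Era1 archives with the standard
// 8192-block epoch size; the output directory is hypothetical.
//
//	if err := ExportHistory(chain, "/data/era1", 0, 1_000_000, 8192); err != nil {
//		Fatalf("History export error: %v", err)
//	}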

// ImportPreimages imports a batch of exported hash preimages into the database.
// It is part of the deprecated functionality and should be removed in the future.
func ImportPreimages(db ethdb.Database, fn string) error {
	log.Info("Importing preimages", "file", fn)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(fn)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(fn, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Import the preimages in batches to prevent disk thrashing
	preimages := make(map[common.Hash][]byte)

	for {
		// Read the next entry and ensure it's not junk
		var blob []byte

		if err := stream.Decode(&blob); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		// Accumulate the preimages and flush when enough have been gathered
		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
		if len(preimages) > 1024 {
			rawdb.WritePreimages(db, preimages)
			preimages = make(map[common.Hash][]byte)
		}
	}
	// Flush the last batch of preimage data
	if len(preimages) > 0 {
		rawdb.WritePreimages(db, preimages)
	}
	return nil
}

// ExportPreimages exports all known hash preimages into the specified file,
// truncating any data already present in the file.
// It is part of the deprecated functionality and should be removed in the future.
func ExportPreimages(db ethdb.Database, fn string) error {
	log.Info("Exporting preimages", "file", fn)

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Iterate over the preimages and export them
	it := db.NewIterator([]byte("secure-key-"), nil)
	defer it.Release()

	for it.Next() {
		if err := rlp.Encode(writer, it.Value()); err != nil {
			return err
		}
	}
	log.Info("Exported preimages", "file", fn)
	return nil
}
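
// A minimal usage sketch (illustrative, not part of the original file):
// a preimage round trip between two databases. The dump is a bare RLP
// stream of preimage blobs; ImportPreimages re-derives each key as
// keccak256(blob), so no keys are stored in the file.
//
//	if err := ExportPreimages(db, "preimages.rlp.gz"); err != nil {
//		Fatalf("Export error: %v", err)
//	}
//	if err := ImportPreimages(otherDB, "preimages.rlp.gz"); err != nil {
//		Fatalf("Import error: %v", err)
//	}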

// ExportSnapshotPreimages exports the preimages corresponding to the enumeration of
// the snapshot for a given root.
func ExportSnapshotPreimages(chaindb ethdb.Database, snaptree *snapshot.Tree, fn string, root common.Hash) error {
	log.Info("Exporting preimages", "file", fn)

	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()
	// Enable gzip compression if the file name has a .gz suffix.
	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		gz := gzip.NewWriter(writer)
		defer gz.Close()
		writer = gz
	}
	buf := bufio.NewWriter(writer)
	defer buf.Flush()
	writer = buf

	type hashAndPreimageSize struct {
		Hash common.Hash
		Size int
	}
	hashCh := make(chan hashAndPreimageSize)

	var (
		start     = time.Now()
		logged    = time.Now()
		preimages int
	)
	go func() {
		defer close(hashCh)
		accIt, err := snaptree.AccountIterator(root, common.Hash{})
		if err != nil {
			log.Error("Failed to create account iterator", "error", err)
			return
		}
		defer accIt.Release()

		for accIt.Next() {
			acc, err := types.FullAccount(accIt.Account())
			if err != nil {
				log.Error("Failed to get full account", "error", err)
				return
			}
			preimages += 1
			hashCh <- hashAndPreimageSize{Hash: accIt.Hash(), Size: common.AddressLength}

			if acc.Root != (common.Hash{}) && acc.Root != types.EmptyRootHash {
				stIt, err := snaptree.StorageIterator(root, accIt.Hash(), common.Hash{})
				if err != nil {
					log.Error("Failed to create storage iterator", "error", err)
					return
				}
				for stIt.Next() {
					preimages += 1
					hashCh <- hashAndPreimageSize{Hash: stIt.Hash(), Size: common.HashLength}

					if time.Since(logged) > time.Second*8 {
						logged = time.Now()
						log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
					}
				}
				stIt.Release()
			}
			if time.Since(logged) > time.Second*8 {
				logged = time.Now()
				log.Info("Exporting preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)))
			}
		}
	}()

	for item := range hashCh {
		preimage := rawdb.ReadPreimage(chaindb, item.Hash)
		if len(preimage) == 0 {
			return fmt.Errorf("missing preimage for %v", item.Hash)
		}
		if len(preimage) != item.Size {
			return fmt.Errorf("invalid preimage size, have %d", len(preimage))
		}
		rlpenc, err := rlp.EncodeToBytes(preimage)
		if err != nil {
			return fmt.Errorf("error encoding preimage: %w", err)
		}
		if _, err := writer.Write(rlpenc); err != nil {
			return fmt.Errorf("failed to write preimage: %w", err)
		}
	}
	log.Info("Exported preimages", "count", preimages, "elapsed", common.PrettyDuration(time.Since(start)), "file", fn)
	return nil
}
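
// A minimal usage sketch (illustrative, not part of the original file):
// exporting only the preimages reachable from the current state root, as
// enumerated by the snapshot tree; assumes the blockchain exposes its
// snapshot tree via a Snapshots() accessor.
//
//	root := chain.CurrentBlock().Root
//	if err := ExportSnapshotPreimages(db, chain.Snapshots(), "preimages.rlp.gz", root); err != nil {
//		Fatalf("Export error: %v", err)
//	}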

// exportHeader is used in the export/import flow. When we do an export,
// the first element we output is the exportHeader.
// Whenever a backwards-incompatible change is made, the Version header
// should be bumped.
// If the importer sees a higher version, it should reject the import.
type exportHeader struct {
	Magic    string // Always set to 'gethdbdump' for disambiguation
	Version  uint64
	Kind     string
	UnixTime uint64
}

const exportMagic = "gethdbdump"
const (
	OpBatchAdd = 0
	OpBatchDel = 1
)
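
// A minimal sketch (illustrative, not part of the original file) of the dump
// layout the helpers below agree on: one RLP-encoded exportHeader, followed
// by (op, key, value) triples until EOF. A reader can sniff the header before
// committing to a full import; the file name is hypothetical.
//
//	fh, err := os.Open("dump.rlp")
//	if err != nil {
//		Fatalf("Open error: %v", err)
//	}
//	stream := rlp.NewStream(bufio.NewReader(fh), 0)
//	var header exportHeader
//	if err := stream.Decode(&header); err != nil || header.Magic != exportMagic {
//		Fatalf("Not a geth db dump")
//	}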

// ImportLDBData imports a batch of snapshot data into the database
func ImportLDBData(db ethdb.Database, f string, startIndex int64, interrupt chan struct{}) error {
	log.Info("Importing leveldb data", "file", f)

	// Open the file handle and potentially unwrap the gzip stream
	fh, err := os.Open(f)
	if err != nil {
		return err
	}
	defer fh.Close()

	var reader io.Reader = bufio.NewReader(fh)
	if strings.HasSuffix(f, ".gz") {
		if reader, err = gzip.NewReader(reader); err != nil {
			return err
		}
	}
	stream := rlp.NewStream(reader, 0)

	// Read the header
	var header exportHeader
	if err := stream.Decode(&header); err != nil {
		return fmt.Errorf("could not decode header: %v", err)
	}
	if header.Magic != exportMagic {
		return errors.New("incompatible data, wrong magic")
	}
	if header.Version != 0 {
		return fmt.Errorf("incompatible version %d (supported: 0)", header.Version)
	}
	log.Info("Importing data", "file", f, "type", header.Kind, "data age",
		common.PrettyDuration(time.Since(time.Unix(int64(header.UnixTime), 0))))

	// Import the snapshot in batches to prevent disk thrashing
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
		batch  = db.NewBatch()
	)
	for {
		// Read the next entry
		var (
			op       byte
			key, val []byte
		)
		if err := stream.Decode(&op); err != nil {
			if err == io.EOF {
				break
			}
			return err
		}
		if err := stream.Decode(&key); err != nil {
			return err
		}
		if err := stream.Decode(&val); err != nil {
			return err
		}
		if count < startIndex {
			count++
			continue
		}
		switch op {
		case OpBatchDel:
			batch.Delete(key)
		case OpBatchAdd:
			batch.Put(key, val)
		default:
			return fmt.Errorf("unknown op %d", op)
		}
		if batch.ValueSize() > ethdb.IdealBatchSize {
			if err := batch.Write(); err != nil {
				return err
			}
			batch.Reset()
		}
		// Check interruption emitted by ctrl+c
		if count%1000 == 0 {
			select {
			case <-interrupt:
				if err := batch.Write(); err != nil {
					return err
				}
				log.Info("External data import interrupted", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
		}
		if count%1000 == 0 && time.Since(logged) > 8*time.Second {
			log.Info("Importing external data", "file", f, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
			logged = time.Now()
		}
		count += 1
	}
	// Flush the last batch snapshot data
	if batch.ValueSize() > 0 {
		if err := batch.Write(); err != nil {
			return err
		}
	}
	log.Info("Imported chain data", "file", f, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}
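
// A minimal usage sketch (illustrative, not part of the original file):
// re-importing a dump written by ExportChaindata below, starting at entry
// zero and wiring an interrupt channel so a signal handler can make the
// import flush its batch and stop cleanly.
//
//	stop := make(chan struct{})
//	// close(stop) from a signal handler to interrupt the import
//	if err := ImportLDBData(db, "dump.rlp.gz", 0, stop); err != nil {
//		Fatalf("Import error: %v", err)
//	}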

// ChainDataIterator is an interface that wraps all necessary functions to
// iterate over the chain data to be exported.
type ChainDataIterator interface {
	// Next returns the operation, key and value for the next entry to export.
	// When the end is reached, it will return (0, nil, nil, false).
	Next() (byte, []byte, []byte, bool)

	// Release releases associated resources. Release should always succeed and can
	// be called multiple times without causing errors.
	Release()
}
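
// A minimal implementation sketch (illustrative, not part of the original
// file): an iterator that replays a fixed set of OpBatchAdd entries from
// memory. Real callers wrap a database iterator instead.
//
//	type sliceIterator struct {
//		keys, vals [][]byte
//		pos        int
//	}
//
//	func (s *sliceIterator) Next() (byte, []byte, []byte, bool) {
//		if s.pos >= len(s.keys) {
//			return 0, nil, nil, false
//		}
//		k, v := s.keys[s.pos], s.vals[s.pos]
//		s.pos++
//		return OpBatchAdd, k, v, true
//	}
//
//	func (s *sliceIterator) Release() {}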

// ExportChaindata exports the given kind of chain data to the specified file,
// truncating any data already present in the file. If the file name has a
// ".gz" suffix, gzip compression is used.
func ExportChaindata(fn string, kind string, iter ChainDataIterator, interrupt chan struct{}) error {
	log.Info("Exporting chain data", "file", fn, "kind", kind)
	defer iter.Release()

	// Open the file handle and potentially wrap with a gzip stream
	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
	if err != nil {
		return err
	}
	defer fh.Close()

	var writer io.Writer = fh
	if strings.HasSuffix(fn, ".gz") {
		writer = gzip.NewWriter(writer)
		defer writer.(*gzip.Writer).Close()
	}
	// Write the header
	if err := rlp.Encode(writer, &exportHeader{
		Magic:    exportMagic,
		Version:  0,
		Kind:     kind,
		UnixTime: uint64(time.Now().Unix()),
	}); err != nil {
		return err
	}
	// Extract data from source iterator and dump them out to file
	var (
		count  int64
		start  = time.Now()
		logged = time.Now()
	)
	for {
		op, key, val, ok := iter.Next()
		if !ok {
			break
		}
		if err := rlp.Encode(writer, op); err != nil {
			return err
		}
		if err := rlp.Encode(writer, key); err != nil {
			return err
		}
		if err := rlp.Encode(writer, val); err != nil {
			return err
		}
		if count%1000 == 0 {
			// Check interruption emitted by ctrl+c
			select {
			case <-interrupt:
				log.Info("Chain data exporting interrupted", "file", fn,
					"kind", kind, "count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				return nil
			default:
			}
			if time.Since(logged) > 8*time.Second {
				log.Info("Exporting chain data", "file", fn, "kind", kind,
					"count", count, "elapsed", common.PrettyDuration(time.Since(start)))
				logged = time.Now()
			}
		}
		count++
	}
	log.Info("Exported chain data", "file", fn, "kind", kind, "count", count,
		"elapsed", common.PrettyDuration(time.Since(start)))
	return nil
}
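
// A minimal usage sketch (illustrative, not part of the original file):
// exporting key-value data through the iterator interface, e.g. with the
// sliceIterator sketched after ChainDataIterator above. The kind string is
// free-form and is echoed back on import.
//
//	iter := &sliceIterator{keys: keys, vals: vals}
//	stop := make(chan struct{})
//	if err := ExportChaindata("dump.rlp.gz", "preimages", iter, stop); err != nil {
//		Fatalf("Export error: %v", err)
//	}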