github.com/linapex/ethereum-dpos-chinese@v0.0.0-20190316121959-b78b3a4a1ece/cmd/utils/cmd.go (about)

     1  
     2  //<developer>
     3  //    <name>linapex 曹一峰</name>
     4  //    <email>linapex@163.com</email>
     5  //    <wx>superexc</wx>
     6  //    <qqgroup>128148617</qqgroup>
     7  //    <url>https://jsq.ink</url>
     8  //    <role>pku engineer</role>
     9  //    <date>2019-03-16 12:09:31</date>
    10  //</624342606737838080>
    11  
    12  
    13  //
    14  package utils
    15  
    16  import (
    17  	"compress/gzip"
    18  	"fmt"
    19  	"io"
    20  	"os"
    21  	"os/signal"
    22  	"runtime"
    23  	"strings"
    24  	"syscall"
    25  
    26  	"github.com/ethereum/go-ethereum/common"
    27  	"github.com/ethereum/go-ethereum/core"
    28  	"github.com/ethereum/go-ethereum/core/rawdb"
    29  	"github.com/ethereum/go-ethereum/core/types"
    30  	"github.com/ethereum/go-ethereum/crypto"
    31  	"github.com/ethereum/go-ethereum/ethdb"
    32  	"github.com/ethereum/go-ethereum/internal/debug"
    33  	"github.com/ethereum/go-ethereum/log"
    34  	"github.com/ethereum/go-ethereum/node"
    35  	"github.com/ethereum/go-ethereum/rlp"
    36  )
    37  
const (
	// importBatchSize is the number of blocks decoded and inserted per
	// batch during ImportChain.
	importBatchSize = 2500
)
    41  
    42  //fatalf将消息格式化为标准错误并退出程序。
    43  //如果标准错误,消息也会打印到标准输出。
    44  //已重定向到其他文件。
    45  func Fatalf(format string, args ...interface{}) {
    46  	w := io.MultiWriter(os.Stdout, os.Stderr)
    47  	if runtime.GOOS == "windows" {
    48  //
    49  //
    50  		w = os.Stdout
    51  	} else {
    52  		outf, _ := os.Stdout.Stat()
    53  		errf, _ := os.Stderr.Stat()
    54  		if outf != nil && errf != nil && os.SameFile(outf, errf) {
    55  			w = os.Stderr
    56  		}
    57  	}
    58  	fmt.Fprintf(w, "Fatal: "+format+"\n", args...)
    59  	os.Exit(1)
    60  }
    61  
    62  func StartNode(stack *node.Node) {
    63  	if err := stack.Start(); err != nil {
    64  		Fatalf("Error starting protocol stack: %v", err)
    65  	}
    66  	go func() {
    67  		sigc := make(chan os.Signal, 1)
    68  		signal.Notify(sigc, syscall.SIGINT, syscall.SIGTERM)
    69  		defer signal.Stop(sigc)
    70  		<-sigc
    71  		log.Info("Got interrupt, shutting down...")
    72  		go stack.Stop()
    73  		for i := 10; i > 0; i-- {
    74  			<-sigc
    75  			if i > 1 {
    76  				log.Warn("Already shutting down, interrupt more to panic.", "times", i-1)
    77  			}
    78  		}
    79  debug.Exit() //确保刷新跟踪和CPU配置文件数据。
    80  		debug.LoudPanic("boom")
    81  	}()
    82  }
    83  
    84  func ImportChain(chain *core.BlockChain, fn string) error {
    85  //当导入正在运行时,请注意ctrl-c。
    86  //如果收到信号,导入将在下一批停止。
    87  	interrupt := make(chan os.Signal, 1)
    88  	stop := make(chan struct{})
    89  	signal.Notify(interrupt, syscall.SIGINT, syscall.SIGTERM)
    90  	defer signal.Stop(interrupt)
    91  	defer close(interrupt)
    92  	go func() {
    93  		if _, ok := <-interrupt; ok {
    94  			log.Info("Interrupted during import, stopping at next batch")
    95  		}
    96  		close(stop)
    97  	}()
    98  	checkInterrupt := func() bool {
    99  		select {
   100  		case <-stop:
   101  			return true
   102  		default:
   103  			return false
   104  		}
   105  	}
   106  
   107  	log.Info("Importing blockchain", "file", fn)
   108  
   109  //打开文件句柄并可能打开gzip流
   110  	fh, err := os.Open(fn)
   111  	if err != nil {
   112  		return err
   113  	}
   114  	defer fh.Close()
   115  
   116  	var reader io.Reader = fh
   117  	if strings.HasSuffix(fn, ".gz") {
   118  		if reader, err = gzip.NewReader(reader); err != nil {
   119  			return err
   120  		}
   121  	}
   122  	stream := rlp.NewStream(reader, 0)
   123  
   124  //实际运行导入。
   125  	blocks := make(types.Blocks, importBatchSize)
   126  	n := 0
   127  	for batch := 0; ; batch++ {
   128  //加载一批RLP块。
   129  		if checkInterrupt() {
   130  			return fmt.Errorf("interrupted")
   131  		}
   132  		i := 0
   133  		for ; i < importBatchSize; i++ {
   134  			var b types.Block
   135  			if err := stream.Decode(&b); err == io.EOF {
   136  				break
   137  			} else if err != nil {
   138  				return fmt.Errorf("at block %d: %v", n, err)
   139  			}
   140  //不导入第一个块
   141  			if b.NumberU64() == 0 {
   142  				i--
   143  				continue
   144  			}
   145  			blocks[i] = &b
   146  			n++
   147  		}
   148  		if i == 0 {
   149  			break
   150  		}
   151  //
   152  		if checkInterrupt() {
   153  			return fmt.Errorf("interrupted")
   154  		}
   155  		missing := missingBlocks(chain, blocks[:i])
   156  		if len(missing) == 0 {
   157  			log.Info("Skipping batch as all blocks present", "batch", batch, "first", blocks[0].Hash(), "last", blocks[i-1].Hash())
   158  			continue
   159  		}
   160  		if _, err := chain.InsertChain(missing); err != nil {
   161  			return fmt.Errorf("invalid block %d: %v", n, err)
   162  		}
   163  	}
   164  	return nil
   165  }
   166  
   167  func missingBlocks(chain *core.BlockChain, blocks []*types.Block) []*types.Block {
   168  	head := chain.CurrentBlock()
   169  	for i, block := range blocks {
   170  //
   171  		if head.NumberU64() > block.NumberU64() {
   172  			if !chain.HasBlock(block.Hash(), block.NumberU64()) {
   173  				return blocks[i:]
   174  			}
   175  			continue
   176  		}
   177  //如果我们在链头之上,状态可用性是必须的
   178  		if !chain.HasBlockAndState(block.Hash(), block.NumberU64()) {
   179  			return blocks[i:]
   180  		}
   181  	}
   182  	return nil
   183  }
   184  
   185  //exportchain将区块链导出到指定文件中,截断任何数据
   186  //
   187  func ExportChain(blockchain *core.BlockChain, fn string) error {
   188  	log.Info("Exporting blockchain", "file", fn)
   189  
   190  //
   191  	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
   192  	if err != nil {
   193  		return err
   194  	}
   195  	defer fh.Close()
   196  
   197  	var writer io.Writer = fh
   198  	if strings.HasSuffix(fn, ".gz") {
   199  		writer = gzip.NewWriter(writer)
   200  		defer writer.(*gzip.Writer).Close()
   201  	}
   202  //遍历块并导出它们
   203  	if err := blockchain.Export(writer); err != nil {
   204  		return err
   205  	}
   206  	log.Info("Exported blockchain", "file", fn)
   207  
   208  	return nil
   209  }
   210  
   211  //
   212  //文件中已存在数据。
   213  func ExportAppendChain(blockchain *core.BlockChain, fn string, first uint64, last uint64) error {
   214  	log.Info("Exporting blockchain", "file", fn)
   215  
   216  //打开文件句柄并可能使用gzip流进行包装
   217  	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_APPEND|os.O_WRONLY, os.ModePerm)
   218  	if err != nil {
   219  		return err
   220  	}
   221  	defer fh.Close()
   222  
   223  	var writer io.Writer = fh
   224  	if strings.HasSuffix(fn, ".gz") {
   225  		writer = gzip.NewWriter(writer)
   226  		defer writer.(*gzip.Writer).Close()
   227  	}
   228  //遍历块并导出它们
   229  	if err := blockchain.ExportN(writer, first, last); err != nil {
   230  		return err
   231  	}
   232  	log.Info("Exported blockchain to", "file", fn)
   233  	return nil
   234  }
   235  
   236  //importpreimages将一批导出的哈希预映像导入数据库。
   237  func ImportPreimages(db *ethdb.LDBDatabase, fn string) error {
   238  	log.Info("Importing preimages", "file", fn)
   239  
   240  //打开文件句柄并可能打开gzip流
   241  	fh, err := os.Open(fn)
   242  	if err != nil {
   243  		return err
   244  	}
   245  	defer fh.Close()
   246  
   247  	var reader io.Reader = fh
   248  	if strings.HasSuffix(fn, ".gz") {
   249  		if reader, err = gzip.NewReader(reader); err != nil {
   250  			return err
   251  		}
   252  	}
   253  	stream := rlp.NewStream(reader, 0)
   254  
   255  //批量导入预映像以防止磁盘损坏
   256  	preimages := make(map[common.Hash][]byte)
   257  
   258  	for {
   259  //阅读下一个条目并确保它不是垃圾
   260  		var blob []byte
   261  
   262  		if err := stream.Decode(&blob); err != nil {
   263  			if err == io.EOF {
   264  				break
   265  			}
   266  			return err
   267  		}
   268  //积累预映像并在收集足够的WS时刷新
   269  		preimages[crypto.Keccak256Hash(blob)] = common.CopyBytes(blob)
   270  		if len(preimages) > 1024 {
   271  			rawdb.WritePreimages(db, 0, preimages)
   272  			preimages = make(map[common.Hash][]byte)
   273  		}
   274  	}
   275  //刷新上一批预映像数据
   276  	if len(preimages) > 0 {
   277  		rawdb.WritePreimages(db, 0, preimages)
   278  	}
   279  	return nil
   280  }
   281  
   282  //exportpreimages将所有已知的哈希preimages导出到指定的文件中,
   283  //
   284  func ExportPreimages(db *ethdb.LDBDatabase, fn string) error {
   285  	log.Info("Exporting preimages", "file", fn)
   286  
   287  //打开文件句柄并可能使用gzip流进行包装
   288  	fh, err := os.OpenFile(fn, os.O_CREATE|os.O_WRONLY|os.O_TRUNC, os.ModePerm)
   289  	if err != nil {
   290  		return err
   291  	}
   292  	defer fh.Close()
   293  
   294  	var writer io.Writer = fh
   295  	if strings.HasSuffix(fn, ".gz") {
   296  		writer = gzip.NewWriter(writer)
   297  		defer writer.(*gzip.Writer).Close()
   298  	}
   299  //迭代预映像并导出它们
   300  	it := db.NewIteratorWithPrefix([]byte("secure-key-"))
   301  	for it.Next() {
   302  		if err := rlp.Encode(writer, it.Value()); err != nil {
   303  			return err
   304  		}
   305  	}
   306  	log.Info("Exported preimages", "file", fn)
   307  	return nil
   308  }
   309