github.com/Gessiux/neatchain@v1.3.1/neatptc/handler.go

     1  // Copyright 2015 The go-ethereum Authors
     2  // This file is part of the go-ethereum library.
     3  //
     4  // The go-ethereum library is free software: you can redistribute it and/or modify
     5  // it under the terms of the GNU Lesser General Public License as published by
     6  // the Free Software Foundation, either version 3 of the License, or
     7  // (at your option) any later version.
     8  //
     9  // The go-ethereum library is distributed in the hope that it will be useful,
    10  // but WITHOUT ANY WARRANTY; without even the implied warranty of
    11  // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    12  // GNU Lesser General Public License for more details.
    13  //
    14  // You should have received a copy of the GNU Lesser General Public License
    15  // along with the go-ethereum library. If not, see <http://www.gnu.org/licenses/>.
    16  
    17  package neatptc
    18  
    19  import (
    20  	"encoding/json"
    21  	"errors"
    22  	"fmt"
    23  	"math"
    24  	"math/big"
    25  	"sync"
    26  	"sync/atomic"
    27  	"time"
    28  
    29  	"github.com/Gessiux/neatchain/chain/core/rawdb"
    30  
    31  	"github.com/Gessiux/neatchain/chain/consensus"
    32  	"github.com/Gessiux/neatchain/chain/core"
    33  	"github.com/Gessiux/neatchain/chain/core/types"
    34  	"github.com/Gessiux/neatchain/chain/log"
    35  	"github.com/Gessiux/neatchain/neatdb"
    36  	"github.com/Gessiux/neatchain/neatptc/downloader"
    37  	"github.com/Gessiux/neatchain/neatptc/fetcher"
    38  	"github.com/Gessiux/neatchain/network/p2p"
    39  	"github.com/Gessiux/neatchain/network/p2p/discover"
    40  	"github.com/Gessiux/neatchain/params"
    41  	"github.com/Gessiux/neatchain/utilities/common"
    42  	"github.com/Gessiux/neatchain/utilities/crypto"
    43  	"github.com/Gessiux/neatchain/utilities/event"
    44  	"github.com/Gessiux/neatchain/utilities/rlp"
    45  )
    46  
    47  const (
    48  	softResponseLimit = 2 * 1024 * 1024 // Target maximum size of returned blocks, headers or node data.
    49  	estHeaderRlpSize  = 500             // Approximate size of an RLP encoded block header
    50  
     51  	// txChanSize is the size of the channel listening for TxPreEvent.
     52  	// The number is referenced from the size of the tx pool.
    53  	txChanSize = 4096
    54  
     55  	// tx3PrfDtChainSize is the size of the channel listening for Tx3ProofDataEvent.
     56  	// The number is referenced from the size of the tx pool.
    57  	tx3PrfDtChainSize = 4096
    58  )
    59  
    60  var (
    61  	daoChallengeTimeout = 15 * time.Second // Time allowance for a node to reply to the DAO handshake challenge
    62  )
    63  
    64  // errIncompatibleConfig is returned if the requested protocols and configs are
    65  // not compatible (low protocol version restrictions and high requirements).
    66  var errIncompatibleConfig = errors.New("incompatible configuration")
    67  
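         // errResp wraps an errCode and a formatted description into a single error
         // value used to report protocol violations back to the caller.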
    68  func errResp(code errCode, format string, v ...interface{}) error {
    69  	return fmt.Errorf("%v - %v", code, fmt.Sprintf(format, v...))
    70  }
    71  
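         // ProtocolManager wires the NEAT Chain wire protocol to the local blockchain,
         // transaction pool and consensus engine: it tracks connected peers, answers
         // chain-data queries, and broadcasts new transactions and blocks.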
    72  type ProtocolManager struct {
    73  	networkId uint64
    74  
    75  	fastSync  uint32 // Flag whether fast sync is enabled (gets disabled if we already have blocks)
    76  	acceptTxs uint32 // Flag whether we're considered synchronised (enables transaction processing)
    77  
    78  	txpool      txPool
    79  	blockchain  *core.BlockChain
    80  	chainconfig *params.ChainConfig
    81  	maxPeers    int
    82  
    83  	downloader *downloader.Downloader
    84  	fetcher    *fetcher.Fetcher
    85  	peers      *peerSet
    86  
    87  	SubProtocols []p2p.Protocol
    88  
    89  	eventMux *event.TypeMux
    90  	txCh     chan core.TxPreEvent
    91  	txSub    event.Subscription
    92  
    93  	tx3PrfDtCh    chan core.Tx3ProofDataEvent
    94  	tx3PrfDtFeed  event.Feed
    95  	tx3PrfDtScope event.SubscriptionScope
    96  	tx3PrfDtSub   event.Subscription
    97  
    98  	minedBlockSub *event.TypeMuxSubscription
    99  
   100  	// channels for fetcher, syncer, txsyncLoop
   101  	newPeerCh   chan *peer
   102  	txsyncCh    chan *txsync
   103  	quitSync    chan struct{}
   104  	noMorePeers chan struct{}
   105  
   106  	// wait group is used for graceful shutdowns during downloading
   107  	// and processing
   108  	wg sync.WaitGroup
   109  
   110  	engine consensus.Engine
   111  
   112  	cch core.CrossChainHelper
   113  
   114  	logger         log.Logger
   115  	preimageLogger log.Logger
   116  }
   117  
    118  // NewProtocolManager returns a new NEAT Chain sub-protocol manager. The NEAT Chain
    119  // sub-protocol manages peers capable of communicating on the NEAT Chain network.
   120  func NewProtocolManager(config *params.ChainConfig, mode downloader.SyncMode, networkId uint64, mux *event.TypeMux, txpool txPool, engine consensus.Engine, blockchain *core.BlockChain, chaindb neatdb.Database, cch core.CrossChainHelper) (*ProtocolManager, error) {
   121  	// Create the protocol manager with the base fields
   122  	manager := &ProtocolManager{
   123  		networkId:      networkId,
   124  		eventMux:       mux,
   125  		txpool:         txpool,
   126  		blockchain:     blockchain,
   127  		chainconfig:    config,
   128  		peers:          newPeerSet(),
   129  		newPeerCh:      make(chan *peer),
   130  		noMorePeers:    make(chan struct{}),
   131  		txsyncCh:       make(chan *txsync),
   132  		quitSync:       make(chan struct{}),
   133  		engine:         engine,
   134  		cch:            cch,
   135  		logger:         config.ChainLogger,
   136  		preimageLogger: config.ChainLogger.New("module", "preimages"),
   137  	}
   138  
   139  	if handler, ok := manager.engine.(consensus.Handler); ok {
   140  		handler.SetBroadcaster(manager)
   141  	}
   142  
   143  	// Figure out whether to allow fast sync or not
   144  	if mode == downloader.FastSync && blockchain.CurrentBlock().NumberU64() > 0 {
   145  		manager.logger.Warn("Blockchain not empty, fast sync disabled")
   146  		mode = downloader.FullSync
   147  	}
   148  	if mode == downloader.FastSync {
   149  		manager.fastSync = uint32(1)
   150  	}
   151  	protocol := engine.Protocol()
   152  	// Initiate a sub-protocol for every implemented version we can handle
   153  	manager.SubProtocols = make([]p2p.Protocol, 0, len(protocol.Versions))
   154  	for i, version := range protocol.Versions {
   155  		// Skip protocol version if incompatible with the mode of operation
   156  		if mode == downloader.FastSync && version < consensus.Eth63 {
   157  			continue
   158  		}
   159  		// Compatible; initialise the sub-protocol
   160  		version := version // Closure for the run
   161  		manager.SubProtocols = append(manager.SubProtocols, p2p.Protocol{
   162  			Name:    protocol.Name,
   163  			Version: version,
   164  			Length:  protocol.Lengths[i],
   165  			Run: func(p *p2p.Peer, rw p2p.MsgReadWriter) error {
   166  				peer := manager.newPeer(int(version), p, rw)
   167  				select {
   168  				case manager.newPeerCh <- peer:
   169  					manager.wg.Add(1)
   170  					defer manager.wg.Done()
   171  					return manager.handle(peer)
   172  				case <-manager.quitSync:
   173  					return p2p.DiscQuitting
   174  				}
   175  			},
   176  			NodeInfo: func() interface{} {
   177  				return manager.NodeInfo()
   178  			},
   179  			PeerInfo: func(id discover.NodeID) interface{} {
   180  				if p := manager.peers.Peer(fmt.Sprintf("%x", id[:8])); p != nil {
   181  					return p.Info()
   182  				}
   183  				return nil
   184  			},
   185  		})
   186  	}
   187  	if len(manager.SubProtocols) == 0 {
   188  		return nil, errIncompatibleConfig
   189  	}
   190  	// Construct the different synchronisation mechanisms
   191  	manager.downloader = downloader.New(mode, chaindb, manager.eventMux, blockchain, nil, manager.removePeer, manager.logger)
   192  
   193  	validator := func(header *types.Header) error {
   194  		return engine.VerifyHeader(blockchain, header, true)
   195  	}
   196  	heighter := func() uint64 {
   197  		return blockchain.CurrentBlock().NumberU64()
   198  	}
   199  	inserter := func(blocks types.Blocks) (int, error) {
   200  		// If fast sync is running, deny importing weird blocks
   201  		if atomic.LoadUint32(&manager.fastSync) == 1 {
   202  			manager.logger.Warn("Discarded bad propagated block", "number", blocks[0].Number(), "hash", blocks[0].Hash())
   203  			return 0, nil
   204  		}
   205  		atomic.StoreUint32(&manager.acceptTxs, 1) // Mark initial sync done on any fetcher import
   206  		return manager.blockchain.InsertChain(blocks)
   207  	}
   208  	manager.fetcher = fetcher.New(blockchain.GetBlockByHash, validator, manager.BroadcastBlock, heighter, inserter, manager.removePeer)
   209  
   210  	return manager, nil
   211  }
   212  
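         // removePeer unregisters a peer from the downloader and the peer set, then
         // hard-disconnects it at the networking layer; it is a no-op if the peer is
         // already gone.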
   213  func (pm *ProtocolManager) removePeer(id string) {
   214  	// Short circuit if the peer was already removed
   215  	peer := pm.peers.Peer(id)
   216  	if peer == nil {
   217  		return
   218  	}
   219  	pm.logger.Debug("Removing NEAT Chain peer", "peer", id)
   220  
   221  	// Unregister the peer from the downloader and NEAT Chain peer set
   222  	pm.downloader.UnregisterPeer(id)
   223  	if err := pm.peers.Unregister(id); err != nil {
   224  		pm.logger.Error("Peer removal failed", "peer", id, "err", err)
   225  	}
    226  	// Hard disconnect at the networking layer. The peer is guaranteed to be
    227  	// non-nil here thanks to the early return above, so no extra check is needed.
    228  	peer.Peer.Disconnect(p2p.DiscUselessPeer)
   230  }
   231  
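         // Start launches the broadcast loops for transactions, TX3 proof data and
         // mined blocks, as well as the syncer and transaction-sync handlers; maxPeers
         // bounds the number of accepted connections.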
   232  func (pm *ProtocolManager) Start(maxPeers int) {
   233  	pm.maxPeers = maxPeers
   234  
   235  	// broadcast transactions
   236  	pm.txCh = make(chan core.TxPreEvent, txChanSize)
   237  	pm.txSub = pm.txpool.SubscribeTxPreEvent(pm.txCh)
   238  	go pm.txBroadcastLoop()
   239  
   240  	pm.tx3PrfDtCh = make(chan core.Tx3ProofDataEvent, tx3PrfDtChainSize)
   241  	pm.tx3PrfDtSub = pm.tx3PrfDtScope.Track(pm.tx3PrfDtFeed.Subscribe(pm.tx3PrfDtCh))
   242  	go pm.tx3PrfDtBroadcastLoop()
   243  
   244  	// broadcast mined blocks
   245  	pm.minedBlockSub = pm.eventMux.Subscribe(core.NewMinedBlockEvent{})
   246  	go pm.minedBroadcastLoop()
   247  
   248  	// start sync handlers
   249  	go pm.syncer()
   250  	go pm.txsyncLoop()
   251  }
   252  
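         // Stop tears down all subscriptions and sync loops, disconnects every
         // registered peer and waits for all handler goroutines to exit.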
   253  func (pm *ProtocolManager) Stop() {
   254  	pm.logger.Info("Stopping Neatio protocol")
   255  
   256  	pm.txSub.Unsubscribe()         // quits txBroadcastLoop
   257  	pm.tx3PrfDtSub.Unsubscribe()   // quits tx3PrfDtBroadcastLoop
   258  	pm.minedBlockSub.Unsubscribe() // quits blockBroadcastLoop
   259  
   260  	// Quit the sync loop.
   261  	// After this send has completed, no new peers will be accepted.
   262  	pm.noMorePeers <- struct{}{}
   263  
   264  	// Quit fetcher, txsyncLoop.
   265  	close(pm.quitSync)
   266  
   267  	// Disconnect existing sessions.
   268  	// This also closes the gate for any new registrations on the peer set.
    269  	// Sessions which are already established but not added to pm.peers yet
   270  	// will exit when they try to register.
   271  	pm.peers.Close()
   272  
   273  	// Wait for all peer handler goroutines and the loops to come down.
   274  	pm.wg.Wait()
   275  
   276  	pm.logger.Info("Neatio protocol stopped")
   277  }
   278  
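         // newPeer wraps a raw devp2p peer and its (metered) message pipe into a
         // protocol-level peer.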
   279  func (pm *ProtocolManager) newPeer(pv int, p *p2p.Peer, rw p2p.MsgReadWriter) *peer {
   280  	return newPeer(pv, p, newMeteredMsgWriter(rw))
   281  }
   282  
    283  // handle is the callback invoked to manage the life cycle of a neatptc peer. When
   284  // this function terminates, the peer is disconnected.
   285  func (pm *ProtocolManager) handle(p *peer) error {
   286  	// Ignore maxPeers if this is a trusted peer
   287  	if pm.peers.Len() >= pm.maxPeers && !p.Peer.Info().Network.Trusted {
   288  		return p2p.DiscTooManyPeers
   289  	}
   290  	p.Log().Debug("NEAT Chain peer connected", "name", p.Name())
   291  
   292  	// Execute the NEAT Chain handshake
   293  	var (
   294  		genesis = pm.blockchain.Genesis()
   295  		head    = pm.blockchain.CurrentHeader()
   296  		hash    = head.Hash()
   297  		number  = head.Number.Uint64()
   298  		td      = pm.blockchain.GetTd(hash, number)
   299  	)
   300  	if err := p.Handshake(pm.networkId, td, hash, genesis.Hash()); err != nil {
   301  		p.Log().Debug("NEAT Chain handshake failed", "err", err)
   302  		return err
   303  	}
   304  	if rw, ok := p.rw.(*meteredMsgReadWriter); ok {
   305  		rw.Init(p.version)
   306  	}
   307  	// Register the peer locally
   308  	if err := pm.peers.Register(p); err != nil {
   309  		p.Log().Error("NEAT Chain peer registration failed", "err", err)
   310  		return err
   311  	}
   312  
   313  	defer func() {
   314  		pm.removePeer(p.id)
   315  		if handler, ok := pm.engine.(consensus.Handler); ok {
   316  			handler.RemovePeer(p)
   317  		}
   318  	}()
   319  
   320  	// Register the peer in the downloader. If the downloader considers it banned, we disconnect
   321  	if err := pm.downloader.RegisterPeer(p.id, p.version, p); err != nil {
   322  		return err
   323  	}
    324  	// Propagate existing transactions. New transactions appearing
   325  	// after this will be sent via broadcasts.
   326  	pm.syncTransactions(p)
   327  
   328  	// Add Peer to Consensus Engine
   329  	if handler, ok := pm.engine.(consensus.Handler); ok {
   330  		handler.AddPeer(p)
   331  	} else {
   332  		p.Log().Info("AddPeer not executed")
   333  	}
   334  
    335  	// Main loop: handle incoming messages until the peer drops or errors out.
   336  	for {
   337  		if err := pm.handleMsg(p); err != nil {
   338  			p.Log().Debug("NEAT Chain message handling failed", "err", err)
   339  			return err
   340  		}
   341  	}
   342  }
   343  
   344  // handleMsg is invoked whenever an inbound message is received from a remote
   345  // peer. The remote connection is torn down upon returning any error.
   346  func (pm *ProtocolManager) handleMsg(p *peer) error {
   347  	// Read the next message from the remote peer, and ensure it's fully consumed
   348  	msg, err := p.rw.ReadMsg()
   349  	if err != nil {
   350  		return err
   351  	}
   352  	if msg.Size > ProtocolMaxMsgSize {
   353  		return errResp(ErrMsgTooLarge, "%v > %v", msg.Size, ProtocolMaxMsgSize)
   354  	}
   355  	defer msg.Discard()
   356  
   357  	// Handle the message depending on its contents
   358  	switch {
   359  	// NeatChain Consensus Message
   360  	case msg.Code >= 0x20 && msg.Code <= 0x23:
   361  		if handler, ok := pm.engine.(consensus.Handler); ok {
   362  			var msgBytes []byte
   363  			if err := msg.Decode(&msgBytes); err != nil {
   364  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   365  			}
   366  			handler.HandleMsg(msg.Code, p, msgBytes)
   367  		}
   368  	case msg.Code == StatusMsg:
   369  		// Status messages should never arrive after the handshake
   370  		return errResp(ErrExtraStatusMsg, "uncontrolled status message")
   371  
   372  	// Block header query, collect the requested headers and reply
   373  	case msg.Code == GetBlockHeadersMsg:
   374  		// Decode the complex header query
   375  		var query getBlockHeadersData
   376  		if err := msg.Decode(&query); err != nil {
   377  			return errResp(ErrDecode, "%v: %v", msg, err)
   378  		}
   379  		hashMode := query.Origin.Hash != (common.Hash{})
   380  
    381  		// Gather headers until the fetch or network limits are reached
   382  		var (
   383  			bytes   common.StorageSize
   384  			headers []*types.Header
   385  			unknown bool
   386  		)
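         		// The query advances in strides of Skip+1 from Origin; e.g. a request of
         		// {Origin: 100, Amount: 3, Skip: 1, Reverse: false} yields headers 100, 102, 104.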
   387  		for !unknown && len(headers) < int(query.Amount) && bytes < softResponseLimit && len(headers) < downloader.MaxHeaderFetch {
   388  			// Retrieve the next header satisfying the query
   389  			var origin *types.Header
   390  			if hashMode {
   391  				origin = pm.blockchain.GetHeaderByHash(query.Origin.Hash)
   392  			} else {
   393  				origin = pm.blockchain.GetHeaderByNumber(query.Origin.Number)
   394  			}
   395  			if origin == nil {
   396  				break
   397  			}
   398  			number := origin.Number.Uint64()
   399  			headers = append(headers, origin)
   400  			bytes += estHeaderRlpSize
   401  
   402  			// Advance to the next header of the query
   403  			switch {
   404  			case query.Origin.Hash != (common.Hash{}) && query.Reverse:
   405  				// Hash based traversal towards the genesis block
   406  				for i := 0; i < int(query.Skip)+1; i++ {
   407  					if header := pm.blockchain.GetHeader(query.Origin.Hash, number); header != nil {
   408  						query.Origin.Hash = header.ParentHash
   409  						number--
   410  					} else {
   411  						unknown = true
   412  						break
   413  					}
   414  				}
   415  			case query.Origin.Hash != (common.Hash{}) && !query.Reverse:
   416  				// Hash based traversal towards the leaf block
   417  				var (
   418  					current = origin.Number.Uint64()
   419  					next    = current + query.Skip + 1
   420  				)
   421  				if next <= current {
   422  					infos, _ := json.MarshalIndent(p.Peer.Info(), "", "  ")
   423  					p.Log().Warn("GetBlockHeaders skip overflow attack", "current", current, "skip", query.Skip, "next", next, "attacker", infos)
   424  					unknown = true
   425  				} else {
   426  					if header := pm.blockchain.GetHeaderByNumber(next); header != nil {
   427  						if pm.blockchain.GetBlockHashesFromHash(header.Hash(), query.Skip+1)[query.Skip] == query.Origin.Hash {
   428  							query.Origin.Hash = header.Hash()
   429  						} else {
   430  							unknown = true
   431  						}
   432  					} else {
   433  						unknown = true
   434  					}
   435  				}
   436  			case query.Reverse:
   437  				// Number based traversal towards the genesis block
   438  				if query.Origin.Number >= query.Skip+1 {
   439  					query.Origin.Number -= query.Skip + 1
   440  				} else {
   441  					unknown = true
   442  				}
   443  
   444  			case !query.Reverse:
   445  				// Number based traversal towards the leaf block
   446  				query.Origin.Number += query.Skip + 1
   447  			}
   448  		}
   449  		return p.SendBlockHeaders(headers)
   450  
   451  	case msg.Code == BlockHeadersMsg:
    452  		// A batch of headers arrived in response to one of our previous requests
   453  		var headers []*types.Header
   454  		if err := msg.Decode(&headers); err != nil {
   455  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   456  		}
   457  
   458  		// Filter out any explicitly requested headers, deliver the rest to the downloader
   459  		filter := len(headers) == 1
   460  		if filter {
    461  			// Regardless of the fork checks, send the header to the fetcher just in case
   462  			headers = pm.fetcher.FilterHeaders(p.id, headers, time.Now())
   463  		}
   464  		if len(headers) > 0 || !filter {
   465  			err := pm.downloader.DeliverHeaders(p.id, headers)
   466  			if err != nil {
   467  				pm.logger.Debug("Failed to deliver headers", "err", err)
   468  			}
   469  		}
   470  
   471  	case msg.Code == GetBlockBodiesMsg:
   472  		// Decode the retrieval message
   473  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   474  		if _, err := msgStream.List(); err != nil {
   475  			return err
   476  		}
    477  		// Gather blocks until the fetch or network limits are reached
   478  		var (
   479  			hash   common.Hash
   480  			bytes  int
   481  			bodies []rlp.RawValue
   482  		)
   483  		for bytes < softResponseLimit && len(bodies) < downloader.MaxBlockFetch {
   484  			// Retrieve the hash of the next block
   485  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   486  				break
   487  			} else if err != nil {
   488  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   489  			}
   490  			// Retrieve the requested block body, stopping if enough was found
   491  			if data := pm.blockchain.GetBodyRLP(hash); len(data) != 0 {
   492  				bodies = append(bodies, data)
   493  				bytes += len(data)
   494  			}
   495  		}
   496  		return p.SendBlockBodiesRLP(bodies)
   497  
   498  	case msg.Code == BlockBodiesMsg:
    499  		// A batch of block bodies arrived in response to one of our previous requests
   500  		var request blockBodiesData
   501  		if err := msg.Decode(&request); err != nil {
   502  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   503  		}
   504  		// Deliver them all to the downloader for queuing
    505  		transactions := make([][]*types.Transaction, len(request))
    506  		uncles := make([][]*types.Header, len(request))
    507  
    508  		for i, body := range request {
    509  			transactions[i] = body.Transactions
    510  			uncles[i] = body.Uncles
    511  		}
    512  		// Filter out any explicitly requested bodies, deliver the rest to the downloader
    513  		filter := len(transactions) > 0 || len(uncles) > 0
    514  		if filter {
    515  			transactions, uncles = pm.fetcher.FilterBodies(p.id, transactions, uncles, time.Now())
    516  		}
    517  		if len(transactions) > 0 || len(uncles) > 0 || !filter {
    518  			err := pm.downloader.DeliverBodies(p.id, transactions, uncles)
   519  			if err != nil {
   520  				pm.logger.Debug("Failed to deliver bodies", "err", err)
   521  			}
   522  		}
   523  
   524  	case p.version >= consensus.Eth63 && msg.Code == GetNodeDataMsg:
   525  		// Decode the retrieval message
   526  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   527  		if _, err := msgStream.List(); err != nil {
   528  			return err
   529  		}
    530  		// Gather state data until the fetch or network limits are reached
   531  		var (
   532  			hash  common.Hash
   533  			bytes int
   534  			data  [][]byte
   535  		)
   536  		for bytes < softResponseLimit && len(data) < downloader.MaxStateFetch {
   537  			// Retrieve the hash of the next state entry
   538  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   539  				break
   540  			} else if err != nil {
   541  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   542  			}
   543  			// Retrieve the requested state entry, stopping if enough was found
   544  			if entry, err := pm.blockchain.TrieNode(hash); err == nil {
   545  				data = append(data, entry)
   546  				bytes += len(entry)
   547  			}
   548  		}
   549  		return p.SendNodeData(data)
   550  
   551  	case p.version >= consensus.Eth63 && msg.Code == NodeDataMsg:
    552  		// A batch of node state data arrived in response to one of our previous requests
   553  		var data [][]byte
   554  		if err := msg.Decode(&data); err != nil {
   555  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   556  		}
   557  		// Deliver all to the downloader
   558  		if err := pm.downloader.DeliverNodeData(p.id, data); err != nil {
   559  			pm.logger.Debug("Failed to deliver node state data", "err", err)
   560  		}
   561  
   562  	case p.version >= consensus.Eth63 && msg.Code == GetReceiptsMsg:
   563  		// Decode the retrieval message
   564  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   565  		if _, err := msgStream.List(); err != nil {
   566  			return err
   567  		}
    568  		// Gather receipts until the fetch or network limits are reached
   569  		var (
   570  			hash     common.Hash
   571  			bytes    int
   572  			receipts []rlp.RawValue
   573  		)
   574  		for bytes < softResponseLimit && len(receipts) < downloader.MaxReceiptFetch {
   575  			// Retrieve the hash of the next block
   576  			if err := msgStream.Decode(&hash); err == rlp.EOL {
   577  				break
   578  			} else if err != nil {
   579  				return errResp(ErrDecode, "msg %v: %v", msg, err)
   580  			}
   581  			// Retrieve the requested block's receipts, skipping if unknown to us
   582  			results := pm.blockchain.GetReceiptsByHash(hash)
   583  			if results == nil {
   584  				if header := pm.blockchain.GetHeaderByHash(hash); header == nil || header.ReceiptHash != types.EmptyRootHash {
   585  					continue
   586  				}
   587  			}
   588  			// If known, encode and queue for response packet
   589  			if encoded, err := rlp.EncodeToBytes(results); err != nil {
   590  				pm.logger.Error("Failed to encode receipt", "err", err)
   591  			} else {
   592  				receipts = append(receipts, encoded)
   593  				bytes += len(encoded)
   594  			}
   595  		}
   596  		return p.SendReceiptsRLP(receipts)
   597  
   598  	case p.version >= consensus.Eth63 && msg.Code == ReceiptsMsg:
    599  		// A batch of receipts arrived in response to one of our previous requests
   600  		var receipts [][]*types.Receipt
   601  		if err := msg.Decode(&receipts); err != nil {
   602  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   603  		}
   604  		// Deliver all to the downloader
   605  		if err := pm.downloader.DeliverReceipts(p.id, receipts); err != nil {
   606  			pm.logger.Debug("Failed to deliver receipts", "err", err)
   607  		}
   608  
   609  	case msg.Code == NewBlockHashesMsg:
   610  		var announces newBlockHashesData
   611  		if err := msg.Decode(&announces); err != nil {
   612  			return errResp(ErrDecode, "%v: %v", msg, err)
   613  		}
   614  		// Mark the hashes as present at the remote node
   615  		for _, block := range announces {
   616  			p.MarkBlock(block.Hash)
   617  		}
   618  		// Schedule all the unknown hashes for retrieval
   619  		unknown := make(newBlockHashesData, 0, len(announces))
   620  		for _, block := range announces {
   621  			if !pm.blockchain.HasBlock(block.Hash, block.Number) {
   622  				unknown = append(unknown, block)
   623  			}
   624  		}
   625  		for _, block := range unknown {
   626  			pm.fetcher.Notify(p.id, block.Hash, block.Number, time.Now(), p.RequestOneHeader, p.RequestBodies)
   627  		}
   628  
   629  	case msg.Code == NewBlockMsg:
   630  		// Retrieve and decode the propagated block
   631  		var request newBlockData
   632  		if err := msg.Decode(&request); err != nil {
   633  			return errResp(ErrDecode, "%v: %v", msg, err)
   634  		}
   635  		request.Block.ReceivedAt = msg.ReceivedAt
   636  		request.Block.ReceivedFrom = p
   637  
   638  		// Mark the peer as owning the block and schedule it for import
   639  		p.MarkBlock(request.Block.Hash())
   640  		pm.fetcher.Enqueue(p.id, request.Block)
   641  
    642  		// Assuming the block is importable by the peer, though possibly not yet imported,
   643  		// calculate the head hash and TD that the peer truly must have.
   644  		var (
   645  			trueHead = request.Block.ParentHash()
   646  			trueTD   = new(big.Int).Sub(request.TD, request.Block.Difficulty())
   647  		)
    648  		// Update the peer's total difficulty if it is better than the previous
   649  		if _, td := p.Head(); trueTD.Cmp(td) > 0 {
   650  			p.SetHead(trueHead, trueTD)
   651  
   652  			// Schedule a sync if above ours. Note, this will not fire a sync for a gap of
    653  			// a single block (as the true TD is below the propagated block), however this
   654  			// scenario should easily be covered by the fetcher.
   655  			currentBlock := pm.blockchain.CurrentBlock()
   656  			if trueTD.Cmp(pm.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64())) > 0 {
   657  				go pm.synchronise(p)
   658  			}
   659  		}
   660  
   661  	case msg.Code == TxMsg:
   662  		// Transactions arrived, make sure we have a valid and fresh chain to handle them
   663  		if atomic.LoadUint32(&pm.acceptTxs) == 0 {
   664  			break
   665  		}
   666  		// Transactions can be processed, parse all of them and deliver to the pool
   667  		var txs []*types.Transaction
   668  		if err := msg.Decode(&txs); err != nil {
   669  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   670  		}
   671  		for i, tx := range txs {
   672  			// Validate and mark the remote transaction
   673  			if tx == nil {
   674  				return errResp(ErrDecode, "transaction %d is nil", i)
   675  			}
   676  			p.MarkTransaction(tx.Hash())
   677  		}
   678  		pm.txpool.AddRemotes(txs)
   679  
   680  	case msg.Code == TX3ProofDataMsg:
   681  		pm.logger.Debug("TX3ProofDataMsg received")
   682  		var proofDatas []*types.TX3ProofData
   683  		if err := msg.Decode(&proofDatas); err != nil {
   684  			pm.logger.Error("TX3ProofDataMsg decode error", "msg", msg, "error", err)
   685  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   686  		}
   687  		for _, proofData := range proofDatas {
   688  			// Validate and mark the remote TX3ProofData
   689  			if err := pm.cch.ValidateTX3ProofData(proofData); err != nil {
   690  				pm.logger.Error("TX3ProofDataMsg validate error", "msg", msg, "error", err)
   691  				return errResp(ErrTX3ValidateFail, "msg %v: %v", msg, err)
   692  			}
   693  			p.MarkTX3ProofData(proofData.Header.Hash())
   694  			// Write the remote TX3ProofData
   695  			if err := pm.cch.WriteTX3ProofData(proofData); err != nil {
   696  				pm.logger.Error("TX3ProofDataMsg write error", "msg", msg, "error", err)
   697  			}
   698  
    699  			go pm.tx3PrfDtFeed.Send(core.Tx3ProofDataEvent{Tx3PrfDt: proofData})
   700  		}
   701  
   702  	case msg.Code == GetPreImagesMsg:
   703  		pm.preimageLogger.Debug("GetPreImagesMsg received")
   704  		// Decode the retrieval message
   705  		msgStream := rlp.NewStream(msg.Payload, uint64(msg.Size))
   706  		if _, err := msgStream.List(); err != nil {
   707  			return err
   708  		}
    709  		// Gather preimages until the fetch or network limits are reached
   710  		var (
   711  			hash      common.Hash
   712  			bytes     int
   713  			preimages [][]byte
   714  		)
   715  
   716  		for bytes < softResponseLimit && len(preimages) < downloader.MaxReceiptFetch {
    717  			// Retrieve the hash of the next preimage
    718  			if err := msgStream.Decode(&hash); err == rlp.EOL {
    719  				break
    720  			} else if err != nil {
    721  				return errResp(ErrDecode, "msg %v: %v", msg, err)
    722  			}
    723  			// Retrieve the requested preimage, skipping if unknown to us
   725  			preimage := rawdb.ReadPreimage(pm.blockchain.StateCache().TrieDB().DiskDB(), hash)
   726  			// Double check the local preimage
   727  			if hash != crypto.Keccak256Hash(preimage) {
   728  				pm.preimageLogger.Errorf("Failed to pass the preimage double check. Request hash %x, Local Preimage %x", hash, preimage)
   729  				continue
   730  			}
   731  
   732  			preimages = append(preimages, preimage)
   733  			bytes += len(preimage)
   734  		}
   735  		return p.SendPreimagesRLP(preimages)
   736  
   737  	case msg.Code == PreImagesMsg:
   738  		pm.preimageLogger.Debug("PreImagesMsg received")
   739  		var preimages [][]byte
   740  		if err := msg.Decode(&preimages); err != nil {
   741  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   742  		}
   743  
   744  		preimagesMap := make(map[common.Hash][]byte)
   745  		for _, preimage := range preimages {
   746  			pm.preimageLogger.Debugf("PreImagesMsg received: %x", preimage)
   747  			preimagesMap[crypto.Keccak256Hash(preimage)] = common.CopyBytes(preimage)
   748  		}
   749  		if len(preimagesMap) > 0 {
   750  			db, _ := pm.blockchain.StateCache().TrieDB().DiskDB().(neatdb.Database)
   751  			rawdb.WritePreimages(db, preimagesMap)
    752  			pm.preimageLogger.Info("PreImages written into database")
   753  		}
   754  	case msg.Code == TrieNodeDataMsg:
   755  		pm.logger.Debug("TrieNodeDataMsg received")
   756  		var trienodes [][]byte
   757  		if err := msg.Decode(&trienodes); err != nil {
    758  		pm.logger.Warnf("Unable to decode TrieNodeData %v", err)
   759  			return errResp(ErrDecode, "msg %v: %v", msg, err)
   760  		}
   761  		pm.logger.Debugf("%d TrieNodeData received", len(trienodes))
   762  
   763  		db, _ := pm.blockchain.StateCache().TrieDB().DiskDB().(neatdb.Database)
   764  		for _, tnode := range trienodes {
   765  			thash := crypto.Keccak256Hash(tnode)
   766  			if has, herr := db.Has(thash.Bytes()); !has && herr == nil {
   767  				puterr := db.Put(thash.Bytes(), tnode)
   768  				if puterr == nil {
   769  					pm.logger.Debugf("Insert TrieNodeData %x", thash)
   770  				}
   771  			} else if has {
   772  				pm.logger.Debugf("TrieNodeData %x already existed", thash)
   773  			}
   774  		}
   775  	default:
   776  		return errResp(ErrInvalidMsgCode, "%v", msg.Code)
   777  	}
   778  	return nil
   779  }
   780  
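         // Enqueue hands a block received from the peer with the given id to the
         // fetcher for scheduling and import.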
   781  func (pm *ProtocolManager) Enqueue(id string, block *types.Block) {
   782  	pm.fetcher.Enqueue(id, block)
   783  }
   784  
    785  // BroadcastBlock will either propagate a block to a subset of its peers, or
    786  // will only announce its availability (depending on what's requested).
   787  func (pm *ProtocolManager) BroadcastBlock(block *types.Block, propagate bool) {
   788  	hash := block.Hash()
   789  	peers := pm.peers.PeersWithoutBlock(hash)
   790  
    791  	// If propagation is requested, send to a subset of the peers
   792  	if propagate {
   793  		// Calculate the TD of the block (it's not imported yet, so block.Td is not valid)
   794  		var td *big.Int
   795  		if parent := pm.blockchain.GetBlock(block.ParentHash(), block.NumberU64()-1); parent != nil {
   796  			td = new(big.Int).Add(block.Difficulty(), pm.blockchain.GetTd(block.ParentHash(), block.NumberU64()-1))
   797  		} else {
   798  			pm.logger.Error("Propagating dangling block", "number", block.Number(), "hash", hash)
   799  			return
   800  		}
   801  		// Send the block to a subset of our peers
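         		// (roughly sqrt(len(peers)) recipients, e.g. 4 of 16 peers; the rest
         		// typically learn of the block via the follow-up hash announcement)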
   802  		transfer := peers[:int(math.Sqrt(float64(len(peers))))]
   803  		for _, peer := range transfer {
   804  			peer.SendNewBlock(block, td)
   805  		}
   806  		pm.logger.Trace("Propagated block", "hash", hash, "recipients", len(transfer), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
   807  		return
   808  	}
    809  	// Otherwise if the block is indeed in our own chain, announce it
   810  	if pm.blockchain.HasBlock(hash, block.NumberU64()) {
   811  		for _, peer := range peers {
   812  			peer.SendNewBlockHashes([]common.Hash{hash}, []uint64{block.NumberU64()})
   813  		}
   814  		pm.logger.Trace("Announced block", "hash", hash, "recipients", len(peers), "duration", common.PrettyDuration(time.Since(block.ReceivedAt)))
   815  	}
   816  }
   817  
   818  // BroadcastTx will propagate a transaction to all peers which are not known to
   819  // already have the given transaction.
   820  func (pm *ProtocolManager) BroadcastTx(hash common.Hash, tx *types.Transaction) {
   821  	// Broadcast transaction to a batch of peers not knowing about it
   822  	peers := pm.peers.PeersWithoutTx(hash)
   823  	//FIXME include this again: peers = peers[:int(math.Sqrt(float64(len(peers))))]
   824  	for _, peer := range peers {
   825  		peer.SendTransactions(types.Transactions{tx})
   826  	}
   827  	pm.logger.Trace("Broadcast transaction", "hash", hash, "recipients", len(peers))
   828  }
   829  
   830  // BroadcastTX3ProofData will propagate a TX3ProofData to all peers which are not known to
   831  // already have the given TX3ProofData.
   832  func (pm *ProtocolManager) BroadcastTX3ProofData(hash common.Hash, proofData *types.TX3ProofData) {
   833  	// Broadcast TX3ProofData to a batch of peers not knowing about it
   834  	peers := pm.peers.PeersWithoutTX3ProofData(hash)
   835  	for _, peer := range peers {
   836  		peer.SendTX3ProofData([]*types.TX3ProofData{proofData})
   837  	}
   838  	pm.logger.Trace("Broadcast TX3ProofData", "hash", hash, "recipients", len(peers))
   839  }
   840  
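         // BroadcastMessage sends a message with the given code and payload to every
         // currently connected peer.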
   841  func (pm *ProtocolManager) BroadcastMessage(msgcode uint64, data interface{}) {
   842  	recipients := 0
   843  	for _, peer := range pm.peers.Peers() {
   844  		peer.Send(msgcode, data)
   845  		recipients++
   846  	}
   847  	pm.logger.Trace("Broadcast p2p message", "code", msgcode, "recipients", recipients, "msg", data)
   848  }
   849  
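         // TryFixBadPreimages scans all stored sha3 preimages, collects those whose
         // value no longer hashes back to its key, and asks the best peer for correct
         // copies.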
   850  func (pm *ProtocolManager) TryFixBadPreimages() {
   851  	// Record all preimages (Testing)
   852  	images := make(map[common.Hash][]byte)
   853  
   854  	var hashes []common.Hash
   855  
    856  	// Iterate over all stored sha3 preimages for checking
   857  	db, _ := pm.blockchain.StateCache().TrieDB().DiskDB().(neatdb.Database)
   858  	it := db.NewIteratorWithPrefix([]byte("secure-key-"))
   859  	for it.Next() {
   860  		keyHash := common.BytesToHash(it.Key())
   861  		valueHash := crypto.Keccak256Hash(it.Value())
   862  		if keyHash != valueHash {
    863  			// If the value's hash doesn't match the key hash, add it to the list and request a correct copy from another peer
   864  			hashes = append(hashes, keyHash)
   865  		}
   866  		// Add to all preimages (Testing)
   867  		images[keyHash] = common.CopyBytes(it.Value())
   868  	}
   869  	it.Release()
   870  
   871  	if len(hashes) > 0 {
   872  		pm.preimageLogger.Critf("Found %d Bad Preimage(s)", len(hashes))
   873  		pm.preimageLogger.Critf("Bad Preimages: %x", hashes)
   874  
   875  		// Print all preimages (Testing)
   876  		//pm.preimageLogger.Crit("All Preimage(s)")
   877  		//var list []common.Hash
   878  		//for k := range images {
   879  		//	list = append(list, k)
   880  		//}
   881  		//sort.Slice(list, func(i, j int) bool {
   882  		//	return bytes.Compare(list[i][:], list[j][:]) == 1
   883  		//})
   884  		//for _, k := range list {
   885  		//	pm.preimageLogger.Crit(k.Hex(), common.Bytes2Hex(images[k]))
   886  		//}
   887  
   888  		//panic("Stop the system when found bad preimages, for testing purpose")
   889  
    890  		if best := pm.peers.BestPeer(); best != nil { // guard against an empty peer set
    891  			best.RequestPreimages(hashes)
    892  		}
    893  	}
    894  }
   894  
   895  // Mined broadcast loop
   896  func (self *ProtocolManager) minedBroadcastLoop() {
    897  	// automatically stops when unsubscribed
   898  	for obj := range self.minedBlockSub.Chan() {
   899  		switch ev := obj.Data.(type) {
   900  		case core.NewMinedBlockEvent:
   901  			self.BroadcastBlock(ev.Block, true)  // First propagate block to peers
   902  			self.BroadcastBlock(ev.Block, false) // Only then announce to the rest
   903  		}
   904  	}
   905  }
   906  
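         // txBroadcastLoop forwards transactions entering the pool to the network
         // until the subscription is closed.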
   907  func (self *ProtocolManager) txBroadcastLoop() {
   908  	for {
   909  		select {
   910  		case event := <-self.txCh:
   911  			self.BroadcastTx(event.Tx.Hash(), event.Tx)
   912  
   913  		// Err() channel will be closed when unsubscribing.
   914  		case <-self.txSub.Err():
   915  			return
   916  		}
   917  	}
   918  }
   919  
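         // tx3PrfDtBroadcastLoop forwards TX3 proof data events to the network until
         // the subscription is closed.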
   920  func (self *ProtocolManager) tx3PrfDtBroadcastLoop() {
   921  	for {
   922  		select {
   923  		case event := <-self.tx3PrfDtCh:
   924  			self.BroadcastTX3ProofData(event.Tx3PrfDt.Header.Hash(), event.Tx3PrfDt)
   925  
   926  		// Err() channel will be closed when unsubscribing.
   927  		case <-self.tx3PrfDtSub.Err():
   928  			return
   929  		}
   930  	}
   931  }
   932  
   933  // NodeInfo represents a short summary of the NEAT Chain sub-protocol metadata
   934  // known about the host peer.
   935  type NodeInfo struct {
    936  	Network    uint64              `json:"network"`    // NEAT Chain network ID
   937  	Difficulty *big.Int            `json:"difficulty"` // Total difficulty of the host's blockchain
   938  	Genesis    common.Hash         `json:"genesis"`    // SHA3 hash of the host's genesis block
   939  	Config     *params.ChainConfig `json:"config"`     // Chain configuration for the fork rules
   940  	Head       common.Hash         `json:"head"`       // SHA3 hash of the host's best owned block
   941  }
   942  
   943  // NodeInfo retrieves some protocol metadata about the running host node.
   944  func (self *ProtocolManager) NodeInfo() *NodeInfo {
   945  	currentBlock := self.blockchain.CurrentBlock()
   946  	return &NodeInfo{
   947  		Network:    self.networkId,
   948  		Difficulty: self.blockchain.GetTd(currentBlock.Hash(), currentBlock.NumberU64()),
   949  		Genesis:    self.blockchain.Genesis().Hash(),
   950  		Config:     self.blockchain.Config(),
   951  		Head:       currentBlock.Hash(),
   952  	}
   953  }
   954  
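         // FindPeers maps the given target addresses to currently connected peers by
         // deriving each peer's address from its public key.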
   955  func (self *ProtocolManager) FindPeers(targets map[common.Address]bool) map[common.Address]consensus.Peer {
   956  	m := make(map[common.Address]consensus.Peer)
   957  	for _, p := range self.peers.Peers() {
   958  		pubKey, err := p.ID().Pubkey()
   959  		if err != nil {
   960  			continue
   961  		}
   962  		addr := crypto.PubkeyToAddress(*pubKey)
   963  		if targets[addr] {
   964  			m[addr] = p
   965  		}
   966  	}
   967  	return m
   968  }